Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (226 commits)
mtd: tests: annotate as DANGEROUS in Kconfig
mtd: tests: don't use mtd0 as a default
mtd: clean up usage of MTD_DOCPROBE_ADDRESS
jffs2: add compr=lzo and compr=zlib options
jffs2: implement mount option parsing and compression overriding
mtd: nand: initialize ops.mode
mtd: provide an alias for the redboot module name
mtd: m25p80: don't probe device which has status of 'disabled'
mtd: nand_h1900 never worked
mtd: Add DiskOnChip G3 support
mtd: m25p80: add EON flash EN25Q32B into spi flash id table
mtd: mark block device queue as non-rotational
mtd: r852: make r852_pm_ops static
mtd: m25p80: add support for at25df321a spi data flash
mtd: mxc_nand: preset_v1_v2: unlock all NAND flash blocks
mtd: nand: switch `check_pattern()' to standard `memcmp()'
mtd: nand: invalidate cache on unaligned reads
mtd: nand: do not scan bad blocks with NAND_BBT_NO_OOB set
mtd: nand: wait to set BBT version
mtd: nand: scrub BBT on ECC errors
...

Fix up trivial conflicts:
- arch/arm/mach-at91/board-usb-a9260.c
Merged into board-usb-a926x.c
- drivers/mtd/maps/lantiq-flash.c
add_mtd_partitions -> mtd_device_register vs changed to use
mtd_device_parse_register.

+7578 -4041
+1 -18
Documentation/DocBook/mtdnand.tmpl
··· 572 572 </para> 573 573 <para> 574 574 The simplest way to activate the FLASH based bad block table support 575 - is to set the option NAND_USE_FLASH_BBT in the option field of 575 + is to set the option NAND_BBT_USE_FLASH in the bbt_option field of 576 576 the nand chip structure before calling nand_scan(). For AG-AND 577 577 chips is this done by default. 578 578 This activates the default FLASH based bad block table functionality ··· 772 772 If the spare area buffer is NULL then only the ECC placement is 773 773 done according to the default builtin scheme. 774 774 </para> 775 - </sect2> 776 - <sect2 id="User_space_placement_selection"> 777 - <title>User space placement selection</title> 778 - <para> 779 - All non ecc functions like mtd->read and mtd->write use an internal 780 - structure, which can be set by an ioctl. This structure is preset 781 - to the autoplacement default. 782 - <programlisting> 783 - ioctl (fd, MEMSETOOBSEL, oobsel); 784 - </programlisting> 785 - oobsel is a pointer to a user supplied structure of type 786 - nand_oobconfig. The contents of this structure must match the 787 - criteria of the filesystem, which will be used. See an example in utils/nandwrite.c. 788 - </para> 789 775 </sect2> 790 776 </sect1> 791 777 <sect1 id="Spare_area_autoplacement_default"> ··· 1144 1158 These constants are defined in nand.h. They are ored together to describe 1145 1159 the functionality. 1146 1160 <programlisting> 1147 - /* Use a flash based bad block table. This option is parsed by the 1148 - * default bad block table function (nand_default_bbt). */ 1149 - #define NAND_USE_FLASH_BBT 0x00010000 1150 1161 /* The hw ecc generator provides a syndrome instead a ecc value on read 1151 1162 * This can only work if we have the ecc bytes directly behind the 1152 1163 * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
+14
Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
··· 1 + * Atmel Data Flash 2 + 3 + Required properties: 4 + - compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash". 5 + 6 + Example: 7 + 8 + flash@1 { 9 + #address-cells = <1>; 10 + #size-cells = <1>; 11 + compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash"; 12 + spi-max-frequency = <25000000>; 13 + reg = <1>; 14 + };
+2 -7
arch/arm/mach-at91/board-afeb-9260v1.c
··· 130 130 }, 131 131 }; 132 132 133 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 134 - { 135 - *num_partitions = ARRAY_SIZE(afeb9260_nand_partition); 136 - return afeb9260_nand_partition; 137 - } 138 - 139 133 static struct atmel_nand_data __initdata afeb9260_nand_data = { 140 134 .ale = 21, 141 135 .cle = 22, 142 136 .rdy_pin = AT91_PIN_PC13, 143 137 .enable_pin = AT91_PIN_PC14, 144 - .partition_info = nand_partitions, 145 138 .bus_width_16 = 0, 139 + .parts = afeb9260_nand_partition, 140 + .num_parts = ARRAY_SIZE(afeb9260_nand_partition), 146 141 }; 147 142 148 143
+2 -7
arch/arm/mach-at91/board-cam60.c
··· 132 132 }, 133 133 }; 134 134 135 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 136 - { 137 - *num_partitions = ARRAY_SIZE(cam60_nand_partition); 138 - return cam60_nand_partition; 139 - } 140 - 141 135 static struct atmel_nand_data __initdata cam60_nand_data = { 142 136 .ale = 21, 143 137 .cle = 22, 144 138 // .det_pin = ... not there 145 139 .rdy_pin = AT91_PIN_PA9, 146 140 .enable_pin = AT91_PIN_PA7, 147 - .partition_info = nand_partitions, 141 + .parts = cam60_nand_partition, 142 + .num_parts = ARRAY_SIZE(cam60_nand_partition), 148 143 }; 149 144 150 145 static struct sam9_smc_config __initdata cam60_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-cap9adk.c
··· 169 169 }, 170 170 }; 171 171 172 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 173 - { 174 - *num_partitions = ARRAY_SIZE(cap9adk_nand_partitions); 175 - return cap9adk_nand_partitions; 176 - } 177 - 178 172 static struct atmel_nand_data __initdata cap9adk_nand_data = { 179 173 .ale = 21, 180 174 .cle = 22, 181 175 // .det_pin = ... not connected 182 176 // .rdy_pin = ... not connected 183 177 .enable_pin = AT91_PIN_PD15, 184 - .partition_info = nand_partitions, 178 + .parts = cap9adk_nand_partitions, 179 + .num_parts = ARRAY_SIZE(cap9adk_nand_partitions), 185 180 }; 186 181 187 182 static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-kb9202.c
··· 97 97 }, 98 98 }; 99 99 100 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 101 - { 102 - *num_partitions = ARRAY_SIZE(kb9202_nand_partition); 103 - return kb9202_nand_partition; 104 - } 105 - 106 100 static struct atmel_nand_data __initdata kb9202_nand_data = { 107 101 .ale = 22, 108 102 .cle = 21, 109 103 // .det_pin = ... not there 110 104 .rdy_pin = AT91_PIN_PC29, 111 105 .enable_pin = AT91_PIN_PC28, 112 - .partition_info = nand_partitions, 106 + .parts = kb9202_nand_partition, 107 + .num_parts = ARRAY_SIZE(kb9202_nand_partition), 113 108 }; 114 109 115 110 static void __init kb9202_board_init(void)
+2 -7
arch/arm/mach-at91/board-neocore926.c
··· 182 182 }, 183 183 }; 184 184 185 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 186 - { 187 - *num_partitions = ARRAY_SIZE(neocore926_nand_partition); 188 - return neocore926_nand_partition; 189 - } 190 - 191 185 static struct atmel_nand_data __initdata neocore926_nand_data = { 192 186 .ale = 21, 193 187 .cle = 22, 194 188 .rdy_pin = AT91_PIN_PB19, 195 189 .rdy_pin_active_low = 1, 196 190 .enable_pin = AT91_PIN_PD15, 197 - .partition_info = nand_partitions, 191 + .parts = neocore926_nand_partition, 192 + .num_parts = ARRAY_SIZE(neocore926_nand_partition), 198 193 }; 199 194 200 195 static struct sam9_smc_config __initdata neocore926_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-qil-a9260.c
··· 130 130 }, 131 131 }; 132 132 133 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 134 - { 135 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 136 - return ek_nand_partition; 137 - } 138 - 139 133 static struct atmel_nand_data __initdata ek_nand_data = { 140 134 .ale = 21, 141 135 .cle = 22, 142 136 // .det_pin = ... not connected 143 137 .rdy_pin = AT91_PIN_PC13, 144 138 .enable_pin = AT91_PIN_PC14, 145 - .partition_info = nand_partitions, 139 + .parts = ek_nand_partition, 140 + .num_parts = ARRAY_SIZE(ek_nand_partition), 146 141 }; 147 142 148 143 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-rm9200dk.c
··· 138 138 }, 139 139 }; 140 140 141 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 142 - { 143 - *num_partitions = ARRAY_SIZE(dk_nand_partition); 144 - return dk_nand_partition; 145 - } 146 - 147 141 static struct atmel_nand_data __initdata dk_nand_data = { 148 142 .ale = 22, 149 143 .cle = 21, 150 144 .det_pin = AT91_PIN_PB1, 151 145 .rdy_pin = AT91_PIN_PC2, 152 146 // .enable_pin = ... not there 153 - .partition_info = nand_partitions, 147 + .parts = dk_nand_partition, 148 + .num_parts = ARRAY_SIZE(dk_nand_partition), 154 149 }; 155 150 156 151 #define DK_FLASH_BASE AT91_CHIPSELECT_0
+2 -7
arch/arm/mach-at91/board-sam9-l9260.c
··· 131 131 }, 132 132 }; 133 133 134 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 135 - { 136 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 137 - return ek_nand_partition; 138 - } 139 - 140 134 static struct atmel_nand_data __initdata ek_nand_data = { 141 135 .ale = 21, 142 136 .cle = 22, 143 137 // .det_pin = ... not connected 144 138 .rdy_pin = AT91_PIN_PC13, 145 139 .enable_pin = AT91_PIN_PC14, 146 - .partition_info = nand_partitions, 140 + .parts = ek_nand_partition, 141 + .num_parts = ARRAY_SIZE(ek_nand_partition), 147 142 }; 148 143 149 144 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9260ek.c
··· 173 173 }, 174 174 }; 175 175 176 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 177 - { 178 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 179 - return ek_nand_partition; 180 - } 181 - 182 176 static struct atmel_nand_data __initdata ek_nand_data = { 183 177 .ale = 21, 184 178 .cle = 22, 185 179 // .det_pin = ... not connected 186 180 .rdy_pin = AT91_PIN_PC13, 187 181 .enable_pin = AT91_PIN_PC14, 188 - .partition_info = nand_partitions, 182 + .parts = ek_nand_partition, 183 + .num_parts = ARRAY_SIZE(ek_nand_partition), 189 184 }; 190 185 191 186 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9261ek.c
··· 179 179 }, 180 180 }; 181 181 182 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 183 - { 184 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 185 - return ek_nand_partition; 186 - } 187 - 188 182 static struct atmel_nand_data __initdata ek_nand_data = { 189 183 .ale = 22, 190 184 .cle = 21, 191 185 // .det_pin = ... not connected 192 186 .rdy_pin = AT91_PIN_PC15, 193 187 .enable_pin = AT91_PIN_PC14, 194 - .partition_info = nand_partitions, 188 + .parts = ek_nand_partition, 189 + .num_parts = ARRAY_SIZE(ek_nand_partition), 195 190 }; 196 191 197 192 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9263ek.c
··· 180 180 }, 181 181 }; 182 182 183 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 184 - { 185 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 186 - return ek_nand_partition; 187 - } 188 - 189 183 static struct atmel_nand_data __initdata ek_nand_data = { 190 184 .ale = 21, 191 185 .cle = 22, 192 186 // .det_pin = ... not connected 193 187 .rdy_pin = AT91_PIN_PA22, 194 188 .enable_pin = AT91_PIN_PD15, 195 - .partition_info = nand_partitions, 189 + .parts = ek_nand_partition, 190 + .num_parts = ARRAY_SIZE(ek_nand_partition), 196 191 }; 197 192 198 193 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9g20ek.c
··· 157 157 }, 158 158 }; 159 159 160 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 161 - { 162 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 163 - return ek_nand_partition; 164 - } 165 - 166 160 /* det_pin is not connected */ 167 161 static struct atmel_nand_data __initdata ek_nand_data = { 168 162 .ale = 21, 169 163 .cle = 22, 170 164 .rdy_pin = AT91_PIN_PC13, 171 165 .enable_pin = AT91_PIN_PC14, 172 - .partition_info = nand_partitions, 166 + .parts = ek_nand_partition, 167 + .num_parts = ARRAY_SIZE(ek_nand_partition), 173 168 }; 174 169 175 170 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9m10g45ek.c
··· 137 137 }, 138 138 }; 139 139 140 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 141 - { 142 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 143 - return ek_nand_partition; 144 - } 145 - 146 140 /* det_pin is not connected */ 147 141 static struct atmel_nand_data __initdata ek_nand_data = { 148 142 .ale = 21, 149 143 .cle = 22, 150 144 .rdy_pin = AT91_PIN_PC8, 151 145 .enable_pin = AT91_PIN_PC14, 152 - .partition_info = nand_partitions, 146 + .parts = ek_nand_partition, 147 + .num_parts = ARRAY_SIZE(ek_nand_partition), 153 148 }; 154 149 155 150 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-sam9rlek.c
··· 88 88 }, 89 89 }; 90 90 91 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 92 - { 93 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 94 - return ek_nand_partition; 95 - } 96 - 97 91 static struct atmel_nand_data __initdata ek_nand_data = { 98 92 .ale = 21, 99 93 .cle = 22, 100 94 // .det_pin = ... not connected 101 95 .rdy_pin = AT91_PIN_PD17, 102 96 .enable_pin = AT91_PIN_PB6, 103 - .partition_info = nand_partitions, 97 + .parts = ek_nand_partition, 98 + .num_parts = ARRAY_SIZE(ek_nand_partition), 104 99 }; 105 100 106 101 static struct sam9_smc_config __initdata ek_nand_smc_config = {
+2 -8
arch/arm/mach-at91/board-snapper9260.c
··· 97 97 }, 98 98 }; 99 99 100 - static struct mtd_partition * __init 101 - snapper9260_nand_partition_info(int size, int *num_partitions) 102 - { 103 - *num_partitions = ARRAY_SIZE(snapper9260_nand_partitions); 104 - return snapper9260_nand_partitions; 105 - } 106 - 107 100 static struct atmel_nand_data __initdata snapper9260_nand_data = { 108 101 .ale = 21, 109 102 .cle = 22, 110 103 .rdy_pin = AT91_PIN_PC13, 111 - .partition_info = snapper9260_nand_partition_info, 104 + .parts = snapper9260_nand_partitions, 105 + .num_parts = ARRAY_SIZE(snapper9260_nand_partitions), 112 106 .bus_width_16 = 0, 113 107 }; 114 108
+2 -7
arch/arm/mach-at91/board-usb-a926x.c
··· 190 190 } 191 191 }; 192 192 193 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 194 - { 195 - *num_partitions = ARRAY_SIZE(ek_nand_partition); 196 - return ek_nand_partition; 197 - } 198 - 199 193 static struct atmel_nand_data __initdata ek_nand_data = { 200 194 .ale = 21, 201 195 .cle = 22, 202 196 // .det_pin = ... not connected 203 197 .rdy_pin = AT91_PIN_PA22, 204 198 .enable_pin = AT91_PIN_PD15, 205 - .partition_info = nand_partitions, 199 + .parts = ek_nand_partition, 200 + .num_parts = ARRAY_SIZE(ek_nand_partition), 206 201 }; 207 202 208 203 static struct sam9_smc_config __initdata usb_a9260_nand_smc_config = {
+2 -7
arch/arm/mach-at91/board-yl-9200.c
··· 172 172 } 173 173 }; 174 174 175 - static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) 176 - { 177 - *num_partitions = ARRAY_SIZE(yl9200_nand_partition); 178 - return yl9200_nand_partition; 179 - } 180 - 181 175 static struct atmel_nand_data __initdata yl9200_nand_data = { 182 176 .ale = 6, 183 177 .cle = 7, 184 178 // .det_pin = ... not connected 185 179 .rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */ 186 180 .enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */ 187 - .partition_info = nand_partitions, 181 + .parts = yl9200_nand_partition, 182 + .num_parts = ARRAY_SIZE(yl9200_nand_partition), 188 183 }; 189 184 190 185 /*
+2 -1
arch/arm/mach-at91/include/mach/board.h
··· 117 117 u8 ale; /* address line number connected to ALE */ 118 118 u8 cle; /* address line number connected to CLE */ 119 119 u8 bus_width_16; /* buswidth is 16 bit */ 120 - struct mtd_partition* (*partition_info)(int, int*); 120 + struct mtd_partition *parts; 121 + unsigned int num_parts; 121 122 }; 122 123 extern void __init at91_add_device_nand(struct atmel_nand_data *data); 123 124
+1 -1
arch/arm/mach-davinci/board-da830-evm.c
··· 377 377 .nr_parts = ARRAY_SIZE(da830_evm_nand_partitions), 378 378 .ecc_mode = NAND_ECC_HW, 379 379 .ecc_bits = 4, 380 - .options = NAND_USE_FLASH_BBT, 380 + .bbt_options = NAND_BBT_USE_FLASH, 381 381 .bbt_td = &da830_evm_nand_bbt_main_descr, 382 382 .bbt_md = &da830_evm_nand_bbt_mirror_descr, 383 383 .timing = &da830_evm_nandflash_timing,
+1 -1
arch/arm/mach-davinci/board-da850-evm.c
··· 256 256 .nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition), 257 257 .ecc_mode = NAND_ECC_HW, 258 258 .ecc_bits = 4, 259 - .options = NAND_USE_FLASH_BBT, 259 + .bbt_options = NAND_BBT_USE_FLASH, 260 260 .timing = &da850_evm_nandflash_timing, 261 261 }; 262 262
+1 -1
arch/arm/mach-davinci/board-dm355-evm.c
··· 77 77 .parts = davinci_nand_partitions, 78 78 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 79 79 .ecc_mode = NAND_ECC_HW, 80 - .options = NAND_USE_FLASH_BBT, 80 + .bbt_options = NAND_BBT_USE_FLASH, 81 81 .ecc_bits = 4, 82 82 }; 83 83
+1 -1
arch/arm/mach-davinci/board-dm355-leopard.c
··· 74 74 .parts = davinci_nand_partitions, 75 75 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 76 76 .ecc_mode = NAND_ECC_HW_SYNDROME, 77 - .options = NAND_USE_FLASH_BBT, 77 + .bbt_options = NAND_BBT_USE_FLASH, 78 78 }; 79 79 80 80 static struct resource davinci_nand_resources[] = {
+1 -1
arch/arm/mach-davinci/board-dm365-evm.c
··· 139 139 .parts = davinci_nand_partitions, 140 140 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 141 141 .ecc_mode = NAND_ECC_HW, 142 - .options = NAND_USE_FLASH_BBT, 142 + .bbt_options = NAND_BBT_USE_FLASH, 143 143 .ecc_bits = 4, 144 144 }; 145 145
+1 -1
arch/arm/mach-davinci/board-dm644x-evm.c
··· 151 151 .parts = davinci_evm_nandflash_partition, 152 152 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), 153 153 .ecc_mode = NAND_ECC_HW, 154 - .options = NAND_USE_FLASH_BBT, 154 + .bbt_options = NAND_BBT_USE_FLASH, 155 155 .timing = &davinci_evm_nandflash_timing, 156 156 }; 157 157
+2 -1
arch/arm/mach-davinci/board-mityomapl138.c
··· 396 396 .parts = mityomapl138_nandflash_partition, 397 397 .nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition), 398 398 .ecc_mode = NAND_ECC_HW, 399 - .options = NAND_USE_FLASH_BBT | NAND_BUSWIDTH_16, 399 + .bbt_options = NAND_BBT_USE_FLASH, 400 + .options = NAND_BUSWIDTH_16, 400 401 .ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */ 401 402 }; 402 403
+1 -1
arch/arm/mach-davinci/board-neuros-osd2.c
··· 87 87 .parts = davinci_ntosd2_nandflash_partition, 88 88 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), 89 89 .ecc_mode = NAND_ECC_HW, 90 - .options = NAND_USE_FLASH_BBT, 90 + .bbt_options = NAND_BBT_USE_FLASH, 91 91 }; 92 92 93 93 static struct resource davinci_ntosd2_nandflash_resource[] = {
+1 -1
arch/arm/mach-davinci/board-tnetv107x-evm.c
··· 144 144 .parts = nand_partitions, 145 145 .nr_parts = ARRAY_SIZE(nand_partitions), 146 146 .ecc_mode = NAND_ECC_HW, 147 - .options = NAND_USE_FLASH_BBT, 147 + .bbt_options = NAND_BBT_USE_FLASH, 148 148 .ecc_bits = 1, 149 149 }; 150 150
+3 -1
arch/arm/mach-davinci/include/mach/nand.h
··· 74 74 nand_ecc_modes_t ecc_mode; 75 75 u8 ecc_bits; 76 76 77 - /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */ 77 + /* e.g. NAND_BUSWIDTH_16 */ 78 78 unsigned options; 79 + /* e.g. NAND_BBT_USE_FLASH */ 80 + unsigned bbt_options; 79 81 80 82 /* Main and mirror bbt descriptor overrides */ 81 83 struct nand_bbt_descr *bbt_td;
+5 -18
arch/arm/mach-ep93xx/ts72xx.c
··· 116 116 .mask_flags = MTD_WRITEABLE, /* force read-only */ 117 117 }, { 118 118 .name = "Linux", 119 - .offset = MTDPART_OFS_APPEND, 120 - .size = 0, /* filled in later */ 119 + .offset = MTDPART_OFS_RETAIN, 120 + .size = TS72XX_REDBOOT_PART_SIZE, 121 + /* leave so much for last partition */ 121 122 }, { 122 123 .name = "RedBoot", 123 124 .offset = MTDPART_OFS_APPEND, ··· 127 126 }, 128 127 }; 129 128 130 - static void ts72xx_nand_set_parts(uint64_t size, 131 - struct platform_nand_chip *chip) 132 - { 133 - /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */ 134 - if (size == SZ_32M || size == SZ_128M) { 135 - /* Set the "Linux" partition size */ 136 - ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE; 137 - 138 - chip->partitions = ts72xx_nand_parts; 139 - chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts); 140 - } else { 141 - pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20); 142 - } 143 - } 144 - 145 129 static struct platform_nand_data ts72xx_nand_data = { 146 130 .chip = { 147 131 .nr_chips = 1, 148 132 .chip_offset = 0, 149 133 .chip_delay = 15, 150 134 .part_probe_types = ts72xx_nand_part_probes, 151 - .set_parts = ts72xx_nand_set_parts, 135 + .partitions = ts72xx_nand_parts, 136 + .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts), 152 137 }, 153 138 .ctrl = { 154 139 .cmd_ctrl = ts72xx_nand_hwcontrol,
+3 -2
arch/arm/mach-mmp/aspenite.c
··· 167 167 168 168 static struct pxa3xx_nand_platform_data aspenite_nand_info = { 169 169 .enable_arbiter = 1, 170 - .parts = aspenite_nand_partitions, 171 - .nr_parts = ARRAY_SIZE(aspenite_nand_partitions), 170 + .num_cs = 1, 171 + .parts[0] = aspenite_nand_partitions, 172 + .nr_parts[0] = ARRAY_SIZE(aspenite_nand_partitions), 172 173 }; 173 174 174 175 static struct i2c_board_info aspenite_i2c_info[] __initdata = {
+1 -1
arch/arm/mach-orion5x/ts78xx-setup.c
··· 275 275 .partitions = ts78xx_ts_nand_parts, 276 276 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), 277 277 .chip_delay = 15, 278 - .options = NAND_USE_FLASH_BBT, 278 + .bbt_options = NAND_BBT_USE_FLASH, 279 279 }, 280 280 .ctrl = { 281 281 /*
+3 -2
arch/arm/mach-pxa/cm-x300.c
··· 424 424 static struct pxa3xx_nand_platform_data cm_x300_nand_info = { 425 425 .enable_arbiter = 1, 426 426 .keep_config = 1, 427 - .parts = cm_x300_nand_partitions, 428 - .nr_parts = ARRAY_SIZE(cm_x300_nand_partitions), 427 + .num_cs = 1, 428 + .parts[0] = cm_x300_nand_partitions, 429 + .nr_parts[0] = ARRAY_SIZE(cm_x300_nand_partitions), 429 430 }; 430 431 431 432 static void __init cm_x300_init_nand(void)
+3 -2
arch/arm/mach-pxa/colibri-pxa3xx.c
··· 139 139 static struct pxa3xx_nand_platform_data colibri_nand_info = { 140 140 .enable_arbiter = 1, 141 141 .keep_config = 1, 142 - .parts = colibri_nand_partitions, 143 - .nr_parts = ARRAY_SIZE(colibri_nand_partitions), 142 + .num_cs = 1, 143 + .parts[0] = colibri_nand_partitions, 144 + .nr_parts[0] = ARRAY_SIZE(colibri_nand_partitions), 144 145 }; 145 146 146 147 void __init colibri_pxa3xx_init_nand(void)
+3 -2
arch/arm/mach-pxa/littleton.c
··· 325 325 326 326 static struct pxa3xx_nand_platform_data littleton_nand_info = { 327 327 .enable_arbiter = 1, 328 - .parts = littleton_nand_partitions, 329 - .nr_parts = ARRAY_SIZE(littleton_nand_partitions), 328 + .num_cs = 1, 329 + .parts[0] = littleton_nand_partitions, 330 + .nr_parts[0] = ARRAY_SIZE(littleton_nand_partitions), 330 331 }; 331 332 332 333 static void __init littleton_init_nand(void)
+5 -4
arch/arm/mach-pxa/mxm8x10.c
··· 389 389 }; 390 390 391 391 static struct pxa3xx_nand_platform_data mxm_8x10_nand_info = { 392 - .enable_arbiter = 1, 393 - .keep_config = 1, 394 - .parts = mxm_8x10_nand_partitions, 395 - .nr_parts = ARRAY_SIZE(mxm_8x10_nand_partitions) 392 + .enable_arbiter = 1, 393 + .keep_config = 1, 394 + .num_cs = 1, 395 + .parts[0] = mxm_8x10_nand_partitions, 396 + .nr_parts[0] = ARRAY_SIZE(mxm_8x10_nand_partitions) 396 397 }; 397 398 398 399 static void __init mxm_8x10_nand_init(void)
+3 -2
arch/arm/mach-pxa/raumfeld.c
··· 346 346 static struct pxa3xx_nand_platform_data raumfeld_nand_info = { 347 347 .enable_arbiter = 1, 348 348 .keep_config = 1, 349 - .parts = raumfeld_nand_partitions, 350 - .nr_parts = ARRAY_SIZE(raumfeld_nand_partitions), 349 + .num_cs = 1, 350 + .parts[0] = raumfeld_nand_partitions, 351 + .nr_parts[0] = ARRAY_SIZE(raumfeld_nand_partitions), 351 352 }; 352 353 353 354 /**
+3 -2
arch/arm/mach-pxa/zylonite.c
··· 366 366 367 367 static struct pxa3xx_nand_platform_data zylonite_nand_info = { 368 368 .enable_arbiter = 1, 369 - .parts = zylonite_nand_partitions, 370 - .nr_parts = ARRAY_SIZE(zylonite_nand_partitions), 369 + .num_cs = 1, 370 + .parts[0] = zylonite_nand_partitions, 371 + .nr_parts[0] = ARRAY_SIZE(zylonite_nand_partitions), 371 372 }; 372 373 373 374 static void __init zylonite_init_nand(void)
+18 -2
arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
··· 41 41 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ 42 42 }; 43 43 44 + /* 45 + * Current pxa3xx_nand controller has two chip select which 46 + * both be workable. 47 + * 48 + * Notice should be taken that: 49 + * When you want to use this feature, you should not enable the 50 + * keep configuration feature, for two chip select could be 51 + * attached with different nand chip. The different page size 52 + * and timing requirement make the keep configuration impossible. 53 + */ 54 + 55 + /* The max num of chip select current support */ 56 + #define NUM_CHIP_SELECT (2) 44 57 struct pxa3xx_nand_platform_data { 45 58 46 59 /* the data flash bus is shared between the Static Memory ··· 65 52 /* allow platform code to keep OBM/bootloader defined NFC config */ 66 53 int keep_config; 67 54 68 - const struct mtd_partition *parts; 69 - unsigned int nr_parts; 55 + /* indicate how many chip selects will be used */ 56 + int num_cs; 57 + 58 + const struct mtd_partition *parts[NUM_CHIP_SELECT]; 59 + unsigned int nr_parts[NUM_CHIP_SELECT]; 70 60 71 61 const struct pxa3xx_nand_flash * flash; 72 62 size_t num_flash;
+2 -6
arch/avr32/boards/atngw100/setup.c
··· 90 90 }, 91 91 }; 92 92 93 - static struct mtd_partition *nand_part_info(int size, int *num_partitions) 94 - { 95 - *num_partitions = ARRAY_SIZE(nand_partitions); 96 - return nand_partitions; 97 - } 98 93 99 94 static struct atmel_nand_data atngw100mkii_nand_data __initdata = { 100 95 .cle = 21, ··· 97 102 .rdy_pin = GPIO_PIN_PB(28), 98 103 .enable_pin = GPIO_PIN_PE(23), 99 104 .bus_width_16 = true, 100 - .partition_info = nand_part_info, 105 + .parts = nand_partitions, 106 + .num_parts = ARRAY_SIZE(nand_partitions), 101 107 }; 102 108 #endif 103 109
+2 -7
arch/avr32/boards/atstk1000/atstk1002.c
··· 90 90 }, 91 91 }; 92 92 93 - static struct mtd_partition *nand_part_info(int size, int *num_partitions) 94 - { 95 - *num_partitions = ARRAY_SIZE(nand_partitions); 96 - return nand_partitions; 97 - } 98 - 99 93 static struct atmel_nand_data atstk1006_nand_data __initdata = { 100 94 .cle = 21, 101 95 .ale = 22, 102 96 .rdy_pin = GPIO_PIN_PB(30), 103 97 .enable_pin = GPIO_PIN_PB(29), 104 - .partition_info = nand_part_info, 98 + .parts = nand_partitions, 99 + .num_parts = ARRAY_SIZE(nand_partitions), 105 100 }; 106 101 107 102
+2 -1
arch/avr32/mach-at32ap/include/mach/board.h
··· 128 128 u8 ale; /* address line number connected to ALE */ 129 129 u8 cle; /* address line number connected to CLE */ 130 130 u8 bus_width_16; /* buswidth is 16 bit */ 131 - struct mtd_partition *(*partition_info)(int size, int *num_partitions); 131 + struct mtd_partition *parts; 132 + unsigned int num_parts; 132 133 }; 133 134 struct platform_device * 134 135 at32_add_device_nand(unsigned int id, struct atmel_nand_data *data);
+1 -1
arch/cris/arch-v32/drivers/mach-a3/nandflash.c
··· 163 163 this->ecc.mode = NAND_ECC_SOFT; 164 164 165 165 /* Enable the following for a flash based bad block table */ 166 - /* this->options = NAND_USE_FLASH_BBT; */ 166 + /* this->bbt_options = NAND_BBT_USE_FLASH; */ 167 167 168 168 /* Scan to find existence of the device */ 169 169 if (nand_scan(crisv32_mtd, 1)) {
+1 -1
arch/cris/arch-v32/drivers/mach-fs/nandflash.c
··· 154 154 this->ecc.mode = NAND_ECC_SOFT; 155 155 156 156 /* Enable the following for a flash based bad block table */ 157 - /* this->options = NAND_USE_FLASH_BBT; */ 157 + /* this->bbt_options = NAND_BBT_USE_FLASH; */ 158 158 159 159 /* Scan to find existence of the device */ 160 160 if (nand_scan(crisv32_mtd, 1)) {
+6 -15
drivers/mtd/Kconfig
··· 12 12 13 13 if MTD 14 14 15 - config MTD_DEBUG 16 - bool "Debugging" 17 - help 18 - This turns on low-level debugging for the entire MTD sub-system. 19 - Normally, you should say 'N'. 20 - 21 - config MTD_DEBUG_VERBOSE 22 - int "Debugging verbosity (0 = quiet, 3 = noisy)" 23 - depends on MTD_DEBUG 24 - default "0" 25 - help 26 - Determines the verbosity level of the MTD debugging messages. 27 - 28 15 config MTD_TESTS 29 - tristate "MTD tests support" 16 + tristate "MTD tests support (DANGEROUS)" 30 17 depends on m 31 18 help 32 19 This option includes various MTD tests into compilation. The tests 33 20 should normally be compiled as kernel modules. The modules perform 34 21 various checks and verifications when loaded. 22 + 23 + WARNING: some of the tests will ERASE entire MTD device which they 24 + test. Do not use these tests unless you really know what you do. 35 25 36 26 config MTD_REDBOOT_PARTS 37 27 tristate "RedBoot partition table parsing" ··· 127 137 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example. 128 138 129 139 config MTD_OF_PARTS 130 - def_bool y 140 + tristate "OpenFirmware partitioning information support" 141 + default y 131 142 depends on OF 132 143 help 133 144 This provides a partition parsing function which derives
+1 -1
drivers/mtd/Makefile
··· 5 5 # Core functionality. 6 6 obj-$(CONFIG_MTD) += mtd.o 7 7 mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o 8 - mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o 9 8 9 + obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o 10 10 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 11 11 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 12 12 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+2 -2
drivers/mtd/afs.c
··· 162 162 } 163 163 164 164 static int parse_afs_partitions(struct mtd_info *mtd, 165 - struct mtd_partition **pparts, 166 - unsigned long origin) 165 + struct mtd_partition **pparts, 166 + struct mtd_part_parser_data *data) 167 167 { 168 168 struct mtd_partition *parts; 169 169 u_int mask, off, idx, sz;
+1 -1
drivers/mtd/ar7part.c
··· 47 47 48 48 static int create_mtd_partitions(struct mtd_info *master, 49 49 struct mtd_partition **pparts, 50 - unsigned long origin) 50 + struct mtd_part_parser_data *data) 51 51 { 52 52 struct ar7_bin_rec header; 53 53 unsigned int offset;
+13 -18
drivers/mtd/chips/cfi_cmdset_0002.c
··· 145 145 if (((major << 8) | minor) < 0x3131) { 146 146 /* CFI version 1.0 => don't trust bootloc */ 147 147 148 - DEBUG(MTD_DEBUG_LEVEL1, 149 - "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 148 + pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 150 149 map->name, cfi->mfr, cfi->id); 151 150 152 151 /* AFAICS all 29LV400 with a bottom boot block have a device ID ··· 165 166 * the 8-bit device ID. 166 167 */ 167 168 (cfi->mfr == CFI_MFR_MACRONIX)) { 168 - DEBUG(MTD_DEBUG_LEVEL1, 169 - "%s: Macronix MX29LV400C with bottom boot block" 169 + pr_debug("%s: Macronix MX29LV400C with bottom boot block" 170 170 " detected\n", map->name); 171 171 extp->TopBottom = 2; /* bottom boot */ 172 172 } else ··· 176 178 extp->TopBottom = 2; /* bottom boot */ 177 179 } 178 180 179 - DEBUG(MTD_DEBUG_LEVEL1, 180 - "%s: AMD CFI PRI V%c.%c has no boot block field;" 181 + pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 181 182 " deduced %s from Device ID\n", map->name, major, minor, 182 183 extp->TopBottom == 2 ? 
"bottom" : "top"); 183 184 } ··· 188 191 struct map_info *map = mtd->priv; 189 192 struct cfi_private *cfi = map->fldrv_priv; 190 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 191 - DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 194 + pr_debug("Using buffer write method\n" ); 192 195 mtd->write = cfi_amdstd_write_buffers; 193 196 } 194 197 } ··· 440 443 mtd->writesize = 1; 441 444 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 442 445 443 - DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", 444 - __func__, mtd->writebufsize); 446 + pr_debug("MTD %s(): write buffer size %d\n", __func__, 447 + mtd->writebufsize); 445 448 446 449 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 447 450 ··· 1160 1163 return ret; 1161 1164 } 1162 1165 1163 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1166 + pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1164 1167 __func__, adr, datum.x[0] ); 1165 1168 1166 1169 /* ··· 1171 1174 */ 1172 1175 oldd = map_read(map, adr); 1173 1176 if (map_word_equal(map, oldd, datum)) { 1174 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", 1177 + pr_debug("MTD %s(): NOP\n", 1175 1178 __func__); 1176 1179 goto op_done; 1177 1180 } ··· 1397 1400 1398 1401 datum = map_word_load(map, buf); 1399 1402 1400 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1403 + pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1401 1404 __func__, adr, datum.x[0] ); 1402 1405 1403 1406 XIP_INVAL_CACHED_RANGE(map, adr, len); ··· 1584 1587 return ret; 1585 1588 } 1586 1589 1587 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1590 + pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1588 1591 __func__, chip->start ); 1589 1592 1590 1593 XIP_INVAL_CACHED_RANGE(map, adr, map->size); ··· 1672 1675 return ret; 1673 1676 } 1674 1677 1675 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1678 + pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1676 1679 __func__, adr ); 1677 1680 1678 1681 XIP_INVAL_CACHED_RANGE(map, adr, 
len); ··· 1798 1801 goto out_unlock; 1799 1802 chip->state = FL_LOCKING; 1800 1803 1801 - DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1802 - __func__, adr, len); 1804 + pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 1803 1805 1804 1806 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1805 1807 cfi->device_type, NULL); ··· 1833 1837 goto out_unlock; 1834 1838 chip->state = FL_UNLOCKING; 1835 1839 1836 - DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1837 - __func__, adr, len); 1840 + pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 1838 1841 1839 1842 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1840 1843 cfi->device_type, NULL);
+1 -2
drivers/mtd/chips/fwh_lock.h
··· 34 34 35 35 /* Refuse the operation if the we cannot look behind the chip */ 36 36 if (chip->start < 0x400000) { 37 - DEBUG( MTD_DEBUG_LEVEL3, 38 - "MTD %s(): chip->start: %lx wanted >= 0x400000\n", 37 + pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n", 39 38 __func__, chip->start ); 40 39 return -EIO; 41 40 }
+13 -21
drivers/mtd/chips/jedec_probe.c
··· 1914 1914 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset 1915 1915 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at 1916 1916 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1917 - * as they will ignore the writes and dont care what address 1917 + * as they will ignore the writes and don't care what address 1918 1918 * the F0 is written to */ 1919 1919 if (cfi->addr_unlock1) { 1920 - DEBUG( MTD_DEBUG_LEVEL3, 1921 - "reset unlock called %x %x \n", 1920 + pr_debug( "reset unlock called %x %x \n", 1922 1921 cfi->addr_unlock1,cfi->addr_unlock2); 1923 1922 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1924 1923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); ··· 1940 1941 uint8_t uaddr; 1941 1942 1942 1943 if (!(jedec_table[index].devtypes & cfi->device_type)) { 1943 - DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1944 + pr_debug("Rejecting potential %s with incompatible %d-bit device type\n", 1944 1945 jedec_table[index].name, 4 * (1<<cfi->device_type)); 1945 1946 return 0; 1946 1947 } ··· 2020 2021 * there aren't. 
2021 2022 */ 2022 2023 if (finfo->dev_id > 0xff) { 2023 - DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n", 2024 + pr_debug("%s(): ID is not 8bit\n", 2024 2025 __func__); 2025 2026 goto match_done; 2026 2027 } ··· 2044 2045 } 2045 2046 2046 2047 /* the part size must fit in the memory window */ 2047 - DEBUG( MTD_DEBUG_LEVEL3, 2048 - "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 2048 + pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 2049 2049 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); 2050 2050 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { 2051 - DEBUG( MTD_DEBUG_LEVEL3, 2052 - "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 2051 + pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 2053 2052 __func__, finfo->mfr_id, finfo->dev_id, 2054 2053 1 << finfo->dev_size ); 2055 2054 goto match_done; ··· 2058 2061 2059 2062 uaddr = finfo->uaddr; 2060 2063 2061 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 2064 + pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 2062 2065 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 2063 2066 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 2064 2067 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || 2065 2068 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { 2066 - DEBUG( MTD_DEBUG_LEVEL3, 2067 - "MTD %s(): 0x%.4x 0x%.4x did not match\n", 2069 + pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n", 2068 2070 __func__, 2069 2071 unlock_addrs[uaddr].addr1, 2070 2072 unlock_addrs[uaddr].addr2); ··· 2079 2083 * FIXME - write a driver that takes all of the chip info as 2080 2084 * module parameters, doesn't probe but forces a load. 
2081 2085 */ 2082 - DEBUG( MTD_DEBUG_LEVEL3, 2083 - "MTD %s(): check ID's disappear when not in ID mode\n", 2086 + pr_debug("MTD %s(): check ID's disappear when not in ID mode\n", 2084 2087 __func__ ); 2085 2088 jedec_reset( base, map, cfi ); 2086 2089 mfr = jedec_read_mfr( map, base, cfi ); 2087 2090 id = jedec_read_id( map, base, cfi ); 2088 2091 if ( mfr == cfi->mfr && id == cfi->id ) { 2089 - DEBUG( MTD_DEBUG_LEVEL3, 2090 - "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" 2092 + pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" 2091 2093 "You might need to manually specify JEDEC parameters.\n", 2092 2094 __func__, cfi->mfr, cfi->id ); 2093 2095 goto match_done; ··· 2098 2104 * Put the device back in ID mode - only need to do this if we 2099 2105 * were truly frobbing a real device. 2100 2106 */ 2101 - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 2107 + pr_debug("MTD %s(): return to ID mode\n", __func__ ); 2102 2108 if (cfi->addr_unlock1) { 2103 2109 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 2104 2110 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); ··· 2161 2167 2162 2168 cfi->mfr = jedec_read_mfr(map, base, cfi); 2163 2169 cfi->id = jedec_read_id(map, base, cfi); 2164 - DEBUG(MTD_DEBUG_LEVEL3, 2165 - "Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2170 + pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2166 2171 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2167 2172 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { 2168 2173 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2169 - DEBUG( MTD_DEBUG_LEVEL3, 2170 - "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2174 + pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2171 2175 __func__, cfi->mfr, cfi->id, 2172 2176 cfi->addr_unlock1, cfi->addr_unlock2 ); 2173 2177 if (!cfi_jedec_setup(map, cfi, i))
+2 -5
drivers/mtd/cmdlinepart.c
··· 189 189 extra_mem_size; 190 190 parts = kzalloc(alloc_size, GFP_KERNEL); 191 191 if (!parts) 192 - { 193 - printk(KERN_ERR ERRP "out of memory\n"); 194 192 return NULL; 195 - } 196 193 extra_mem = (unsigned char *)(parts + *num_parts); 197 194 } 198 195 /* enter this partition (offset will be calculated later if it is zero at this point) */ ··· 314 317 * the first one in the chain if a NULL mtd_id is passed in. 315 318 */ 316 319 static int parse_cmdline_partitions(struct mtd_info *master, 317 - struct mtd_partition **pparts, 318 - unsigned long origin) 320 + struct mtd_partition **pparts, 321 + struct mtd_part_parser_data *data) 319 322 { 320 323 unsigned long offset; 321 324 int i;
+11 -2
drivers/mtd/devices/Kconfig
··· 249 249 under "NAND Flash Device Drivers" (currently that driver does not 250 250 support all Millennium Plus devices). 251 251 252 + config MTD_DOCG3 253 + tristate "M-Systems Disk-On-Chip G3" 254 + ---help--- 255 + This provides an MTD device driver for the M-Systems DiskOnChip 256 + G3 devices. 257 + 258 + The driver provides access to G3 DiskOnChip, distributed by 259 + M-Systems and now Sandisk. The support is very experimental, 260 + and doesn't give access to any write operations. 261 + 252 262 config MTD_DOCPROBE 253 263 tristate 254 264 select MTD_DOCECC ··· 278 268 config MTD_DOCPROBE_ADDRESS 279 269 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED 280 270 depends on MTD_DOCPROBE 281 - default "0x0000" if MTD_DOCPROBE_ADVANCED 282 - default "0" if !MTD_DOCPROBE_ADVANCED 271 + default "0x0" 283 272 ---help--- 284 273 By default, the probe for DiskOnChip devices will look for a 285 274 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+3
drivers/mtd/devices/Makefile
··· 5 5 obj-$(CONFIG_MTD_DOC2000) += doc2000.o 6 6 obj-$(CONFIG_MTD_DOC2001) += doc2001.o 7 7 obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o 8 + obj-$(CONFIG_MTD_DOCG3) += docg3.o 8 9 obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o 9 10 obj-$(CONFIG_MTD_DOCECC) += docecc.o 10 11 obj-$(CONFIG_MTD_SLRAM) += slram.o ··· 18 17 obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 19 18 obj-$(CONFIG_MTD_M25P80) += m25p80.o 20 19 obj-$(CONFIG_MTD_SST25L) += sst25l.o 20 + 21 + CFLAGS_docg3.o += -I$(src)
+7 -10
drivers/mtd/devices/doc2000.c
··· 82 82 void __iomem *docptr = doc->virtadr; 83 83 unsigned long timeo = jiffies + (HZ * 10); 84 84 85 - DEBUG(MTD_DEBUG_LEVEL3, 86 - "_DoC_WaitReady called for out-of-line wait\n"); 85 + pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 87 86 88 87 /* Out-of-line routine to wait for chip response */ 89 88 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { ··· 91 92 DoC_Delay(doc, 2); 92 93 93 94 if (time_after(jiffies, timeo)) { 94 - DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 95 + pr_debug("_DoC_WaitReady timed out.\n"); 95 96 return -EIO; 96 97 } 97 98 udelay(1); ··· 322 323 323 324 /* Reset the chip */ 324 325 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { 325 - DEBUG(MTD_DEBUG_LEVEL2, 326 - "DoC_Command (reset) for %d,%d returned true\n", 326 + pr_debug("DoC_Command (reset) for %d,%d returned true\n", 327 327 floor, chip); 328 328 return 0; 329 329 } ··· 330 332 331 333 /* Read the NAND chip ID: 1. Send ReadID command */ 332 334 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { 333 - DEBUG(MTD_DEBUG_LEVEL2, 334 - "DoC_Command (ReadID) for %d,%d returned true\n", 335 + pr_debug("DoC_Command (ReadID) for %d,%d returned true\n", 335 336 floor, chip); 336 337 return 0; 337 338 } ··· 696 699 #ifdef ECC_DEBUG 697 700 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); 698 701 #endif 699 - /* Read the ECC syndrom through the DiskOnChip ECC 702 + /* Read the ECC syndrome through the DiskOnChip ECC 700 703 logic. 
These syndrome will be all ZERO when there 701 704 is no error */ 702 705 for (i = 0; i < 6; i++) { ··· 927 930 uint8_t *buf = ops->oobbuf; 928 931 size_t len = ops->len; 929 932 930 - BUG_ON(ops->mode != MTD_OOB_PLACE); 933 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 931 934 932 935 ofs += ops->ooboffs; 933 936 ··· 1091 1094 struct DiskOnChip *this = mtd->priv; 1092 1095 int ret; 1093 1096 1094 - BUG_ON(ops->mode != MTD_OOB_PLACE); 1097 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 1095 1098 1096 1099 mutex_lock(&this->lock); 1097 1100 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
+5 -6
drivers/mtd/devices/doc2001.c
··· 55 55 { 56 56 unsigned short c = 0xffff; 57 57 58 - DEBUG(MTD_DEBUG_LEVEL3, 59 - "_DoC_WaitReady called for out-of-line wait\n"); 58 + pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 60 59 61 60 /* Out-of-line routine to wait for chip response */ 62 61 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) 63 62 ; 64 63 65 64 if (c == 0) 66 - DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 65 + pr_debug("_DoC_WaitReady timed out.\n"); 67 66 68 67 return (c == 0); 69 68 } ··· 463 464 #ifdef ECC_DEBUG 464 465 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 465 466 #endif 466 - /* Read the ECC syndrom through the DiskOnChip ECC logic. 467 + /* Read the ECC syndrome through the DiskOnChip ECC logic. 467 468 These syndrome will be all ZERO when there is no error */ 468 469 for (i = 0; i < 6; i++) { 469 470 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); ··· 631 632 uint8_t *buf = ops->oobbuf; 632 633 size_t len = ops->len; 633 634 634 - BUG_ON(ops->mode != MTD_OOB_PLACE); 635 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 635 636 636 637 ofs += ops->ooboffs; 637 638 ··· 689 690 uint8_t *buf = ops->oobbuf; 690 691 size_t len = ops->len; 691 692 692 - BUG_ON(ops->mode != MTD_OOB_PLACE); 693 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 693 694 694 695 ofs += ops->ooboffs; 695 696
+5 -6
drivers/mtd/devices/doc2001plus.c
··· 61 61 { 62 62 unsigned int c = 0xffff; 63 63 64 - DEBUG(MTD_DEBUG_LEVEL3, 65 - "_DoC_WaitReady called for out-of-line wait\n"); 64 + pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 66 65 67 66 /* Out-of-line routine to wait for chip response */ 68 67 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) 69 68 ; 70 69 71 70 if (c == 0) 72 - DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 71 + pr_debug("_DoC_WaitReady timed out.\n"); 73 72 74 73 return (c == 0); 75 74 } ··· 654 655 #ifdef ECC_DEBUG 655 656 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 656 657 #endif 657 - /* Read the ECC syndrom through the DiskOnChip ECC logic. 658 + /* Read the ECC syndrome through the DiskOnChip ECC logic. 658 659 These syndrome will be all ZERO when there is no error */ 659 660 for (i = 0; i < 6; i++) 660 661 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); ··· 834 835 uint8_t *buf = ops->oobbuf; 835 836 size_t len = ops->len; 836 837 837 - BUG_ON(ops->mode != MTD_OOB_PLACE); 838 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 838 839 839 840 ofs += ops->ooboffs; 840 841 ··· 919 920 uint8_t *buf = ops->oobbuf; 920 921 size_t len = ops->len; 921 922 922 - BUG_ON(ops->mode != MTD_OOB_PLACE); 923 + BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 923 924 924 925 ofs += ops->ooboffs; 925 926
+1 -1
drivers/mtd/devices/docecc.c
··· 2 2 * ECC algorithm for M-systems disk on chip. We use the excellent Reed 3 3 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the 4 4 * GNU GPL License. The rest is simply to convert the disk on chip 5 - * syndrom into a standard syndom. 5 + * syndrome into a standard syndrome. 6 6 * 7 7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 8 8 * Copyright (C) 2000 Netgem S.A.
+1114
drivers/mtd/devices/docg3.c
··· 1 + /* 2 + * Handles the M-Systems DiskOnChip G3 chip 3 + * 4 + * Copyright (C) 2011 Robert Jarzmik 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 + * 20 + */ 21 + 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 24 + #include <linux/errno.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/string.h> 27 + #include <linux/slab.h> 28 + #include <linux/io.h> 29 + #include <linux/delay.h> 30 + #include <linux/mtd/mtd.h> 31 + #include <linux/mtd/partitions.h> 32 + 33 + #include <linux/debugfs.h> 34 + #include <linux/seq_file.h> 35 + 36 + #define CREATE_TRACE_POINTS 37 + #include "docg3.h" 38 + 39 + /* 40 + * This driver handles the DiskOnChip G3 flash memory. 41 + * 42 + * As no specification is available from M-Systems/Sandisk, this drivers lacks 43 + * several functions available on the chip, as : 44 + * - block erase 45 + * - page write 46 + * - IPL write 47 + * - ECC fixing (lack of BCH algorith understanding) 48 + * - powerdown / powerup 49 + * 50 + * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and 51 + * the driver assumes a 16bits data bus. 
52 + * 53 + * DocG3 relies on 2 ECC algorithms, which are handled in hardware : 54 + * - a 1 byte Hamming code stored in the OOB for each page 55 + * - a 7 bytes BCH code stored in the OOB for each page 56 + * The BCH part is only used for check purpose, no correction is available as 57 + * some information is missing. What is known is that : 58 + * - BCH is in GF(2^14) 59 + * - BCH is over data of 520 bytes (512 page + 7 page_info bytes 60 + * + 1 hamming byte) 61 + * - BCH can correct up to 4 bits (t = 4) 62 + * - BCH syndroms are calculated in hardware, and checked in hardware as well 63 + * 64 + */ 65 + 66 + static inline u8 doc_readb(struct docg3 *docg3, u16 reg) 67 + { 68 + u8 val = readb(docg3->base + reg); 69 + 70 + trace_docg3_io(0, 8, reg, (int)val); 71 + return val; 72 + } 73 + 74 + static inline u16 doc_readw(struct docg3 *docg3, u16 reg) 75 + { 76 + u16 val = readw(docg3->base + reg); 77 + 78 + trace_docg3_io(0, 16, reg, (int)val); 79 + return val; 80 + } 81 + 82 + static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) 83 + { 84 + writeb(val, docg3->base + reg); 85 + trace_docg3_io(1, 16, reg, val); 86 + } 87 + 88 + static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) 89 + { 90 + writew(val, docg3->base + reg); 91 + trace_docg3_io(1, 16, reg, val); 92 + } 93 + 94 + static inline void doc_flash_command(struct docg3 *docg3, u8 cmd) 95 + { 96 + doc_writeb(docg3, cmd, DOC_FLASHCOMMAND); 97 + } 98 + 99 + static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq) 100 + { 101 + doc_writeb(docg3, seq, DOC_FLASHSEQUENCE); 102 + } 103 + 104 + static inline void doc_flash_address(struct docg3 *docg3, u8 addr) 105 + { 106 + doc_writeb(docg3, addr, DOC_FLASHADDRESS); 107 + } 108 + 109 + static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL }; 110 + 111 + static int doc_register_readb(struct docg3 *docg3, int reg) 112 + { 113 + u8 val; 114 + 115 + doc_writew(docg3, reg, DOC_READADDRESS); 116 + val = 
doc_readb(docg3, reg); 117 + doc_vdbg("Read register %04x : %02x\n", reg, val); 118 + return val; 119 + } 120 + 121 + static int doc_register_readw(struct docg3 *docg3, int reg) 122 + { 123 + u16 val; 124 + 125 + doc_writew(docg3, reg, DOC_READADDRESS); 126 + val = doc_readw(docg3, reg); 127 + doc_vdbg("Read register %04x : %04x\n", reg, val); 128 + return val; 129 + } 130 + 131 + /** 132 + * doc_delay - delay docg3 operations 133 + * @docg3: the device 134 + * @nbNOPs: the number of NOPs to issue 135 + * 136 + * As no specification is available, the right timings between chip commands are 137 + * unknown. The only available piece of information are the observed nops on a 138 + * working docg3 chip. 139 + * Therefore, doc_delay relies on a busy loop of NOPs, instead of scheduler 140 + * friendlier msleep() functions or blocking mdelay(). 141 + */ 142 + static void doc_delay(struct docg3 *docg3, int nbNOPs) 143 + { 144 + int i; 145 + 146 + doc_dbg("NOP x %d\n", nbNOPs); 147 + for (i = 0; i < nbNOPs; i++) 148 + doc_writeb(docg3, 0, DOC_NOP); 149 + } 150 + 151 + static int is_prot_seq_error(struct docg3 *docg3) 152 + { 153 + int ctrl; 154 + 155 + ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 156 + return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR); 157 + } 158 + 159 + static int doc_is_ready(struct docg3 *docg3) 160 + { 161 + int ctrl; 162 + 163 + ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 164 + return ctrl & DOC_CTRL_FLASHREADY; 165 + } 166 + 167 + static int doc_wait_ready(struct docg3 *docg3) 168 + { 169 + int maxWaitCycles = 100; 170 + 171 + do { 172 + doc_delay(docg3, 4); 173 + cpu_relax(); 174 + } while (!doc_is_ready(docg3) && maxWaitCycles--); 175 + doc_delay(docg3, 2); 176 + if (maxWaitCycles > 0) 177 + return 0; 178 + else 179 + return -EIO; 180 + } 181 + 182 + static int doc_reset_seq(struct docg3 *docg3) 183 + { 184 + int ret; 185 + 186 + doc_writeb(docg3, 0x10, DOC_FLASHCONTROL); 187 + doc_flash_sequence(docg3, 
DOC_SEQ_RESET); 188 + doc_flash_command(docg3, DOC_CMD_RESET); 189 + doc_delay(docg3, 2); 190 + ret = doc_wait_ready(docg3); 191 + 192 + doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true"); 193 + return ret; 194 + } 195 + 196 + /** 197 + * doc_read_data_area - Read data from data area 198 + * @docg3: the device 199 + * @buf: the buffer to fill in 200 + * @len: the length to read 201 + * @first: first time read, DOC_READADDRESS should be set 202 + * 203 + * Reads bytes from flash data. Handles the single byte / even bytes reads. 204 + */ 205 + static void doc_read_data_area(struct docg3 *docg3, void *buf, int len, 206 + int first) 207 + { 208 + int i, cdr, len4; 209 + u16 data16, *dst16; 210 + u8 data8, *dst8; 211 + 212 + doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len); 213 + cdr = len & 0x3; 214 + len4 = len - cdr; 215 + 216 + if (first) 217 + doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS); 218 + dst16 = buf; 219 + for (i = 0; i < len4; i += 2) { 220 + data16 = doc_readw(docg3, DOC_IOSPACE_DATA); 221 + *dst16 = data16; 222 + dst16++; 223 + } 224 + 225 + if (cdr) { 226 + doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE, 227 + DOC_READADDRESS); 228 + doc_delay(docg3, 1); 229 + dst8 = (u8 *)dst16; 230 + for (i = 0; i < cdr; i++) { 231 + data8 = doc_readb(docg3, DOC_IOSPACE_DATA); 232 + *dst8 = data8; 233 + dst8++; 234 + } 235 + } 236 + } 237 + 238 + /** 239 + * doc_set_reliable_mode - Sets the flash to reliable data mode 240 + * @docg3: the device 241 + * 242 + * The reliable data mode is a bit slower than the fast mode, but fewer errors 243 + * occur. Entering the reliable mode cannot be done without entering the fast 244 + * mode first.
245 + */ 246 + static void doc_set_reliable_mode(struct docg3 *docg3) 247 + { 248 + doc_dbg("doc_set_reliable_mode()\n"); 249 + doc_flash_sequence(docg3, DOC_SEQ_SET_MODE); 250 + doc_flash_command(docg3, DOC_CMD_FAST_MODE); 251 + doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE); 252 + doc_delay(docg3, 2); 253 + } 254 + 255 + /** 256 + * doc_set_asic_mode - Set the ASIC mode 257 + * @docg3: the device 258 + * @mode: the mode 259 + * 260 + * The ASIC can work in 3 modes : 261 + * - RESET: all registers are zeroed 262 + * - NORMAL: receives and handles commands 263 + * - POWERDOWN: minimal poweruse, flash parts shut off 264 + */ 265 + static void doc_set_asic_mode(struct docg3 *docg3, u8 mode) 266 + { 267 + int i; 268 + 269 + for (i = 0; i < 12; i++) 270 + doc_readb(docg3, DOC_IOSPACE_IPL); 271 + 272 + mode |= DOC_ASICMODE_MDWREN; 273 + doc_dbg("doc_set_asic_mode(%02x)\n", mode); 274 + doc_writeb(docg3, mode, DOC_ASICMODE); 275 + doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM); 276 + doc_delay(docg3, 1); 277 + } 278 + 279 + /** 280 + * doc_set_device_id - Sets the devices id for cascaded G3 chips 281 + * @docg3: the device 282 + * @id: the chip to select (amongst 0, 1, 2, 3) 283 + * 284 + * There can be 4 cascaded G3 chips. This function selects the one which will 285 + * should be the active one. 286 + */ 287 + static void doc_set_device_id(struct docg3 *docg3, int id) 288 + { 289 + u8 ctrl; 290 + 291 + doc_dbg("doc_set_device_id(%d)\n", id); 292 + doc_writeb(docg3, id, DOC_DEVICESELECT); 293 + ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 294 + 295 + ctrl &= ~DOC_CTRL_VIOLATION; 296 + ctrl |= DOC_CTRL_CE; 297 + doc_writeb(docg3, ctrl, DOC_FLASHCONTROL); 298 + } 299 + 300 + /** 301 + * doc_set_extra_page_mode - Change flash page layout 302 + * @docg3: the device 303 + * 304 + * Normally, the flash page is split into the data (512 bytes) and the out of 305 + * band data (16 bytes). 
For each, 4 more bytes can be accessed, where the wear 306 + * leveling counters are stored. To access this last area of 4 bytes, a special 307 + * mode must be input to the flash ASIC. 308 + * 309 + * Returns 0 if no error occured, -EIO else. 310 + */ 311 + static int doc_set_extra_page_mode(struct docg3 *docg3) 312 + { 313 + int fctrl; 314 + 315 + doc_dbg("doc_set_extra_page_mode()\n"); 316 + doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532); 317 + doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532); 318 + doc_delay(docg3, 2); 319 + 320 + fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 321 + if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR)) 322 + return -EIO; 323 + else 324 + return 0; 325 + } 326 + 327 + /** 328 + * doc_seek - Set both flash planes to the specified block, page for reading 329 + * @docg3: the device 330 + * @block0: the first plane block index 331 + * @block1: the second plane block index 332 + * @page: the page index within the block 333 + * @wear: if true, read will occur on the 4 extra bytes of the wear area 334 + * @ofs: offset in page to read 335 + * 336 + * Programs the flash even and odd planes to the specific block and page. 337 + * Alternatively, programs the flash to the wear area of the specified page. 
338 + */ 339 + static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page, 340 + int wear, int ofs) 341 + { 342 + int sector, ret = 0; 343 + 344 + doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n", 345 + block0, block1, page, ofs, wear); 346 + 347 + if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) { 348 + doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1); 349 + doc_flash_command(docg3, DOC_CMD_READ_PLANE1); 350 + doc_delay(docg3, 2); 351 + } else { 352 + doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2); 353 + doc_flash_command(docg3, DOC_CMD_READ_PLANE2); 354 + doc_delay(docg3, 2); 355 + } 356 + 357 + doc_set_reliable_mode(docg3); 358 + if (wear) 359 + ret = doc_set_extra_page_mode(docg3); 360 + if (ret) 361 + goto out; 362 + 363 + sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 364 + doc_flash_sequence(docg3, DOC_SEQ_READ); 365 + doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 366 + doc_delay(docg3, 1); 367 + doc_flash_address(docg3, sector & 0xff); 368 + doc_flash_address(docg3, (sector >> 8) & 0xff); 369 + doc_flash_address(docg3, (sector >> 16) & 0xff); 370 + doc_delay(docg3, 1); 371 + 372 + sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); 373 + doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); 374 + doc_delay(docg3, 1); 375 + doc_flash_address(docg3, sector & 0xff); 376 + doc_flash_address(docg3, (sector >> 8) & 0xff); 377 + doc_flash_address(docg3, (sector >> 16) & 0xff); 378 + doc_delay(docg3, 2); 379 + 380 + out: 381 + return ret; 382 + } 383 + 384 + /** 385 + * doc_read_page_ecc_init - Initialize hardware ECC engine 386 + * @docg3: the device 387 + * @len: the number of bytes covered by the ECC (BCH covered) 388 + * 389 + * The function does initialize the hardware ECC engine to compute the Hamming 390 + * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes). 
391 + * 392 + * Return 0 if succeeded, -EIO on error 393 + */ 394 + static int doc_read_page_ecc_init(struct docg3 *docg3, int len) 395 + { 396 + doc_writew(docg3, DOC_ECCCONF0_READ_MODE 397 + | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE 398 + | (len & DOC_ECCCONF0_DATA_BYTES_MASK), 399 + DOC_ECCCONF0); 400 + doc_delay(docg3, 4); 401 + doc_register_readb(docg3, DOC_FLASHCONTROL); 402 + return doc_wait_ready(docg3); 403 + } 404 + 405 + /** 406 + * doc_read_page_prepare - Prepares reading data from a flash page 407 + * @docg3: the device 408 + * @block0: the first plane block index on flash memory 409 + * @block1: the second plane block index on flash memory 410 + * @page: the page index in the block 411 + * @offset: the offset in the page (must be a multiple of 4) 412 + * 413 + * Prepares the page to be read in the flash memory : 414 + * - tell ASIC to map the flash pages 415 + * - tell ASIC to be in read mode 416 + * 417 + * After a call to this method, a call to doc_read_page_finish is mandatory, 418 + * to end the read cycle of the flash. 419 + * 420 + * Read data from a flash page. The length to be read must be between 0 and 421 + * (page_size + oob_size + wear_size), ie. 532, and a multiple of 4 (because 422 + * the extra bytes reading is not implemented). 423 + * 424 + * As pages are grouped by 2 (in 2 planes), reading from a page must be done 425 + * in two steps: 426 + * - one read of 512 bytes at offset 0 427 + * - one read of 512 bytes at offset 512 + 16 428 + * 429 + * Returns 0 if successful, -EIO if a read error occured. 
430 + */ 431 + static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1, 432 + int page, int offset) 433 + { 434 + int wear_area = 0, ret = 0; 435 + 436 + doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n", 437 + block0, block1, page, offset); 438 + if (offset >= DOC_LAYOUT_WEAR_OFFSET) 439 + wear_area = 1; 440 + if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2)) 441 + return -EINVAL; 442 + 443 + doc_set_device_id(docg3, docg3->device_id); 444 + ret = doc_reset_seq(docg3); 445 + if (ret) 446 + goto err; 447 + 448 + /* Program the flash address block and page */ 449 + ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset); 450 + if (ret) 451 + goto err; 452 + 453 + doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES); 454 + doc_delay(docg3, 2); 455 + doc_wait_ready(docg3); 456 + 457 + doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ); 458 + doc_delay(docg3, 1); 459 + if (offset >= DOC_LAYOUT_PAGE_SIZE * 2) 460 + offset -= 2 * DOC_LAYOUT_PAGE_SIZE; 461 + doc_flash_address(docg3, offset >> 2); 462 + doc_delay(docg3, 1); 463 + doc_wait_ready(docg3); 464 + 465 + doc_flash_command(docg3, DOC_CMD_READ_FLASH); 466 + 467 + return 0; 468 + err: 469 + doc_writeb(docg3, 0, DOC_DATAEND); 470 + doc_delay(docg3, 2); 471 + return -EIO; 472 + } 473 + 474 + /** 475 + * doc_read_page_getbytes - Reads bytes from a prepared page 476 + * @docg3: the device 477 + * @len: the number of bytes to be read (must be a multiple of 4) 478 + * @buf: the buffer to be filled in 479 + * @first: 1 if first time read, DOC_READADDRESS should be set 480 + * 481 + */ 482 + static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf, 483 + int first) 484 + { 485 + doc_read_data_area(docg3, buf, len, first); 486 + doc_delay(docg3, 2); 487 + return len; 488 + } 489 + 490 + /** 491 + * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms 492 + * @docg3: the device 493 + * @syns: the array of 7 integers where the syndroms will 
be stored 494 + */ 495 + static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns) 496 + { 497 + int i; 498 + 499 + for (i = 0; i < DOC_ECC_BCH_SIZE; i++) 500 + syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i)); 501 + } 502 + 503 + /** 504 + * doc_read_page_finish - Ends reading of a flash page 505 + * @docg3: the device 506 + * 507 + * As a side effect, resets the chip selector to 0. This ensures that after each 508 + * read operation, the floor 0 is selected. Therefore, if the systems halts, the 509 + * reboot will boot on floor 0, where the IPL is. 510 + */ 511 + static void doc_read_page_finish(struct docg3 *docg3) 512 + { 513 + doc_writeb(docg3, 0, DOC_DATAEND); 514 + doc_delay(docg3, 2); 515 + doc_set_device_id(docg3, 0); 516 + } 517 + 518 + /** 519 + * calc_block_sector - Calculate blocks, pages and ofs. 520 + 521 + * @from: offset in flash 522 + * @block0: first plane block index calculated 523 + * @block1: second plane block index calculated 524 + * @page: page calculated 525 + * @ofs: offset in page 526 + */ 527 + static void calc_block_sector(loff_t from, int *block0, int *block1, int *page, 528 + int *ofs) 529 + { 530 + uint sector; 531 + 532 + sector = from / DOC_LAYOUT_PAGE_SIZE; 533 + *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES) 534 + * DOC_LAYOUT_NBPLANES; 535 + *block1 = *block0 + 1; 536 + *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES); 537 + *page /= DOC_LAYOUT_NBPLANES; 538 + if (sector % 2) 539 + *ofs = DOC_LAYOUT_PAGE_OOB_SIZE; 540 + else 541 + *ofs = 0; 542 + } 543 + 544 + /** 545 + * doc_read - Read bytes from flash 546 + * @mtd: the device 547 + * @from: the offset from first block and first page, in bytes, aligned on page 548 + * size 549 + * @len: the number of bytes to read (must be a multiple of 4) 550 + * @retlen: the number of bytes actually read 551 + * @buf: the filled in buffer 552 + * 553 + * Reads flash memory pages. 
This function does not read the OOB chunk, but only 554 + * the page data. 555 + * 556 + * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured 557 + */ 558 + static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, 559 + size_t *retlen, u_char *buf) 560 + { 561 + struct docg3 *docg3 = mtd->priv; 562 + int block0, block1, page, readlen, ret, ofs = 0; 563 + int syn[DOC_ECC_BCH_SIZE], eccconf1; 564 + u8 oob[DOC_LAYOUT_OOB_SIZE]; 565 + 566 + ret = -EINVAL; 567 + doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf); 568 + if (from % DOC_LAYOUT_PAGE_SIZE) 569 + goto err; 570 + if (len % 4) 571 + goto err; 572 + calc_block_sector(from, &block0, &block1, &page, &ofs); 573 + if (block1 > docg3->max_block) 574 + goto err; 575 + 576 + *retlen = 0; 577 + ret = 0; 578 + readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 579 + while (!ret && len > 0) { 580 + readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 581 + ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); 582 + if (ret < 0) 583 + goto err; 584 + ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES); 585 + if (ret < 0) 586 + goto err_in_read; 587 + ret = doc_read_page_getbytes(docg3, readlen, buf, 1); 588 + if (ret < readlen) 589 + goto err_in_read; 590 + ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, 591 + oob, 0); 592 + if (ret < DOC_LAYOUT_OOB_SIZE) 593 + goto err_in_read; 594 + 595 + *retlen += readlen; 596 + buf += readlen; 597 + len -= readlen; 598 + 599 + ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE; 600 + if (ofs == 0) 601 + page += 2; 602 + if (page > DOC_ADDR_PAGE_MASK) { 603 + page = 0; 604 + block0 += 2; 605 + block1 += 2; 606 + } 607 + 608 + /* 609 + * There should be a BCH bitstream fixing algorithm here ... 
610 + * By now, a page read failure is triggered by BCH error 611 + */ 612 + doc_get_hw_bch_syndroms(docg3, syn); 613 + eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 614 + 615 + doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 616 + oob[0], oob[1], oob[2], oob[3], oob[4], 617 + oob[5], oob[6]); 618 + doc_dbg("OOB - HAMMING: %02x\n", oob[7]); 619 + doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 620 + oob[8], oob[9], oob[10], oob[11], oob[12], 621 + oob[13], oob[14]); 622 + doc_dbg("OOB - UNUSED: %02x\n", oob[15]); 623 + doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); 624 + doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 625 + syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]); 626 + 627 + ret = -EBADMSG; 628 + if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) { 629 + if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) 630 + goto err_in_read; 631 + if (is_prot_seq_error(docg3)) 632 + goto err_in_read; 633 + } 634 + doc_read_page_finish(docg3); 635 + } 636 + 637 + return 0; 638 + err_in_read: 639 + doc_read_page_finish(docg3); 640 + err: 641 + return ret; 642 + } 643 + 644 + /** 645 + * doc_read_oob - Read out of band bytes from flash 646 + * @mtd: the device 647 + * @from: the offset from first block and first page, in bytes, aligned on page 648 + * size 649 + * @ops: the mtd oob structure 650 + * 651 + * Reads flash memory OOB area of pages. 
652 + * 653 + * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred 654 + */ 655 + static int doc_read_oob(struct mtd_info *mtd, loff_t from, 656 + struct mtd_oob_ops *ops) 657 + { 658 + struct docg3 *docg3 = mtd->priv; 659 + int block0, block1, page, ofs, ret; 660 + u8 *buf = ops->oobbuf; 661 + size_t len = ops->ooblen; 662 + 663 + doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len); 664 + if (len != DOC_LAYOUT_OOB_SIZE) 665 + return -EINVAL; 666 + 667 + switch (ops->mode) { 668 + case MTD_OPS_PLACE_OOB: 669 + buf += ops->ooboffs; 670 + break; 671 + default: 672 + break; 673 + } 674 + 675 + calc_block_sector(from, &block0, &block1, &page, &ofs); 676 + if (block1 > docg3->max_block) 677 + return -EINVAL; 678 + 679 + ret = doc_read_page_prepare(docg3, block0, block1, page, 680 + ofs + DOC_LAYOUT_PAGE_SIZE); 681 + if (!ret) 682 + ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE); 683 + if (!ret) 684 + ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, 685 + buf, 1); 686 + doc_read_page_finish(docg3); 687 + 688 + if (ret > 0) 689 + ops->oobretlen = ret; 690 + else 691 + ops->oobretlen = 0; 692 + return (ret > 0) ? 
0 : ret; 693 + } 694 + 695 + static int doc_reload_bbt(struct docg3 *docg3) 696 + { 697 + int block = DOC_LAYOUT_BLOCK_BBT; 698 + int ret = 0, nbpages, page; 699 + u_char *buf = docg3->bbt; 700 + 701 + nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE); 702 + for (page = 0; !ret && (page < nbpages); page++) { 703 + ret = doc_read_page_prepare(docg3, block, block + 1, 704 + page + DOC_LAYOUT_PAGE_BBT, 0); 705 + if (!ret) 706 + ret = doc_read_page_ecc_init(docg3, 707 + DOC_LAYOUT_PAGE_SIZE); 708 + if (!ret) 709 + doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE, 710 + buf, 1); 711 + buf += DOC_LAYOUT_PAGE_SIZE; 712 + } 713 + doc_read_page_finish(docg3); 714 + return ret; 715 + } 716 + 717 + /** 718 + * doc_block_isbad - Checks whether a block is good or not 719 + * @mtd: the device 720 + * @from: the offset to find the correct block 721 + * 722 + * Returns 1 if block is bad, 0 if block is good 723 + */ 724 + static int doc_block_isbad(struct mtd_info *mtd, loff_t from) 725 + { 726 + struct docg3 *docg3 = mtd->priv; 727 + int block0, block1, page, ofs, is_good; 728 + 729 + calc_block_sector(from, &block0, &block1, &page, &ofs); 730 + doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n", 731 + from, block0, block1, page, ofs); 732 + 733 + if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA) 734 + return 0; 735 + if (block1 > docg3->max_block) 736 + return -EINVAL; 737 + 738 + is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7)); 739 + return !is_good; 740 + } 741 + 742 + /** 743 + * doc_get_erase_count - Get block erase count 744 + * @docg3: the device 745 + * @from: the offset in which the block is. 746 + * 747 + * Get the number of times a block was erased. The number is the maximum of 748 + * erase times between first and second plane (which should be equal normally). 749 + * 750 + * Returns The number of erases, or -EINVAL or -EIO on error. 
751 + */ 752 + static int doc_get_erase_count(struct docg3 *docg3, loff_t from) 753 + { 754 + u8 buf[DOC_LAYOUT_WEAR_SIZE]; 755 + int ret, plane1_erase_count, plane2_erase_count; 756 + int block0, block1, page, ofs; 757 + 758 + doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf); 759 + if (from % DOC_LAYOUT_PAGE_SIZE) 760 + return -EINVAL; 761 + calc_block_sector(from, &block0, &block1, &page, &ofs); 762 + if (block1 > docg3->max_block) 763 + return -EINVAL; 764 + 765 + ret = doc_reset_seq(docg3); 766 + if (!ret) 767 + ret = doc_read_page_prepare(docg3, block0, block1, page, 768 + ofs + DOC_LAYOUT_WEAR_OFFSET); 769 + if (!ret) 770 + ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE, 771 + buf, 1); 772 + doc_read_page_finish(docg3); 773 + 774 + if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK)) 775 + return -EIO; 776 + plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8) 777 + | ((u8)(~buf[5]) << 16); 778 + plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8) 779 + | ((u8)(~buf[7]) << 16); 780 + 781 + return max(plane1_erase_count, plane2_erase_count); 782 + } 783 + 784 + /* 785 + * Debug sysfs entries 786 + */ 787 + static int dbg_flashctrl_show(struct seq_file *s, void *p) 788 + { 789 + struct docg3 *docg3 = (struct docg3 *)s->private; 790 + 791 + int pos = 0; 792 + u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 793 + 794 + pos += seq_printf(s, 795 + "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n", 796 + fctrl, 797 + fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-", 798 + fctrl & DOC_CTRL_CE ? "active" : "inactive", 799 + fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-", 800 + fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-", 801 + fctrl & DOC_CTRL_FLASHREADY ? 
"ready" : "not ready"); 802 + return pos; 803 + } 804 + DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show); 805 + 806 + static int dbg_asicmode_show(struct seq_file *s, void *p) 807 + { 808 + struct docg3 *docg3 = (struct docg3 *)s->private; 809 + 810 + int pos = 0; 811 + int pctrl = doc_register_readb(docg3, DOC_ASICMODE); 812 + int mode = pctrl & 0x03; 813 + 814 + pos += seq_printf(s, 815 + "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", 816 + pctrl, 817 + pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0, 818 + pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0, 819 + pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0, 820 + pctrl & DOC_ASICMODE_MDWREN ? 1 : 0, 821 + pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0, 822 + mode >> 1, mode & 0x1); 823 + 824 + switch (mode) { 825 + case DOC_ASICMODE_RESET: 826 + pos += seq_printf(s, "reset"); 827 + break; 828 + case DOC_ASICMODE_NORMAL: 829 + pos += seq_printf(s, "normal"); 830 + break; 831 + case DOC_ASICMODE_POWERDOWN: 832 + pos += seq_printf(s, "powerdown"); 833 + break; 834 + } 835 + pos += seq_printf(s, ")\n"); 836 + return pos; 837 + } 838 + DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show); 839 + 840 + static int dbg_device_id_show(struct seq_file *s, void *p) 841 + { 842 + struct docg3 *docg3 = (struct docg3 *)s->private; 843 + int pos = 0; 844 + int id = doc_register_readb(docg3, DOC_DEVICESELECT); 845 + 846 + pos += seq_printf(s, "DeviceId = %d\n", id); 847 + return pos; 848 + } 849 + DEBUGFS_RO_ATTR(device_id, dbg_device_id_show); 850 + 851 + static int dbg_protection_show(struct seq_file *s, void *p) 852 + { 853 + struct docg3 *docg3 = (struct docg3 *)s->private; 854 + int pos = 0; 855 + int protect = doc_register_readb(docg3, DOC_PROTECTION); 856 + int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 857 + int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW); 858 + int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH); 859 + int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 860 + int 
dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW); 861 + int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH); 862 + 863 + pos += seq_printf(s, "Protection = 0x%02x (", 864 + protect); 865 + if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK) 866 + pos += seq_printf(s, "FOUNDRY_OTP_LOCK,"); 867 + if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK) 868 + pos += seq_printf(s, "CUSTOMER_OTP_LOCK,"); 869 + if (protect & DOC_PROTECT_LOCK_INPUT) 870 + pos += seq_printf(s, "LOCK_INPUT,"); 871 + if (protect & DOC_PROTECT_STICKY_LOCK) 872 + pos += seq_printf(s, "STICKY_LOCK,"); 873 + if (protect & DOC_PROTECT_PROTECTION_ENABLED) 874 + pos += seq_printf(s, "PROTECTION ON,"); 875 + if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK) 876 + pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,"); 877 + if (protect & DOC_PROTECT_PROTECTION_ERROR) 878 + pos += seq_printf(s, "PROTECT_ERR,"); 879 + else 880 + pos += seq_printf(s, "NO_PROTECT_ERR"); 881 + pos += seq_printf(s, ")\n"); 882 + 883 + pos += seq_printf(s, "DPS0 = 0x%02x : " 884 + "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, " 885 + "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n", 886 + dps0, dps0_low, dps0_high, 887 + !!(dps0 & DOC_DPS_OTP_PROTECTED), 888 + !!(dps0 & DOC_DPS_READ_PROTECTED), 889 + !!(dps0 & DOC_DPS_WRITE_PROTECTED), 890 + !!(dps0 & DOC_DPS_HW_LOCK_ENABLED), 891 + !!(dps0 & DOC_DPS_KEY_OK)); 892 + pos += seq_printf(s, "DPS1 = 0x%02x : " 893 + "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, " 894 + "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n", 895 + dps1, dps1_low, dps1_high, 896 + !!(dps1 & DOC_DPS_OTP_PROTECTED), 897 + !!(dps1 & DOC_DPS_READ_PROTECTED), 898 + !!(dps1 & DOC_DPS_WRITE_PROTECTED), 899 + !!(dps1 & DOC_DPS_HW_LOCK_ENABLED), 900 + !!(dps1 & DOC_DPS_KEY_OK)); 901 + return pos; 902 + } 903 + DEBUGFS_RO_ATTR(protection, dbg_protection_show); 904 + 905 + static int __init doc_dbg_register(struct docg3 *docg3) 906 + { 907 + struct dentry *root, *entry; 908 + 909 + root = debugfs_create_dir("docg3", NULL); 910 + if (!root) 911 + return 
-ENOMEM; 912 + 913 + entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3, 914 + &flashcontrol_fops); 915 + if (entry) 916 + entry = debugfs_create_file("asic_mode", S_IRUSR, root, 917 + docg3, &asic_mode_fops); 918 + if (entry) 919 + entry = debugfs_create_file("device_id", S_IRUSR, root, 920 + docg3, &device_id_fops); 921 + if (entry) 922 + entry = debugfs_create_file("protection", S_IRUSR, root, 923 + docg3, &protection_fops); 924 + if (entry) { 925 + docg3->debugfs_root = root; 926 + return 0; 927 + } else { 928 + debugfs_remove_recursive(root); 929 + return -ENOMEM; 930 + } 931 + } 932 + 933 + static void __exit doc_dbg_unregister(struct docg3 *docg3) 934 + { 935 + debugfs_remove_recursive(docg3->debugfs_root); 936 + } 937 + 938 + /** 939 + * doc_set_driver_info - Fill the mtd_info structure and docg3 structure 940 + * @chip_id: The chip ID of the supported chip 941 + * @mtd: The structure to fill 942 + */ 943 + static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) 944 + { 945 + struct docg3 *docg3 = mtd->priv; 946 + int cfg; 947 + 948 + cfg = doc_register_readb(docg3, DOC_CONFIGURATION); 949 + docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0); 950 + 951 + switch (chip_id) { 952 + case DOC_CHIPID_G3: 953 + mtd->name = "DiskOnChip G3"; 954 + docg3->max_block = 2047; 955 + break; 956 + } 957 + mtd->type = MTD_NANDFLASH; 958 + /* 959 + * Once write methods are added, the correct flags will be set. 
960 + * mtd->flags = MTD_CAP_NANDFLASH; 961 + */ 962 + mtd->flags = MTD_CAP_ROM; 963 + mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE; 964 + mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; 965 + mtd->writesize = DOC_LAYOUT_PAGE_SIZE; 966 + mtd->oobsize = DOC_LAYOUT_OOB_SIZE; 967 + mtd->owner = THIS_MODULE; 968 + mtd->erase = NULL; 969 + mtd->point = NULL; 970 + mtd->unpoint = NULL; 971 + mtd->read = doc_read; 972 + mtd->write = NULL; 973 + mtd->read_oob = doc_read_oob; 974 + mtd->write_oob = NULL; 975 + mtd->sync = NULL; 976 + mtd->block_isbad = doc_block_isbad; 977 + } 978 + 979 + /** 980 + * doc_probe - Probe the IO space for a DiskOnChip G3 chip 981 + * @pdev: platform device 982 + * 983 + * Probes for a G3 chip at the specified IO space in the platform data 984 + * ressources. 985 + * 986 + * Returns 0 on success, -ENOMEM, -ENXIO on error 987 + */ 988 + static int __init docg3_probe(struct platform_device *pdev) 989 + { 990 + struct device *dev = &pdev->dev; 991 + struct docg3 *docg3; 992 + struct mtd_info *mtd; 993 + struct resource *ress; 994 + int ret, bbt_nbpages; 995 + u16 chip_id, chip_id_inv; 996 + 997 + ret = -ENOMEM; 998 + docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL); 999 + if (!docg3) 1000 + goto nomem1; 1001 + mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); 1002 + if (!mtd) 1003 + goto nomem2; 1004 + mtd->priv = docg3; 1005 + 1006 + ret = -ENXIO; 1007 + ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1008 + if (!ress) { 1009 + dev_err(dev, "No I/O memory resource defined\n"); 1010 + goto noress; 1011 + } 1012 + docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE); 1013 + 1014 + docg3->dev = &pdev->dev; 1015 + docg3->device_id = 0; 1016 + doc_set_device_id(docg3, docg3->device_id); 1017 + doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); 1018 + doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL); 1019 + 1020 + chip_id = doc_register_readw(docg3, DOC_CHIPID); 1021 + chip_id_inv = doc_register_readw(docg3, 
DOC_CHIPID_INV); 1022 + 1023 + ret = -ENODEV; 1024 + if (chip_id != (u16)(~chip_id_inv)) { 1025 + doc_info("No device found at IO addr %p\n", 1026 + (void *)ress->start); 1027 + goto nochipfound; 1028 + } 1029 + 1030 + switch (chip_id) { 1031 + case DOC_CHIPID_G3: 1032 + doc_info("Found a G3 DiskOnChip at addr %p\n", 1033 + (void *)ress->start); 1034 + break; 1035 + default: 1036 + doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); 1037 + goto nochipfound; 1038 + } 1039 + 1040 + doc_set_driver_info(chip_id, mtd); 1041 + platform_set_drvdata(pdev, mtd); 1042 + 1043 + ret = -ENOMEM; 1044 + bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1, 1045 + 8 * DOC_LAYOUT_PAGE_SIZE); 1046 + docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL); 1047 + if (!docg3->bbt) 1048 + goto nochipfound; 1049 + doc_reload_bbt(docg3); 1050 + 1051 + ret = mtd_device_parse_register(mtd, part_probes, 1052 + NULL, NULL, 0); 1053 + if (ret) 1054 + goto register_error; 1055 + 1056 + doc_dbg_register(docg3); 1057 + return 0; 1058 + 1059 + register_error: 1060 + kfree(docg3->bbt); 1061 + nochipfound: 1062 + iounmap(docg3->base); 1063 + noress: 1064 + kfree(mtd); 1065 + nomem2: 1066 + kfree(docg3); 1067 + nomem1: 1068 + return ret; 1069 + } 1070 + 1071 + /** 1072 + * docg3_release - Release the driver 1073 + * @pdev: the platform device 1074 + * 1075 + * Returns 0 1076 + */ 1077 + static int __exit docg3_release(struct platform_device *pdev) 1078 + { 1079 + struct mtd_info *mtd = platform_get_drvdata(pdev); 1080 + struct docg3 *docg3 = mtd->priv; 1081 + 1082 + doc_dbg_unregister(docg3); 1083 + mtd_device_unregister(mtd); 1084 + iounmap(docg3->base); 1085 + kfree(docg3->bbt); 1086 + kfree(docg3); 1087 + kfree(mtd); 1088 + return 0; 1089 + } 1090 + 1091 + static struct platform_driver g3_driver = { 1092 + .driver = { 1093 + .name = "docg3", 1094 + .owner = THIS_MODULE, 1095 + }, 1096 + .remove = __exit_p(docg3_release), 1097 + }; 1098 + 1099 + static int __init 
docg3_init(void) 1100 + { 1101 + return platform_driver_probe(&g3_driver, docg3_probe); 1102 + } 1103 + module_init(docg3_init); 1104 + 1105 + 1106 + static void __exit docg3_exit(void) 1107 + { 1108 + platform_driver_unregister(&g3_driver); 1109 + } 1110 + module_exit(docg3_exit); 1111 + 1112 + MODULE_LICENSE("GPL"); 1113 + MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); 1114 + MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
+297
drivers/mtd/devices/docg3.h
··· 1 + /* 2 + * Handles the M-Systems DiskOnChip G3 chip 3 + * 4 + * Copyright (C) 2011 Robert Jarzmik 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 + * 20 + */ 21 + 22 + #ifndef _MTD_DOCG3_H 23 + #define _MTD_DOCG3_H 24 + 25 + /* 26 + * Flash memory areas : 27 + * - 0x0000 .. 0x07ff : IPL 28 + * - 0x0800 .. 0x0fff : Data area 29 + * - 0x1000 .. 0x17ff : Registers 30 + * - 0x1800 .. 
0x1fff : Unknown 31 + */ 32 + #define DOC_IOSPACE_IPL 0x0000 33 + #define DOC_IOSPACE_DATA 0x0800 34 + #define DOC_IOSPACE_SIZE 0x2000 35 + 36 + /* 37 + * DOC G3 layout and adressing scheme 38 + * A page address for the block "b", plane "P" and page "p": 39 + * address = [bbbb bPpp pppp] 40 + */ 41 + 42 + #define DOC_ADDR_PAGE_MASK 0x3f 43 + #define DOC_ADDR_BLOCK_SHIFT 6 44 + #define DOC_LAYOUT_NBPLANES 2 45 + #define DOC_LAYOUT_PAGES_PER_BLOCK 64 46 + #define DOC_LAYOUT_PAGE_SIZE 512 47 + #define DOC_LAYOUT_OOB_SIZE 16 48 + #define DOC_LAYOUT_WEAR_SIZE 8 49 + #define DOC_LAYOUT_PAGE_OOB_SIZE \ 50 + (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE) 51 + #define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2) 52 + #define DOC_LAYOUT_BLOCK_SIZE \ 53 + (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE) 54 + #define DOC_ECC_BCH_SIZE 7 55 + #define DOC_ECC_BCH_COVERED_BYTES \ 56 + (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \ 57 + DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ) 58 + 59 + /* 60 + * Blocks distribution 61 + */ 62 + #define DOC_LAYOUT_BLOCK_BBT 0 63 + #define DOC_LAYOUT_BLOCK_OTP 0 64 + #define DOC_LAYOUT_BLOCK_FIRST_DATA 6 65 + 66 + #define DOC_LAYOUT_PAGE_BBT 4 67 + 68 + /* 69 + * Extra page OOB (16 bytes wide) layout 70 + */ 71 + #define DOC_LAYOUT_OOB_PAGEINFO_OFS 0 72 + #define DOC_LAYOUT_OOB_HAMMING_OFS 7 73 + #define DOC_LAYOUT_OOB_BCH_OFS 8 74 + #define DOC_LAYOUT_OOB_UNUSED_OFS 15 75 + #define DOC_LAYOUT_OOB_PAGEINFO_SZ 7 76 + #define DOC_LAYOUT_OOB_HAMMING_SZ 1 77 + #define DOC_LAYOUT_OOB_BCH_SZ 7 78 + #define DOC_LAYOUT_OOB_UNUSED_SZ 1 79 + 80 + 81 + #define DOC_CHIPID_G3 0x200 82 + #define DOC_ERASE_MARK 0xaa 83 + /* 84 + * Flash registers 85 + */ 86 + #define DOC_CHIPID 0x1000 87 + #define DOC_TEST 0x1004 88 + #define DOC_BUSLOCK 0x1006 89 + #define DOC_ENDIANCONTROL 0x1008 90 + #define DOC_DEVICESELECT 0x100a 91 + #define DOC_ASICMODE 0x100c 92 + #define DOC_CONFIGURATION 0x100e 93 + #define DOC_INTERRUPTCONTROL 0x1010 94 + 
#define DOC_READADDRESS 0x101a 95 + #define DOC_DATAEND 0x101e 96 + #define DOC_INTERRUPTSTATUS 0x1020 97 + 98 + #define DOC_FLASHSEQUENCE 0x1032 99 + #define DOC_FLASHCOMMAND 0x1034 100 + #define DOC_FLASHADDRESS 0x1036 101 + #define DOC_FLASHCONTROL 0x1038 102 + #define DOC_NOP 0x103e 103 + 104 + #define DOC_ECCCONF0 0x1040 105 + #define DOC_ECCCONF1 0x1042 106 + #define DOC_ECCPRESET 0x1044 107 + #define DOC_HAMMINGPARITY 0x1046 108 + #define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1)) 109 + 110 + #define DOC_PROTECTION 0x1056 111 + #define DOC_DPS0_ADDRLOW 0x1060 112 + #define DOC_DPS0_ADDRHIGH 0x1062 113 + #define DOC_DPS1_ADDRLOW 0x1064 114 + #define DOC_DPS1_ADDRHIGH 0x1066 115 + #define DOC_DPS0_STATUS 0x106c 116 + #define DOC_DPS1_STATUS 0x106e 117 + 118 + #define DOC_ASICMODECONFIRM 0x1072 119 + #define DOC_CHIPID_INV 0x1074 120 + 121 + /* 122 + * Flash sequences 123 + * A sequence is preset before one or more commands are input to the chip. 124 + */ 125 + #define DOC_SEQ_RESET 0x00 126 + #define DOC_SEQ_PAGE_SIZE_532 0x03 127 + #define DOC_SEQ_SET_MODE 0x09 128 + #define DOC_SEQ_READ 0x12 129 + #define DOC_SEQ_SET_PLANE1 0x0e 130 + #define DOC_SEQ_SET_PLANE2 0x10 131 + #define DOC_SEQ_PAGE_SETUP 0x1d 132 + 133 + /* 134 + * Flash commands 135 + */ 136 + #define DOC_CMD_READ_PLANE1 0x00 137 + #define DOC_CMD_SET_ADDR_READ 0x05 138 + #define DOC_CMD_READ_ALL_PLANES 0x30 139 + #define DOC_CMD_READ_PLANE2 0x50 140 + #define DOC_CMD_READ_FLASH 0xe0 141 + #define DOC_CMD_PAGE_SIZE_532 0x3c 142 + 143 + #define DOC_CMD_PROG_BLOCK_ADDR 0x60 144 + #define DOC_CMD_PROG_CYCLE1 0x80 145 + #define DOC_CMD_PROG_CYCLE2 0x10 146 + #define DOC_CMD_ERASECYCLE2 0xd0 147 + 148 + #define DOC_CMD_RELIABLE_MODE 0x22 149 + #define DOC_CMD_FAST_MODE 0xa2 150 + 151 + #define DOC_CMD_RESET 0xff 152 + 153 + /* 154 + * Flash register : DOC_FLASHCONTROL 155 + */ 156 + #define DOC_CTRL_VIOLATION 0x20 157 + #define DOC_CTRL_CE 0x10 158 + #define DOC_CTRL_UNKNOWN_BITS 0x08 159 + #define 
DOC_CTRL_PROTECTION_ERROR 0x04 160 + #define DOC_CTRL_SEQUENCE_ERROR 0x02 161 + #define DOC_CTRL_FLASHREADY 0x01 162 + 163 + /* 164 + * Flash register : DOC_ASICMODE 165 + */ 166 + #define DOC_ASICMODE_RESET 0x00 167 + #define DOC_ASICMODE_NORMAL 0x01 168 + #define DOC_ASICMODE_POWERDOWN 0x02 169 + #define DOC_ASICMODE_MDWREN 0x04 170 + #define DOC_ASICMODE_BDETCT_RESET 0x08 171 + #define DOC_ASICMODE_RSTIN_RESET 0x10 172 + #define DOC_ASICMODE_RAM_WE 0x20 173 + 174 + /* 175 + * Flash register : DOC_ECCCONF0 176 + */ 177 + #define DOC_ECCCONF0_READ_MODE 0x8000 178 + #define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000 179 + #define DOC_ECCCONF0_HAMMING_ENABLE 0x1000 180 + #define DOC_ECCCONF0_BCH_ENABLE 0x0800 181 + #define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff 182 + 183 + /* 184 + * Flash register : DOC_ECCCONF1 185 + */ 186 + #define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80 187 + #define DOC_ECCCONF1_UNKOWN1 0x40 188 + #define DOC_ECCCONF1_UNKOWN2 0x20 189 + #define DOC_ECCCONF1_UNKOWN3 0x10 190 + #define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f 191 + 192 + /* 193 + * Flash register : DOC_PROTECTION 194 + */ 195 + #define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01 196 + #define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02 197 + #define DOC_PROTECT_LOCK_INPUT 0x04 198 + #define DOC_PROTECT_STICKY_LOCK 0x08 199 + #define DOC_PROTECT_PROTECTION_ENABLED 0x10 200 + #define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20 201 + #define DOC_PROTECT_PROTECTION_ERROR 0x80 202 + 203 + /* 204 + * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS 205 + */ 206 + #define DOC_DPS_OTP_PROTECTED 0x01 207 + #define DOC_DPS_READ_PROTECTED 0x02 208 + #define DOC_DPS_WRITE_PROTECTED 0x04 209 + #define DOC_DPS_HW_LOCK_ENABLED 0x08 210 + #define DOC_DPS_KEY_OK 0x80 211 + 212 + /* 213 + * Flash register : DOC_CONFIGURATION 214 + */ 215 + #define DOC_CONF_IF_CFG 0x80 216 + #define DOC_CONF_MAX_ID_MASK 0x30 217 + #define DOC_CONF_VCCQ_3V 0x01 218 + 219 + /* 220 + * Flash register : DOC_READADDRESS 221 + */ 222 + #define DOC_READADDR_INC 
0x8000 223 + #define DOC_READADDR_ONE_BYTE 0x4000 224 + #define DOC_READADDR_ADDR_MASK 0x1fff 225 + 226 + /** 227 + * struct docg3 - DiskOnChip driver private data 228 + * @dev: the device currently under control 229 + * @base: mapped IO space 230 + * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) 231 + * @if_cfg: if true, reads are on 16bits, else reads are on 8bits 232 + * @bbt: bad block table cache 233 + * @debugfs_root: debugfs root node 234 + */ 235 + struct docg3 { 236 + struct device *dev; 237 + void __iomem *base; 238 + unsigned int device_id:4; 239 + unsigned int if_cfg:1; 240 + int max_block; 241 + u8 *bbt; 242 + struct dentry *debugfs_root; 243 + }; 244 + 245 + #define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg) 246 + #define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg) 247 + #define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg) 248 + #define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg) 249 + 250 + #define DEBUGFS_RO_ATTR(name, show_fct) \ 251 + static int name##_open(struct inode *inode, struct file *file) \ 252 + { return single_open(file, show_fct, inode->i_private); } \ 253 + static const struct file_operations name##_fops = { \ 254 + .owner = THIS_MODULE, \ 255 + .open = name##_open, \ 256 + .llseek = seq_lseek, \ 257 + .read = seq_read, \ 258 + .release = single_release \ 259 + }; 260 + #endif 261 + 262 + /* 263 + * Trace events part 264 + */ 265 + #undef TRACE_SYSTEM 266 + #define TRACE_SYSTEM docg3 267 + 268 + #if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ) 269 + #define _MTD_DOCG3_TRACE 270 + 271 + #include <linux/tracepoint.h> 272 + 273 + TRACE_EVENT(docg3_io, 274 + TP_PROTO(int op, int width, u16 reg, int val), 275 + TP_ARGS(op, width, reg, val), 276 + TP_STRUCT__entry( 277 + __field(int, op) 278 + __field(unsigned char, width) 279 + __field(u16, reg) 280 + __field(int, val)), 281 + TP_fast_assign( 282 + __entry->op = op; 283 + __entry->width = width; 284 + 
__entry->reg = reg; 285 + __entry->val = val;), 286 + TP_printk("docg3: %s%02d reg=%04x, val=%04x", 287 + __entry->op ? "write" : "read", __entry->width, 288 + __entry->reg, __entry->val) 289 + ); 290 + #endif 291 + 292 + /* This part must be outside protection */ 293 + #undef TRACE_INCLUDE_PATH 294 + #undef TRACE_INCLUDE_FILE 295 + #define TRACE_INCLUDE_PATH . 296 + #define TRACE_INCLUDE_FILE docg3 297 + #include <trace/define_trace.h>
-5
drivers/mtd/devices/docprobe.c
··· 50 50 #include <linux/mtd/nand.h> 51 51 #include <linux/mtd/doc2000.h> 52 52 53 - /* Where to look for the devices? */ 54 - #ifndef CONFIG_MTD_DOCPROBE_ADDRESS 55 - #define CONFIG_MTD_DOCPROBE_ADDRESS 0 56 - #endif 57 - 58 53 59 54 static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; 60 55 module_param(doc_config_location, ulong, 0);
+1 -17
drivers/mtd/devices/lart.c
··· 34 34 /* debugging */ 35 35 //#define LART_DEBUG 36 36 37 - /* partition support */ 38 - #define HAVE_PARTITIONS 39 - 40 37 #include <linux/kernel.h> 41 38 #include <linux/module.h> 42 39 #include <linux/types.h> ··· 41 44 #include <linux/errno.h> 42 45 #include <linux/string.h> 43 46 #include <linux/mtd/mtd.h> 44 - #ifdef HAVE_PARTITIONS 45 47 #include <linux/mtd/partitions.h> 46 - #endif 47 48 48 49 #ifndef CONFIG_SA1100_LART 49 50 #error This is for LART architecture only ··· 593 598 } 594 599 }; 595 600 596 - #ifdef HAVE_PARTITIONS 597 601 static struct mtd_partition lart_partitions[] = { 598 602 /* blob */ 599 603 { ··· 613 619 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ 614 620 } 615 621 }; 616 - #endif 622 + #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) 617 623 618 624 static int __init lart_flash_init (void) 619 625 { ··· 662 668 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024, 663 669 result,mtd.eraseregions[result].numblocks); 664 670 665 - #ifdef HAVE_PARTITIONS 666 671 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions)); 667 672 668 673 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++) ··· 674 681 result,lart_partitions[result].offset, 675 682 result,lart_partitions[result].size,lart_partitions[result].size / 1024); 676 683 #endif 677 - #endif 678 684 679 - #ifndef HAVE_PARTITIONS 680 - result = mtd_device_register(&mtd, NULL, 0); 681 - #else 682 685 result = mtd_device_register(&mtd, lart_partitions, 683 686 ARRAY_SIZE(lart_partitions)); 684 - #endif 685 687 686 688 return (result); 687 689 } 688 690 689 691 static void __exit lart_flash_exit (void) 690 692 { 691 - #ifndef HAVE_PARTITIONS 692 693 mtd_device_unregister(&mtd); 693 - #else 694 - mtd_device_unregister(&mtd); 695 - #endif 696 694 } 697 695 698 696 module_init (lart_flash_init);
+30 -62
drivers/mtd/devices/m25p80.c
··· 30 30 #include <linux/mtd/cfi.h> 31 31 #include <linux/mtd/mtd.h> 32 32 #include <linux/mtd/partitions.h> 33 + #include <linux/of_platform.h> 33 34 34 35 #include <linux/spi/spi.h> 35 36 #include <linux/spi/flash.h> ··· 89 88 struct spi_device *spi; 90 89 struct mutex lock; 91 90 struct mtd_info mtd; 92 - unsigned partitioned:1; 93 91 u16 page_size; 94 92 u16 addr_width; 95 93 u8 erase_opcode; ··· 209 209 */ 210 210 static int erase_chip(struct m25p *flash) 211 211 { 212 - DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n", 213 - dev_name(&flash->spi->dev), __func__, 214 - (long long)(flash->mtd.size >> 10)); 212 + pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__, 213 + (long long)(flash->mtd.size >> 10)); 215 214 216 215 /* Wait until finished previous write command. */ 217 216 if (wait_till_ready(flash)) ··· 249 250 */ 250 251 static int erase_sector(struct m25p *flash, u32 offset) 251 252 { 252 - DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", 253 - dev_name(&flash->spi->dev), __func__, 254 - flash->mtd.erasesize / 1024, offset); 253 + pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), 254 + __func__, flash->mtd.erasesize / 1024, offset); 255 255 256 256 /* Wait until finished previous write command. 
*/ 257 257 if (wait_till_ready(flash)) ··· 284 286 u32 addr,len; 285 287 uint32_t rem; 286 288 287 - DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n", 288 - dev_name(&flash->spi->dev), __func__, "at", 289 - (long long)instr->addr, (long long)instr->len); 289 + pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev), 290 + __func__, (long long)instr->addr, 291 + (long long)instr->len); 290 292 291 293 /* sanity checks */ 292 294 if (instr->addr + instr->len > flash->mtd.size) ··· 346 348 struct spi_transfer t[2]; 347 349 struct spi_message m; 348 350 349 - DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 350 - dev_name(&flash->spi->dev), __func__, "from", 351 - (u32)from, len); 351 + pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 352 + __func__, (u32)from, len); 352 353 353 354 /* sanity checks */ 354 355 if (!len) ··· 414 417 struct spi_transfer t[2]; 415 418 struct spi_message m; 416 419 417 - DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 418 - dev_name(&flash->spi->dev), __func__, "to", 419 - (u32)to, len); 420 + pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 421 + __func__, (u32)to, len); 420 422 421 423 *retlen = 0; 422 424 ··· 506 510 size_t actual; 507 511 int cmd_sz, ret; 508 512 509 - DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 510 - dev_name(&flash->spi->dev), __func__, "to", 511 - (u32)to, len); 513 + pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 514 + __func__, (u32)to, len); 512 515 513 516 *retlen = 0; 514 517 ··· 656 661 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, 657 662 658 663 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, 664 + { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, 659 665 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, 660 666 661 667 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, ··· 667 671 /* EON -- en25xxx */ 668 672 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 
64, SECT_4K) }, 669 673 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 674 + { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, 670 675 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 671 676 672 677 /* Intel/Numonyx -- xxxs33b */ ··· 785 788 */ 786 789 tmp = spi_write_then_read(spi, &code, 1, id, 5); 787 790 if (tmp < 0) { 788 - DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 789 - dev_name(&spi->dev), tmp); 791 + pr_debug("%s: error %d reading JEDEC ID\n", 792 + dev_name(&spi->dev), tmp); 790 793 return ERR_PTR(tmp); 791 794 } 792 795 jedec = id[0]; ··· 822 825 struct m25p *flash; 823 826 struct flash_info *info; 824 827 unsigned i; 825 - struct mtd_partition *parts = NULL; 826 - int nr_parts = 0; 828 + struct mtd_part_parser_data ppdata; 829 + 830 + #ifdef CONFIG_MTD_OF_PARTS 831 + if (!of_device_is_available(spi->dev.of_node)) 832 + return -ENODEV; 833 + #endif 827 834 828 835 /* Platform data helps sort out which chip type we have, as 829 836 * well as how this board partitions it. 
If we don't have ··· 929 928 if (info->flags & M25P_NO_ERASE) 930 929 flash->mtd.flags |= MTD_NO_ERASE; 931 930 931 + ppdata.of_node = spi->dev.of_node; 932 932 flash->mtd.dev.parent = &spi->dev; 933 933 flash->page_size = info->page_size; 934 934 ··· 947 945 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, 948 946 (long long)flash->mtd.size >> 10); 949 947 950 - DEBUG(MTD_DEBUG_LEVEL2, 951 - "mtd .name = %s, .size = 0x%llx (%lldMiB) " 948 + pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " 952 949 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 953 950 flash->mtd.name, 954 951 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), ··· 956 955 957 956 if (flash->mtd.numeraseregions) 958 957 for (i = 0; i < flash->mtd.numeraseregions; i++) 959 - DEBUG(MTD_DEBUG_LEVEL2, 960 - "mtd.eraseregions[%d] = { .offset = 0x%llx, " 958 + pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " 961 959 ".erasesize = 0x%.8x (%uKiB), " 962 960 ".numblocks = %d }\n", 963 961 i, (long long)flash->mtd.eraseregions[i].offset, ··· 968 968 /* partitions should match sector boundaries; and it may be good to 969 969 * use readonly partitions for writeprotected sectors (BP2..BP0). 
970 970 */ 971 - if (mtd_has_cmdlinepart()) { 972 - static const char *part_probes[] 973 - = { "cmdlinepart", NULL, }; 974 - 975 - nr_parts = parse_mtd_partitions(&flash->mtd, 976 - part_probes, &parts, 0); 977 - } 978 - 979 - if (nr_parts <= 0 && data && data->parts) { 980 - parts = data->parts; 981 - nr_parts = data->nr_parts; 982 - } 983 - 984 - #ifdef CONFIG_MTD_OF_PARTS 985 - if (nr_parts <= 0 && spi->dev.of_node) { 986 - nr_parts = of_mtd_parse_partitions(&spi->dev, 987 - spi->dev.of_node, &parts); 988 - } 989 - #endif 990 - 991 - if (nr_parts > 0) { 992 - for (i = 0; i < nr_parts; i++) { 993 - DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 994 - "{.name = %s, .offset = 0x%llx, " 995 - ".size = 0x%llx (%lldKiB) }\n", 996 - i, parts[i].name, 997 - (long long)parts[i].offset, 998 - (long long)parts[i].size, 999 - (long long)(parts[i].size >> 10)); 1000 - } 1001 - flash->partitioned = 1; 1002 - } 1003 - 1004 - return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ? 1005 - -ENODEV : 0; 971 + return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, 972 + data ? data->parts : NULL, 973 + data ? data->nr_parts : 0); 1006 974 } 1007 975 1008 976
+42 -49
drivers/mtd/devices/mtd_dataflash.c
··· 17 17 #include <linux/mutex.h> 18 18 #include <linux/err.h> 19 19 #include <linux/math64.h> 20 + #include <linux/of.h> 21 + #include <linux/of_device.h> 20 22 21 23 #include <linux/spi/spi.h> 22 24 #include <linux/spi/flash.h> 23 25 24 26 #include <linux/mtd/mtd.h> 25 27 #include <linux/mtd/partitions.h> 26 - 27 28 28 29 /* 29 30 * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in ··· 99 98 struct mtd_info mtd; 100 99 }; 101 100 101 + #ifdef CONFIG_OF 102 + static const struct of_device_id dataflash_dt_ids[] = { 103 + { .compatible = "atmel,at45", }, 104 + { .compatible = "atmel,dataflash", }, 105 + { /* sentinel */ } 106 + }; 107 + #else 108 + #define dataflash_dt_ids NULL 109 + #endif 110 + 102 111 /* ......................................................................... */ 103 112 104 113 /* ··· 133 122 for (;;) { 134 123 status = dataflash_status(spi); 135 124 if (status < 0) { 136 - DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n", 125 + pr_debug("%s: status %d?\n", 137 126 dev_name(&spi->dev), status); 138 127 status = 0; 139 128 } ··· 160 149 uint8_t *command; 161 150 uint32_t rem; 162 151 163 - DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n", 152 + pr_debug("%s: erase addr=0x%llx len 0x%llx\n", 164 153 dev_name(&spi->dev), (long long)instr->addr, 165 154 (long long)instr->len); 166 155 ··· 198 187 command[2] = (uint8_t)(pageaddr >> 8); 199 188 command[3] = 0; 200 189 201 - DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n", 190 + pr_debug("ERASE %s: (%x) %x %x %x [%i]\n", 202 191 do_block ? 
"block" : "page", 203 192 command[0], command[1], command[2], command[3], 204 193 pageaddr); ··· 249 238 uint8_t *command; 250 239 int status; 251 240 252 - DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", 253 - dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len)); 241 + pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), 242 + (unsigned)from, (unsigned)(from + len)); 254 243 255 244 *retlen = 0; 256 245 ··· 266 255 267 256 command = priv->command; 268 257 269 - DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n", 258 + pr_debug("READ: (%x) %x %x %x\n", 270 259 command[0], command[1], command[2], command[3]); 271 260 272 261 spi_message_init(&msg); ··· 298 287 *retlen = msg.actual_length - 8; 299 288 status = 0; 300 289 } else 301 - DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n", 290 + pr_debug("%s: read %x..%x --> %d\n", 302 291 dev_name(&priv->spi->dev), 303 292 (unsigned)from, (unsigned)(from + len), 304 293 status); ··· 325 314 int status = -EINVAL; 326 315 uint8_t *command; 327 316 328 - DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", 317 + pr_debug("%s: write 0x%x..0x%x\n", 329 318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); 330 319 331 320 *retlen = 0; ··· 351 340 352 341 mutex_lock(&priv->lock); 353 342 while (remaining > 0) { 354 - DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n", 343 + pr_debug("write @ %i:%i len=%i\n", 355 344 pageaddr, offset, writelen); 356 345 357 346 /* REVISIT: ··· 379 368 command[2] = (addr & 0x0000FF00) >> 8; 380 369 command[3] = 0; 381 370 382 - DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n", 371 + pr_debug("TRANSFER: (%x) %x %x %x\n", 383 372 command[0], command[1], command[2], command[3]); 384 373 385 374 status = spi_sync(spi, &msg); 386 375 if (status < 0) 387 - DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n", 376 + pr_debug("%s: xfer %u -> %d\n", 388 377 dev_name(&spi->dev), addr, status); 389 378 390 379 (void) dataflash_waitready(priv->spi); ··· 397 386 command[2] = (addr & 
0x0000FF00) >> 8; 398 387 command[3] = (addr & 0x000000FF); 399 388 400 - DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n", 389 + pr_debug("PROGRAM: (%x) %x %x %x\n", 401 390 command[0], command[1], command[2], command[3]); 402 391 403 392 x[1].tx_buf = writebuf; ··· 406 395 status = spi_sync(spi, &msg); 407 396 spi_transfer_del(x + 1); 408 397 if (status < 0) 409 - DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n", 398 + pr_debug("%s: pgm %u/%u -> %d\n", 410 399 dev_name(&spi->dev), addr, writelen, status); 411 400 412 401 (void) dataflash_waitready(priv->spi); ··· 421 410 command[2] = (addr & 0x0000FF00) >> 8; 422 411 command[3] = 0; 423 412 424 - DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n", 413 + pr_debug("COMPARE: (%x) %x %x %x\n", 425 414 command[0], command[1], command[2], command[3]); 426 415 427 416 status = spi_sync(spi, &msg); 428 417 if (status < 0) 429 - DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n", 418 + pr_debug("%s: compare %u -> %d\n", 430 419 dev_name(&spi->dev), addr, status); 431 420 432 421 status = dataflash_waitready(priv->spi); ··· 645 634 { 646 635 struct dataflash *priv; 647 636 struct mtd_info *device; 637 + struct mtd_part_parser_data ppdata; 648 638 struct flash_platform_data *pdata = spi->dev.platform_data; 649 639 char *otp_tag = ""; 650 640 int err = 0; 651 - struct mtd_partition *parts; 652 - int nr_parts = 0; 653 641 654 642 priv = kzalloc(sizeof *priv, GFP_KERNEL); 655 643 if (!priv) ··· 687 677 pagesize, otp_tag); 688 678 dev_set_drvdata(&spi->dev, priv); 689 679 690 - if (mtd_has_cmdlinepart()) { 691 - static const char *part_probes[] = { "cmdlinepart", NULL, }; 680 + ppdata.of_node = spi->dev.of_node; 681 + err = mtd_device_parse_register(device, NULL, &ppdata, 682 + pdata ? pdata->parts : NULL, 683 + pdata ? 
pdata->nr_parts : 0); 692 684 693 - nr_parts = parse_mtd_partitions(device, part_probes, &parts, 694 - 0); 695 - } 696 - 697 - if (nr_parts <= 0 && pdata && pdata->parts) { 698 - parts = pdata->parts; 699 - nr_parts = pdata->nr_parts; 700 - } 701 - 702 - if (nr_parts > 0) { 703 - priv->partitioned = 1; 704 - err = mtd_device_register(device, parts, nr_parts); 705 - goto out; 706 - } 707 - 708 - if (mtd_device_register(device, NULL, 0) == 1) 709 - err = -ENODEV; 710 - 711 - out: 712 685 if (!err) 713 686 return 0; 714 687 ··· 780 787 */ 781 788 tmp = spi_write_then_read(spi, &code, 1, id, 3); 782 789 if (tmp < 0) { 783 - DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 790 + pr_debug("%s: error %d reading JEDEC ID\n", 784 791 dev_name(&spi->dev), tmp); 785 792 return ERR_PTR(tmp); 786 793 } ··· 797 804 tmp < ARRAY_SIZE(dataflash_data); 798 805 tmp++, info++) { 799 806 if (info->jedec_id == jedec) { 800 - DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n", 807 + pr_debug("%s: OTP, sector protect%s\n", 801 808 dev_name(&spi->dev), 802 809 (info->flags & SUP_POW2PS) 803 810 ? ", binary pagesize" : "" ··· 805 812 if (info->flags & SUP_POW2PS) { 806 813 status = dataflash_status(spi); 807 814 if (status < 0) { 808 - DEBUG(MTD_DEBUG_LEVEL1, 809 - "%s: status error %d\n", 815 + pr_debug("%s: status error %d\n", 810 816 dev_name(&spi->dev), status); 811 817 return ERR_PTR(status); 812 818 } ··· 870 878 */ 871 879 status = dataflash_status(spi); 872 880 if (status <= 0 || status == 0xff) { 873 - DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", 881 + pr_debug("%s: status error %d\n", 874 882 dev_name(&spi->dev), status); 875 883 if (status == 0 || status == 0xff) 876 884 status = -ENODEV; ··· 906 914 break; 907 915 /* obsolete AT45DB1282 not (yet?) 
supported */ 908 916 default: 909 - DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n", 910 - dev_name(&spi->dev), status & 0x3c); 917 + pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev), 918 + status & 0x3c); 911 919 status = -ENODEV; 912 920 } 913 921 914 922 if (status < 0) 915 - DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n", 916 - dev_name(&spi->dev), status); 923 + pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev), 924 + status); 917 925 918 926 return status; 919 927 } ··· 923 931 struct dataflash *flash = dev_get_drvdata(&spi->dev); 924 932 int status; 925 933 926 - DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); 934 + pr_debug("%s: remove\n", dev_name(&spi->dev)); 927 935 928 936 status = mtd_device_unregister(&flash->mtd); 929 937 if (status == 0) { ··· 938 946 .name = "mtd_dataflash", 939 947 .bus = &spi_bus_type, 940 948 .owner = THIS_MODULE, 949 + .of_match_table = dataflash_dt_ids, 941 950 }, 942 951 943 952 .probe = dataflash_probe,
+5 -37
drivers/mtd/devices/sst25l.c
··· 52 52 struct spi_device *spi; 53 53 struct mutex lock; 54 54 struct mtd_info mtd; 55 - 56 - int partitioned; 57 55 }; 58 56 59 57 struct flash_info { ··· 379 381 struct sst25l_flash *flash; 380 382 struct flash_platform_data *data; 381 383 int ret, i; 382 - struct mtd_partition *parts = NULL; 383 - int nr_parts = 0; 384 384 385 385 flash_info = sst25l_match_device(spi); 386 386 if (!flash_info) ··· 410 414 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 411 415 (long long)flash->mtd.size >> 10); 412 416 413 - DEBUG(MTD_DEBUG_LEVEL2, 414 - "mtd .name = %s, .size = 0x%llx (%lldMiB) " 417 + pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " 415 418 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 416 419 flash->mtd.name, 417 420 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), ··· 418 423 flash->mtd.numeraseregions); 419 424 420 425 421 - if (mtd_has_cmdlinepart()) { 422 - static const char *part_probes[] = {"cmdlinepart", NULL}; 423 - 424 - nr_parts = parse_mtd_partitions(&flash->mtd, 425 - part_probes, 426 - &parts, 0); 427 - } 428 - 429 - if (nr_parts <= 0 && data && data->parts) { 430 - parts = data->parts; 431 - nr_parts = data->nr_parts; 432 - } 433 - 434 - if (nr_parts > 0) { 435 - for (i = 0; i < nr_parts; i++) { 436 - DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 437 - "{.name = %s, .offset = 0x%llx, " 438 - ".size = 0x%llx (%lldKiB) }\n", 439 - i, parts[i].name, 440 - (long long)parts[i].offset, 441 - (long long)parts[i].size, 442 - (long long)(parts[i].size >> 10)); 443 - } 444 - 445 - flash->partitioned = 1; 446 - return mtd_device_register(&flash->mtd, parts, 447 - nr_parts); 448 - } 449 - 450 - ret = mtd_device_register(&flash->mtd, NULL, 0); 451 - if (ret == 1) { 426 + ret = mtd_device_parse_register(&flash->mtd, NULL, 0, 427 + data ? data->parts : NULL, 428 + data ? data->nr_parts : 0); 429 + if (ret) { 452 430 kfree(flash); 453 431 dev_set_drvdata(&spi->dev, NULL); 454 432 return -ENODEV;
+20 -20
drivers/mtd/ftl.c
··· 339 339 struct erase_info *erase; 340 340 341 341 xfer = &part->XferInfo[xfernum]; 342 - DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); 342 + pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); 343 343 xfer->state = XFER_ERASING; 344 344 345 345 /* Is there a free erase slot? Always in MTD. */ ··· 415 415 xfer = &part->XferInfo[i]; 416 416 xfer->state = XFER_FAILED; 417 417 418 - DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); 418 + pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); 419 419 420 420 /* Write the transfer unit header */ 421 421 header = part->header; ··· 476 476 477 477 eun = &part->EUNInfo[srcunit]; 478 478 xfer = &part->XferInfo[xferunit]; 479 - DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n", 479 + pr_debug("ftl_cs: copying block 0x%x to 0x%x\n", 480 480 eun->Offset, xfer->Offset); 481 481 482 482 ··· 598 598 unit with the fewest erases, and usually pick the data unit with 599 599 the most deleted blocks. But with a small probability, pick the 600 600 oldest data unit instead. This means that we generally postpone 601 - the next reclaimation as long as possible, but shuffle static 601 + the next reclamation as long as possible, but shuffle static 602 602 stuff around a bit for wear leveling. 
603 603 604 604 ======================================================================*/ ··· 609 609 uint32_t best; 610 610 int queued, ret; 611 611 612 - DEBUG(0, "ftl_cs: reclaiming space...\n"); 613 - DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits); 612 + pr_debug("ftl_cs: reclaiming space...\n"); 613 + pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits); 614 614 /* Pick the least erased transfer unit */ 615 615 best = 0xffffffff; xfer = 0xffff; 616 616 do { ··· 618 618 for (i = 0; i < part->header.NumTransferUnits; i++) { 619 619 int n=0; 620 620 if (part->XferInfo[i].state == XFER_UNKNOWN) { 621 - DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i); 621 + pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i); 622 622 n=1; 623 623 erase_xfer(part, i); 624 624 } 625 625 if (part->XferInfo[i].state == XFER_ERASING) { 626 - DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i); 626 + pr_debug("XferInfo[%d].state == XFER_ERASING\n",i); 627 627 n=1; 628 628 queued = 1; 629 629 } 630 630 else if (part->XferInfo[i].state == XFER_ERASED) { 631 - DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i); 631 + pr_debug("XferInfo[%d].state == XFER_ERASED\n",i); 632 632 n=1; 633 633 prepare_xfer(part, i); 634 634 } 635 635 if (part->XferInfo[i].state == XFER_PREPARED) { 636 - DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i); 636 + pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i); 637 637 n=1; 638 638 if (part->XferInfo[i].EraseCount <= best) { 639 639 best = part->XferInfo[i].EraseCount; ··· 641 641 } 642 642 } 643 643 if (!n) 644 - DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); 644 + pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); 645 645 646 646 } 647 647 if (xfer == 0xffff) { 648 648 if (queued) { 649 - DEBUG(1, "ftl_cs: waiting for transfer " 649 + pr_debug("ftl_cs: waiting for transfer " 650 650 "unit to be prepared...\n"); 651 651 if (part->mbd.mtd->sync) 652 652 part->mbd.mtd->sync(part->mbd.mtd); ··· 656 
656 printk(KERN_NOTICE "ftl_cs: reclaim failed: no " 657 657 "suitable transfer units!\n"); 658 658 else 659 - DEBUG(1, "ftl_cs: reclaim failed: no " 659 + pr_debug("ftl_cs: reclaim failed: no " 660 660 "suitable transfer units!\n"); 661 661 662 662 return -EIO; ··· 666 666 667 667 eun = 0; 668 668 if ((jiffies % shuffle_freq) == 0) { 669 - DEBUG(1, "ftl_cs: recycling freshest block...\n"); 669 + pr_debug("ftl_cs: recycling freshest block...\n"); 670 670 best = 0xffffffff; 671 671 for (i = 0; i < part->DataUnits; i++) 672 672 if (part->EUNInfo[i].EraseCount <= best) { ··· 686 686 printk(KERN_NOTICE "ftl_cs: reclaim failed: " 687 687 "no free blocks!\n"); 688 688 else 689 - DEBUG(1,"ftl_cs: reclaim failed: " 689 + pr_debug("ftl_cs: reclaim failed: " 690 690 "no free blocks!\n"); 691 691 692 692 return -EIO; ··· 771 771 printk(KERN_NOTICE "ftl_cs: bad free list!\n"); 772 772 return 0; 773 773 } 774 - DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun); 774 + pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun); 775 775 return blk; 776 776 777 777 } /* find_free */ ··· 791 791 int ret; 792 792 size_t offset, retlen; 793 793 794 - DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", 794 + pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", 795 795 part, sector, nblocks); 796 796 if (!(part->state & FTL_FORMATTED)) { 797 797 printk(KERN_NOTICE "ftl_cs: bad partition\n"); ··· 840 840 int ret; 841 841 size_t retlen, offset; 842 842 843 - DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", 843 + pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", 844 844 part, log_addr, virt_addr); 845 845 bsize = 1 << part->header.EraseUnitSize; 846 846 eun = log_addr / bsize; ··· 905 905 int ret; 906 906 size_t retlen, offset; 907 907 908 - DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n", 908 + pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n", 909 909 part, sector, nblocks); 910 910 if (!(part->state & FTL_FORMATTED)) { 911 911 printk(KERN_NOTICE "ftl_cs: bad partition\n"); 
··· 1011 1011 partition_t *part = (void *)dev; 1012 1012 uint32_t bsize = 1 << part->header.EraseUnitSize; 1013 1013 1014 - DEBUG(1, "FTL erase sector %ld for %d sectors\n", 1014 + pr_debug("FTL erase sector %ld for %d sectors\n", 1015 1015 sector, nr_sects); 1016 1016 1017 1017 while (nr_sects) {
+32 -37
drivers/mtd/inftlcore.c
··· 63 63 return; 64 64 } 65 65 66 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name); 66 + pr_debug("INFTL: add_mtd for %s\n", mtd->name); 67 67 68 68 inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); 69 69 70 - if (!inftl) { 71 - printk(KERN_WARNING "INFTL: Out of memory for data structures\n"); 70 + if (!inftl) 72 71 return; 73 - } 74 72 75 73 inftl->mbd.mtd = mtd; 76 74 inftl->mbd.devnum = -1; ··· 131 133 { 132 134 struct INFTLrecord *inftl = (void *)dev; 133 135 134 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum); 136 + pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum); 135 137 136 138 del_mtd_blktrans_dev(dev); 137 139 ··· 152 154 struct mtd_oob_ops ops; 153 155 int res; 154 156 155 - ops.mode = MTD_OOB_PLACE; 157 + ops.mode = MTD_OPS_PLACE_OOB; 156 158 ops.ooboffs = offs & (mtd->writesize - 1); 157 159 ops.ooblen = len; 158 160 ops.oobbuf = buf; ··· 172 174 struct mtd_oob_ops ops; 173 175 int res; 174 176 175 - ops.mode = MTD_OOB_PLACE; 177 + ops.mode = MTD_OPS_PLACE_OOB; 176 178 ops.ooboffs = offs & (mtd->writesize - 1); 177 179 ops.ooblen = len; 178 180 ops.oobbuf = buf; ··· 192 194 struct mtd_oob_ops ops; 193 195 int res; 194 196 195 - ops.mode = MTD_OOB_PLACE; 197 + ops.mode = MTD_OPS_PLACE_OOB; 196 198 ops.ooboffs = offs; 197 199 ops.ooblen = mtd->oobsize; 198 200 ops.oobbuf = oob; ··· 213 215 u16 pot = inftl->LastFreeEUN; 214 216 int silly = inftl->nb_blocks; 215 217 216 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p," 217 - "desperate=%d)\n", inftl, desperate); 218 + pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n", 219 + inftl, desperate); 218 220 219 221 /* 220 222 * Normally, we force a fold to happen before we run out of free 221 223 * blocks completely. 
222 224 */ 223 225 if (!desperate && inftl->numfreeEUNs < 2) { 224 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " 225 - "EUNs (%d)\n", inftl->numfreeEUNs); 226 + pr_debug("INFTL: there are too few free EUNs (%d)\n", 227 + inftl->numfreeEUNs); 226 228 return BLOCK_NIL; 227 229 } 228 230 ··· 257 259 struct inftl_oob oob; 258 260 size_t retlen; 259 261 260 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," 261 - "pending=%d)\n", inftl, thisVUC, pendingblock); 262 + pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n", 263 + inftl, thisVUC, pendingblock); 262 264 263 265 memset(BlockMap, 0xff, sizeof(BlockMap)); 264 266 memset(BlockDeleted, 0, sizeof(BlockDeleted)); ··· 321 323 * Chain, and the Erase Unit into which we are supposed to be copying. 322 324 * Go for it. 323 325 */ 324 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n", 325 - thisVUC, targetEUN); 326 + pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN); 326 327 327 328 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { 328 329 unsigned char movebuf[SECTORSIZE]; ··· 346 349 ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) + 347 350 (block * SECTORSIZE), SECTORSIZE, &retlen, 348 351 movebuf); 349 - if (ret < 0 && ret != -EUCLEAN) { 352 + if (ret < 0 && !mtd_is_bitflip(ret)) { 350 353 ret = mtd->read(mtd, 351 354 (inftl->EraseSize * BlockMap[block]) + 352 355 (block * SECTORSIZE), SECTORSIZE, 353 356 &retlen, movebuf); 354 357 if (ret != -EIO) 355 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went " 356 - "away on retry?\n"); 358 + pr_debug("INFTL: error went away on retry?\n"); 357 359 } 358 360 memset(&oob, 0xff, sizeof(struct inftl_oob)); 359 361 oob.b.Status = oob.b.Status1 = SECTOR_USED; ··· 368 372 * is important, by doing oldest first if we crash/reboot then it 369 373 * it is relatively simple to clean up the mess). 
370 374 */ 371 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n", 372 - thisVUC); 375 + pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC); 373 376 374 377 for (;;) { 375 378 /* Find oldest unit in chain. */ ··· 416 421 u16 ChainLength = 0, thislen; 417 422 u16 chain, EUN; 418 423 419 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p," 424 + pr_debug("INFTL: INFTL_makefreeblock(inftl=%p," 420 425 "pending=%d)\n", inftl, pendingblock); 421 426 422 427 for (chain = 0; chain < inftl->nb_blocks; chain++) { ··· 479 484 size_t retlen; 480 485 int silly, silly2 = 3; 481 486 482 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p," 483 - "block=%d)\n", inftl, block); 487 + pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n", 488 + inftl, block); 484 489 485 490 do { 486 491 /* ··· 496 501 blockofs, 8, &retlen, (char *)&bci); 497 502 498 503 status = bci.Status | bci.Status1; 499 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in " 500 - "EUN %d is %x\n", block , writeEUN, status); 504 + pr_debug("INFTL: status of block %d in EUN %d is %x\n", 505 + block , writeEUN, status); 501 506 502 507 switch(status) { 503 508 case SECTOR_FREE: ··· 550 555 * Hopefully we free something, lets try again. 551 556 * This time we are desperate... 
552 557 */ 553 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 " 554 - "to find free EUN to accommodate write to " 555 - "VUC %d\n", thisVUC); 558 + pr_debug("INFTL: using desperate==1 to find free EUN " 559 + "to accommodate write to VUC %d\n", 560 + thisVUC); 556 561 writeEUN = INFTL_findfreeblock(inftl, 1); 557 562 if (writeEUN == BLOCK_NIL) { 558 563 /* ··· 642 647 struct inftl_bci bci; 643 648 size_t retlen; 644 649 645 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p," 650 + pr_debug("INFTL: INFTL_trydeletechain(inftl=%p," 646 651 "thisVUC=%d)\n", inftl, thisVUC); 647 652 648 653 memset(BlockUsed, 0, sizeof(BlockUsed)); ··· 706 711 * For each block in the chain free it and make it available 707 712 * for future use. Erase from the oldest unit first. 708 713 */ 709 - DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC); 714 + pr_debug("INFTL: deleting empty VUC %d\n", thisVUC); 710 715 711 716 for (;;) { 712 717 u16 *prevEUN = &inftl->VUtable[thisVUC]; ··· 714 719 715 720 /* If the chain is all gone already, we're done */ 716 721 if (thisEUN == BLOCK_NIL) { 717 - DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); 722 + pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); 718 723 return; 719 724 } 720 725 ··· 726 731 thisEUN = *prevEUN; 727 732 } 728 733 729 - DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n", 734 + pr_debug("Deleting EUN %d from VUC %d\n", 730 735 thisEUN, thisVUC); 731 736 732 737 if (INFTL_formatblock(inftl, thisEUN) < 0) { ··· 762 767 size_t retlen; 763 768 struct inftl_bci bci; 764 769 765 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p," 770 + pr_debug("INFTL: INFTL_deleteblock(inftl=%p," 766 771 "block=%d)\n", inftl, block); 767 772 768 773 while (thisEUN < inftl->nb_blocks) { ··· 821 826 struct inftl_oob oob; 822 827 char *p, *pend; 823 828 824 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld," 829 + 
pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld," 825 830 "buffer=%p)\n", inftl, block, buffer); 826 831 827 832 /* Is block all zero? */ ··· 871 876 struct inftl_bci bci; 872 877 size_t retlen; 873 878 874 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld," 879 + pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld," 875 880 "buffer=%p)\n", inftl, block, buffer); 876 881 877 882 while (thisEUN < inftl->nb_blocks) { ··· 917 922 int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer); 918 923 919 924 /* Handle corrected bit flips gracefully */ 920 - if (ret < 0 && ret != -EUCLEAN) 925 + if (ret < 0 && !mtd_is_bitflip(ret)) 921 926 return -EIO; 922 927 } 923 928 return 0;
+50 -66
drivers/mtd/inftlmount.c
··· 53 53 struct INFTLPartition *ip; 54 54 size_t retlen; 55 55 56 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); 56 + pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl); 57 57 58 58 /* 59 59 * Assume logical EraseSize == physical erasesize for starting the ··· 139 139 mh->FormatFlags = le32_to_cpu(mh->FormatFlags); 140 140 mh->PercentUsed = le32_to_cpu(mh->PercentUsed); 141 141 142 - #ifdef CONFIG_MTD_DEBUG_VERBOSE 143 - if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { 144 - printk("INFTL: Media Header ->\n" 145 - " bootRecordID = %s\n" 146 - " NoOfBootImageBlocks = %d\n" 147 - " NoOfBinaryPartitions = %d\n" 148 - " NoOfBDTLPartitions = %d\n" 149 - " BlockMultiplerBits = %d\n" 150 - " FormatFlgs = %d\n" 151 - " OsakVersion = 0x%x\n" 152 - " PercentUsed = %d\n", 153 - mh->bootRecordID, mh->NoOfBootImageBlocks, 154 - mh->NoOfBinaryPartitions, 155 - mh->NoOfBDTLPartitions, 156 - mh->BlockMultiplierBits, mh->FormatFlags, 157 - mh->OsakVersion, mh->PercentUsed); 158 - } 159 - #endif 142 + pr_debug("INFTL: Media Header ->\n" 143 + " bootRecordID = %s\n" 144 + " NoOfBootImageBlocks = %d\n" 145 + " NoOfBinaryPartitions = %d\n" 146 + " NoOfBDTLPartitions = %d\n" 147 + " BlockMultiplerBits = %d\n" 148 + " FormatFlgs = %d\n" 149 + " OsakVersion = 0x%x\n" 150 + " PercentUsed = %d\n", 151 + mh->bootRecordID, mh->NoOfBootImageBlocks, 152 + mh->NoOfBinaryPartitions, 153 + mh->NoOfBDTLPartitions, 154 + mh->BlockMultiplierBits, mh->FormatFlags, 155 + mh->OsakVersion, mh->PercentUsed); 160 156 161 157 if (mh->NoOfBDTLPartitions == 0) { 162 158 printk(KERN_WARNING "INFTL: Media Header sanity check " ··· 196 200 ip->spareUnits = le32_to_cpu(ip->spareUnits); 197 201 ip->Reserved0 = le32_to_cpu(ip->Reserved0); 198 202 199 - #ifdef CONFIG_MTD_DEBUG_VERBOSE 200 - if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { 201 - printk(" PARTITION[%d] ->\n" 202 - " virtualUnits = %d\n" 203 - " firstUnit = %d\n" 204 - " lastUnit = %d\n" 205 - " flags = 0x%x\n" 206 - " spareUnits = %d\n", 207 - 
i, ip->virtualUnits, ip->firstUnit, 208 - ip->lastUnit, ip->flags, 209 - ip->spareUnits); 210 - } 211 - #endif 203 + pr_debug(" PARTITION[%d] ->\n" 204 + " virtualUnits = %d\n" 205 + " firstUnit = %d\n" 206 + " lastUnit = %d\n" 207 + " flags = 0x%x\n" 208 + " spareUnits = %d\n", 209 + i, ip->virtualUnits, ip->firstUnit, 210 + ip->lastUnit, ip->flags, 211 + ip->spareUnits); 212 212 213 213 if (ip->Reserved0 != ip->firstUnit) { 214 214 struct erase_info *instr = &inftl->instr; ··· 367 375 * 368 376 * Return: 0 when succeed, -1 on error. 369 377 * 370 - * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? 378 + * ToDo: 1. Is it necessary to check_free_sector after erasing ?? 371 379 */ 372 380 int INFTL_formatblock(struct INFTLrecord *inftl, int block) 373 381 { ··· 377 385 struct mtd_info *mtd = inftl->mbd.mtd; 378 386 int physblock; 379 387 380 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p," 381 - "block=%d)\n", inftl, block); 388 + pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block); 382 389 383 390 memset(instr, 0, sizeof(struct erase_info)); 384 391 ··· 467 476 { 468 477 int i; 469 478 470 - printk("-------------------------------------------" 479 + pr_debug("-------------------------------------------" 471 480 "----------------------------------\n"); 472 481 473 - printk("VUtable[%d] ->", s->nb_blocks); 482 + pr_debug("VUtable[%d] ->", s->nb_blocks); 474 483 for (i = 0; i < s->nb_blocks; i++) { 475 484 if ((i % 8) == 0) 476 - printk("\n%04x: ", i); 477 - printk("%04x ", s->VUtable[i]); 485 + pr_debug("\n%04x: ", i); 486 + pr_debug("%04x ", s->VUtable[i]); 478 487 } 479 488 480 - printk("\n-------------------------------------------" 489 + pr_debug("\n-------------------------------------------" 481 490 "----------------------------------\n"); 482 491 483 - printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); 492 + pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); 484 493 
for (i = 0; i <= s->lastEUN; i++) { 485 494 if ((i % 8) == 0) 486 - printk("\n%04x: ", i); 487 - printk("%04x ", s->PUtable[i]); 495 + pr_debug("\n%04x: ", i); 496 + pr_debug("%04x ", s->PUtable[i]); 488 497 } 489 498 490 - printk("\n-------------------------------------------" 499 + pr_debug("\n-------------------------------------------" 491 500 "----------------------------------\n"); 492 501 493 - printk("INFTL ->\n" 502 + pr_debug("INFTL ->\n" 494 503 " EraseSize = %d\n" 495 504 " h/s/c = %d/%d/%d\n" 496 505 " numvunits = %d\n" ··· 504 513 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, 505 514 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); 506 515 507 - printk("\n-------------------------------------------" 516 + pr_debug("\n-------------------------------------------" 508 517 "----------------------------------\n"); 509 518 } 510 519 ··· 512 521 { 513 522 int logical, block, i; 514 523 515 - printk("-------------------------------------------" 524 + pr_debug("-------------------------------------------" 516 525 "----------------------------------\n"); 517 526 518 - printk("INFTL Virtual Unit Chains:\n"); 527 + pr_debug("INFTL Virtual Unit Chains:\n"); 519 528 for (logical = 0; logical < s->nb_blocks; logical++) { 520 529 block = s->VUtable[logical]; 521 530 if (block > s->nb_blocks) 522 531 continue; 523 - printk(" LOGICAL %d --> %d ", logical, block); 532 + pr_debug(" LOGICAL %d --> %d ", logical, block); 524 533 for (i = 0; i < s->nb_blocks; i++) { 525 534 if (s->PUtable[block] == BLOCK_NIL) 526 535 break; 527 536 block = s->PUtable[block]; 528 - printk("%d ", block); 537 + pr_debug("%d ", block); 529 538 } 530 - printk("\n"); 539 + pr_debug("\n"); 531 540 } 532 541 533 - printk("-------------------------------------------" 542 + pr_debug("-------------------------------------------" 534 543 "----------------------------------\n"); 535 544 } 536 545 ··· 546 555 int i; 547 556 u8 *ANACtable, ANAC; 548 557 549 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: 
INFTL_mount(inftl=%p)\n", s); 558 + pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s); 550 559 551 560 /* Search for INFTL MediaHeader and Spare INFTL Media Header */ 552 561 if (find_boot_record(s) < 0) { ··· 576 585 * NOTEXPLORED state. Then at the end we will try to format it and 577 586 * mark it as free. 578 587 */ 579 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n"); 588 + pr_debug("INFTL: pass 1, explore each unit\n"); 580 589 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { 581 590 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) 582 591 continue; ··· 708 717 logical_block = BLOCK_NIL; 709 718 } 710 719 711 - #ifdef CONFIG_MTD_DEBUG_VERBOSE 712 - if (CONFIG_MTD_DEBUG_VERBOSE >= 2) 713 - INFTL_dumptables(s); 714 - #endif 720 + INFTL_dumptables(s); 715 721 716 722 /* 717 723 * Second pass, check for infinite loops in chains. These are 718 724 * possible because we don't update the previous pointers when 719 725 * we fold chains. No big deal, just fix them up in PUtable. 720 726 */ 721 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n"); 727 + pr_debug("INFTL: pass 2, validate virtual chains\n"); 722 728 for (logical_block = 0; logical_block < s->numvunits; logical_block++) { 723 729 block = s->VUtable[logical_block]; 724 730 last_block = BLOCK_NIL; ··· 760 772 } 761 773 } 762 774 763 - #ifdef CONFIG_MTD_DEBUG_VERBOSE 764 - if (CONFIG_MTD_DEBUG_VERBOSE >= 2) 765 - INFTL_dumptables(s); 766 - if (CONFIG_MTD_DEBUG_VERBOSE >= 2) 767 - INFTL_dumpVUchains(s); 768 - #endif 775 + INFTL_dumptables(s); 776 + INFTL_dumpVUchains(s); 769 777 770 778 /* 771 779 * Third pass, format unreferenced blocks and init free block count. 
··· 769 785 s->numfreeEUNs = 0; 770 786 s->LastFreeEUN = BLOCK_NIL; 771 787 772 - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n"); 788 + pr_debug("INFTL: pass 3, format unused blocks\n"); 773 789 for (block = s->firstEUN; block <= s->lastEUN; block++) { 774 790 if (s->PUtable[block] == BLOCK_NOTEXPLORED) { 775 791 printk("INFTL: unreferenced block %d, formatting it\n",
-26
drivers/mtd/maps/Kconfig
··· 41 41 are mapped on your particular target board. Refer to the 42 42 memory map which should hopefully be in the documentation for 43 43 your board. 44 - Ignore this option if you use run-time physmap configuration 45 - (i.e., run-time calling physmap_configure()). 46 44 47 45 config MTD_PHYSMAP_LEN 48 46 hex "Physical length of flash mapping" ··· 53 55 than the total amount of flash present. Refer to the memory 54 56 map which should hopefully be in the documentation for your 55 57 board. 56 - Ignore this option if you use run-time physmap configuration 57 - (i.e., run-time calling physmap_configure()). 58 58 59 59 config MTD_PHYSMAP_BANKWIDTH 60 60 int "Bank width in octets" ··· 63 67 in octets. For example, if you have a data bus width of 32 64 68 bits, you would set the bus width octet value to 4. This is 65 69 used internally by the CFI drivers. 66 - Ignore this option if you use run-time physmap configuration 67 - (i.e., run-time calling physmap_configure()). 68 70 69 71 config MTD_PHYSMAP_OF 70 72 tristate "Flash device in physical memory map based on OF description" ··· 254 260 config MTD_LANTIQ 255 261 tristate "Lantiq SoC NOR support" 256 262 depends on LANTIQ 257 - select MTD_PARTITIONS 258 263 help 259 264 Support for NOR flash attached to the Lantiq SoC's External Bus Unit. 260 265 ··· 332 339 This enables access to the flash chips on the Hitachi SolutionEngine and 333 340 similar boards. Say 'Y' if you are building a kernel for such a board. 334 341 335 - config MTD_ARM_INTEGRATOR 336 - tristate "CFI Flash device mapped on ARM Integrator/P720T" 337 - depends on ARM && MTD_CFI 338 - 339 342 config MTD_CDB89712 340 343 tristate "Cirrus CDB89712 evaluation board mappings" 341 344 depends on MTD_CFI && ARCH_CDB89712 ··· 387 398 This enables access to the NV-RAM on autronix autcpu12 board. 388 399 If you have such a board, say 'Y'. 
389 400 390 - config MTD_EDB7312 391 - tristate "CFI Flash device mapped on EDB7312" 392 - depends on ARCH_EDB7312 && MTD_CFI 393 - help 394 - This enables access to the CFI Flash on the Cogent EDB7312 board. 395 - If you have such a board, say 'Y' here. 396 - 397 401 config MTD_IMPA7 398 402 tristate "JEDEC Flash device mapped on impA7" 399 403 depends on ARM && MTD_JEDECPROBE 400 404 help 401 405 This enables access to the NOR Flash on the impA7 board of 402 406 implementa GmbH. If you have such a board, say 'Y' here. 403 - 404 - config MTD_CEIVA 405 - tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame" 406 - depends on MTD_JEDECPROBE && ARCH_CEIVA 407 - help 408 - This enables access to the flash chips on the Ceiva/Polaroid 409 - PhotoMax Digital Picture Frame. 410 - If you have such a device, say 'Y'. 411 407 412 408 config MTD_H720X 413 409 tristate "Hynix evaluation board mappings"
-2
drivers/mtd/maps/Makefile
··· 19 19 obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o 20 20 obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o 21 21 obj-$(CONFIG_MTD_MBX860) += mbx860.o 22 - obj-$(CONFIG_MTD_CEIVA) += ceiva.o 23 22 obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 24 23 obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 25 24 obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o ··· 39 40 obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o 40 41 obj-$(CONFIG_MTD_PCI) += pci.o 41 42 obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 42 - obj-$(CONFIG_MTD_EDB7312) += edb7312.o 43 43 obj-$(CONFIG_MTD_IMPA7) += impa7.o 44 44 obj-$(CONFIG_MTD_FORTUNET) += fortunet.o 45 45 obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
+2 -14
drivers/mtd/maps/bfin-async-flash.c
··· 41 41 uint32_t flash_ambctl0, flash_ambctl1; 42 42 uint32_t save_ambctl0, save_ambctl1; 43 43 unsigned long irq_flags; 44 - struct mtd_partition *parts; 45 44 }; 46 45 47 46 static void switch_to_flash(struct async_state *state) ··· 164 165 return -ENXIO; 165 166 } 166 167 167 - ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 168 - if (ret > 0) { 169 - pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); 170 - mtd_device_register(state->mtd, pdata->parts, ret); 171 - state->parts = pdata->parts; 172 - } else if (pdata->nr_parts) { 173 - pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); 174 - mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts); 175 - } else { 176 - pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n"); 177 - mtd_device_register(state->mtd, NULL, 0); 178 - } 168 + mtd_device_parse_register(state->mtd, part_probe_types, 0, 169 + pdata->parts, pdata->nr_parts); 179 170 180 171 platform_set_drvdata(pdev, state); 181 172 ··· 177 188 struct async_state *state = platform_get_drvdata(pdev); 178 189 gpio_free(state->enet_flash_pin); 179 190 mtd_device_unregister(state->mtd); 180 - kfree(state->parts); 181 191 map_destroy(state->mtd); 182 192 kfree(state); 183 193 return 0;
-341
drivers/mtd/maps/ceiva.c
··· 1 - /* 2 - * Ceiva flash memory driver. 3 - * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net> 4 - * 5 - * Note: this driver supports jedec compatible devices. Modification 6 - * for CFI compatible devices should be straight forward: change 7 - * jedec_probe to cfi_probe. 8 - * 9 - * Based on: sa1100-flash.c, which has the following copyright: 10 - * Flash memory access on SA11x0 based devices 11 - * 12 - * (C) 2000 Nicolas Pitre <nico@fluxnic.net> 13 - * 14 - */ 15 - 16 - #include <linux/module.h> 17 - #include <linux/types.h> 18 - #include <linux/ioport.h> 19 - #include <linux/kernel.h> 20 - #include <linux/init.h> 21 - #include <linux/slab.h> 22 - 23 - #include <linux/mtd/mtd.h> 24 - #include <linux/mtd/map.h> 25 - #include <linux/mtd/partitions.h> 26 - #include <linux/mtd/concat.h> 27 - 28 - #include <mach/hardware.h> 29 - #include <asm/mach-types.h> 30 - #include <asm/io.h> 31 - #include <asm/sizes.h> 32 - 33 - /* 34 - * This isn't complete yet, so... 35 - */ 36 - #define CONFIG_MTD_CEIVA_STATICMAP 37 - 38 - #ifdef CONFIG_MTD_CEIVA_STATICMAP 39 - /* 40 - * See include/linux/mtd/partitions.h for definition of the mtd_partition 41 - * structure. 42 - * 43 - * Please note: 44 - * 1. The flash size given should be the largest flash size that can 45 - * be accommodated. 46 - * 47 - * 2. The bus width must defined in clps_setup_flash. 48 - * 49 - * The MTD layer will detect flash chip aliasing and reduce the size of 50 - * the map accordingly. 
51 - * 52 - */ 53 - 54 - #ifdef CONFIG_ARCH_CEIVA 55 - /* Flash / Partition sizing */ 56 - /* For the 28F8003, we use the block mapping to calcuate the sizes */ 57 - #define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128)) 58 - #define BOOT_PARTITION_SIZE_KiB (16) 59 - #define PARAMS_PARTITION_SIZE_KiB (8) 60 - #define KERNEL_PARTITION_SIZE_KiB (4*128) 61 - /* Use both remaining portion of first flash, and all of second flash */ 62 - #define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128) 63 - 64 - static struct mtd_partition ceiva_partitions[] = { 65 - { 66 - .name = "Ceiva BOOT partition", 67 - .size = BOOT_PARTITION_SIZE_KiB*1024, 68 - .offset = 0, 69 - 70 - },{ 71 - .name = "Ceiva parameters partition", 72 - .size = PARAMS_PARTITION_SIZE_KiB*1024, 73 - .offset = (16 + 8) * 1024, 74 - },{ 75 - .name = "Ceiva kernel partition", 76 - .size = (KERNEL_PARTITION_SIZE_KiB)*1024, 77 - .offset = 0x20000, 78 - 79 - },{ 80 - .name = "Ceiva root filesystem partition", 81 - .offset = MTDPART_OFS_APPEND, 82 - .size = (ROOT_PARTITION_SIZE_KiB)*1024, 83 - } 84 - }; 85 - #endif 86 - 87 - static int __init clps_static_partitions(struct mtd_partition **parts) 88 - { 89 - int nb_parts = 0; 90 - 91 - #ifdef CONFIG_ARCH_CEIVA 92 - if (machine_is_ceiva()) { 93 - *parts = ceiva_partitions; 94 - nb_parts = ARRAY_SIZE(ceiva_partitions); 95 - } 96 - #endif 97 - return nb_parts; 98 - } 99 - #endif 100 - 101 - struct clps_info { 102 - unsigned long base; 103 - unsigned long size; 104 - int width; 105 - void *vbase; 106 - struct map_info *map; 107 - struct mtd_info *mtd; 108 - struct resource *res; 109 - }; 110 - 111 - #define NR_SUBMTD 4 112 - 113 - static struct clps_info info[NR_SUBMTD]; 114 - 115 - static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd) 116 - { 117 - struct mtd_info *subdev[nr]; 118 - struct map_info *maps; 119 - int i, found = 0, ret = 0; 120 - 121 - /* 122 - * Allocate the map_info structs in one go. 
123 - */ 124 - maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL); 125 - if (!maps) 126 - return -ENOMEM; 127 - /* 128 - * Claim and then map the memory regions. 129 - */ 130 - for (i = 0; i < nr; i++) { 131 - if (clps[i].base == (unsigned long)-1) 132 - break; 133 - 134 - clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash"); 135 - if (!clps[i].res) { 136 - ret = -EBUSY; 137 - break; 138 - } 139 - 140 - clps[i].map = maps + i; 141 - 142 - clps[i].map->name = "clps flash"; 143 - clps[i].map->phys = clps[i].base; 144 - 145 - clps[i].vbase = ioremap(clps[i].base, clps[i].size); 146 - if (!clps[i].vbase) { 147 - ret = -ENOMEM; 148 - break; 149 - } 150 - 151 - clps[i].map->virt = (void __iomem *)clps[i].vbase; 152 - clps[i].map->bankwidth = clps[i].width; 153 - clps[i].map->size = clps[i].size; 154 - 155 - simple_map_init(&clps[i].map); 156 - 157 - clps[i].mtd = do_map_probe("jedec_probe", clps[i].map); 158 - if (clps[i].mtd == NULL) { 159 - ret = -ENXIO; 160 - break; 161 - } 162 - clps[i].mtd->owner = THIS_MODULE; 163 - subdev[i] = clps[i].mtd; 164 - 165 - printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, " 166 - "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20, 167 - clps[i].width * 8); 168 - found += 1; 169 - } 170 - 171 - /* 172 - * ENXIO is special. It means we didn't find a chip when 173 - * we probed. We need to tear down the mapping, free the 174 - * resource and mark it as such. 175 - */ 176 - if (ret == -ENXIO) { 177 - iounmap(clps[i].vbase); 178 - clps[i].vbase = NULL; 179 - release_resource(clps[i].res); 180 - clps[i].res = NULL; 181 - } 182 - 183 - /* 184 - * If we found one device, don't bother with concat support. 185 - * If we found multiple devices, use concat if we have it 186 - * available, otherwise fail. 187 - */ 188 - if (ret == 0 || ret == -ENXIO) { 189 - if (found == 1) { 190 - *rmtd = subdev[0]; 191 - ret = 0; 192 - } else if (found > 1) { 193 - /* 194 - * We detected multiple devices. 
Concatenate 195 - * them together. 196 - */ 197 - *rmtd = mtd_concat_create(subdev, found, 198 - "clps flash"); 199 - if (*rmtd == NULL) 200 - ret = -ENXIO; 201 - } 202 - } 203 - 204 - /* 205 - * If we failed, clean up. 206 - */ 207 - if (ret) { 208 - do { 209 - if (clps[i].mtd) 210 - map_destroy(clps[i].mtd); 211 - if (clps[i].vbase) 212 - iounmap(clps[i].vbase); 213 - if (clps[i].res) 214 - release_resource(clps[i].res); 215 - } while (i--); 216 - 217 - kfree(maps); 218 - } 219 - 220 - return ret; 221 - } 222 - 223 - static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd) 224 - { 225 - int i; 226 - 227 - mtd_device_unregister(mtd); 228 - 229 - if (mtd != clps[0].mtd) 230 - mtd_concat_destroy(mtd); 231 - 232 - for (i = NR_SUBMTD; i >= 0; i--) { 233 - if (clps[i].mtd) 234 - map_destroy(clps[i].mtd); 235 - if (clps[i].vbase) 236 - iounmap(clps[i].vbase); 237 - if (clps[i].res) 238 - release_resource(clps[i].res); 239 - } 240 - kfree(clps[0].map); 241 - } 242 - 243 - /* 244 - * We define the memory space, size, and width for the flash memory 245 - * space here. 246 - */ 247 - 248 - static int __init clps_setup_flash(void) 249 - { 250 - int nr = 0; 251 - 252 - #ifdef CONFIG_ARCH_CEIVA 253 - if (machine_is_ceiva()) { 254 - info[0].base = CS0_PHYS_BASE; 255 - info[0].size = SZ_32M; 256 - info[0].width = CEIVA_FLASH_WIDTH; 257 - info[1].base = CS1_PHYS_BASE; 258 - info[1].size = SZ_32M; 259 - info[1].width = CEIVA_FLASH_WIDTH; 260 - nr = 2; 261 - } 262 - #endif 263 - return nr; 264 - } 265 - 266 - static struct mtd_partition *parsed_parts; 267 - static const char *probes[] = { "cmdlinepart", "RedBoot", NULL }; 268 - 269 - static void __init clps_locate_partitions(struct mtd_info *mtd) 270 - { 271 - const char *part_type = NULL; 272 - int nr_parts = 0; 273 - do { 274 - /* 275 - * Partition selection stuff. 
276 - */ 277 - nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0); 278 - if (nr_parts > 0) { 279 - part_type = "command line"; 280 - break; 281 - } 282 - #ifdef CONFIG_MTD_CEIVA_STATICMAP 283 - nr_parts = clps_static_partitions(&parsed_parts); 284 - if (nr_parts > 0) { 285 - part_type = "static"; 286 - break; 287 - } 288 - printk("found: %d partitions\n", nr_parts); 289 - #endif 290 - } while (0); 291 - 292 - if (nr_parts == 0) { 293 - printk(KERN_NOTICE "clps flash: no partition info " 294 - "available, registering whole flash\n"); 295 - mtd_device_register(mtd, NULL, 0); 296 - } else { 297 - printk(KERN_NOTICE "clps flash: using %s partition " 298 - "definition\n", part_type); 299 - mtd_device_register(mtd, parsed_parts, nr_parts); 300 - } 301 - 302 - /* Always succeeds. */ 303 - } 304 - 305 - static void __exit clps_destroy_partitions(void) 306 - { 307 - kfree(parsed_parts); 308 - } 309 - 310 - static struct mtd_info *mymtd; 311 - 312 - static int __init clps_mtd_init(void) 313 - { 314 - int ret; 315 - int nr; 316 - 317 - nr = clps_setup_flash(); 318 - if (nr < 0) 319 - return nr; 320 - 321 - ret = clps_setup_mtd(info, nr, &mymtd); 322 - if (ret) 323 - return ret; 324 - 325 - clps_locate_partitions(mymtd); 326 - 327 - return 0; 328 - } 329 - 330 - static void __exit clps_mtd_cleanup(void) 331 - { 332 - clps_destroy_mtd(info, mymtd); 333 - clps_destroy_partitions(); 334 - } 335 - 336 - module_init(clps_mtd_init); 337 - module_exit(clps_mtd_cleanup); 338 - 339 - MODULE_AUTHOR("Rob Scott"); 340 - MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver"); 341 - MODULE_LICENSE("GPL");
+1 -8
drivers/mtd/maps/dc21285.c
··· 145 145 146 146 147 147 /* Partition stuff */ 148 - static struct mtd_partition *dc21285_parts; 149 148 static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 150 149 151 150 static int __init init_dc21285(void) 152 151 { 153 - 154 - int nrparts; 155 - 156 152 /* Determine bankwidth */ 157 153 switch (*CSR_SA110_CNTL & (3<<14)) { 158 154 case SA110_CNTL_ROMWIDTH_8: ··· 196 200 197 201 dc21285_mtd->owner = THIS_MODULE; 198 202 199 - nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0); 200 - mtd_device_register(dc21285_mtd, dc21285_parts, nrparts); 203 + mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0); 201 204 202 205 if(machine_is_ebsa285()) { 203 206 /* ··· 219 224 static void __exit cleanup_dc21285(void) 220 225 { 221 226 mtd_device_unregister(dc21285_mtd); 222 - if (dc21285_parts) 223 - kfree(dc21285_parts); 224 227 map_destroy(dc21285_mtd); 225 228 iounmap(dc21285_map.virt); 226 229 }
-134
drivers/mtd/maps/edb7312.c
··· 1 - /* 2 - * Handle mapping of the NOR flash on Cogent EDB7312 boards 3 - * 4 - * Copyright 2002 SYSGO Real-Time Solutions GmbH 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - 11 - #include <linux/module.h> 12 - #include <linux/types.h> 13 - #include <linux/kernel.h> 14 - #include <linux/init.h> 15 - #include <asm/io.h> 16 - #include <linux/mtd/mtd.h> 17 - #include <linux/mtd/map.h> 18 - #include <linux/mtd/partitions.h> 19 - 20 - #define WINDOW_ADDR 0x00000000 /* physical properties of flash */ 21 - #define WINDOW_SIZE 0x01000000 22 - #define BUSWIDTH 2 23 - #define FLASH_BLOCKSIZE_MAIN 0x20000 24 - #define FLASH_NUMBLOCKS_MAIN 128 25 - /* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */ 26 - #define PROBETYPES { "cfi_probe", NULL } 27 - 28 - #define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */ 29 - #define MTDID "edb7312-nor" /* for mtdparts= partitioning */ 30 - 31 - static struct mtd_info *mymtd; 32 - 33 - struct map_info edb7312nor_map = { 34 - .name = "NOR flash on EDB7312", 35 - .size = WINDOW_SIZE, 36 - .bankwidth = BUSWIDTH, 37 - .phys = WINDOW_ADDR, 38 - }; 39 - 40 - /* 41 - * MTD partitioning stuff 42 - */ 43 - static struct mtd_partition static_partitions[3] = 44 - { 45 - { 46 - .name = "ARMboot", 47 - .size = 0x40000, 48 - .offset = 0 49 - }, 50 - { 51 - .name = "Kernel", 52 - .size = 0x200000, 53 - .offset = 0x40000 54 - }, 55 - { 56 - .name = "RootFS", 57 - .size = 0xDC0000, 58 - .offset = 0x240000 59 - }, 60 - }; 61 - 62 - static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 63 - 64 - static int mtd_parts_nb = 0; 65 - static struct mtd_partition *mtd_parts = 0; 66 - 67 - static int __init init_edb7312nor(void) 68 - { 69 - static const char *rom_probe_types[] = PROBETYPES; 70 - const char **type; 71 - const char *part_type = 0; 72 - 73 - 
printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n", 74 - WINDOW_SIZE, WINDOW_ADDR); 75 - edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); 76 - 77 - if (!edb7312nor_map.virt) { 78 - printk(MSG_PREFIX "failed to ioremap\n"); 79 - return -EIO; 80 - } 81 - 82 - simple_map_init(&edb7312nor_map); 83 - 84 - mymtd = 0; 85 - type = rom_probe_types; 86 - for(; !mymtd && *type; type++) { 87 - mymtd = do_map_probe(*type, &edb7312nor_map); 88 - } 89 - if (mymtd) { 90 - mymtd->owner = THIS_MODULE; 91 - 92 - mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID); 93 - if (mtd_parts_nb > 0) 94 - part_type = "detected"; 95 - 96 - if (mtd_parts_nb == 0) { 97 - mtd_parts = static_partitions; 98 - mtd_parts_nb = ARRAY_SIZE(static_partitions); 99 - part_type = "static"; 100 - } 101 - 102 - if (mtd_parts_nb == 0) 103 - printk(KERN_NOTICE MSG_PREFIX "no partition info available\n"); 104 - else 105 - printk(KERN_NOTICE MSG_PREFIX 106 - "using %s partition definition\n", part_type); 107 - /* Register the whole device first. */ 108 - mtd_device_register(mymtd, NULL, 0); 109 - mtd_device_register(mymtd, mtd_parts, mtd_parts_nb); 110 - return 0; 111 - } 112 - 113 - iounmap((void *)edb7312nor_map.virt); 114 - return -ENXIO; 115 - } 116 - 117 - static void __exit cleanup_edb7312nor(void) 118 - { 119 - if (mymtd) { 120 - mtd_device_unregister(mymtd); 121 - map_destroy(mymtd); 122 - } 123 - if (edb7312nor_map.virt) { 124 - iounmap((void *)edb7312nor_map.virt); 125 - edb7312nor_map.virt = 0; 126 - } 127 - } 128 - 129 - module_init(init_edb7312nor); 130 - module_exit(cleanup_edb7312nor); 131 - 132 - MODULE_LICENSE("GPL"); 133 - MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>"); 134 - MODULE_DESCRIPTION("Generic configurable MTD map driver");
+2 -14
drivers/mtd/maps/gpio-addr-flash.c
··· 187 187 */ 188 188 static int __devinit gpio_flash_probe(struct platform_device *pdev) 189 189 { 190 - int nr_parts; 191 190 size_t i, arr_size; 192 191 struct physmap_flash_data *pdata; 193 192 struct resource *memory; ··· 251 252 return -ENXIO; 252 253 } 253 254 254 - nr_parts = parse_mtd_partitions(state->mtd, part_probe_types, 255 - &pdata->parts, 0); 256 - if (nr_parts > 0) { 257 - pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); 258 - kfree(pdata->parts); 259 - } else if (pdata->nr_parts) { 260 - pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); 261 - nr_parts = pdata->nr_parts; 262 - } else { 263 - pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); 264 - nr_parts = 0; 265 - } 266 255 267 - mtd_device_register(state->mtd, pdata->parts, nr_parts); 256 + mtd_device_parse_register(state->mtd, part_probe_types, 0, 257 + pdata->parts, pdata->nr_parts); 268 258 269 259 return 0; 270 260 }
+2 -21
drivers/mtd/maps/h720x-flash.c
··· 58 58 59 59 #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) 60 60 61 - static int nr_mtd_parts; 62 - static struct mtd_partition *mtd_parts; 63 - static const char *probes[] = { "cmdlinepart", NULL }; 64 - 65 61 /* 66 62 * Initialize FLASH support 67 63 */ 68 64 static int __init h720x_mtd_init(void) 69 65 { 70 - 71 - char *part_type = NULL; 72 - 73 66 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); 74 67 75 68 if (!h720x_map.virt) { ··· 85 92 if (mymtd) { 86 93 mymtd->owner = THIS_MODULE; 87 94 88 - nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0); 89 - if (nr_mtd_parts > 0) 90 - part_type = "command line"; 91 - if (nr_mtd_parts <= 0) { 92 - mtd_parts = h720x_partitions; 93 - nr_mtd_parts = NUM_PARTITIONS; 94 - part_type = "builtin"; 95 - } 96 - printk(KERN_INFO "Using %s partition table\n", part_type); 97 - mtd_device_register(mymtd, mtd_parts, nr_mtd_parts); 95 + mtd_device_parse_register(mymtd, NULL, 0, 96 + h720x_partitions, NUM_PARTITIONS); 98 97 return 0; 99 98 } 100 99 ··· 104 119 mtd_device_unregister(mymtd); 105 120 map_destroy(mymtd); 106 121 } 107 - 108 - /* Free partition info, if commandline partition was used */ 109 - if (mtd_parts && (mtd_parts != h720x_partitions)) 110 - kfree (mtd_parts); 111 122 112 123 if (h720x_map.virt) { 113 124 iounmap((void *)h720x_map.virt);
+4 -24
drivers/mtd/maps/impa7.c
··· 49 49 /* 50 50 * MTD partitioning stuff 51 51 */ 52 - static struct mtd_partition static_partitions[] = 52 + static struct mtd_partition partitions[] = 53 53 { 54 54 { 55 55 .name = "FileSystem", ··· 58 58 }, 59 59 }; 60 60 61 - static int mtd_parts_nb[NUM_FLASHBANKS]; 62 - static struct mtd_partition *mtd_parts[NUM_FLASHBANKS]; 63 - 64 - static const char *probes[] = { "cmdlinepart", NULL }; 65 - 66 61 static int __init init_impa7(void) 67 62 { 68 63 static const char *rom_probe_types[] = PROBETYPES; 69 64 const char **type; 70 - const char *part_type = 0; 71 65 int i; 72 66 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { 73 67 { WINDOW_ADDR0, WINDOW_SIZE0 }, ··· 91 97 if (impa7_mtd[i]) { 92 98 impa7_mtd[i]->owner = THIS_MODULE; 93 99 devicesfound++; 94 - mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i], 95 - probes, 96 - &mtd_parts[i], 97 - 0); 98 - if (mtd_parts_nb[i] > 0) { 99 - part_type = "command line"; 100 - } else { 101 - mtd_parts[i] = static_partitions; 102 - mtd_parts_nb[i] = ARRAY_SIZE(static_partitions); 103 - part_type = "static"; 104 - } 105 - 106 - printk(KERN_NOTICE MSG_PREFIX 107 - "using %s partition definition\n", 108 - part_type); 109 - mtd_device_register(impa7_mtd[i], 110 - mtd_parts[i], mtd_parts_nb[i]); 100 + mtd_device_parse_register(impa7_mtd[i], NULL, 0, 101 + partitions, 102 + ARRAY_SIZE(partitions)); 111 103 } 112 104 else 113 105 iounmap((void *)impa7_map[i].virt);
+1 -6
drivers/mtd/maps/intel_vr_nor.c
··· 44 44 void __iomem *csr_base; 45 45 struct map_info map; 46 46 struct mtd_info *info; 47 - int nr_parts; 48 47 struct pci_dev *dev; 49 48 }; 50 49 ··· 70 71 71 72 static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 72 73 { 73 - struct mtd_partition *parts; 74 - static const char *part_probes[] = { "cmdlinepart", NULL }; 75 - 76 74 /* register the flash bank */ 77 75 /* partition the flash bank */ 78 - p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0); 79 - return mtd_device_register(p->info, parts, p->nr_parts); 76 + return mtd_device_parse_register(p->info, NULL, 0, NULL, 0); 80 77 } 81 78 82 79 static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+1 -10
drivers/mtd/maps/ixp2000.c
··· 38 38 struct ixp2000_flash_info { 39 39 struct mtd_info *mtd; 40 40 struct map_info map; 41 - struct mtd_partition *partitions; 42 41 struct resource *res; 43 42 }; 44 43 ··· 123 124 } 124 125 if (info->map.map_priv_1) 125 126 iounmap((void *) info->map.map_priv_1); 126 - 127 - kfree(info->partitions); 128 127 129 128 if (info->res) { 130 129 release_resource(info->res); ··· 226 229 } 227 230 info->mtd->owner = THIS_MODULE; 228 231 229 - err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); 230 - if (err > 0) { 231 - err = mtd_device_register(info->mtd, info->partitions, err); 232 - if(err) 233 - dev_err(&dev->dev, "Could not parse partitions\n"); 234 - } 235 - 232 + err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); 236 233 if (err) 237 234 goto Error; 238 235
+4 -25
drivers/mtd/maps/ixp4xx.c
··· 145 145 struct ixp4xx_flash_info { 146 146 struct mtd_info *mtd; 147 147 struct map_info map; 148 - struct mtd_partition *partitions; 149 148 struct resource *res; 150 149 }; 151 150 ··· 167 168 if (info->map.virt) 168 169 iounmap(info->map.virt); 169 170 170 - kfree(info->partitions); 171 - 172 171 if (info->res) { 173 172 release_resource(info->res); 174 173 kfree(info->res); ··· 182 185 { 183 186 struct flash_platform_data *plat = dev->dev.platform_data; 184 187 struct ixp4xx_flash_info *info; 185 - const char *part_type = NULL; 186 - int nr_parts = 0; 187 188 int err = -1; 188 189 189 190 if (!plat) ··· 247 252 /* Use the fast version */ 248 253 info->map.write = ixp4xx_write16; 249 254 250 - nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, 251 - dev->resource->start); 252 - if (nr_parts > 0) { 253 - part_type = "dynamic"; 254 - } else { 255 - info->partitions = plat->parts; 256 - nr_parts = plat->nr_parts; 257 - part_type = "static"; 258 - } 259 - if (nr_parts == 0) 260 - printk(KERN_NOTICE "IXP4xx flash: no partition info " 261 - "available, registering whole flash\n"); 262 - else 263 - printk(KERN_NOTICE "IXP4xx flash: using %s partition " 264 - "definition\n", part_type); 265 - 266 - err = mtd_device_register(info->mtd, info->partitions, nr_parts); 267 - if (err) 255 + err = mtd_device_parse_register(info->mtd, probes, dev->resource->start, 256 + plat->parts, plat->nr_parts); 257 + if (err) { 268 258 printk(KERN_ERR "Could not parse partitions\n"); 269 - 270 - if (err) 271 259 goto Error; 260 + } 272 261 273 262 return 0; 274 263
+2 -15
drivers/mtd/maps/lantiq-flash.c
··· 107 107 spin_unlock_irqrestore(&ebu_lock, flags); 108 108 } 109 109 110 - static const char const *part_probe_types[] = { "cmdlinepart", NULL }; 111 - 112 110 static int __init 113 111 ltq_mtd_probe(struct platform_device *pdev) 114 112 { 115 113 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); 116 114 struct ltq_mtd *ltq_mtd; 117 - struct mtd_partition *parts; 118 115 struct resource *res; 119 - int nr_parts = 0; 120 116 struct cfi_private *cfi; 121 117 int err; 122 118 ··· 168 172 cfi->addr_unlock1 ^= 1; 169 173 cfi->addr_unlock2 ^= 1; 170 174 171 - nr_parts = parse_mtd_partitions(ltq_mtd->mtd, 172 - part_probe_types, &parts, 0); 173 - if (nr_parts > 0) { 174 - dev_info(&pdev->dev, 175 - "using %d partitions from cmdline", nr_parts); 176 - } else { 177 - nr_parts = ltq_mtd_data->nr_parts; 178 - parts = ltq_mtd_data->parts; 179 - } 180 - 181 - err = mtd_device_register(ltq_mtd->mtd, parts, nr_parts); 175 + err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0, 176 + ltq_mtd_data->parts, ltq_mtd_data->nr_parts); 182 177 if (err) { 183 178 dev_err(&pdev->dev, "failed to add partitions\n"); 184 179 goto err_destroy;
+2 -22
drivers/mtd/maps/latch-addr-flash.c
··· 33 33 /* cache; could be found out of res */ 34 34 unsigned long win_mask; 35 35 36 - int nr_parts; 37 - struct mtd_partition *parts; 38 - 39 36 spinlock_t lock; 40 37 }; 41 38 ··· 94 97 95 98 static char *rom_probe_types[] = { "cfi_probe", NULL }; 96 99 97 - static char *part_probe_types[] = { "cmdlinepart", NULL }; 98 - 99 100 static int latch_addr_flash_remove(struct platform_device *dev) 100 101 { 101 102 struct latch_addr_flash_info *info; ··· 107 112 latch_addr_data = dev->dev.platform_data; 108 113 109 114 if (info->mtd != NULL) { 110 - if (info->nr_parts) 111 - kfree(info->parts); 112 115 mtd_device_unregister(info->mtd); 113 116 map_destroy(info->mtd); 114 117 } ··· 199 206 } 200 207 info->mtd->owner = THIS_MODULE; 201 208 202 - err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types, 203 - &info->parts, 0); 204 - if (err > 0) { 205 - mtd_device_register(info->mtd, info->parts, err); 206 - return 0; 207 - } 208 - if (latch_addr_data->nr_parts) { 209 - pr_notice("Using latch-addr-flash partition information\n"); 210 - mtd_device_register(info->mtd, 211 - latch_addr_data->parts, 212 - latch_addr_data->nr_parts); 213 - return 0; 214 - } 215 - 216 - mtd_device_register(info->mtd, NULL, 0); 209 + mtd_device_parse_register(info->mtd, NULL, 0, 210 + latch_addr_data->parts, latch_addr_data->nr_parts); 217 211 return 0; 218 212 219 213 iounmap:
+52 -72
drivers/mtd/maps/pcmciamtd.c
··· 22 22 #include <linux/mtd/map.h> 23 23 #include <linux/mtd/mtd.h> 24 24 25 - #ifdef CONFIG_MTD_DEBUG 26 - static int debug = CONFIG_MTD_DEBUG_VERBOSE; 27 - module_param(debug, int, 0); 28 - MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy"); 29 - #undef DEBUG 30 - #define DEBUG(n, format, arg...) \ 31 - if (n <= debug) { \ 32 - printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \ 33 - } 34 - 35 - #else 36 - #undef DEBUG 37 - #define DEBUG(n, arg...) 38 - static const int debug = 0; 39 - #endif 40 - 41 25 #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) 42 26 43 27 #define DRIVER_DESC "PCMCIA Flash memory card driver" ··· 89 105 int ret; 90 106 91 107 if (!pcmcia_dev_present(dev->p_dev)) { 92 - DEBUG(1, "device removed"); 108 + pr_debug("device removed\n"); 93 109 return 0; 94 110 } 95 111 96 112 offset = to & ~(dev->win_size-1); 97 113 if (offset != dev->offset) { 98 - DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", 114 + pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n", 99 115 dev->offset, offset); 100 116 ret = pcmcia_map_mem_page(dev->p_dev, win, offset); 101 117 if (ret != 0) ··· 116 132 return d; 117 133 118 134 d.x[0] = readb(addr); 119 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]); 135 + pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]); 120 136 return d; 121 137 } 122 138 ··· 131 147 return d; 132 148 133 149 d.x[0] = readw(addr); 134 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]); 150 + pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]); 135 151 return d; 136 152 } 137 153 ··· 141 157 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 142 158 unsigned long win_size = dev->win_size; 143 159 144 - DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); 160 + pr_debug("to = %p from = %lu len = %zd\n", to, from, len); 145 161 while(len) { 146 162 int toread = win_size - (from & (win_size-1)); 
147 163 caddr_t addr; ··· 153 169 if(!addr) 154 170 return; 155 171 156 - DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread); 172 + pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread); 157 173 memcpy_fromio(to, addr, toread); 158 174 len -= toread; 159 175 to += toread; ··· 169 185 if(!addr) 170 186 return; 171 187 172 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]); 188 + pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]); 173 189 writeb(d.x[0], addr); 174 190 } 175 191 ··· 180 196 if(!addr) 181 197 return; 182 198 183 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]); 199 + pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]); 184 200 writew(d.x[0], addr); 185 201 } 186 202 ··· 190 206 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 191 207 unsigned long win_size = dev->win_size; 192 208 193 - DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 209 + pr_debug("to = %lu from = %p len = %zd\n", to, from, len); 194 210 while(len) { 195 211 int towrite = win_size - (to & (win_size-1)); 196 212 caddr_t addr; ··· 202 218 if(!addr) 203 219 return; 204 220 205 - DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite); 221 + pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite); 206 222 memcpy_toio(addr, from, towrite); 207 223 len -= towrite; 208 224 to += towrite; ··· 224 240 return d; 225 241 226 242 d.x[0] = readb(win_base + ofs); 227 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", 243 + pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", 228 244 ofs, win_base + ofs, d.x[0]); 229 245 return d; 230 246 } ··· 239 255 return d; 240 256 241 257 d.x[0] = readw(win_base + ofs); 242 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", 258 + pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", 243 259 ofs, win_base + ofs, d.x[0]); 244 260 return d; 245 261 } ··· 252 268 if(DEV_REMOVED(map)) 253 269 return; 254 270 255 - DEBUG(3, "to = %p from = %lu len = %zd", 
to, from, len); 271 + pr_debug("to = %p from = %lu len = %zd\n", to, from, len); 256 272 memcpy_fromio(to, win_base + from, len); 257 273 } 258 274 ··· 264 280 if(DEV_REMOVED(map)) 265 281 return; 266 282 267 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", 283 + pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", 268 284 adr, win_base + adr, d.x[0]); 269 285 writeb(d.x[0], win_base + adr); 270 286 } ··· 277 293 if(DEV_REMOVED(map)) 278 294 return; 279 295 280 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", 296 + pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", 281 297 adr, win_base + adr, d.x[0]); 282 298 writew(d.x[0], win_base + adr); 283 299 } ··· 290 306 if(DEV_REMOVED(map)) 291 307 return; 292 308 293 - DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 309 + pr_debug("to = %lu from = %p len = %zd\n", to, from, len); 294 310 memcpy_toio(win_base + to, from, len); 295 311 } 296 312 ··· 300 316 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 301 317 struct pcmcia_device *link = dev->p_dev; 302 318 303 - DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); 319 + pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); 304 320 pcmcia_fixup_vpp(link, on ? 
dev->vpp : 0); 305 321 } 306 322 ··· 309 325 { 310 326 struct pcmciamtd_dev *dev = link->priv; 311 327 312 - DEBUG(3, "link = 0x%p", link); 328 + pr_debug("link = 0x%p\n", link); 313 329 314 330 if (link->resource[2]->end) { 315 331 if(dev->win_base) { ··· 321 337 } 322 338 323 339 324 - #ifdef CONFIG_MTD_DEBUG 325 340 static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, 326 341 tuple_t *tuple, 327 342 void *priv_data) ··· 330 347 if (!pcmcia_parse_tuple(tuple, &parse)) { 331 348 cistpl_format_t *t = &parse.format; 332 349 (void)t; /* Shut up, gcc */ 333 - DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", 350 + pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n", 334 351 t->type, t->edc, t->offset, t->length); 335 352 } 336 353 return -ENOSPC; ··· 346 363 if (!pcmcia_parse_tuple(tuple, &parse)) { 347 364 cistpl_jedec_t *t = &parse.jedec; 348 365 for (i = 0; i < t->nid; i++) 349 - DEBUG(2, "JEDEC: 0x%02x 0x%02x", 366 + pr_debug("JEDEC: 0x%02x 0x%02x\n", 350 367 t->id[i].mfr, t->id[i].info); 351 368 } 352 369 return -ENOSPC; 353 370 } 354 - #endif 355 371 356 372 static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, 357 373 tuple_t *tuple, ··· 364 382 if (pcmcia_parse_tuple(tuple, &parse)) 365 383 return -EINVAL; 366 384 367 - DEBUG(2, "Common memory:"); 385 + pr_debug("Common memory:\n"); 368 386 dev->pcmcia_map.size = t->dev[0].size; 369 387 /* from here on: DEBUG only */ 370 388 for (i = 0; i < t->ndev; i++) { 371 - DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); 372 - DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); 373 - DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed); 374 - DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size); 389 + pr_debug("Region %d, type = %u\n", i, t->dev[i].type); 390 + pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp); 391 + pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed); 392 + pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size); 
375 393 } 376 394 return 0; 377 395 } ··· 391 409 dev->pcmcia_map.bankwidth = t->geo[0].buswidth; 392 410 /* from here on: DEBUG only */ 393 411 for (i = 0; i < t->ngeo; i++) { 394 - DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); 395 - DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); 396 - DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); 397 - DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block); 398 - DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition); 399 - DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave); 412 + pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth); 413 + pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block); 414 + pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block); 415 + pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block); 416 + pr_debug("region: %d partition = %u\n", i, t->geo[i].partition); 417 + pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave); 400 418 } 401 419 return 0; 402 420 } ··· 414 432 if (p_dev->prod_id[i]) 415 433 strcat(dev->mtd_name, p_dev->prod_id[i]); 416 434 } 417 - DEBUG(2, "Found name: %s", dev->mtd_name); 435 + pr_debug("Found name: %s\n", dev->mtd_name); 418 436 } 419 437 420 - #ifdef CONFIG_MTD_DEBUG 421 438 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); 422 439 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); 423 - #endif 424 440 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); 425 441 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); 426 442 ··· 430 450 431 451 if(force_size) { 432 452 dev->pcmcia_map.size = force_size << 20; 433 - DEBUG(2, "size forced to %dM", force_size); 453 + pr_debug("size forced to %dM\n", force_size); 434 454 } 435 455 436 456 if(bankwidth) { 437 457 dev->pcmcia_map.bankwidth = bankwidth; 438 - DEBUG(2, "bankwidth forced to %d", bankwidth); 458 + 
pr_debug("bankwidth forced to %d\n", bankwidth); 439 459 } 440 460 441 461 dev->pcmcia_map.name = dev->mtd_name; ··· 444 464 *new_name = 1; 445 465 } 446 466 447 - DEBUG(1, "Device: Size: %lu Width:%d Name: %s", 467 + pr_debug("Device: Size: %lu Width:%d Name: %s\n", 448 468 dev->pcmcia_map.size, 449 469 dev->pcmcia_map.bankwidth << 3, dev->mtd_name); 450 470 } ··· 459 479 static char *probes[] = { "jedec_probe", "cfi_probe" }; 460 480 int new_name = 0; 461 481 462 - DEBUG(3, "link=0x%p", link); 482 + pr_debug("link=0x%p\n", link); 463 483 464 484 card_settings(dev, link, &new_name); 465 485 ··· 492 512 493 513 do { 494 514 int ret; 495 - DEBUG(2, "requesting window with size = %luKiB memspeed = %d", 515 + pr_debug("requesting window with size = %luKiB memspeed = %d\n", 496 516 (unsigned long) resource_size(link->resource[2]) >> 10, 497 517 mem_speed); 498 518 ret = pcmcia_request_window(link, link->resource[2], mem_speed); 499 - DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); 519 + pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size); 500 520 if(ret) { 501 521 j++; 502 522 link->resource[2]->start = 0; ··· 504 524 force_size << 20 : MAX_PCMCIA_ADDR; 505 525 link->resource[2]->end >>= j; 506 526 } else { 507 - DEBUG(2, "Got window of size %luKiB", (unsigned long) 527 + pr_debug("Got window of size %luKiB\n", (unsigned long) 508 528 resource_size(link->resource[2]) >> 10); 509 529 dev->win_size = resource_size(link->resource[2]); 510 530 break; 511 531 } 512 532 } while (link->resource[2]->end >= 0x1000); 513 533 514 - DEBUG(2, "dev->win_size = %d", dev->win_size); 534 + pr_debug("dev->win_size = %d\n", dev->win_size); 515 535 516 536 if(!dev->win_size) { 517 537 dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); 518 538 pcmciamtd_release(link); 519 539 return -ENODEV; 520 540 } 521 - DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); 541 + pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10); 522 542 523 543 /* 
Get write protect status */ 524 544 dev->win_base = ioremap(link->resource[2]->start, ··· 529 549 pcmciamtd_release(link); 530 550 return -ENODEV; 531 551 } 532 - DEBUG(1, "mapped window dev = %p @ %pR, base = %p", 552 + pr_debug("mapped window dev = %p @ %pR, base = %p\n", 533 553 dev, link->resource[2], dev->win_base); 534 554 535 555 dev->offset = 0; ··· 544 564 } 545 565 546 566 link->config_index = 0; 547 - DEBUG(2, "Setting Configuration"); 567 + pr_debug("Setting Configuration\n"); 548 568 ret = pcmcia_enable_device(link); 549 569 if (ret != 0) { 550 570 if (dev->win_base) { ··· 560 580 mtd = do_map_probe("map_rom", &dev->pcmcia_map); 561 581 } else { 562 582 for(i = 0; i < ARRAY_SIZE(probes); i++) { 563 - DEBUG(1, "Trying %s", probes[i]); 583 + pr_debug("Trying %s\n", probes[i]); 564 584 mtd = do_map_probe(probes[i], &dev->pcmcia_map); 565 585 if(mtd) 566 586 break; 567 587 568 - DEBUG(1, "FAILED: %s", probes[i]); 588 + pr_debug("FAILED: %s\n", probes[i]); 569 589 } 570 590 } 571 591 572 592 if(!mtd) { 573 - DEBUG(1, "Can not find an MTD"); 593 + pr_debug("Can not find an MTD\n"); 574 594 pcmciamtd_release(link); 575 595 return -ENODEV; 576 596 } ··· 597 617 /* If the memory found is fits completely into the mapped PCMCIA window, 598 618 use the faster non-remapping read/write functions */ 599 619 if(mtd->size <= dev->win_size) { 600 - DEBUG(1, "Using non remapping memory functions"); 620 + pr_debug("Using non remapping memory functions\n"); 601 621 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; 602 622 if (dev->pcmcia_map.bankwidth == 1) { 603 623 dev->pcmcia_map.read = pcmcia_read8; ··· 625 645 626 646 static int pcmciamtd_suspend(struct pcmcia_device *dev) 627 647 { 628 - DEBUG(2, "EVENT_PM_RESUME"); 648 + pr_debug("EVENT_PM_RESUME\n"); 629 649 630 650 /* get_lock(link); */ 631 651 ··· 634 654 635 655 static int pcmciamtd_resume(struct pcmcia_device *dev) 636 656 { 637 - DEBUG(2, "EVENT_PM_SUSPEND"); 657 + pr_debug("EVENT_PM_SUSPEND\n"); 638 
658 639 659 /* free_lock(link); */ 640 660 ··· 646 666 { 647 667 struct pcmciamtd_dev *dev = link->priv; 648 668 649 - DEBUG(3, "link=0x%p", link); 669 + pr_debug("link=0x%p\n", link); 650 670 651 671 if(dev->mtd_info) { 652 672 mtd_device_unregister(dev->mtd_info); ··· 666 686 /* Create new memory card device */ 667 687 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 668 688 if (!dev) return -ENOMEM; 669 - DEBUG(1, "dev=0x%p", dev); 689 + pr_debug("dev=0x%p\n", dev); 670 690 671 691 dev->p_dev = link; 672 692 link->priv = dev; ··· 735 755 736 756 static void __exit exit_pcmciamtd(void) 737 757 { 738 - DEBUG(1, DRIVER_DESC " unloading"); 758 + pr_debug(DRIVER_DESC " unloading"); 739 759 pcmcia_unregister_driver(&pcmciamtd_driver); 740 760 } 741 761
+2 -36
drivers/mtd/maps/physmap.c
··· 27 27 struct mtd_info *mtd[MAX_RESOURCES]; 28 28 struct mtd_info *cmtd; 29 29 struct map_info map[MAX_RESOURCES]; 30 - int nr_parts; 31 - struct mtd_partition *parts; 32 30 }; 33 31 34 32 static int physmap_flash_remove(struct platform_device *dev) ··· 44 46 45 47 if (info->cmtd) { 46 48 mtd_device_unregister(info->cmtd); 47 - if (info->nr_parts) 48 - kfree(info->parts); 49 49 if (info->cmtd != info->mtd[0]) 50 50 mtd_concat_destroy(info->cmtd); 51 51 } ··· 171 175 if (err) 172 176 goto err_out; 173 177 174 - err = parse_mtd_partitions(info->cmtd, part_probe_types, 175 - &info->parts, 0); 176 - if (err > 0) { 177 - mtd_device_register(info->cmtd, info->parts, err); 178 - info->nr_parts = err; 179 - return 0; 180 - } 181 - 182 - if (physmap_data->nr_parts) { 183 - printk(KERN_NOTICE "Using physmap partition information\n"); 184 - mtd_device_register(info->cmtd, physmap_data->parts, 185 - physmap_data->nr_parts); 186 - return 0; 187 - } 188 - 189 - mtd_device_register(info->cmtd, NULL, 0); 190 - 178 + mtd_device_parse_register(info->cmtd, part_probe_types, 0, 179 + physmap_data->parts, physmap_data->nr_parts); 191 180 return 0; 192 181 193 182 err_out: ··· 226 245 .num_resources = 1, 227 246 .resource = &physmap_flash_resource, 228 247 }; 229 - 230 - void physmap_configure(unsigned long addr, unsigned long size, 231 - int bankwidth, void (*set_vpp)(struct map_info *, int)) 232 - { 233 - physmap_flash_resource.start = addr; 234 - physmap_flash_resource.end = addr + size - 1; 235 - physmap_flash_data.width = bankwidth; 236 - physmap_flash_data.set_vpp = set_vpp; 237 - } 238 - 239 - void physmap_set_partitions(struct mtd_partition *parts, int num_parts) 240 - { 241 - physmap_flash_data.nr_parts = num_parts; 242 - physmap_flash_data.parts = parts; 243 - } 244 248 #endif 245 249 246 250 static int __init physmap_init(void)
+7 -73
drivers/mtd/maps/physmap_of.c
··· 34 34 35 35 struct of_flash { 36 36 struct mtd_info *cmtd; 37 - struct mtd_partition *parts; 38 37 int list_size; /* number of elements in of_flash_list */ 39 38 struct of_flash_list list[0]; 40 39 }; 41 - 42 - #define OF_FLASH_PARTS(info) ((info)->parts) 43 - static int parse_obsolete_partitions(struct platform_device *dev, 44 - struct of_flash *info, 45 - struct device_node *dp) 46 - { 47 - int i, plen, nr_parts; 48 - const struct { 49 - __be32 offset, len; 50 - } *part; 51 - const char *names; 52 - 53 - part = of_get_property(dp, "partitions", &plen); 54 - if (!part) 55 - return 0; /* No partitions found */ 56 - 57 - dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n"); 58 - 59 - nr_parts = plen / sizeof(part[0]); 60 - 61 - info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL); 62 - if (!info->parts) 63 - return -ENOMEM; 64 - 65 - names = of_get_property(dp, "partition-names", &plen); 66 - 67 - for (i = 0; i < nr_parts; i++) { 68 - info->parts[i].offset = be32_to_cpu(part->offset); 69 - info->parts[i].size = be32_to_cpu(part->len) & ~1; 70 - if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */ 71 - info->parts[i].mask_flags = MTD_WRITEABLE; 72 - 73 - if (names && (plen > 0)) { 74 - int len = strlen(names) + 1; 75 - 76 - info->parts[i].name = (char *)names; 77 - plen -= len; 78 - names += len; 79 - } else { 80 - info->parts[i].name = "unnamed"; 81 - } 82 - 83 - part++; 84 - } 85 - 86 - return nr_parts; 87 - } 88 40 89 41 static int of_flash_remove(struct platform_device *dev) 90 42 { ··· 53 101 mtd_concat_destroy(info->cmtd); 54 102 } 55 103 56 - if (info->cmtd) { 57 - if (OF_FLASH_PARTS(info)) 58 - kfree(OF_FLASH_PARTS(info)); 104 + if (info->cmtd) 59 105 mtd_device_unregister(info->cmtd); 60 - } 61 106 62 107 for (i = 0; i < info->list_size; i++) { 63 108 if (info->list[i].mtd) ··· 114 165 specifies the list of partition probers to use. If none is given then the 115 166 default is use. 
These take precedence over other device tree 116 167 information. */ 117 - static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL }; 168 + static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", 169 + "ofpart", "ofoldpart", NULL }; 118 170 static const char ** __devinit of_get_probes(struct device_node *dp) 119 171 { 120 172 const char *cp; ··· 168 218 int reg_tuple_size; 169 219 struct mtd_info **mtd_list = NULL; 170 220 resource_size_t res_size; 221 + struct mtd_part_parser_data ppdata; 171 222 172 223 match = of_match_device(of_flash_match, &dev->dev); 173 224 if (!match) ··· 282 331 if (err) 283 332 goto err_out; 284 333 334 + ppdata.of_node = dp; 285 335 part_probe_types = of_get_probes(dp); 286 - err = parse_mtd_partitions(info->cmtd, part_probe_types, 287 - &info->parts, 0); 288 - if (err < 0) { 289 - of_free_probes(part_probe_types); 290 - goto err_out; 291 - } 336 + mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata, 337 + NULL, 0); 292 338 of_free_probes(part_probe_types); 293 - 294 - if (err == 0) { 295 - err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts); 296 - if (err < 0) 297 - goto err_out; 298 - } 299 - 300 - if (err == 0) { 301 - err = parse_obsolete_partitions(dev, info, dp); 302 - if (err < 0) 303 - goto err_out; 304 - } 305 - 306 - mtd_device_register(info->cmtd, info->parts, err); 307 339 308 340 kfree(mtd_list); 309 341
+2 -21
drivers/mtd/maps/plat-ram.c
··· 44 44 struct device *dev; 45 45 struct mtd_info *mtd; 46 46 struct map_info map; 47 - struct mtd_partition *partitions; 48 - bool free_partitions; 49 47 struct resource *area; 50 48 struct platdata_mtd_ram *pdata; 51 49 }; ··· 93 95 94 96 if (info->mtd) { 95 97 mtd_device_unregister(info->mtd); 96 - if (info->partitions) { 97 - if (info->free_partitions) 98 - kfree(info->partitions); 99 - } 100 98 map_destroy(info->mtd); 101 99 } 102 100 ··· 222 228 /* check to see if there are any available partitions, or wether 223 229 * to add this device whole */ 224 230 225 - if (!pdata->nr_partitions) { 226 - /* try to probe using the supplied probe type */ 227 - if (pdata->probes) { 228 - err = parse_mtd_partitions(info->mtd, pdata->probes, 229 - &info->partitions, 0); 230 - info->free_partitions = 1; 231 - if (err > 0) 232 - err = mtd_device_register(info->mtd, 233 - info->partitions, err); 234 - } 235 - } 236 - /* use the static mapping */ 237 - else 238 - err = mtd_device_register(info->mtd, pdata->partitions, 239 - pdata->nr_partitions); 231 + err = mtd_device_parse_register(info->mtd, pdata->probes, 0, 232 + pdata->partitions, pdata->nr_partitions); 240 233 if (!err) 241 234 dev_info(&pdev->dev, "registered mtd device\n"); 242 235
+1 -19
drivers/mtd/maps/pxa2xx-flash.c
··· 41 41 } 42 42 43 43 struct pxa2xx_flash_info { 44 - struct mtd_partition *parts; 45 - int nr_parts; 46 44 struct mtd_info *mtd; 47 45 struct map_info map; 48 46 }; ··· 53 55 { 54 56 struct flash_platform_data *flash = pdev->dev.platform_data; 55 57 struct pxa2xx_flash_info *info; 56 - struct mtd_partition *parts; 57 58 struct resource *res; 58 - int ret = 0; 59 59 60 60 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 61 61 if (!res) ··· 67 71 info->map.bankwidth = flash->width; 68 72 info->map.phys = res->start; 69 73 info->map.size = resource_size(res); 70 - info->parts = flash->parts; 71 - info->nr_parts = flash->nr_parts; 72 74 73 75 info->map.virt = ioremap(info->map.phys, info->map.size); 74 76 if (!info->map.virt) { ··· 98 104 } 99 105 info->mtd->owner = THIS_MODULE; 100 106 101 - ret = parse_mtd_partitions(info->mtd, probes, &parts, 0); 102 - 103 - if (ret > 0) { 104 - info->nr_parts = ret; 105 - info->parts = parts; 106 - } 107 - 108 - if (!info->nr_parts) 109 - printk("Registering %s as whole device\n", 110 - info->map.name); 111 - 112 - mtd_device_register(info->mtd, info->parts, info->nr_parts); 107 + mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); 113 108 114 109 platform_set_drvdata(pdev, info); 115 110 return 0; ··· 116 133 iounmap(info->map.virt); 117 134 if (info->map.cached) 118 135 iounmap(info->map.cached); 119 - kfree(info->parts); 120 136 kfree(info); 121 137 return 0; 122 138 }
+4 -20
drivers/mtd/maps/rbtx4939-flash.c
··· 25 25 struct rbtx4939_flash_info { 26 26 struct mtd_info *mtd; 27 27 struct map_info map; 28 - int nr_parts; 29 - struct mtd_partition *parts; 30 28 }; 31 29 32 30 static int rbtx4939_flash_remove(struct platform_device *dev) ··· 39 41 if (info->mtd) { 40 42 struct rbtx4939_flash_data *pdata = dev->dev.platform_data; 41 43 42 - if (info->nr_parts) 43 - kfree(info->parts); 44 44 mtd_device_unregister(info->mtd); 45 45 map_destroy(info->mtd); 46 46 } ··· 46 50 } 47 51 48 52 static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 49 - static const char *part_probe_types[] = { "cmdlinepart", NULL }; 50 53 51 54 static int rbtx4939_flash_probe(struct platform_device *dev) 52 55 { ··· 102 107 info->mtd->owner = THIS_MODULE; 103 108 if (err) 104 109 goto err_out; 110 + err = mtd_device_parse_register(info->mtd, NULL, 0, 111 + pdata->parts, pdata->nr_parts); 105 112 106 - err = parse_mtd_partitions(info->mtd, part_probe_types, 107 - &info->parts, 0); 108 - if (err > 0) { 109 - mtd_device_register(info->mtd, info->parts, err); 110 - info->nr_parts = err; 111 - return 0; 112 - } 113 - 114 - if (pdata->nr_parts) { 115 - pr_notice("Using rbtx4939 partition information\n"); 116 - mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); 117 - return 0; 118 - } 119 - 120 - mtd_device_register(info->mtd, NULL, 0); 113 + if (err) 114 + goto err_out; 121 115 return 0; 122 116 123 117 err_out:
+3 -27
drivers/mtd/maps/sa1100-flash.c
··· 131 131 }; 132 132 133 133 struct sa_info { 134 - struct mtd_partition *parts; 135 134 struct mtd_info *mtd; 136 135 int num_subdev; 137 - unsigned int nr_parts; 138 136 struct sa_subdev_info subdev[0]; 139 137 }; 140 138 ··· 228 230 if (info->mtd != info->subdev[0].mtd) 229 231 mtd_concat_destroy(info->mtd); 230 232 } 231 - 232 - kfree(info->parts); 233 233 234 234 for (i = info->num_subdev - 1; i >= 0; i--) 235 235 sa1100_destroy_subdev(&info->subdev[i]); ··· 337 341 static int __devinit sa1100_mtd_probe(struct platform_device *pdev) 338 342 { 339 343 struct flash_platform_data *plat = pdev->dev.platform_data; 340 - struct mtd_partition *parts; 341 - const char *part_type = NULL; 342 344 struct sa_info *info; 343 - int err, nr_parts = 0; 345 + int err; 344 346 345 347 if (!plat) 346 348 return -ENODEV; ··· 352 358 /* 353 359 * Partition selection stuff. 354 360 */ 355 - nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); 356 - if (nr_parts > 0) { 357 - info->parts = parts; 358 - part_type = "dynamic"; 359 - } else { 360 - parts = plat->parts; 361 - nr_parts = plat->nr_parts; 362 - part_type = "static"; 363 - } 364 - 365 - if (nr_parts == 0) 366 - printk(KERN_NOTICE "SA1100 flash: no partition info " 367 - "available, registering whole flash\n"); 368 - else 369 - printk(KERN_NOTICE "SA1100 flash: using %s partition " 370 - "definition\n", part_type); 371 - 372 - mtd_device_register(info->mtd, parts, nr_parts); 373 - 374 - info->nr_parts = nr_parts; 361 + mtd_device_parse_register(info->mtd, part_probes, 0, 362 + plat->parts, plat->nr_parts); 375 363 376 364 platform_set_drvdata(pdev, info); 377 365 err = 0;
+7 -23
drivers/mtd/maps/solutionengine.c
··· 19 19 static struct mtd_info *flash_mtd; 20 20 static struct mtd_info *eprom_mtd; 21 21 22 - static struct mtd_partition *parsed_parts; 23 - 24 22 struct map_info soleng_eprom_map = { 25 23 .name = "Solution Engine EPROM", 26 24 .size = 0x400000, ··· 49 51 .size = MTDPART_SIZ_FULL, 50 52 } 51 53 }; 54 + #define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions) 55 + #else 56 + #define superh_se_partitions NULL 57 + #define NUM_PARTITIONS 0 52 58 #endif /* CONFIG_MTD_SUPERH_RESERVE */ 53 59 54 60 static int __init init_soleng_maps(void) 55 61 { 56 - int nr_parts = 0; 57 - 58 62 /* First probe at offset 0 */ 59 63 soleng_flash_map.phys = 0; 60 64 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); ··· 92 92 mtd_device_register(eprom_mtd, NULL, 0); 93 93 } 94 94 95 - nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0); 96 - 97 - #ifdef CONFIG_MTD_SUPERH_RESERVE 98 - if (nr_parts <= 0) { 99 - printk(KERN_NOTICE "Using configured partition at 0x%08x.\n", 100 - CONFIG_MTD_SUPERH_RESERVE); 101 - parsed_parts = superh_se_partitions; 102 - nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts); 103 - } 104 - #endif /* CONFIG_MTD_SUPERH_RESERVE */ 105 - 106 - if (nr_parts > 0) 107 - mtd_device_register(flash_mtd, parsed_parts, nr_parts); 108 - else 109 - mtd_device_register(flash_mtd, NULL, 0); 95 + mtd_device_parse_register(flash_mtd, probes, 0, 96 + superh_se_partitions, NUM_PARTITIONS); 110 97 111 98 return 0; 112 99 } ··· 105 118 map_destroy(eprom_mtd); 106 119 } 107 120 108 - if (parsed_parts) 109 - mtd_device_unregister(flash_mtd); 110 - else 111 - mtd_device_unregister(flash_mtd); 121 + mtd_device_unregister(flash_mtd); 112 122 map_destroy(flash_mtd); 113 123 } 114 124
+13 -20
drivers/mtd/maps/wr_sbc82xx_flash.c
··· 20 20 #include <asm/immap_cpm2.h> 21 21 22 22 static struct mtd_info *sbcmtd[3]; 23 - static struct mtd_partition *sbcmtd_parts[3]; 24 23 25 24 struct map_info sbc82xx_flash_map[3] = { 26 25 {.name = "Boot flash"}, ··· 100 101 for (i=0; i<3; i++) { 101 102 int8_t flashcs[3] = { 0, 6, 1 }; 102 103 int nr_parts; 104 + struct mtd_partition *defparts; 103 105 104 106 printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", 105 107 sbc82xx_flash_map[i].name, ··· 113 113 } 114 114 printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); 115 115 116 - sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size); 116 + sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, 117 + sbc82xx_flash_map[i].size); 117 118 118 119 if (!sbc82xx_flash_map[i].virt) { 119 120 printk("Failed to ioremap\n"); ··· 130 129 131 130 sbcmtd[i]->owner = THIS_MODULE; 132 131 133 - nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, 134 - &sbcmtd_parts[i], 0); 135 - if (nr_parts > 0) { 136 - mtd_device_register(sbcmtd[i], sbcmtd_parts[i], 137 - nr_parts); 138 - continue; 139 - } 140 - 141 132 /* No partitioning detected. 
Use default */ 142 133 if (i == 2) { 143 - mtd_device_register(sbcmtd[i], NULL, 0); 134 + defparts = NULL; 135 + nr_parts = 0; 144 136 } else if (i == bigflash) { 145 - mtd_device_register(sbcmtd[i], bigflash_parts, 146 - ARRAY_SIZE(bigflash_parts)); 137 + defparts = bigflash_parts; 138 + nr_parts = ARRAY_SIZE(bigflash_parts); 147 139 } else { 148 - mtd_device_register(sbcmtd[i], smallflash_parts, 149 - ARRAY_SIZE(smallflash_parts)); 140 + defparts = smallflash_parts; 141 + nr_parts = ARRAY_SIZE(smallflash_parts); 150 142 } 143 + 144 + mtd_device_parse_register(sbcmtd[i], part_probes, 0, 145 + defparts, nr_parts); 151 146 } 152 147 return 0; 153 148 } ··· 156 159 if (!sbcmtd[i]) 157 160 continue; 158 161 159 - if (i<2 || sbcmtd_parts[i]) 160 - mtd_device_unregister(sbcmtd[i]); 161 - else 162 - mtd_device_unregister(sbcmtd[i]); 162 + mtd_device_unregister(sbcmtd[i]); 163 163 164 - kfree(sbcmtd_parts[i]); 165 164 map_destroy(sbcmtd[i]); 166 165 167 166 iounmap((void *)sbc82xx_flash_map[i].virt);
+2
drivers/mtd/mtd_blkdevs.c
··· 426 426 new->rq->queuedata = new; 427 427 blk_queue_logical_block_size(new->rq, tr->blksize); 428 428 429 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); 430 + 429 431 if (tr->discard) { 430 432 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); 431 433 new->rq->limits.max_discard_sectors = UINT_MAX;
+8 -10
drivers/mtd/mtdblock.c
··· 44 44 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 45 45 }; 46 46 47 - static struct mutex mtdblks_lock; 47 + static DEFINE_MUTEX(mtdblks_lock); 48 48 49 49 /* 50 50 * Cache stuff... ··· 119 119 if (mtdblk->cache_state != STATE_DIRTY) 120 120 return 0; 121 121 122 - DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" " 122 + pr_debug("mtdblock: writing cached data for \"%s\" " 123 123 "at 0x%lx, size 0x%x\n", mtd->name, 124 124 mtdblk->cache_offset, mtdblk->cache_size); 125 125 ··· 148 148 size_t retlen; 149 149 int ret; 150 150 151 - DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", 151 + pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", 152 152 mtd->name, pos, len); 153 153 154 154 if (!sect_size) ··· 218 218 size_t retlen; 219 219 int ret; 220 220 221 - DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", 221 + pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", 222 222 mtd->name, pos, len); 223 223 224 224 if (!sect_size) ··· 283 283 { 284 284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 285 285 286 - DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 286 + pr_debug("mtdblock_open\n"); 287 287 288 288 mutex_lock(&mtdblks_lock); 289 289 if (mtdblk->count) { ··· 303 303 304 304 mutex_unlock(&mtdblks_lock); 305 305 306 - DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 306 + pr_debug("ok\n"); 307 307 308 308 return 0; 309 309 } ··· 312 312 { 313 313 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 314 314 315 - DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 315 + pr_debug("mtdblock_release\n"); 316 316 317 317 mutex_lock(&mtdblks_lock); 318 318 ··· 329 329 330 330 mutex_unlock(&mtdblks_lock); 331 331 332 - DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 332 + pr_debug("ok\n"); 333 333 334 334 return 0; 335 335 } ··· 389 389 390 390 static int __init init_mtdblock(void) 391 391 { 392 - mutex_init(&mtdblks_lock); 393 - 394 392 return 
register_mtd_blktrans(&mtdblock_tr); 395 393 } 396 394
+120 -42
drivers/mtd/mtdchar.c
··· 43 43 44 44 /* 45 45 * Data structure to hold the pointer to the mtd device as well 46 - * as mode information ofr various use cases. 46 + * as mode information of various use cases. 47 47 */ 48 48 struct mtd_file_info { 49 49 struct mtd_info *mtd; ··· 86 86 struct mtd_file_info *mfi; 87 87 struct inode *mtd_ino; 88 88 89 - DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); 89 + pr_debug("MTD_open\n"); 90 90 91 91 /* You can't open the RO devices RW */ 92 92 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) ··· 151 151 struct mtd_file_info *mfi = file->private_data; 152 152 struct mtd_info *mtd = mfi->mtd; 153 153 154 - DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 154 + pr_debug("MTD_close\n"); 155 155 156 156 /* Only sync if opened RW */ 157 157 if ((file->f_mode & FMODE_WRITE) && mtd->sync) ··· 195 195 size_t size = count; 196 196 char *kbuf; 197 197 198 - DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); 198 + pr_debug("MTD_read\n"); 199 199 200 200 if (*ppos + count > mtd->size) 201 201 count = mtd->size - *ppos; ··· 211 211 len = min_t(size_t, count, size); 212 212 213 213 switch (mfi->mode) { 214 - case MTD_MODE_OTP_FACTORY: 214 + case MTD_FILE_MODE_OTP_FACTORY: 215 215 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); 216 216 break; 217 - case MTD_MODE_OTP_USER: 217 + case MTD_FILE_MODE_OTP_USER: 218 218 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 219 219 break; 220 - case MTD_MODE_RAW: 220 + case MTD_FILE_MODE_RAW: 221 221 { 222 222 struct mtd_oob_ops ops; 223 223 224 - ops.mode = MTD_OOB_RAW; 224 + ops.mode = MTD_OPS_RAW; 225 225 ops.datbuf = kbuf; 226 226 ops.oobbuf = NULL; 227 227 ops.len = len; ··· 233 233 default: 234 234 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); 235 235 } 236 - /* Nand returns -EBADMSG on ecc errors, but it returns 236 + /* Nand returns -EBADMSG on ECC errors, but it returns 237 237 * the data. For our userspace tools it is important 238 - * to dump areas with ecc errors ! 238 + * to dump areas with ECC errors! 
239 239 * For kernel internal usage it also might return -EUCLEAN 240 240 * to signal the caller that a bitflip has occurred and has 241 241 * been corrected by the ECC algorithm. 242 242 * Userspace software which accesses NAND this way 243 243 * must be aware of the fact that it deals with NAND 244 244 */ 245 - if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) { 245 + if (!ret || mtd_is_bitflip_or_eccerr(ret)) { 246 246 *ppos += retlen; 247 247 if (copy_to_user(buf, kbuf, retlen)) { 248 248 kfree(kbuf); ··· 278 278 int ret=0; 279 279 int len; 280 280 281 - DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n"); 281 + pr_debug("MTD_write\n"); 282 282 283 283 if (*ppos == mtd->size) 284 284 return -ENOSPC; ··· 302 302 } 303 303 304 304 switch (mfi->mode) { 305 - case MTD_MODE_OTP_FACTORY: 305 + case MTD_FILE_MODE_OTP_FACTORY: 306 306 ret = -EROFS; 307 307 break; 308 - case MTD_MODE_OTP_USER: 308 + case MTD_FILE_MODE_OTP_USER: 309 309 if (!mtd->write_user_prot_reg) { 310 310 ret = -EOPNOTSUPP; 311 311 break; ··· 313 313 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 314 314 break; 315 315 316 - case MTD_MODE_RAW: 316 + case MTD_FILE_MODE_RAW: 317 317 { 318 318 struct mtd_oob_ops ops; 319 319 320 - ops.mode = MTD_OOB_RAW; 320 + ops.mode = MTD_OPS_RAW; 321 321 ops.datbuf = kbuf; 322 322 ops.oobbuf = NULL; 323 + ops.ooboffs = 0; 323 324 ops.len = len; 324 325 325 326 ret = mtd->write_oob(mtd, *ppos, &ops); ··· 368 367 if (!mtd->read_fact_prot_reg) 369 368 ret = -EOPNOTSUPP; 370 369 else 371 - mfi->mode = MTD_MODE_OTP_FACTORY; 370 + mfi->mode = MTD_FILE_MODE_OTP_FACTORY; 372 371 break; 373 372 case MTD_OTP_USER: 374 373 if (!mtd->read_fact_prot_reg) 375 374 ret = -EOPNOTSUPP; 376 375 else 377 - mfi->mode = MTD_MODE_OTP_USER; 376 + mfi->mode = MTD_FILE_MODE_OTP_USER; 378 377 break; 379 378 default: 380 379 ret = -EINVAL; ··· 391 390 uint64_t start, uint32_t length, void __user *ptr, 392 391 uint32_t __user *retp) 393 392 { 393 + struct mtd_file_info *mfi = 
file->private_data; 394 394 struct mtd_oob_ops ops; 395 395 uint32_t retlen; 396 396 int ret = 0; ··· 411 409 return ret; 412 410 413 411 ops.ooblen = length; 414 - ops.ooboffs = start & (mtd->oobsize - 1); 412 + ops.ooboffs = start & (mtd->writesize - 1); 415 413 ops.datbuf = NULL; 416 - ops.mode = MTD_OOB_PLACE; 414 + ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : 415 + MTD_OPS_PLACE_OOB; 417 416 418 417 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 419 418 return -EINVAL; ··· 423 420 if (IS_ERR(ops.oobbuf)) 424 421 return PTR_ERR(ops.oobbuf); 425 422 426 - start &= ~((uint64_t)mtd->oobsize - 1); 423 + start &= ~((uint64_t)mtd->writesize - 1); 427 424 ret = mtd->write_oob(mtd, start, &ops); 428 425 429 426 if (ops.oobretlen > 0xFFFFFFFFU) ··· 436 433 return ret; 437 434 } 438 435 439 - static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, 440 - uint32_t length, void __user *ptr, uint32_t __user *retp) 436 + static int mtd_do_readoob(struct file *file, struct mtd_info *mtd, 437 + uint64_t start, uint32_t length, void __user *ptr, 438 + uint32_t __user *retp) 441 439 { 440 + struct mtd_file_info *mfi = file->private_data; 442 441 struct mtd_oob_ops ops; 443 442 int ret = 0; 444 443 ··· 456 451 return ret; 457 452 458 453 ops.ooblen = length; 459 - ops.ooboffs = start & (mtd->oobsize - 1); 454 + ops.ooboffs = start & (mtd->writesize - 1); 460 455 ops.datbuf = NULL; 461 - ops.mode = MTD_OOB_PLACE; 456 + ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? 
MTD_OPS_RAW : 457 + MTD_OPS_PLACE_OOB; 462 458 463 459 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 464 460 return -EINVAL; ··· 468 462 if (!ops.oobbuf) 469 463 return -ENOMEM; 470 464 471 - start &= ~((uint64_t)mtd->oobsize - 1); 465 + start &= ~((uint64_t)mtd->writesize - 1); 472 466 ret = mtd->read_oob(mtd, start, &ops); 473 467 474 468 if (put_user(ops.oobretlen, retp)) ··· 478 472 ret = -EFAULT; 479 473 480 474 kfree(ops.oobbuf); 475 + 476 + /* 477 + * NAND returns -EBADMSG on ECC errors, but it returns the OOB 478 + * data. For our userspace tools it is important to dump areas 479 + * with ECC errors! 480 + * For kernel internal usage it also might return -EUCLEAN 481 + * to signal the caller that a bitflip has occured and has 482 + * been corrected by the ECC algorithm. 483 + * 484 + * Note: currently the standard NAND function, nand_read_oob_std, 485 + * does not calculate ECC for the OOB area, so do not rely on 486 + * this behavior unless you have replaced it with your own. 487 + */ 488 + if (mtd_is_bitflip_or_eccerr(ret)) 489 + return 0; 490 + 481 491 return ret; 482 492 } 483 493 484 494 /* 485 495 * Copies (and truncates, if necessary) data from the larger struct, 486 496 * nand_ecclayout, to the smaller, deprecated layout struct, 487 - * nand_ecclayout_user. This is necessary only to suppport the deprecated 497 + * nand_ecclayout_user. This is necessary only to support the deprecated 488 498 * API ioctl ECCGETLAYOUT while allowing all new functionality to use 489 499 * nand_ecclayout flexibly (i.e. the struct may change size in new 490 500 * releases without requiring major rewrites). 
··· 566 544 } 567 545 } 568 546 547 + static int mtd_write_ioctl(struct mtd_info *mtd, 548 + struct mtd_write_req __user *argp) 549 + { 550 + struct mtd_write_req req; 551 + struct mtd_oob_ops ops; 552 + void __user *usr_data, *usr_oob; 553 + int ret; 554 + 555 + if (copy_from_user(&req, argp, sizeof(req)) || 556 + !access_ok(VERIFY_READ, req.usr_data, req.len) || 557 + !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) 558 + return -EFAULT; 559 + if (!mtd->write_oob) 560 + return -EOPNOTSUPP; 561 + 562 + ops.mode = req.mode; 563 + ops.len = (size_t)req.len; 564 + ops.ooblen = (size_t)req.ooblen; 565 + ops.ooboffs = 0; 566 + 567 + usr_data = (void __user *)(uintptr_t)req.usr_data; 568 + usr_oob = (void __user *)(uintptr_t)req.usr_oob; 569 + 570 + if (req.usr_data) { 571 + ops.datbuf = memdup_user(usr_data, ops.len); 572 + if (IS_ERR(ops.datbuf)) 573 + return PTR_ERR(ops.datbuf); 574 + } else { 575 + ops.datbuf = NULL; 576 + } 577 + 578 + if (req.usr_oob) { 579 + ops.oobbuf = memdup_user(usr_oob, ops.ooblen); 580 + if (IS_ERR(ops.oobbuf)) { 581 + kfree(ops.datbuf); 582 + return PTR_ERR(ops.oobbuf); 583 + } 584 + } else { 585 + ops.oobbuf = NULL; 586 + } 587 + 588 + ret = mtd->write_oob(mtd, (loff_t)req.start, &ops); 589 + 590 + kfree(ops.datbuf); 591 + kfree(ops.oobbuf); 592 + 593 + return ret; 594 + } 595 + 569 596 static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 570 597 { 571 598 struct mtd_file_info *mfi = file->private_data; ··· 624 553 u_long size; 625 554 struct mtd_info_user info; 626 555 627 - DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); 556 + pr_debug("MTD_ioctl\n"); 628 557 629 558 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; 630 559 if (cmd & IOC_IN) { ··· 672 601 info.erasesize = mtd->erasesize; 673 602 info.writesize = mtd->writesize; 674 603 info.oobsize = mtd->oobsize; 675 - /* The below fields are obsolete */ 676 - info.ecctype = -1; 604 + /* The below field is obsolete */ 605 + info.padding = 0; 677 606 if (copy_to_user(argp, &info, 
sizeof(struct mtd_info_user))) 678 607 return -EFAULT; 679 608 break; ··· 769 698 if (copy_from_user(&buf, argp, sizeof(buf))) 770 699 ret = -EFAULT; 771 700 else 772 - ret = mtd_do_readoob(mtd, buf.start, buf.length, 701 + ret = mtd_do_readoob(file, mtd, buf.start, buf.length, 773 702 buf.ptr, &buf_user->start); 774 703 break; 775 704 } ··· 796 725 if (copy_from_user(&buf, argp, sizeof(buf))) 797 726 ret = -EFAULT; 798 727 else 799 - ret = mtd_do_readoob(mtd, buf.start, buf.length, 728 + ret = mtd_do_readoob(file, mtd, buf.start, buf.length, 800 729 (void __user *)(uintptr_t)buf.usr_ptr, 801 730 &buf_user->length); 731 + break; 732 + } 733 + 734 + case MEMWRITE: 735 + { 736 + ret = mtd_write_ioctl(mtd, 737 + (struct mtd_write_req __user *)arg); 802 738 break; 803 739 } 804 740 ··· 905 827 if (copy_from_user(&mode, argp, sizeof(int))) 906 828 return -EFAULT; 907 829 908 - mfi->mode = MTD_MODE_NORMAL; 830 + mfi->mode = MTD_FILE_MODE_NORMAL; 909 831 910 832 ret = otp_select_filemode(mfi, mode); 911 833 ··· 921 843 return -ENOMEM; 922 844 ret = -EOPNOTSUPP; 923 845 switch (mfi->mode) { 924 - case MTD_MODE_OTP_FACTORY: 846 + case MTD_FILE_MODE_OTP_FACTORY: 925 847 if (mtd->get_fact_prot_info) 926 848 ret = mtd->get_fact_prot_info(mtd, buf, 4096); 927 849 break; 928 - case MTD_MODE_OTP_USER: 850 + case MTD_FILE_MODE_OTP_USER: 929 851 if (mtd->get_user_prot_info) 930 852 ret = mtd->get_user_prot_info(mtd, buf, 4096); 931 853 break; ··· 949 871 { 950 872 struct otp_info oinfo; 951 873 952 - if (mfi->mode != MTD_MODE_OTP_USER) 874 + if (mfi->mode != MTD_FILE_MODE_OTP_USER) 953 875 return -EINVAL; 954 876 if (copy_from_user(&oinfo, argp, sizeof(oinfo))) 955 877 return -EFAULT; ··· 960 882 } 961 883 #endif 962 884 963 - /* This ioctl is being deprecated - it truncates the ecc layout */ 885 + /* This ioctl is being deprecated - it truncates the ECC layout */ 964 886 case ECCGETLAYOUT: 965 887 { 966 888 struct nand_ecclayout_user *usrlay; ··· 993 915 mfi->mode = 0; 994 916 995 
917 switch(arg) { 996 - case MTD_MODE_OTP_FACTORY: 997 - case MTD_MODE_OTP_USER: 918 + case MTD_FILE_MODE_OTP_FACTORY: 919 + case MTD_FILE_MODE_OTP_USER: 998 920 ret = otp_select_filemode(mfi, arg); 999 921 break; 1000 922 1001 - case MTD_MODE_RAW: 923 + case MTD_FILE_MODE_RAW: 1002 924 if (!mtd->read_oob || !mtd->write_oob) 1003 925 return -EOPNOTSUPP; 1004 926 mfi->mode = arg; 1005 927 1006 - case MTD_MODE_NORMAL: 928 + case MTD_FILE_MODE_NORMAL: 1007 929 break; 1008 930 default: 1009 931 ret = -EINVAL; ··· 1089 1011 if (copy_from_user(&buf, argp, sizeof(buf))) 1090 1012 ret = -EFAULT; 1091 1013 else 1092 - ret = mtd_do_readoob(mtd, buf.start, 1014 + ret = mtd_do_readoob(file, mtd, buf.start, 1093 1015 buf.length, compat_ptr(buf.ptr), 1094 1016 &buf_user->start); 1095 1017 break;
+5 -5
drivers/mtd/mtdconcat.c
··· 95 95 96 96 /* Save information about bitflips! */ 97 97 if (unlikely(err)) { 98 - if (err == -EBADMSG) { 98 + if (mtd_is_eccerr(err)) { 99 99 mtd->ecc_stats.failed++; 100 100 ret = err; 101 - } else if (err == -EUCLEAN) { 101 + } else if (mtd_is_bitflip(err)) { 102 102 mtd->ecc_stats.corrected++; 103 103 /* Do not overwrite -EBADMSG !! */ 104 104 if (!ret) ··· 279 279 280 280 /* Save information about bitflips! */ 281 281 if (unlikely(err)) { 282 - if (err == -EBADMSG) { 282 + if (mtd_is_eccerr(err)) { 283 283 mtd->ecc_stats.failed++; 284 284 ret = err; 285 - } else if (err == -EUCLEAN) { 285 + } else if (mtd_is_bitflip(err)) { 286 286 mtd->ecc_stats.corrected++; 287 287 /* Do not overwrite -EBADMSG !! */ 288 288 if (!ret) ··· 770 770 771 771 /* 772 772 * Set up the new "super" device's MTD object structure, check for 773 - * incompatibilites between the subdevices. 773 + * incompatibilities between the subdevices. 774 774 */ 775 775 concat->mtd.type = subdev[0]->type; 776 776 concat->mtd.flags = subdev[0]->flags;
+53 -17
drivers/mtd/mtdcore.c
··· 362 362 MTD_DEVT(i) + 1, 363 363 NULL, "mtd%dro", i); 364 364 365 - DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name); 365 + pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); 366 366 /* No need to get a refcount on the module containing 367 367 the notifier, since we hold the mtd_table_mutex */ 368 368 list_for_each_entry(not, &mtd_notifiers, list) ··· 429 429 } 430 430 431 431 /** 432 - * mtd_device_register - register an MTD device. 432 + * mtd_device_parse_register - parse partitions and register an MTD device. 433 433 * 434 - * @master: the MTD device to register 435 - * @parts: the partitions to register - only valid if nr_parts > 0 436 - * @nr_parts: the number of partitions in parts. If zero then the full MTD 437 - * device is registered 434 + * @mtd: the MTD device to register 435 + * @types: the list of MTD partition probes to try, see 436 + * 'parse_mtd_partitions()' for more information 437 + * @parser_data: MTD partition parser-specific data 438 + * @parts: fallback partition information to register, if parsing fails; 439 + * only valid if %nr_parts > %0 440 + * @nr_parts: the number of partitions in parts, if zero then the full 441 + * MTD device is registered if no partition info is found 438 442 * 439 - * Register an MTD device with the system and optionally, a number of 440 - * partitions. If nr_parts is 0 then the whole device is registered, otherwise 441 - * only the partitions are registered. To register both the full device *and* 442 - * the partitions, call mtd_device_register() twice, once with nr_parts == 0 443 - * and once equal to the number of partitions. 443 + * This function aggregates MTD partitions parsing (done by 444 + * 'parse_mtd_partitions()') and MTD device and partitions registering. 
It 445 + * basically follows the most common pattern found in many MTD drivers: 446 + * 447 + * * It first tries to probe partitions on MTD device @mtd using parsers 448 + * specified in @types (if @types is %NULL, then the default list of parsers 449 + * is used, see 'parse_mtd_partitions()' for more information). If none are 450 + * found this functions tries to fallback to information specified in 451 + * @parts/@nr_parts. 452 + * * If any partitioning info was found, this function registers the found 453 + * partitions. 454 + * * If no partitions were found this function just registers the MTD device 455 + * @mtd and exits. 456 + * 457 + * Returns zero in case of success and a negative error code in case of failure. 444 458 */ 445 - int mtd_device_register(struct mtd_info *master, 446 - const struct mtd_partition *parts, 447 - int nr_parts) 459 + int mtd_device_parse_register(struct mtd_info *mtd, const char **types, 460 + struct mtd_part_parser_data *parser_data, 461 + const struct mtd_partition *parts, 462 + int nr_parts) 448 463 { 449 - return parts ? add_mtd_partitions(master, parts, nr_parts) : 450 - add_mtd_device(master); 464 + int err; 465 + struct mtd_partition *real_parts; 466 + 467 + err = parse_mtd_partitions(mtd, types, &real_parts, parser_data); 468 + if (err <= 0 && nr_parts && parts) { 469 + real_parts = kmemdup(parts, sizeof(*parts) * nr_parts, 470 + GFP_KERNEL); 471 + if (!real_parts) 472 + err = -ENOMEM; 473 + else 474 + err = nr_parts; 475 + } 476 + 477 + if (err > 0) { 478 + err = add_mtd_partitions(mtd, real_parts, err); 479 + kfree(real_parts); 480 + } else if (err == 0) { 481 + err = add_mtd_device(mtd); 482 + if (err == 1) 483 + err = -ENODEV; 484 + } 485 + 486 + return err; 451 487 } 452 - EXPORT_SYMBOL_GPL(mtd_device_register); 488 + EXPORT_SYMBOL_GPL(mtd_device_parse_register); 453 489 454 490 /** 455 491 * mtd_device_unregister - unregister an existing MTD device.
+3
drivers/mtd/mtdcore.h
··· 15 15 extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, 16 16 int); 17 17 extern int del_mtd_partitions(struct mtd_info *); 18 + extern int parse_mtd_partitions(struct mtd_info *master, const char **types, 19 + struct mtd_partition **pparts, 20 + struct mtd_part_parser_data *data); 18 21 19 22 #define mtd_for_each_device(mtd) \ 20 23 for ((mtd) = __mtd_next_device(0); \
+1 -1
drivers/mtd/mtdoops.c
··· 258 258 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, 259 259 &retlen, (u_char *) &count[0]); 260 260 if (retlen != MTDOOPS_HEADER_SIZE || 261 - (ret < 0 && ret != -EUCLEAN)) { 261 + (ret < 0 && !mtd_is_bitflip(ret))) { 262 262 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", 263 263 page * record_size, retlen, 264 264 MTDOOPS_HEADER_SIZE, ret);
+54 -8
drivers/mtd/mtdpart.c
··· 73 73 res = part->master->read(part->master, from + part->offset, 74 74 len, retlen, buf); 75 75 if (unlikely(res)) { 76 - if (res == -EUCLEAN) 76 + if (mtd_is_bitflip(res)) 77 77 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 78 - if (res == -EBADMSG) 78 + if (mtd_is_eccerr(res)) 79 79 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; 80 80 } 81 81 return res; ··· 130 130 if (ops->oobbuf) { 131 131 size_t len, pages; 132 132 133 - if (ops->mode == MTD_OOB_AUTO) 133 + if (ops->mode == MTD_OPS_AUTO_OOB) 134 134 len = mtd->oobavail; 135 135 else 136 136 len = mtd->oobsize; ··· 142 142 143 143 res = part->master->read_oob(part->master, from + part->offset, ops); 144 144 if (unlikely(res)) { 145 - if (res == -EUCLEAN) 145 + if (mtd_is_bitflip(res)) 146 146 mtd->ecc_stats.corrected++; 147 - if (res == -EBADMSG) 147 + if (mtd_is_eccerr(res)) 148 148 mtd->ecc_stats.failed++; 149 149 } 150 150 return res; ··· 479 479 (unsigned long long)cur_offset, (unsigned long long)slave->offset); 480 480 } 481 481 } 482 + if (slave->offset == MTDPART_OFS_RETAIN) { 483 + slave->offset = cur_offset; 484 + if (master->size - slave->offset >= slave->mtd.size) { 485 + slave->mtd.size = master->size - slave->offset 486 + - slave->mtd.size; 487 + } else { 488 + printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", 489 + part->name, master->size - slave->offset, 490 + slave->mtd.size); 491 + /* register to preserve ordering */ 492 + goto out_register; 493 + } 494 + } 482 495 if (slave->mtd.size == MTDPART_SIZ_FULL) 483 496 slave->mtd.size = master->size - slave->offset; 484 497 ··· 706 693 return ret; 707 694 } 708 695 696 + #define put_partition_parser(p) do { module_put((p)->owner); } while (0) 697 + 709 698 int register_mtd_parser(struct mtd_part_parser *p) 710 699 { 711 700 spin_lock(&part_parser_lock); ··· 727 712 } 728 713 EXPORT_SYMBOL_GPL(deregister_mtd_parser); 729 714 715 + /* 716 + * 
Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you 717 + * are changing this array! 718 + */ 719 + static const char *default_mtd_part_types[] = { 720 + "cmdlinepart", 721 + "ofpart", 722 + NULL 723 + }; 724 + 725 + /** 726 + * parse_mtd_partitions - parse MTD partitions 727 + * @master: the master partition (describes whole MTD device) 728 + * @types: names of partition parsers to try or %NULL 729 + * @pparts: array of partitions found is returned here 730 + * @data: MTD partition parser-specific data 731 + * 732 + * This function tries to find partition on MTD device @master. It uses MTD 733 + * partition parsers, specified in @types. However, if @types is %NULL, then 734 + * the default list of parsers is used. The default list contains only the 735 + * "cmdlinepart" and "ofpart" parsers ATM. 736 + * 737 + * This function may return: 738 + * o a negative error code in case of failure 739 + * o zero if no partitions were found 740 + * o a positive number of found partitions, in which case on exit @pparts will 741 + * point to an array containing this number of &struct mtd_info objects. 
742 + */ 730 743 int parse_mtd_partitions(struct mtd_info *master, const char **types, 731 - struct mtd_partition **pparts, unsigned long origin) 744 + struct mtd_partition **pparts, 745 + struct mtd_part_parser_data *data) 732 746 { 733 747 struct mtd_part_parser *parser; 734 748 int ret = 0; 749 + 750 + if (!types) 751 + types = default_mtd_part_types; 735 752 736 753 for ( ; ret <= 0 && *types; types++) { 737 754 parser = get_partition_parser(*types); ··· 771 724 parser = get_partition_parser(*types); 772 725 if (!parser) 773 726 continue; 774 - ret = (*parser->parse_fn)(master, pparts, origin); 727 + ret = (*parser->parse_fn)(master, pparts, data); 775 728 if (ret > 0) { 776 729 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 777 730 ret, parser->name, master->name); ··· 780 733 } 781 734 return ret; 782 735 } 783 - EXPORT_SYMBOL_GPL(parse_mtd_partitions); 784 736 785 737 int mtd_is_partition(struct mtd_info *mtd) 786 738 {
+10 -10
drivers/mtd/mtdsuper.c
··· 27 27 struct mtd_info *mtd = _mtd; 28 28 29 29 if (sb->s_mtd == mtd) { 30 - DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n", 30 + pr_debug("MTDSB: Match on device %d (\"%s\")\n", 31 31 mtd->index, mtd->name); 32 32 return 1; 33 33 } 34 34 35 - DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", 35 + pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", 36 36 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); 37 37 return 0; 38 38 } ··· 71 71 goto already_mounted; 72 72 73 73 /* fresh new superblock */ 74 - DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", 74 + pr_debug("MTDSB: New superblock for device %d (\"%s\")\n", 75 75 mtd->index, mtd->name); 76 76 77 77 sb->s_flags = flags; ··· 88 88 89 89 /* new mountpoint for an already mounted superblock */ 90 90 already_mounted: 91 - DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", 91 + pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n", 92 92 mtd->index, mtd->name); 93 93 put_mtd_device(mtd); 94 94 return dget(sb->s_root); ··· 109 109 110 110 mtd = get_mtd_device(NULL, mtdnr); 111 111 if (IS_ERR(mtd)) { 112 - DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr); 112 + pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr); 113 113 return ERR_CAST(mtd); 114 114 } 115 115 ··· 132 132 if (!dev_name) 133 133 return ERR_PTR(-EINVAL); 134 134 135 - DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name); 135 + pr_debug("MTDSB: dev_name \"%s\"\n", dev_name); 136 136 137 137 /* the preferred way of mounting in future; especially when 138 138 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or ··· 143 143 struct mtd_info *mtd; 144 144 145 145 /* mount by MTD device name */ 146 - DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", 146 + pr_debug("MTDSB: mtd:%%s, name \"%s\"\n", 147 147 dev_name + 4); 148 148 149 149 mtd = get_mtd_device_nm(dev_name + 4); ··· 164 164 mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); 165 165 if (!*endptr) { 166 
166 /* It was a valid number */ 167 - DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", 167 + pr_debug("MTDSB: mtd%%d, mtdnr %d\n", 168 168 mtdnr); 169 169 return mount_mtd_nr(fs_type, flags, 170 170 dev_name, data, ··· 180 180 bdev = lookup_bdev(dev_name); 181 181 if (IS_ERR(bdev)) { 182 182 ret = PTR_ERR(bdev); 183 - DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret); 183 + pr_debug("MTDSB: lookup_bdev() returned %d\n", ret); 184 184 return ERR_PTR(ret); 185 185 } 186 - DEBUG(1, "MTDSB: lookup_bdev() returned 0\n"); 186 + pr_debug("MTDSB: lookup_bdev() returned 0\n"); 187 187 188 188 ret = -EINVAL; 189 189
+15 -16
drivers/mtd/mtdswap.c
··· 86 86 unsigned int flags; 87 87 unsigned int active_count; 88 88 unsigned int erase_count; 89 - unsigned int pad; /* speeds up pointer decremtnt */ 89 + unsigned int pad; /* speeds up pointer decrement */ 90 90 }; 91 91 92 92 #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ ··· 314 314 { 315 315 int ret = d->mtd->read_oob(d->mtd, from, ops); 316 316 317 - if (ret == -EUCLEAN) 317 + if (mtd_is_bitflip(ret)) 318 318 return ret; 319 319 320 320 if (ret) { ··· 350 350 ops.oobbuf = d->oob_buf; 351 351 ops.ooboffs = 0; 352 352 ops.datbuf = NULL; 353 - ops.mode = MTD_OOB_AUTO; 353 + ops.mode = MTD_OPS_AUTO_OOB; 354 354 355 355 ret = mtdswap_read_oob(d, offset, &ops); 356 356 357 - if (ret && ret != -EUCLEAN) 357 + if (ret && !mtd_is_bitflip(ret)) 358 358 return ret; 359 359 360 360 data = (struct mtdswap_oobdata *)d->oob_buf; ··· 363 363 364 364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { 365 365 eb->erase_count = le32_to_cpu(data->count); 366 - if (ret == -EUCLEAN) 366 + if (mtd_is_bitflip(ret)) 367 367 ret = MTDSWAP_SCANNED_BITFLIP; 368 368 else { 369 369 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) ··· 389 389 390 390 ops.ooboffs = 0; 391 391 ops.oobbuf = (uint8_t *)&n; 392 - ops.mode = MTD_OOB_AUTO; 392 + ops.mode = MTD_OPS_AUTO_OOB; 393 393 ops.datbuf = NULL; 394 394 395 395 if (marker == MTDSWAP_TYPE_CLEAN) { ··· 408 408 if (ret) { 409 409 dev_warn(d->dev, "Write OOB failed for block at %08llx " 410 410 "error %d\n", offset, ret); 411 - if (ret == -EIO || ret == -EBADMSG) 411 + if (ret == -EIO || mtd_is_eccerr(ret)) 412 412 mtdswap_handle_write_error(d, eb); 413 413 return ret; 414 414 } ··· 628 628 TREE_COUNT(d, CLEAN)--; 629 629 630 630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); 631 - } while (ret == -EIO || ret == -EBADMSG); 631 + } while (ret == -EIO || mtd_is_eccerr(ret)); 632 632 633 633 if (ret) 634 634 return ret; ··· 678 678 ret = mtdswap_map_free_block(d, page, bp); 679 679 eb = d->eb_data + 
(*bp / d->pages_per_eblk); 680 680 681 - if (ret == -EIO || ret == -EBADMSG) { 681 + if (ret == -EIO || mtd_is_eccerr(ret)) { 682 682 d->curr_write = NULL; 683 683 eb->active_count--; 684 684 d->revmap[*bp] = PAGE_UNDEF; ··· 690 690 691 691 writepos = (loff_t)*bp << PAGE_SHIFT; 692 692 ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); 693 - if (ret == -EIO || ret == -EBADMSG) { 693 + if (ret == -EIO || mtd_is_eccerr(ret)) { 694 694 d->curr_write_pos--; 695 695 eb->active_count--; 696 696 d->revmap[*bp] = PAGE_UNDEF; ··· 738 738 retry: 739 739 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); 740 740 741 - if (ret < 0 && ret != -EUCLEAN) { 741 + if (ret < 0 && !mtd_is_bitflip(ret)) { 742 742 oldeb = d->eb_data + oldblock / d->pages_per_eblk; 743 743 oldeb->flags |= EBLOCK_READERR; 744 744 ··· 931 931 struct mtd_oob_ops ops; 932 932 int ret; 933 933 934 - ops.mode = MTD_OOB_AUTO; 934 + ops.mode = MTD_OPS_AUTO_OOB; 935 935 ops.len = mtd->writesize; 936 936 ops.ooblen = mtd->ecclayout->oobavail; 937 937 ops.ooboffs = 0; ··· 1016 1016 1017 1017 if (ret == 0) 1018 1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); 1019 - else if (ret != -EIO && ret != -EBADMSG) 1019 + else if (ret != -EIO && !mtd_is_eccerr(ret)) 1020 1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); 1021 1021 1022 1022 return 0; ··· 1164 1164 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); 1165 1165 1166 1166 d->mtd_read_count++; 1167 - if (ret == -EUCLEAN) { 1167 + if (mtd_is_bitflip(ret)) { 1168 1168 eb->flags |= EBLOCK_BITFLIP; 1169 1169 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); 1170 1170 ret = 0; ··· 1374 1374 goto revmap_fail; 1375 1375 1376 1376 eblk_bytes = sizeof(struct swap_eb)*d->eblks; 1377 - d->eb_data = vmalloc(eblk_bytes); 1377 + d->eb_data = vzalloc(eblk_bytes); 1378 1378 if (!d->eb_data) 1379 1379 goto eb_data_fail; 1380 1380 1381 - memset(d->eb_data, 0, eblk_bytes); 1382 1381 for (i = 0; i < pages; i++) 1383 1382 d->page_data[i] = BLOCK_UNDEF; 1384 1383
+18 -11
drivers/mtd/nand/Kconfig
··· 83 83 scratch register here to enable this feature. On Intel Moorestown 84 84 boards, the scratch register is at 0xFF108018. 85 85 86 - config MTD_NAND_EDB7312 87 - tristate "Support for Cirrus Logic EBD7312 evaluation board" 88 - depends on ARCH_EDB7312 89 - help 90 - This enables the driver for the Cirrus Logic EBD7312 evaluation 91 - board to access the onboard NAND Flash. 92 - 93 86 config MTD_NAND_H1900 94 87 tristate "iPAQ H1900 flash" 95 - depends on ARCH_PXA 88 + depends on ARCH_PXA && BROKEN 96 89 help 97 90 This enables the driver for the iPAQ h1900 flash. 98 91 ··· 109 116 Support for NAND flash on Amstrad E3 (Delta). 110 117 111 118 config MTD_NAND_OMAP2 112 - tristate "NAND Flash device on OMAP2 and OMAP3" 113 - depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3) 119 + tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4" 120 + depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4) 114 121 help 115 - Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. 122 + Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 123 + platforms. 116 124 117 125 config MTD_NAND_IDS 118 126 tristate ··· 416 422 help 417 423 The simulator may simulate various NAND flash chips for the 418 424 MTD nand layer. 425 + 426 + config MTD_NAND_GPMI_NAND 427 + bool "GPMI NAND Flash Controller driver" 428 + depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) 429 + select MTD_PARTITIONS 430 + select MTD_CMDLINE_PARTS 431 + help 432 + Enables NAND Flash support for IMX23 or IMX28. 433 + The GPMI controller is very powerful, with the help of BCH 434 + module, it can do the hardware ECC. The GPMI supports several 435 + NAND flashs at the same time. The GPMI may conflicts with other 436 + block, such as SD card. So pay attention to it when you enable 437 + the GPMI. 419 438 420 439 config MTD_NAND_PLATFORM 421 440 tristate "Support for generic platform NAND driver"
+1 -1
drivers/mtd/nand/Makefile
··· 13 13 obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 14 14 obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 15 15 obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 16 - obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o 17 16 obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 18 17 obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 19 18 obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o ··· 48 49 obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 49 50 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 50 51 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 52 + obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ 51 53 52 54 nand-objs := nand_base.o nand_bbt.o
+8 -66
drivers/mtd/nand/atmel_nand.c
··· 161 161 !!host->board->rdy_pin_active_low; 162 162 } 163 163 164 - /* 165 - * Minimal-overhead PIO for data access. 166 - */ 167 - static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) 168 - { 169 - struct nand_chip *nand_chip = mtd->priv; 170 - 171 - __raw_readsb(nand_chip->IO_ADDR_R, buf, len); 172 - } 173 - 174 - static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) 175 - { 176 - struct nand_chip *nand_chip = mtd->priv; 177 - 178 - __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); 179 - } 180 - 181 - static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) 182 - { 183 - struct nand_chip *nand_chip = mtd->priv; 184 - 185 - __raw_writesb(nand_chip->IO_ADDR_W, buf, len); 186 - } 187 - 188 - static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) 189 - { 190 - struct nand_chip *nand_chip = mtd->priv; 191 - 192 - __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); 193 - } 194 - 195 164 static void dma_complete_func(void *completion) 196 165 { 197 166 complete(completion); ··· 235 266 static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 236 267 { 237 268 struct nand_chip *chip = mtd->priv; 238 - struct atmel_nand_host *host = chip->priv; 239 269 240 270 if (use_dma && len > mtd->oobsize) 241 271 /* only use DMA for bigger than oob size: better performances */ 242 272 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 243 273 return; 244 274 245 - if (host->board->bus_width_16) 246 - atmel_read_buf16(mtd, buf, len); 247 - else 248 - atmel_read_buf8(mtd, buf, len); 275 + /* if no DMA operation possible, use PIO */ 276 + memcpy_fromio(buf, chip->IO_ADDR_R, len); 249 277 } 250 278 251 279 static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 252 280 { 253 281 struct nand_chip *chip = mtd->priv; 254 - struct atmel_nand_host *host = chip->priv; 255 282 256 283 if (use_dma && len > mtd->oobsize) 257 284 /* only use DMA for bigger than oob size: better performances */ 258 285 if 
(atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 259 286 return; 260 287 261 - if (host->board->bus_width_16) 262 - atmel_write_buf16(mtd, buf, len); 263 - else 264 - atmel_write_buf8(mtd, buf, len); 288 + /* if no DMA operation possible, use PIO */ 289 + memcpy_toio(chip->IO_ADDR_W, buf, len); 265 290 } 266 291 267 292 /* ··· 444 481 } 445 482 } 446 483 447 - #ifdef CONFIG_MTD_CMDLINE_PARTS 448 - static const char *part_probes[] = { "cmdlinepart", NULL }; 449 - #endif 450 - 451 484 /* 452 485 * Probe for the NAND device. 453 486 */ ··· 455 496 struct resource *regs; 456 497 struct resource *mem; 457 498 int res; 458 - struct mtd_partition *partitions = NULL; 459 - int num_partitions = 0; 460 499 461 500 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 462 501 if (!mem) { ··· 540 583 541 584 if (on_flash_bbt) { 542 585 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); 543 - nand_chip->options |= NAND_USE_FLASH_BBT; 586 + nand_chip->bbt_options |= NAND_BBT_USE_FLASH; 544 587 } 545 588 546 589 if (!cpu_has_dma()) ··· 551 594 552 595 dma_cap_zero(mask); 553 596 dma_cap_set(DMA_MEMCPY, mask); 554 - host->dma_chan = dma_request_channel(mask, 0, NULL); 597 + host->dma_chan = dma_request_channel(mask, NULL, NULL); 555 598 if (!host->dma_chan) { 556 599 dev_err(host->dev, "Failed to request DMA channel\n"); 557 600 use_dma = 0; ··· 612 655 goto err_scan_tail; 613 656 } 614 657 615 - #ifdef CONFIG_MTD_CMDLINE_PARTS 616 658 mtd->name = "atmel_nand"; 617 - num_partitions = parse_mtd_partitions(mtd, part_probes, 618 - &partitions, 0); 619 - #endif 620 - if (num_partitions <= 0 && host->board->partition_info) 621 - partitions = host->board->partition_info(mtd->size, 622 - &num_partitions); 623 - 624 - if ((!partitions) || (num_partitions == 0)) { 625 - printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); 626 - res = -ENXIO; 627 - goto err_no_partitions; 628 - } 629 - 630 - res = mtd_device_register(mtd, partitions, num_partitions); 659 + res 
= mtd_device_parse_register(mtd, NULL, 0, 660 + host->board->parts, host->board->num_parts); 631 661 if (!res) 632 662 return res; 633 663 634 - err_no_partitions: 635 - nand_release(mtd); 636 664 err_scan_tail: 637 665 err_scan_ident: 638 666 err_no_card:
+13 -16
drivers/mtd/nand/au1550nd.c
··· 52 52 * au_read_byte - read one byte from the chip 53 53 * @mtd: MTD device structure 54 54 * 55 - * read function for 8bit buswith 55 + * read function for 8bit buswidth 56 56 */ 57 57 static u_char au_read_byte(struct mtd_info *mtd) 58 58 { ··· 67 67 * @mtd: MTD device structure 68 68 * @byte: pointer to data byte to write 69 69 * 70 - * write function for 8it buswith 70 + * write function for 8it buswidth 71 71 */ 72 72 static void au_write_byte(struct mtd_info *mtd, u_char byte) 73 73 { ··· 77 77 } 78 78 79 79 /** 80 - * au_read_byte16 - read one byte endianess aware from the chip 80 + * au_read_byte16 - read one byte endianness aware from the chip 81 81 * @mtd: MTD device structure 82 82 * 83 - * read function for 16bit buswith with 84 - * endianess conversion 83 + * read function for 16bit buswidth with endianness conversion 85 84 */ 86 85 static u_char au_read_byte16(struct mtd_info *mtd) 87 86 { ··· 91 92 } 92 93 93 94 /** 94 - * au_write_byte16 - write one byte endianess aware to the chip 95 + * au_write_byte16 - write one byte endianness aware to the chip 95 96 * @mtd: MTD device structure 96 97 * @byte: pointer to data byte to write 97 98 * 98 - * write function for 16bit buswith with 99 - * endianess conversion 99 + * write function for 16bit buswidth with endianness conversion 100 100 */ 101 101 static void au_write_byte16(struct mtd_info *mtd, u_char byte) 102 102 { ··· 108 110 * au_read_word - read one word from the chip 109 111 * @mtd: MTD device structure 110 112 * 111 - * read function for 16bit buswith without 112 - * endianess conversion 113 + * read function for 16bit buswidth without endianness conversion 113 114 */ 114 115 static u16 au_read_word(struct mtd_info *mtd) 115 116 { ··· 124 127 * @buf: data buffer 125 128 * @len: number of bytes to write 126 129 * 127 - * write function for 8bit buswith 130 + * write function for 8bit buswidth 128 131 */ 129 132 static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 130 133 
{ ··· 143 146 * @buf: buffer to store date 144 147 * @len: number of bytes to read 145 148 * 146 - * read function for 8bit buswith 149 + * read function for 8bit buswidth 147 150 */ 148 151 static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) 149 152 { ··· 162 165 * @buf: buffer containing the data to compare 163 166 * @len: number of bytes to compare 164 167 * 165 - * verify function for 8bit buswith 168 + * verify function for 8bit buswidth 166 169 */ 167 170 static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 168 171 { ··· 184 187 * @buf: data buffer 185 188 * @len: number of bytes to write 186 189 * 187 - * write function for 16bit buswith 190 + * write function for 16bit buswidth 188 191 */ 189 192 static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) 190 193 { ··· 206 209 * @buf: buffer to store date 207 210 * @len: number of bytes to read 208 211 * 209 - * read function for 16bit buswith 212 + * read function for 16bit buswidth 210 213 */ 211 214 static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 212 215 { ··· 227 230 * @buf: buffer containing the data to compare 228 231 * @len: number of bytes to compare 229 232 * 230 - * verify function for 16bit buswith 233 + * verify function for 16bit buswidth 231 234 */ 232 235 static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) 233 236 {
+2 -2
drivers/mtd/nand/autcpu12.c
··· 172 172 173 173 /* Enable the following for a flash based bad block table */ 174 174 /* 175 - this->options = NAND_USE_FLASH_BBT; 175 + this->bbt_options = NAND_BBT_USE_FLASH; 176 176 */ 177 - this->options = NAND_USE_FLASH_BBT; 177 + this->bbt_options = NAND_BBT_USE_FLASH; 178 178 179 179 /* Scan to find existence of the device */ 180 180 if (nand_scan(autcpu12_mtd, 1)) {
+21 -36
drivers/mtd/nand/bcm_umi_nand.c
··· 52 52 static const __devinitconst char gBanner[] = KERN_INFO \ 53 53 "BCM UMI MTD NAND Driver: 1.00\n"; 54 54 55 - const char *part_probes[] = { "cmdlinepart", NULL }; 56 - 57 55 #if NAND_ECC_BCH 58 56 static uint8_t scan_ff_pattern[] = { 0xff }; 59 57 ··· 374 376 375 377 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 376 378 377 - if (!r) 378 - return -ENXIO; 379 + if (!r) { 380 + err = -ENXIO; 381 + goto out_free; 382 + } 379 383 380 384 /* map physical address */ 381 385 bcm_umi_io_base = ioremap(r->start, resource_size(r)); 382 386 383 387 if (!bcm_umi_io_base) { 384 388 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); 385 - kfree(board_mtd); 386 - return -EIO; 389 + err = -EIO; 390 + goto out_free; 387 391 } 388 392 389 393 /* Get pointer to private data */ ··· 401 401 /* Initialize the NAND hardware. */ 402 402 if (bcm_umi_nand_inithw() < 0) { 403 403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); 404 - iounmap(bcm_umi_io_base); 405 - kfree(board_mtd); 406 - return -EIO; 404 + err = -EIO; 405 + goto out_unmap; 407 406 } 408 407 409 408 /* Set address of NAND IO lines */ ··· 435 436 #if USE_DMA 436 437 err = nand_dma_init(); 437 438 if (err != 0) 438 - return err; 439 + goto out_unmap; 439 440 #endif 440 441 441 442 /* Figure out the size of the device that we have. 
··· 446 447 err = nand_scan_ident(board_mtd, 1, NULL); 447 448 if (err) { 448 449 printk(KERN_ERR "nand_scan failed: %d\n", err); 449 - iounmap(bcm_umi_io_base); 450 - kfree(board_mtd); 451 - return err; 450 + goto out_unmap; 452 451 } 453 452 454 453 /* Now that we know the nand size, we can setup the ECC layout */ ··· 465 468 { 466 469 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", 467 470 board_mtd->writesize); 468 - return -EINVAL; 471 + err = -EINVAL; 472 + goto out_unmap; 469 473 } 470 474 } 471 475 472 476 #if NAND_ECC_BCH 473 477 if (board_mtd->writesize > 512) { 474 - if (this->options & NAND_USE_FLASH_BBT) 478 + if (this->bbt_options & NAND_BBT_USE_FLASH) 475 479 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; 476 480 this->badblock_pattern = &largepage_bbt; 477 481 } ··· 483 485 err = nand_scan_tail(board_mtd); 484 486 if (err) { 485 487 printk(KERN_ERR "nand_scan failed: %d\n", err); 486 - iounmap(bcm_umi_io_base); 487 - kfree(board_mtd); 488 - return err; 488 + goto out_unmap; 489 489 } 490 490 491 491 /* Register the partitions */ 492 - { 493 - int nr_partitions; 494 - struct mtd_partition *partition_info; 495 - 496 - board_mtd->name = "bcm_umi-nand"; 497 - nr_partitions = 498 - parse_mtd_partitions(board_mtd, part_probes, 499 - &partition_info, 0); 500 - 501 - if (nr_partitions <= 0) { 502 - printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n", 503 - nr_partitions); 504 - iounmap(bcm_umi_io_base); 505 - kfree(board_mtd); 506 - return -EIO; 507 - } 508 - mtd_device_register(board_mtd, partition_info, nr_partitions); 509 - } 492 + board_mtd->name = "bcm_umi-nand"; 493 + mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0); 510 494 511 495 /* Return happy */ 512 496 return 0; 497 + out_unmap: 498 + iounmap(bcm_umi_io_base); 499 + out_free: 500 + kfree(board_mtd); 501 + return err; 513 502 } 514 503 515 504 static int bcm_umi_nand_remove(struct platform_device *pdev)
+5 -16
drivers/mtd/nand/cafe_nand.c
··· 58 58 59 59 struct cafe_priv { 60 60 struct nand_chip nand; 61 - struct mtd_partition *parts; 62 61 struct pci_dev *pdev; 63 62 void __iomem *mmio; 64 63 struct rs_control *rs; ··· 371 372 return 1; 372 373 } 373 374 /** 374 - * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read 375 + * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read 375 376 * @mtd: mtd info structure 376 377 * @chip: nand chip info structure 377 378 * @buf: buffer to store read data ··· 630 631 struct cafe_priv *cafe; 631 632 uint32_t ctrl; 632 633 int err = 0; 633 - struct mtd_partition *parts; 634 - int nr_parts; 635 634 636 635 /* Very old versions shared the same PCI ident for all three 637 636 functions on the chip. Verify the class too... */ ··· 684 687 cafe->nand.chip_delay = 0; 685 688 686 689 /* Enable the following for a flash based bad block table */ 687 - cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 690 + cafe->nand.bbt_options = NAND_BBT_USE_FLASH; 691 + cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 688 692 689 693 if (skipbbt) { 690 694 cafe->nand.options |= NAND_SKIP_BBTSCAN; ··· 798 800 799 801 pci_set_drvdata(pdev, mtd); 800 802 801 - /* We register the whole device first, separate from the partitions */ 802 - mtd_device_register(mtd, NULL, 0); 803 - 804 - #ifdef CONFIG_MTD_CMDLINE_PARTS 805 803 mtd->name = "cafe_nand"; 806 - #endif 807 - nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 808 - if (nr_parts > 0) { 809 - cafe->parts = parts; 810 - dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts); 811 - mtd_device_register(mtd, parts, nr_parts); 812 - } 804 + mtd_device_parse_register(mtd, part_probes, 0, NULL, 0); 805 + 813 806 goto out; 814 807 815 808 out_irq:
+2 -21
drivers/mtd/nand/cmx270_nand.c
··· 51 51 }; 52 52 #define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) 53 53 54 - const char *part_probes[] = { "cmdlinepart", NULL }; 55 - 56 54 static u_char cmx270_read_byte(struct mtd_info *mtd) 57 55 { 58 56 struct nand_chip *this = mtd->priv; ··· 150 152 static int __init cmx270_init(void) 151 153 { 152 154 struct nand_chip *this; 153 - const char *part_type; 154 - struct mtd_partition *mtd_parts; 155 - int mtd_parts_nb = 0; 156 155 int ret; 157 156 158 157 if (!(machine_is_armcore() && cpu_is_pxa27x())) ··· 218 223 goto err_scan; 219 224 } 220 225 221 - #ifdef CONFIG_MTD_CMDLINE_PARTS 222 - mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes, 223 - &mtd_parts, 0); 224 - if (mtd_parts_nb > 0) 225 - part_type = "command line"; 226 - else 227 - mtd_parts_nb = 0; 228 - #endif 229 - if (!mtd_parts_nb) { 230 - mtd_parts = partition_info; 231 - mtd_parts_nb = NUM_PARTITIONS; 232 - part_type = "static"; 233 - } 234 - 235 226 /* Register the partitions */ 236 - pr_notice("Using %s partition definition\n", part_type); 237 - ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 227 + ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0, 228 + partition_info, NUM_PARTITIONS); 238 229 if (ret) 239 230 goto err_scan; 240 231
+4 -11
drivers/mtd/nand/cs553x_nand.c
··· 239 239 this->ecc.correct = nand_correct_data; 240 240 241 241 /* Enable the following for a flash based bad block table */ 242 - this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 242 + this->bbt_options = NAND_BBT_USE_FLASH; 243 + this->options = NAND_NO_AUTOINCR; 243 244 244 245 /* Scan to find existence of the device */ 245 246 if (nand_scan(new_mtd, 1)) { ··· 278 277 return 0; 279 278 } 280 279 281 - static const char *part_probes[] = { "cmdlinepart", NULL }; 282 - 283 280 static int __init cs553x_init(void) 284 281 { 285 282 int err = -ENXIO; 286 283 int i; 287 284 uint64_t val; 288 - int mtd_parts_nb = 0; 289 - struct mtd_partition *mtd_parts = NULL; 290 285 291 286 /* If the CPU isn't a Geode GX or LX, abort */ 292 287 if (!is_geode()) ··· 312 315 do mtdconcat etc. if we want to. */ 313 316 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 314 317 if (cs553x_mtd[i]) { 315 - 316 318 /* If any devices registered, return success. Else the last error. */ 317 - mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); 318 - if (mtd_parts_nb > 0) 319 - printk(KERN_NOTICE "Using command line partition definition\n"); 320 - mtd_device_register(cs553x_mtd[i], mtd_parts, 321 - mtd_parts_nb); 319 + mtd_device_parse_register(cs553x_mtd[i], NULL, 0, 320 + NULL, 0); 322 321 err = 0; 323 322 } 324 323 }
+5 -34
drivers/mtd/nand/davinci_nand.c
··· 57 57 58 58 struct device *dev; 59 59 struct clk *clk; 60 - bool partitioned; 61 60 62 61 bool is_readmode; 63 62 ··· 529 530 int ret; 530 531 uint32_t val; 531 532 nand_ecc_modes_t ecc_mode; 532 - struct mtd_partition *mtd_parts = NULL; 533 - int mtd_parts_nb = 0; 534 533 535 534 /* insist on board-specific configuration */ 536 535 if (!pdata) ··· 578 581 info->chip.chip_delay = 0; 579 582 info->chip.select_chip = nand_davinci_select_chip; 580 583 581 - /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 584 + /* options such as NAND_BBT_USE_FLASH */ 585 + info->chip.bbt_options = pdata->bbt_options; 586 + /* options such as 16-bit widths */ 582 587 info->chip.options = pdata->options; 583 588 info->chip.bbt_td = pdata->bbt_td; 584 589 info->chip.bbt_md = pdata->bbt_md; ··· 750 751 if (ret < 0) 751 752 goto err_scan; 752 753 753 - if (mtd_has_cmdlinepart()) { 754 - static const char *probes[] __initconst = { 755 - "cmdlinepart", NULL 756 - }; 757 - 758 - mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes, 759 - &mtd_parts, 0); 760 - } 761 - 762 - if (mtd_parts_nb <= 0) { 763 - mtd_parts = pdata->parts; 764 - mtd_parts_nb = pdata->nr_parts; 765 - } 766 - 767 - /* Register any partitions */ 768 - if (mtd_parts_nb > 0) { 769 - ret = mtd_device_register(&info->mtd, mtd_parts, 770 - mtd_parts_nb); 771 - if (ret == 0) 772 - info->partitioned = true; 773 - } 774 - 775 - /* If there's no partition info, just package the whole chip 776 - * as a single MTD device. 777 - */ 778 - if (!info->partitioned) 779 - ret = mtd_device_register(&info->mtd, NULL, 0) ? 
-ENODEV : 0; 754 + ret = mtd_device_parse_register(&info->mtd, NULL, 0, 755 + pdata->parts, pdata->nr_parts); 780 756 781 757 if (ret < 0) 782 758 goto err_scan; ··· 790 816 static int __exit nand_davinci_remove(struct platform_device *pdev) 791 817 { 792 818 struct davinci_nand_info *info = platform_get_drvdata(pdev); 793 - int status; 794 - 795 - status = mtd_device_unregister(&info->mtd); 796 819 797 820 spin_lock_irq(&davinci_nand_lock); 798 821 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+3 -3
drivers/mtd/nand/denali.c
··· 1346 1346 * */ 1347 1347 denali->bbtskipbytes = ioread32(denali->flash_reg + 1348 1348 SPARE_AREA_SKIP_BYTES); 1349 + detect_max_banks(denali); 1349 1350 denali_nand_reset(denali); 1350 1351 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1351 1352 iowrite32(CHIP_EN_DONT_CARE__FLAG, ··· 1357 1356 /* Should set value for these registers when init */ 1358 1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1359 1358 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1360 - detect_max_banks(denali); 1361 1359 denali_nand_timing_set(denali); 1362 1360 denali_irq_init(denali); 1363 1361 } ··· 1577 1577 denali->nand.bbt_md = &bbt_mirror_descr; 1578 1578 1579 1579 /* skip the scan for now until we have OOB read and write support */ 1580 - denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 1580 + denali->nand.bbt_options |= NAND_BBT_USE_FLASH; 1581 + denali->nand.options |= NAND_SKIP_BBTSCAN; 1581 1582 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1582 1583 1583 1584 /* Denali Controller only support 15bit and 8bit ECC in MRST, ··· 1677 1676 struct denali_nand_info *denali = pci_get_drvdata(dev); 1678 1677 1679 1678 nand_release(&denali->mtd); 1680 - mtd_device_unregister(&denali->mtd); 1681 1679 1682 1680 denali_irq_cleanup(dev->irq, denali); 1683 1681
+4 -4
drivers/mtd/nand/diskonchip.c
··· 133 133 134 134 /* 135 135 * The HW decoder in the DoC ASIC's provides us a error syndrome, 136 - * which we must convert to a standard syndrom usable by the generic 136 + * which we must convert to a standard syndrome usable by the generic 137 137 * Reed-Solomon library code. 138 138 * 139 139 * Fabrice Bellard figured this out in the old docecc code. I added ··· 154 154 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); 155 155 parity = ecc[1]; 156 156 157 - /* Initialize the syndrom buffer */ 157 + /* Initialize the syndrome buffer */ 158 158 for (i = 0; i < NROOTS; i++) 159 159 s[i] = ds[0]; 160 160 /* ··· 1032 1032 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 1033 1033 else 1034 1034 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 1035 - if (no_ecc_failures && (ret == -EBADMSG)) { 1035 + if (no_ecc_failures && mtd_is_eccerr(ret)) { 1036 1036 printk(KERN_ERR "suppressing ECC failure\n"); 1037 1037 ret = 0; 1038 1038 } ··· 1653 1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1654 1654 nand->ecc.size = 512; 1655 1655 nand->ecc.bytes = 6; 1656 - nand->options = NAND_USE_FLASH_BBT; 1656 + nand->bbt_options = NAND_BBT_USE_FLASH; 1657 1657 1658 1658 doc->physadr = physadr; 1659 1659 doc->virtadr = virtadr;
-203
drivers/mtd/nand/edb7312.c
··· 1 - /* 2 - * drivers/mtd/nand/edb7312.c 3 - * 4 - * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) 5 - * 6 - * Derived from drivers/mtd/nand/autcpu12.c 7 - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License version 2 as 11 - * published by the Free Software Foundation. 12 - * 13 - * Overview: 14 - * This is a device driver for the NAND flash device found on the 15 - * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is 16 - * a 64Mibit (8MiB x 8 bits) NAND flash device. 17 - */ 18 - 19 - #include <linux/slab.h> 20 - #include <linux/module.h> 21 - #include <linux/init.h> 22 - #include <linux/mtd/mtd.h> 23 - #include <linux/mtd/nand.h> 24 - #include <linux/mtd/partitions.h> 25 - #include <asm/io.h> 26 - #include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */ 27 - #include <asm/sizes.h> 28 - #include <asm/hardware/clps7111.h> 29 - 30 - /* 31 - * MTD structure for EDB7312 board 32 - */ 33 - static struct mtd_info *ep7312_mtd = NULL; 34 - 35 - /* 36 - * Values specific to the EDB7312 board (used with EP7312 processor) 37 - */ 38 - #define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */ 39 - #define EP7312_PXDR 0x0001 /* 40 - * IO offset to Port B data register 41 - * where the CLE, ALE and NCE pins 42 - * are wired to. 43 - */ 44 - #define EP7312_PXDDR 0x0041 /* 45 - * IO offset to Port B data direction 46 - * register so we can control the IO 47 - * lines. 
48 - */ 49 - 50 - /* 51 - * Module stuff 52 - */ 53 - 54 - static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE; 55 - static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR; 56 - static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR; 57 - 58 - /* 59 - * Define static partitions for flash device 60 - */ 61 - static struct mtd_partition partition_info[] = { 62 - {.name = "EP7312 Nand Flash", 63 - .offset = 0, 64 - .size = 8 * 1024 * 1024} 65 - }; 66 - 67 - #define NUM_PARTITIONS 1 68 - 69 - /* 70 - * hardware specific access to control-lines 71 - * 72 - * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1) 73 - * NAND_CLE: bit 1 -> bit 4 74 - * NAND_ALE: bit 2 -> bit 5 75 - */ 76 - static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 77 - { 78 - struct nand_chip *chip = mtd->priv; 79 - 80 - if (ctrl & NAND_CTRL_CHANGE) { 81 - unsigned char bits = 0x80; 82 - 83 - bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3; 84 - bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40; 85 - 86 - clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits, 87 - ep7312_pxdr); 88 - } 89 - if (cmd != NAND_CMD_NONE) 90 - writeb(cmd, chip->IO_ADDR_W); 91 - } 92 - 93 - /* 94 - * read device ready pin 95 - */ 96 - static int ep7312_device_ready(struct mtd_info *mtd) 97 - { 98 - return 1; 99 - } 100 - 101 - const char *part_probes[] = { "cmdlinepart", NULL }; 102 - 103 - /* 104 - * Main initialization routine 105 - */ 106 - static int __init ep7312_init(void) 107 - { 108 - struct nand_chip *this; 109 - const char *part_type = 0; 110 - int mtd_parts_nb = 0; 111 - struct mtd_partition *mtd_parts = 0; 112 - void __iomem *ep7312_fio_base; 113 - 114 - /* Allocate memory for MTD device structure and private data */ 115 - ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 116 - if (!ep7312_mtd) { 117 - printk("Unable to allocate EDB7312 NAND MTD device structure.\n"); 118 - return -ENOMEM; 119 - } 120 - 121 - /* map physical address */ 122 - ep7312_fio_base 
= ioremap(ep7312_fio_pbase, SZ_1K); 123 - if (!ep7312_fio_base) { 124 - printk("ioremap EDB7312 NAND flash failed\n"); 125 - kfree(ep7312_mtd); 126 - return -EIO; 127 - } 128 - 129 - /* Get pointer to private data */ 130 - this = (struct nand_chip *)(&ep7312_mtd[1]); 131 - 132 - /* Initialize structures */ 133 - memset(ep7312_mtd, 0, sizeof(struct mtd_info)); 134 - memset(this, 0, sizeof(struct nand_chip)); 135 - 136 - /* Link the private data with the MTD structure */ 137 - ep7312_mtd->priv = this; 138 - ep7312_mtd->owner = THIS_MODULE; 139 - 140 - /* 141 - * Set GPIO Port B control register so that the pins are configured 142 - * to be outputs for controlling the NAND flash. 143 - */ 144 - clps_writeb(0xf0, ep7312_pxddr); 145 - 146 - /* insert callbacks */ 147 - this->IO_ADDR_R = ep7312_fio_base; 148 - this->IO_ADDR_W = ep7312_fio_base; 149 - this->cmd_ctrl = ep7312_hwcontrol; 150 - this->dev_ready = ep7312_device_ready; 151 - /* 15 us command delay time */ 152 - this->chip_delay = 15; 153 - 154 - /* Scan to find existence of the device */ 155 - if (nand_scan(ep7312_mtd, 1)) { 156 - iounmap((void *)ep7312_fio_base); 157 - kfree(ep7312_mtd); 158 - return -ENXIO; 159 - } 160 - ep7312_mtd->name = "edb7312-nand"; 161 - mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0); 162 - if (mtd_parts_nb > 0) 163 - part_type = "command line"; 164 - else 165 - mtd_parts_nb = 0; 166 - if (mtd_parts_nb == 0) { 167 - mtd_parts = partition_info; 168 - mtd_parts_nb = NUM_PARTITIONS; 169 - part_type = "static"; 170 - } 171 - 172 - /* Register the partitions */ 173 - printk(KERN_NOTICE "Using %s partition definition\n", part_type); 174 - mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb); 175 - 176 - /* Return happy */ 177 - return 0; 178 - } 179 - 180 - module_init(ep7312_init); 181 - 182 - /* 183 - * Clean up routine 184 - */ 185 - static void __exit ep7312_cleanup(void) 186 - { 187 - struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1]; 188 - 189 
- /* Release resources, unregister device */ 190 - nand_release(ap7312_mtd); 191 - 192 - /* Release io resource */ 193 - iounmap(this->IO_ADDR_R); 194 - 195 - /* Free the MTD device structure */ 196 - kfree(ep7312_mtd); 197 - } 198 - 199 - module_exit(ep7312_cleanup); 200 - 201 - MODULE_LICENSE("GPL"); 202 - MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>"); 203 - MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
+28 -47
drivers/mtd/nand/fsl_elbc_nand.c
··· 75 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 76 76 unsigned int oob; /* Non zero if operating on OOB data */ 77 77 unsigned int counter; /* counter for the initializations */ 78 - char *oob_poi; /* Place to write ECC after read back */ 79 78 }; 80 79 81 80 /* These map to the positions used by the FCM hardware ECC generator */ ··· 241 242 in_be32(&lbc->fir), in_be32(&lbc->fcr), 242 243 elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr); 243 244 return -EIO; 245 + } 246 + 247 + if (chip->ecc.mode != NAND_ECC_HW) 248 + return 0; 249 + 250 + if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { 251 + uint32_t lteccr = in_be32(&lbc->lteccr); 252 + /* 253 + * if command was a full page read and the ELBC 254 + * has the LTECCR register, then bits 12-15 (ppc order) of 255 + * LTECCR indicates which 512 byte sub-pages had fixed errors. 256 + * bits 28-31 are uncorrectable errors, marked elsewhere. 257 + * for small page nand only 1 bit is used. 258 + * if the ELBC doesn't have the lteccr register it reads 0 259 + */ 260 + if (lteccr & 0x000F000F) 261 + out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ 262 + if (lteccr & 0x000F0000) 263 + mtd->ecc_stats.corrected++; 244 264 } 245 265 246 266 return 0; ··· 453 435 454 436 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 455 437 case NAND_CMD_PAGEPROG: { 456 - int full_page; 457 438 dev_vdbg(priv->dev, 458 439 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 459 440 "writing %d bytes.\n", elbc_fcm_ctrl->index); ··· 462 445 * write so the HW generates the ECC. 
463 446 */ 464 447 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || 465 - elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) { 448 + elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) 466 449 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); 467 - full_page = 0; 468 - } else { 450 + else 469 451 out_be32(&lbc->fbcr, 0); 470 - full_page = 1; 471 - } 472 452 473 453 fsl_elbc_run_command(mtd); 474 - 475 - /* Read back the page in order to fill in the ECC for the 476 - * caller. Is this really needed? 477 - */ 478 - if (full_page && elbc_fcm_ctrl->oob_poi) { 479 - out_be32(&lbc->fbcr, 3); 480 - set_addr(mtd, 6, page_addr, 1); 481 - 482 - elbc_fcm_ctrl->read_bytes = mtd->writesize + 9; 483 - 484 - fsl_elbc_do_read(chip, 1); 485 - fsl_elbc_run_command(mtd); 486 - 487 - memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6, 488 - &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3); 489 - elbc_fcm_ctrl->index += 3; 490 - } 491 - 492 - elbc_fcm_ctrl->oob_poi = NULL; 493 454 return; 494 455 } 495 456 ··· 747 752 struct nand_chip *chip, 748 753 const uint8_t *buf) 749 754 { 750 - struct fsl_elbc_mtd *priv = chip->priv; 751 - struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; 752 - 753 755 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 754 756 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 755 - 756 - elbc_fcm_ctrl->oob_poi = chip->oob_poi; 757 757 } 758 758 759 759 static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) ··· 781 791 chip->bbt_md = &bbt_mirror_descr; 782 792 783 793 /* set up nand options */ 784 - chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | 785 - NAND_USE_FLASH_BBT; 794 + chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 795 + chip->bbt_options = NAND_BBT_USE_FLASH; 786 796 787 797 chip->controller = &elbc_fcm_ctrl->controller; 788 798 chip->priv = priv; ··· 819 829 820 830 elbc_fcm_ctrl->chips[priv->bank] = NULL; 821 831 kfree(priv); 822 - kfree(elbc_fcm_ctrl); 823 832 return 0; 824 833 } 825 834 ··· 831 842 struct resource res; 832 843 
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 833 844 static const char *part_probe_types[] 834 - = { "cmdlinepart", "RedBoot", NULL }; 835 - struct mtd_partition *parts; 845 + = { "cmdlinepart", "RedBoot", "ofpart", NULL }; 836 846 int ret; 837 847 int bank; 838 848 struct device *dev; 839 849 struct device_node *node = pdev->dev.of_node; 850 + struct mtd_part_parser_data ppdata; 840 851 852 + ppdata.of_node = pdev->dev.of_node; 841 853 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 842 854 return -ENODEV; 843 855 lbc = fsl_lbc_ctrl_dev->regs; ··· 924 934 925 935 /* First look for RedBoot table or partitions on the command 926 936 * line, these take precedence over device tree information */ 927 - ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 928 - if (ret < 0) 929 - goto err; 930 - 931 - if (ret == 0) { 932 - ret = of_mtd_parse_partitions(priv->dev, node, &parts); 933 - if (ret < 0) 934 - goto err; 935 - } 936 - 937 - mtd_device_register(&priv->mtd, parts, ret); 937 + mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata, 938 + NULL, 0); 938 939 939 940 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 940 941 (unsigned long long)res.start, priv->bank);
+5 -11
drivers/mtd/nand/fsl_upm.c
··· 158 158 { 159 159 int ret; 160 160 struct device_node *flash_np; 161 - static const char *part_types[] = { "cmdlinepart", NULL, }; 161 + struct mtd_part_parser_data ppdata; 162 162 163 163 fun->chip.IO_ADDR_R = fun->io_base; 164 164 fun->chip.IO_ADDR_W = fun->io_base; ··· 192 192 if (ret) 193 193 goto err; 194 194 195 - ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 196 - 197 - #ifdef CONFIG_MTD_OF_PARTS 198 - if (ret == 0) { 199 - ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts); 200 - if (ret < 0) 201 - goto err; 202 - } 203 - #endif 204 - ret = mtd_device_register(&fun->mtd, fun->parts, ret); 195 + ppdata.of_node = flash_np; 196 + ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0); 205 197 err: 206 198 of_node_put(flash_np); 199 + if (ret) 200 + kfree(fun->mtd.name); 207 201 return ret; 208 202 } 209 203
+11 -66
drivers/mtd/nand/fsmc_nand.c
··· 146 146 { 147 147 .name = "Root File System", 148 148 .offset = 0x460000, 149 - .size = 0, 149 + .size = MTDPART_SIZ_FULL, 150 150 }, 151 151 }; 152 152 ··· 173 173 { 174 174 .name = "Root File System", 175 175 .offset = 0x800000, 176 - .size = 0, 176 + .size = MTDPART_SIZ_FULL, 177 177 }, 178 178 }; 179 179 180 - #ifdef CONFIG_MTD_CMDLINE_PARTS 181 - const char *part_probes[] = { "cmdlinepart", NULL }; 182 - #endif 183 180 184 181 /** 185 182 * struct fsmc_nand_data - structure for FSMC NAND device state ··· 184 187 * @pid: Part ID on the AMBA PrimeCell format 185 188 * @mtd: MTD info for a NAND flash. 186 189 * @nand: Chip related info for a NAND flash. 187 - * @partitions: Partition info for a NAND Flash. 188 - * @nr_partitions: Total number of partition of a NAND flash. 189 190 * 190 191 * @ecc_place: ECC placing locations in oobfree type format. 191 192 * @bank: Bank number for probed device. ··· 198 203 u32 pid; 199 204 struct mtd_info mtd; 200 205 struct nand_chip nand; 201 - struct mtd_partition *partitions; 202 - unsigned int nr_partitions; 203 206 204 207 struct fsmc_eccplace *ecc_place; 205 208 unsigned int bank; ··· 709 716 * platform data, 710 717 * default partition information present in driver. 
711 718 */ 712 - #ifdef CONFIG_MTD_CMDLINE_PARTS 713 719 /* 714 - * Check if partition info passed via command line 720 + * Check for partition info passed 715 721 */ 716 722 host->mtd.name = "nand"; 717 - host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes, 718 - &host->partitions, 0); 719 - if (host->nr_partitions <= 0) { 720 - #endif 721 - /* 722 - * Check if partition info passed via command line 723 - */ 724 - if (pdata->partitions) { 725 - host->partitions = pdata->partitions; 726 - host->nr_partitions = pdata->nr_partitions; 727 - } else { 728 - struct mtd_partition *partition; 729 - int i; 730 - 731 - /* Select the default partitions info */ 732 - switch (host->mtd.size) { 733 - case 0x01000000: 734 - case 0x02000000: 735 - case 0x04000000: 736 - host->partitions = partition_info_16KB_blk; 737 - host->nr_partitions = 738 - sizeof(partition_info_16KB_blk) / 739 - sizeof(struct mtd_partition); 740 - break; 741 - case 0x08000000: 742 - case 0x10000000: 743 - case 0x20000000: 744 - case 0x40000000: 745 - host->partitions = partition_info_128KB_blk; 746 - host->nr_partitions = 747 - sizeof(partition_info_128KB_blk) / 748 - sizeof(struct mtd_partition); 749 - break; 750 - default: 751 - ret = -ENXIO; 752 - pr_err("Unsupported NAND size\n"); 753 - goto err_probe; 754 - } 755 - 756 - partition = host->partitions; 757 - for (i = 0; i < host->nr_partitions; i++, partition++) { 758 - if (partition->size == 0) { 759 - partition->size = host->mtd.size - 760 - partition->offset; 761 - break; 762 - } 763 - } 764 - } 765 - #ifdef CONFIG_MTD_CMDLINE_PARTS 766 - } 767 - #endif 768 - 769 - ret = mtd_device_register(&host->mtd, host->partitions, 770 - host->nr_partitions); 723 + ret = mtd_device_parse_register(&host->mtd, NULL, 0, 724 + host->mtd.size <= 0x04000000 ? 725 + partition_info_16KB_blk : 726 + partition_info_128KB_blk, 727 + host->mtd.size <= 0x04000000 ? 
728 + ARRAY_SIZE(partition_info_16KB_blk) : 729 + ARRAY_SIZE(partition_info_128KB_blk)); 771 730 if (ret) 772 731 goto err_probe; 773 732 ··· 767 822 platform_set_drvdata(pdev, NULL); 768 823 769 824 if (host) { 770 - mtd_device_unregister(&host->mtd); 825 + nand_release(&host->mtd); 771 826 clk_disable(host->clk); 772 827 clk_put(host->clk); 773 828
+3
drivers/mtd/nand/gpmi-nand/Makefile
··· 1 + obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o 2 + gpmi_nand-objs += gpmi-nand.o 3 + gpmi_nand-objs += gpmi-lib.o
+84
drivers/mtd/nand/gpmi-nand/bch-regs.h
··· 1 + /* 2 + * Freescale GPMI NAND Flash Driver 3 + * 4 + * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 + * Copyright 2008 Embedded Alley Solutions, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along 18 + * with this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 + */ 21 + #ifndef __GPMI_NAND_BCH_REGS_H 22 + #define __GPMI_NAND_BCH_REGS_H 23 + 24 + #define HW_BCH_CTRL 0x00000000 25 + #define HW_BCH_CTRL_SET 0x00000004 26 + #define HW_BCH_CTRL_CLR 0x00000008 27 + #define HW_BCH_CTRL_TOG 0x0000000c 28 + 29 + #define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8) 30 + #define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0) 31 + 32 + #define HW_BCH_STATUS0 0x00000010 33 + #define HW_BCH_MODE 0x00000020 34 + #define HW_BCH_ENCODEPTR 0x00000030 35 + #define HW_BCH_DATAPTR 0x00000040 36 + #define HW_BCH_METAPTR 0x00000050 37 + #define HW_BCH_LAYOUTSELECT 0x00000070 38 + 39 + #define HW_BCH_FLASH0LAYOUT0 0x00000080 40 + 41 + #define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24 42 + #define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS) 43 + #define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \ 44 + (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS) 45 + 46 + #define BP_BCH_FLASH0LAYOUT0_META_SIZE 16 47 + #define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE) 48 + #define 
BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \ 49 + (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\ 50 + & BM_BCH_FLASH0LAYOUT0_META_SIZE) 51 + 52 + #define BP_BCH_FLASH0LAYOUT0_ECC0 12 53 + #define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0) 54 + #define BF_BCH_FLASH0LAYOUT0_ECC0(v) \ 55 + (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0) 56 + 57 + #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 58 + #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 59 + (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) 60 + #define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \ 61 + (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\ 62 + & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) 63 + 64 + #define HW_BCH_FLASH0LAYOUT1 0x00000090 65 + 66 + #define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16 67 + #define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \ 68 + (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) 69 + #define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \ 70 + (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \ 71 + & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE) 72 + 73 + #define BP_BCH_FLASH0LAYOUT1_ECCN 12 74 + #define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN) 75 + #define BF_BCH_FLASH0LAYOUT1_ECCN(v) \ 76 + (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN) 77 + 78 + #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 79 + #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 80 + (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) 81 + #define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \ 82 + (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 83 + & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) 84 + #endif
+1057
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
··· 1 + /* 2 + * Freescale GPMI NAND Flash Driver 3 + * 4 + * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. 5 + * Copyright (C) 2008 Embedded Alley Solutions, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along 18 + * with this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 + */ 21 + #include <linux/mtd/gpmi-nand.h> 22 + #include <linux/delay.h> 23 + #include <linux/clk.h> 24 + #include <mach/mxs.h> 25 + 26 + #include "gpmi-nand.h" 27 + #include "gpmi-regs.h" 28 + #include "bch-regs.h" 29 + 30 + struct timing_threshod timing_default_threshold = { 31 + .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >> 32 + BP_GPMI_TIMING0_DATA_SETUP), 33 + .internal_data_setup_in_ns = 0, 34 + .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >> 35 + BP_GPMI_CTRL1_RDN_DELAY), 36 + .max_dll_clock_period_in_ns = 32, 37 + .max_dll_delay_in_ns = 16, 38 + }; 39 + 40 + /* 41 + * Clear the bit and poll it cleared. This is usually called with 42 + * a reset address and mask being either SFTRST(bit 31) or CLKGATE 43 + * (bit 30). 44 + */ 45 + static int clear_poll_bit(void __iomem *addr, u32 mask) 46 + { 47 + int timeout = 0x400; 48 + 49 + /* clear the bit */ 50 + __mxs_clrl(mask, addr); 51 + 52 + /* 53 + * SFTRST needs 3 GPMI clocks to settle, the reference manual 54 + * recommends to wait 1us. 
55 + */ 56 + udelay(1); 57 + 58 + /* poll the bit becoming clear */ 59 + while ((readl(addr) & mask) && --timeout) 60 + /* nothing */; 61 + 62 + return !timeout; 63 + } 64 + 65 + #define MODULE_CLKGATE (1 << 30) 66 + #define MODULE_SFTRST (1 << 31) 67 + /* 68 + * The current mxs_reset_block() will do two things: 69 + * [1] enable the module. 70 + * [2] reset the module. 71 + * 72 + * In most of the cases, it's ok. But there is a hardware bug in the BCH block. 73 + * If you try to soft reset the BCH block, it becomes unusable until 74 + * the next hard reset. This case occurs in the NAND boot mode. When the board 75 + * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. 76 + * So If the driver tries to reset the BCH again, the BCH will not work anymore. 77 + * You will see a DMA timeout in this case. 78 + * 79 + * To avoid this bug, just add a new parameter `just_enable` for 80 + * the mxs_reset_block(), and rewrite it here. 81 + */ 82 + int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) 83 + { 84 + int ret; 85 + int timeout = 0x400; 86 + 87 + /* clear and poll SFTRST */ 88 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 89 + if (unlikely(ret)) 90 + goto error; 91 + 92 + /* clear CLKGATE */ 93 + __mxs_clrl(MODULE_CLKGATE, reset_addr); 94 + 95 + if (!just_enable) { 96 + /* set SFTRST to reset the block */ 97 + __mxs_setl(MODULE_SFTRST, reset_addr); 98 + udelay(1); 99 + 100 + /* poll CLKGATE becoming set */ 101 + while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout) 102 + /* nothing */; 103 + if (unlikely(!timeout)) 104 + goto error; 105 + } 106 + 107 + /* clear and poll SFTRST */ 108 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 109 + if (unlikely(ret)) 110 + goto error; 111 + 112 + /* clear and poll CLKGATE */ 113 + ret = clear_poll_bit(reset_addr, MODULE_CLKGATE); 114 + if (unlikely(ret)) 115 + goto error; 116 + 117 + return 0; 118 + 119 + error: 120 + pr_err("%s(%p): module reset timeout\n", __func__, 
reset_addr); 121 + return -ETIMEDOUT; 122 + } 123 + 124 + int gpmi_init(struct gpmi_nand_data *this) 125 + { 126 + struct resources *r = &this->resources; 127 + int ret; 128 + 129 + ret = clk_enable(r->clock); 130 + if (ret) 131 + goto err_out; 132 + ret = gpmi_reset_block(r->gpmi_regs, false); 133 + if (ret) 134 + goto err_out; 135 + 136 + /* Choose NAND mode. */ 137 + writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); 138 + 139 + /* Set the IRQ polarity. */ 140 + writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, 141 + r->gpmi_regs + HW_GPMI_CTRL1_SET); 142 + 143 + /* Disable Write-Protection. */ 144 + writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET); 145 + 146 + /* Select BCH ECC. */ 147 + writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); 148 + 149 + clk_disable(r->clock); 150 + return 0; 151 + err_out: 152 + return ret; 153 + } 154 + 155 + /* This function is very useful. It is called only when the bug occur. */ 156 + void gpmi_dump_info(struct gpmi_nand_data *this) 157 + { 158 + struct resources *r = &this->resources; 159 + struct bch_geometry *geo = &this->bch_geometry; 160 + u32 reg; 161 + int i; 162 + 163 + pr_err("Show GPMI registers :\n"); 164 + for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) { 165 + reg = readl(r->gpmi_regs + i * 0x10); 166 + pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); 167 + } 168 + 169 + /* start to print out the BCH info */ 170 + pr_err("BCH Geometry :\n"); 171 + pr_err("GF length : %u\n", geo->gf_len); 172 + pr_err("ECC Strength : %u\n", geo->ecc_strength); 173 + pr_err("Page Size in Bytes : %u\n", geo->page_size); 174 + pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size); 175 + pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size); 176 + pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count); 177 + pr_err("Payload Size in Bytes : %u\n", geo->payload_size); 178 + pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size); 179 + pr_err("Auxiliary Status Offset: %u\n", 
geo->auxiliary_status_offset); 180 + pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset); 181 + pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset); 182 + } 183 + 184 + /* Configures the geometry for BCH. */ 185 + int bch_set_geometry(struct gpmi_nand_data *this) 186 + { 187 + struct resources *r = &this->resources; 188 + struct bch_geometry *bch_geo = &this->bch_geometry; 189 + unsigned int block_count; 190 + unsigned int block_size; 191 + unsigned int metadata_size; 192 + unsigned int ecc_strength; 193 + unsigned int page_size; 194 + int ret; 195 + 196 + if (common_nfc_set_geometry(this)) 197 + return !0; 198 + 199 + block_count = bch_geo->ecc_chunk_count - 1; 200 + block_size = bch_geo->ecc_chunk_size; 201 + metadata_size = bch_geo->metadata_size; 202 + ecc_strength = bch_geo->ecc_strength >> 1; 203 + page_size = bch_geo->page_size; 204 + 205 + ret = clk_enable(r->clock); 206 + if (ret) 207 + goto err_out; 208 + 209 + ret = gpmi_reset_block(r->bch_regs, true); 210 + if (ret) 211 + goto err_out; 212 + 213 + /* Configure layout 0. */ 214 + writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) 215 + | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) 216 + | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength) 217 + | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size), 218 + r->bch_regs + HW_BCH_FLASH0LAYOUT0); 219 + 220 + writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) 221 + | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength) 222 + | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size), 223 + r->bch_regs + HW_BCH_FLASH0LAYOUT1); 224 + 225 + /* Set *all* chip selects to use layout 0. */ 226 + writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); 227 + 228 + /* Enable interrupts. */ 229 + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, 230 + r->bch_regs + HW_BCH_CTRL_SET); 231 + 232 + clk_disable(r->clock); 233 + return 0; 234 + err_out: 235 + return ret; 236 + } 237 + 238 + /* Converts time in nanoseconds to cycles. 
*/ 239 + static unsigned int ns_to_cycles(unsigned int time, 240 + unsigned int period, unsigned int min) 241 + { 242 + unsigned int k; 243 + 244 + k = (time + period - 1) / period; 245 + return max(k, min); 246 + } 247 + 248 + /* Apply timing to current hardware conditions. */ 249 + static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, 250 + struct gpmi_nfc_hardware_timing *hw) 251 + { 252 + struct gpmi_nand_platform_data *pdata = this->pdata; 253 + struct timing_threshod *nfc = &timing_default_threshold; 254 + struct nand_chip *nand = &this->nand; 255 + struct nand_timing target = this->timing; 256 + bool improved_timing_is_available; 257 + unsigned long clock_frequency_in_hz; 258 + unsigned int clock_period_in_ns; 259 + bool dll_use_half_periods; 260 + unsigned int dll_delay_shift; 261 + unsigned int max_sample_delay_in_ns; 262 + unsigned int address_setup_in_cycles; 263 + unsigned int data_setup_in_ns; 264 + unsigned int data_setup_in_cycles; 265 + unsigned int data_hold_in_cycles; 266 + int ideal_sample_delay_in_ns; 267 + unsigned int sample_delay_factor; 268 + int tEYE; 269 + unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns; 270 + unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns; 271 + 272 + /* 273 + * If there are multiple chips, we need to relax the timings to allow 274 + * for signal distortion due to higher capacitance. 275 + */ 276 + if (nand->numchips > 2) { 277 + target.data_setup_in_ns += 10; 278 + target.data_hold_in_ns += 10; 279 + target.address_setup_in_ns += 10; 280 + } else if (nand->numchips > 1) { 281 + target.data_setup_in_ns += 5; 282 + target.data_hold_in_ns += 5; 283 + target.address_setup_in_ns += 5; 284 + } 285 + 286 + /* Check if improved timing information is available. */ 287 + improved_timing_is_available = 288 + (target.tREA_in_ns >= 0) && 289 + (target.tRLOH_in_ns >= 0) && 290 + (target.tRHOH_in_ns >= 0) ; 291 + 292 + /* Inspect the clock. 
*/ 293 + clock_frequency_in_hz = nfc->clock_frequency_in_hz; 294 + clock_period_in_ns = 1000000000 / clock_frequency_in_hz; 295 + 296 + /* 297 + * The NFC quantizes setup and hold parameters in terms of clock cycles. 298 + * Here, we quantize the setup and hold timing parameters to the 299 + * next-highest clock period to make sure we apply at least the 300 + * specified times. 301 + * 302 + * For data setup and data hold, the hardware interprets a value of zero 303 + * as the largest possible delay. This is not what's intended by a zero 304 + * in the input parameter, so we impose a minimum of one cycle. 305 + */ 306 + data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns, 307 + clock_period_in_ns, 1); 308 + data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns, 309 + clock_period_in_ns, 1); 310 + address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns, 311 + clock_period_in_ns, 0); 312 + 313 + /* 314 + * The clock's period affects the sample delay in a number of ways: 315 + * 316 + * (1) The NFC HAL tells us the maximum clock period the sample delay 317 + * DLL can tolerate. If the clock period is greater than half that 318 + * maximum, we must configure the DLL to be driven by half periods. 319 + * 320 + * (2) We need to convert from an ideal sample delay, in ns, to a 321 + * "sample delay factor," which the NFC uses. This factor depends on 322 + * whether we're driving the DLL with full or half periods. 323 + * Paraphrasing the reference manual: 324 + * 325 + * AD = SDF x 0.125 x RP 326 + * 327 + * where: 328 + * 329 + * AD is the applied delay, in ns. 330 + * SDF is the sample delay factor, which is dimensionless. 331 + * RP is the reference period, in ns, which is a full clock period 332 + * if the DLL is being driven by full periods, or half that if 333 + * the DLL is being driven by half periods. 
334 + * 335 + * Let's re-arrange this in a way that's more useful to us: 336 + * 337 + * 8 338 + * SDF = AD x ---- 339 + * RP 340 + * 341 + * The reference period is either the clock period or half that, so this 342 + * is: 343 + * 344 + * 8 AD x DDF 345 + * SDF = AD x ----- = -------- 346 + * f x P P 347 + * 348 + * where: 349 + * 350 + * f is 1 or 1/2, depending on how we're driving the DLL. 351 + * P is the clock period. 352 + * DDF is the DLL Delay Factor, a dimensionless value that 353 + * incorporates all the constants in the conversion. 354 + * 355 + * DDF will be either 8 or 16, both of which are powers of two. We can 356 + * reduce the cost of this conversion by using bit shifts instead of 357 + * multiplication or division. Thus: 358 + * 359 + * AD << DDS 360 + * SDF = --------- 361 + * P 362 + * 363 + * or 364 + * 365 + * AD = (SDF >> DDS) x P 366 + * 367 + * where: 368 + * 369 + * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF. 370 + */ 371 + if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) { 372 + dll_use_half_periods = true; 373 + dll_delay_shift = 3 + 1; 374 + } else { 375 + dll_use_half_periods = false; 376 + dll_delay_shift = 3; 377 + } 378 + 379 + /* 380 + * Compute the maximum sample delay the NFC allows, under current 381 + * conditions. If the clock is running too slowly, no sample delay is 382 + * possible. 383 + */ 384 + if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns) 385 + max_sample_delay_in_ns = 0; 386 + else { 387 + /* 388 + * Compute the delay implied by the largest sample delay factor 389 + * the NFC allows. 390 + */ 391 + max_sample_delay_in_ns = 392 + (nfc->max_sample_delay_factor * clock_period_in_ns) >> 393 + dll_delay_shift; 394 + 395 + /* 396 + * Check if the implied sample delay larger than the NFC 397 + * actually allows. 
398 + */ 399 + if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns) 400 + max_sample_delay_in_ns = nfc->max_dll_delay_in_ns; 401 + } 402 + 403 + /* 404 + * Check if improved timing information is available. If not, we have to 405 + * use a less-sophisticated algorithm. 406 + */ 407 + if (!improved_timing_is_available) { 408 + /* 409 + * Fold the read setup time required by the NFC into the ideal 410 + * sample delay. 411 + */ 412 + ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns + 413 + nfc->internal_data_setup_in_ns; 414 + 415 + /* 416 + * The ideal sample delay may be greater than the maximum 417 + * allowed by the NFC. If so, we can trade off sample delay time 418 + * for more data setup time. 419 + * 420 + * In each iteration of the following loop, we add a cycle to 421 + * the data setup time and subtract a corresponding amount from 422 + * the sample delay until we've satisified the constraints or 423 + * can't do any better. 424 + */ 425 + while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) && 426 + (data_setup_in_cycles < nfc->max_data_setup_cycles)) { 427 + 428 + data_setup_in_cycles++; 429 + ideal_sample_delay_in_ns -= clock_period_in_ns; 430 + 431 + if (ideal_sample_delay_in_ns < 0) 432 + ideal_sample_delay_in_ns = 0; 433 + 434 + } 435 + 436 + /* 437 + * Compute the sample delay factor that corresponds most closely 438 + * to the ideal sample delay. If the result is too large for the 439 + * NFC, use the maximum value. 440 + * 441 + * Notice that we use the ns_to_cycles function to compute the 442 + * sample delay factor. We do this because the form of the 443 + * computation is the same as that for calculating cycles. 
444 + */ 445 + sample_delay_factor = 446 + ns_to_cycles( 447 + ideal_sample_delay_in_ns << dll_delay_shift, 448 + clock_period_in_ns, 0); 449 + 450 + if (sample_delay_factor > nfc->max_sample_delay_factor) 451 + sample_delay_factor = nfc->max_sample_delay_factor; 452 + 453 + /* Skip to the part where we return our results. */ 454 + goto return_results; 455 + } 456 + 457 + /* 458 + * If control arrives here, we have more detailed timing information, 459 + * so we can use a better algorithm. 460 + */ 461 + 462 + /* 463 + * Fold the read setup time required by the NFC into the maximum 464 + * propagation delay. 465 + */ 466 + max_prop_delay_in_ns += nfc->internal_data_setup_in_ns; 467 + 468 + /* 469 + * Earlier, we computed the number of clock cycles required to satisfy 470 + * the data setup time. Now, we need to know the actual nanoseconds. 471 + */ 472 + data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles; 473 + 474 + /* 475 + * Compute tEYE, the width of the data eye when reading from the NAND 476 + * Flash. The eye width is fundamentally determined by the data setup 477 + * time, perturbed by propagation delays and some characteristics of the 478 + * NAND Flash device. 479 + * 480 + * start of the eye = max_prop_delay + tREA 481 + * end of the eye = min_prop_delay + tRHOH + data_setup 482 + */ 483 + tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns + 484 + (int)data_setup_in_ns; 485 + 486 + tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns; 487 + 488 + /* 489 + * The eye must be open. If it's not, we can try to open it by 490 + * increasing its main forcer, the data setup time. 491 + * 492 + * In each iteration of the following loop, we increase the data setup 493 + * time by a single clock cycle. We do this until either the eye is 494 + * open or we run into NFC limits. 495 + */ 496 + while ((tEYE <= 0) && 497 + (data_setup_in_cycles < nfc->max_data_setup_cycles)) { 498 + /* Give a cycle to data setup. 
*/ 499 + data_setup_in_cycles++; 500 + /* Synchronize the data setup time with the cycles. */ 501 + data_setup_in_ns += clock_period_in_ns; 502 + /* Adjust tEYE accordingly. */ 503 + tEYE += clock_period_in_ns; 504 + } 505 + 506 + /* 507 + * When control arrives here, the eye is open. The ideal time to sample 508 + * the data is in the center of the eye: 509 + * 510 + * end of the eye + start of the eye 511 + * --------------------------------- - data_setup 512 + * 2 513 + * 514 + * After some algebra, this simplifies to the code immediately below. 515 + */ 516 + ideal_sample_delay_in_ns = 517 + ((int)max_prop_delay_in_ns + 518 + (int)target.tREA_in_ns + 519 + (int)min_prop_delay_in_ns + 520 + (int)target.tRHOH_in_ns - 521 + (int)data_setup_in_ns) >> 1; 522 + 523 + /* 524 + * The following figure illustrates some aspects of a NAND Flash read: 525 + * 526 + * 527 + * __ _____________________________________ 528 + * RDN \_________________/ 529 + * 530 + * <---- tEYE -----> 531 + * /-----------------\ 532 + * Read Data ----------------------------< >--------- 533 + * \-----------------/ 534 + * ^ ^ ^ ^ 535 + * | | | | 536 + * |<--Data Setup -->|<--Delay Time -->| | 537 + * | | | | 538 + * | | | 539 + * | |<-- Quantized Delay Time -->| 540 + * | | | 541 + * 542 + * 543 + * We have some issues we must now address: 544 + * 545 + * (1) The *ideal* sample delay time must not be negative. If it is, we 546 + * jam it to zero. 547 + * 548 + * (2) The *ideal* sample delay time must not be greater than that 549 + * allowed by the NFC. If it is, we can increase the data setup 550 + * time, which will reduce the delay between the end of the data 551 + * setup and the center of the eye. It will also make the eye 552 + * larger, which might help with the next issue... 553 + * 554 + * (3) The *quantized* sample delay time must not fall either before the 555 + * eye opens or after it closes (the latter is the problem 556 + * illustrated in the above figure). 
557 + */ 558 + 559 + /* Jam a negative ideal sample delay to zero. */ 560 + if (ideal_sample_delay_in_ns < 0) 561 + ideal_sample_delay_in_ns = 0; 562 + 563 + /* 564 + * Extend the data setup as needed to reduce the ideal sample delay 565 + * below the maximum permitted by the NFC. 566 + */ 567 + while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) && 568 + (data_setup_in_cycles < nfc->max_data_setup_cycles)) { 569 + 570 + /* Give a cycle to data setup. */ 571 + data_setup_in_cycles++; 572 + /* Synchronize the data setup time with the cycles. */ 573 + data_setup_in_ns += clock_period_in_ns; 574 + /* Adjust tEYE accordingly. */ 575 + tEYE += clock_period_in_ns; 576 + 577 + /* 578 + * Decrease the ideal sample delay by one half cycle, to keep it 579 + * in the middle of the eye. 580 + */ 581 + ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1); 582 + 583 + /* Jam a negative ideal sample delay to zero. */ 584 + if (ideal_sample_delay_in_ns < 0) 585 + ideal_sample_delay_in_ns = 0; 586 + } 587 + 588 + /* 589 + * Compute the sample delay factor that corresponds to the ideal sample 590 + * delay. If the result is too large, then use the maximum allowed 591 + * value. 592 + * 593 + * Notice that we use the ns_to_cycles function to compute the sample 594 + * delay factor. We do this because the form of the computation is the 595 + * same as that for calculating cycles. 596 + */ 597 + sample_delay_factor = 598 + ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift, 599 + clock_period_in_ns, 0); 600 + 601 + if (sample_delay_factor > nfc->max_sample_delay_factor) 602 + sample_delay_factor = nfc->max_sample_delay_factor; 603 + 604 + /* 605 + * These macros conveniently encapsulate a computation we'll use to 606 + * continuously evaluate whether or not the data sample delay is inside 607 + * the eye. 
608 + */ 609 + #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns) 610 + 611 + #define QUANTIZED_DELAY \ 612 + ((int) ((sample_delay_factor * clock_period_in_ns) >> \ 613 + dll_delay_shift)) 614 + 615 + #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY)) 616 + 617 + #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1)) 618 + 619 + /* 620 + * While the quantized sample time falls outside the eye, reduce the 621 + * sample delay or extend the data setup to move the sampling point back 622 + * toward the eye. Do not allow the number of data setup cycles to 623 + * exceed the maximum allowed by the NFC. 624 + */ 625 + while (SAMPLE_IS_NOT_WITHIN_THE_EYE && 626 + (data_setup_in_cycles < nfc->max_data_setup_cycles)) { 627 + /* 628 + * If control arrives here, the quantized sample delay falls 629 + * outside the eye. Check if it's before the eye opens, or after 630 + * the eye closes. 631 + */ 632 + if (QUANTIZED_DELAY > IDEAL_DELAY) { 633 + /* 634 + * If control arrives here, the quantized sample delay 635 + * falls after the eye closes. Decrease the quantized 636 + * delay time and then go back to re-evaluate. 637 + */ 638 + if (sample_delay_factor != 0) 639 + sample_delay_factor--; 640 + continue; 641 + } 642 + 643 + /* 644 + * If control arrives here, the quantized sample delay falls 645 + * before the eye opens. Shift the sample point by increasing 646 + * data setup time. This will also make the eye larger. 647 + */ 648 + 649 + /* Give a cycle to data setup. */ 650 + data_setup_in_cycles++; 651 + /* Synchronize the data setup time with the cycles. */ 652 + data_setup_in_ns += clock_period_in_ns; 653 + /* Adjust tEYE accordingly. */ 654 + tEYE += clock_period_in_ns; 655 + 656 + /* 657 + * Decrease the ideal sample delay by one half cycle, to keep it 658 + * in the middle of the eye. 659 + */ 660 + ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1); 661 + 662 + /* ...and one less period for the delay time. 
*/ 663 + ideal_sample_delay_in_ns -= clock_period_in_ns; 664 + 665 + /* Jam a negative ideal sample delay to zero. */ 666 + if (ideal_sample_delay_in_ns < 0) 667 + ideal_sample_delay_in_ns = 0; 668 + 669 + /* 670 + * We have a new ideal sample delay, so re-compute the quantized 671 + * delay. 672 + */ 673 + sample_delay_factor = 674 + ns_to_cycles( 675 + ideal_sample_delay_in_ns << dll_delay_shift, 676 + clock_period_in_ns, 0); 677 + 678 + if (sample_delay_factor > nfc->max_sample_delay_factor) 679 + sample_delay_factor = nfc->max_sample_delay_factor; 680 + } 681 + 682 + /* Control arrives here when we're ready to return our results. */ 683 + return_results: 684 + hw->data_setup_in_cycles = data_setup_in_cycles; 685 + hw->data_hold_in_cycles = data_hold_in_cycles; 686 + hw->address_setup_in_cycles = address_setup_in_cycles; 687 + hw->use_half_periods = dll_use_half_periods; 688 + hw->sample_delay_factor = sample_delay_factor; 689 + 690 + /* Return success. */ 691 + return 0; 692 + } 693 + 694 + /* Begin the I/O */ 695 + void gpmi_begin(struct gpmi_nand_data *this) 696 + { 697 + struct resources *r = &this->resources; 698 + struct timing_threshod *nfc = &timing_default_threshold; 699 + unsigned char *gpmi_regs = r->gpmi_regs; 700 + unsigned int clock_period_in_ns; 701 + uint32_t reg; 702 + unsigned int dll_wait_time_in_us; 703 + struct gpmi_nfc_hardware_timing hw; 704 + int ret; 705 + 706 + /* Enable the clock. */ 707 + ret = clk_enable(r->clock); 708 + if (ret) { 709 + pr_err("We failed in enable the clk\n"); 710 + goto err_out; 711 + } 712 + 713 + /* set ready/busy timeout */ 714 + writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT, 715 + gpmi_regs + HW_GPMI_TIMING1); 716 + 717 + /* Get the timing information we need. */ 718 + nfc->clock_frequency_in_hz = clk_get_rate(r->clock); 719 + clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz; 720 + 721 + gpmi_nfc_compute_hardware_timing(this, &hw); 722 + 723 + /* Set up all the simple timing parameters. 
*/ 724 + reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) | 725 + BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) | 726 + BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ; 727 + 728 + writel(reg, gpmi_regs + HW_GPMI_TIMING0); 729 + 730 + /* 731 + * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. 732 + */ 733 + writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR); 734 + 735 + /* Clear out the DLL control fields. */ 736 + writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR); 737 + writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR); 738 + 739 + /* If no sample delay is called for, return immediately. */ 740 + if (!hw.sample_delay_factor) 741 + return; 742 + 743 + /* Configure the HALF_PERIOD flag. */ 744 + if (hw.use_half_periods) 745 + writel(BM_GPMI_CTRL1_HALF_PERIOD, 746 + gpmi_regs + HW_GPMI_CTRL1_SET); 747 + 748 + /* Set the delay factor. */ 749 + writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor), 750 + gpmi_regs + HW_GPMI_CTRL1_SET); 751 + 752 + /* Enable the DLL. */ 753 + writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET); 754 + 755 + /* 756 + * After we enable the GPMI DLL, we have to wait 64 clock cycles before 757 + * we can use the GPMI. 758 + * 759 + * Calculate the amount of time we need to wait, in microseconds. 760 + */ 761 + dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000; 762 + 763 + if (!dll_wait_time_in_us) 764 + dll_wait_time_in_us = 1; 765 + 766 + /* Wait for the DLL to settle. */ 767 + udelay(dll_wait_time_in_us); 768 + 769 + err_out: 770 + return; 771 + } 772 + 773 + void gpmi_end(struct gpmi_nand_data *this) 774 + { 775 + struct resources *r = &this->resources; 776 + clk_disable(r->clock); 777 + } 778 + 779 + /* Clears a BCH interrupt. 
*/ 780 + void gpmi_clear_bch(struct gpmi_nand_data *this) 781 + { 782 + struct resources *r = &this->resources; 783 + writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); 784 + } 785 + 786 + /* Returns the Ready/Busy status of the given chip. */ 787 + int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) 788 + { 789 + struct resources *r = &this->resources; 790 + uint32_t mask = 0; 791 + uint32_t reg = 0; 792 + 793 + if (GPMI_IS_MX23(this)) { 794 + mask = MX23_BM_GPMI_DEBUG_READY0 << chip; 795 + reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); 796 + } else if (GPMI_IS_MX28(this)) { 797 + mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); 798 + reg = readl(r->gpmi_regs + HW_GPMI_STAT); 799 + } else 800 + pr_err("unknow arch.\n"); 801 + return reg & mask; 802 + } 803 + 804 + static inline void set_dma_type(struct gpmi_nand_data *this, 805 + enum dma_ops_type type) 806 + { 807 + this->last_dma_type = this->dma_type; 808 + this->dma_type = type; 809 + } 810 + 811 + int gpmi_send_command(struct gpmi_nand_data *this) 812 + { 813 + struct dma_chan *channel = get_dma_chan(this); 814 + struct dma_async_tx_descriptor *desc; 815 + struct scatterlist *sgl; 816 + int chip = this->current_chip; 817 + u32 pio[3]; 818 + 819 + /* [1] send out the PIO words */ 820 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) 821 + | BM_GPMI_CTRL0_WORD_LENGTH 822 + | BF_GPMI_CTRL0_CS(chip, this) 823 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 824 + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) 825 + | BM_GPMI_CTRL0_ADDRESS_INCREMENT 826 + | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); 827 + pio[1] = pio[2] = 0; 828 + desc = channel->device->device_prep_slave_sg(channel, 829 + (struct scatterlist *)pio, 830 + ARRAY_SIZE(pio), DMA_NONE, 0); 831 + if (!desc) { 832 + pr_err("step 1 error\n"); 833 + return -1; 834 + } 835 + 836 + /* [2] send out the COMMAND + ADDRESS string stored in @buffer */ 837 + sgl = &this->cmd_sgl; 838 + 839 + 
sg_init_one(sgl, this->cmd_buffer, this->command_length); 840 + dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); 841 + desc = channel->device->device_prep_slave_sg(channel, 842 + sgl, 1, DMA_TO_DEVICE, 1); 843 + if (!desc) { 844 + pr_err("step 2 error\n"); 845 + return -1; 846 + } 847 + 848 + /* [3] submit the DMA */ 849 + set_dma_type(this, DMA_FOR_COMMAND); 850 + return start_dma_without_bch_irq(this, desc); 851 + } 852 + 853 + int gpmi_send_data(struct gpmi_nand_data *this) 854 + { 855 + struct dma_async_tx_descriptor *desc; 856 + struct dma_chan *channel = get_dma_chan(this); 857 + int chip = this->current_chip; 858 + uint32_t command_mode; 859 + uint32_t address; 860 + u32 pio[2]; 861 + 862 + /* [1] PIO */ 863 + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; 864 + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; 865 + 866 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) 867 + | BM_GPMI_CTRL0_WORD_LENGTH 868 + | BF_GPMI_CTRL0_CS(chip, this) 869 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 870 + | BF_GPMI_CTRL0_ADDRESS(address) 871 + | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); 872 + pio[1] = 0; 873 + desc = channel->device->device_prep_slave_sg(channel, 874 + (struct scatterlist *)pio, 875 + ARRAY_SIZE(pio), DMA_NONE, 0); 876 + if (!desc) { 877 + pr_err("step 1 error\n"); 878 + return -1; 879 + } 880 + 881 + /* [2] send DMA request */ 882 + prepare_data_dma(this, DMA_TO_DEVICE); 883 + desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, 884 + 1, DMA_TO_DEVICE, 1); 885 + if (!desc) { 886 + pr_err("step 2 error\n"); 887 + return -1; 888 + } 889 + /* [3] submit the DMA */ 890 + set_dma_type(this, DMA_FOR_WRITE_DATA); 891 + return start_dma_without_bch_irq(this, desc); 892 + } 893 + 894 + int gpmi_read_data(struct gpmi_nand_data *this) 895 + { 896 + struct dma_async_tx_descriptor *desc; 897 + struct dma_chan *channel = get_dma_chan(this); 898 + int chip = this->current_chip; 899 + u32 pio[2]; 900 + 901 + /* [1] : send PIO */ 902 + pio[0] = 
BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) 903 + | BM_GPMI_CTRL0_WORD_LENGTH 904 + | BF_GPMI_CTRL0_CS(chip, this) 905 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 906 + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) 907 + | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); 908 + pio[1] = 0; 909 + desc = channel->device->device_prep_slave_sg(channel, 910 + (struct scatterlist *)pio, 911 + ARRAY_SIZE(pio), DMA_NONE, 0); 912 + if (!desc) { 913 + pr_err("step 1 error\n"); 914 + return -1; 915 + } 916 + 917 + /* [2] : send DMA request */ 918 + prepare_data_dma(this, DMA_FROM_DEVICE); 919 + desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, 920 + 1, DMA_FROM_DEVICE, 1); 921 + if (!desc) { 922 + pr_err("step 2 error\n"); 923 + return -1; 924 + } 925 + 926 + /* [3] : submit the DMA */ 927 + set_dma_type(this, DMA_FOR_READ_DATA); 928 + return start_dma_without_bch_irq(this, desc); 929 + } 930 + 931 + int gpmi_send_page(struct gpmi_nand_data *this, 932 + dma_addr_t payload, dma_addr_t auxiliary) 933 + { 934 + struct bch_geometry *geo = &this->bch_geometry; 935 + uint32_t command_mode; 936 + uint32_t address; 937 + uint32_t ecc_command; 938 + uint32_t buffer_mask; 939 + struct dma_async_tx_descriptor *desc; 940 + struct dma_chan *channel = get_dma_chan(this); 941 + int chip = this->current_chip; 942 + u32 pio[6]; 943 + 944 + /* A DMA descriptor that does an ECC page read. 
*/ 945 + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; 946 + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; 947 + ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE; 948 + buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | 949 + BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; 950 + 951 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) 952 + | BM_GPMI_CTRL0_WORD_LENGTH 953 + | BF_GPMI_CTRL0_CS(chip, this) 954 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 955 + | BF_GPMI_CTRL0_ADDRESS(address) 956 + | BF_GPMI_CTRL0_XFER_COUNT(0); 957 + pio[1] = 0; 958 + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC 959 + | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) 960 + | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); 961 + pio[3] = geo->page_size; 962 + pio[4] = payload; 963 + pio[5] = auxiliary; 964 + 965 + desc = channel->device->device_prep_slave_sg(channel, 966 + (struct scatterlist *)pio, 967 + ARRAY_SIZE(pio), DMA_NONE, 0); 968 + if (!desc) { 969 + pr_err("step 2 error\n"); 970 + return -1; 971 + } 972 + set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE); 973 + return start_dma_with_bch_irq(this, desc); 974 + } 975 + 976 + int gpmi_read_page(struct gpmi_nand_data *this, 977 + dma_addr_t payload, dma_addr_t auxiliary) 978 + { 979 + struct bch_geometry *geo = &this->bch_geometry; 980 + uint32_t command_mode; 981 + uint32_t address; 982 + uint32_t ecc_command; 983 + uint32_t buffer_mask; 984 + struct dma_async_tx_descriptor *desc; 985 + struct dma_chan *channel = get_dma_chan(this); 986 + int chip = this->current_chip; 987 + u32 pio[6]; 988 + 989 + /* [1] Wait for the chip to report ready. 
*/ 990 + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; 991 + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; 992 + 993 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) 994 + | BM_GPMI_CTRL0_WORD_LENGTH 995 + | BF_GPMI_CTRL0_CS(chip, this) 996 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 997 + | BF_GPMI_CTRL0_ADDRESS(address) 998 + | BF_GPMI_CTRL0_XFER_COUNT(0); 999 + pio[1] = 0; 1000 + desc = channel->device->device_prep_slave_sg(channel, 1001 + (struct scatterlist *)pio, 2, DMA_NONE, 0); 1002 + if (!desc) { 1003 + pr_err("step 1 error\n"); 1004 + return -1; 1005 + } 1006 + 1007 + /* [2] Enable the BCH block and read. */ 1008 + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ; 1009 + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; 1010 + ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE; 1011 + buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 1012 + | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; 1013 + 1014 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) 1015 + | BM_GPMI_CTRL0_WORD_LENGTH 1016 + | BF_GPMI_CTRL0_CS(chip, this) 1017 + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 1018 + | BF_GPMI_CTRL0_ADDRESS(address) 1019 + | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); 1020 + 1021 + pio[1] = 0; 1022 + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC 1023 + | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) 1024 + | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); 1025 + pio[3] = geo->page_size; 1026 + pio[4] = payload; 1027 + pio[5] = auxiliary; 1028 + desc = channel->device->device_prep_slave_sg(channel, 1029 + (struct scatterlist *)pio, 1030 + ARRAY_SIZE(pio), DMA_NONE, 1); 1031 + if (!desc) { 1032 + pr_err("step 2 error\n"); 1033 + return -1; 1034 + } 1035 + 1036 + /* [3] Disable the BCH block */ 1037 + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; 1038 + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; 1039 + 1040 + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) 1041 + | BM_GPMI_CTRL0_WORD_LENGTH 1042 + | BF_GPMI_CTRL0_CS(chip, this) 1043 + | 
BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 1044 + | BF_GPMI_CTRL0_ADDRESS(address) 1045 + | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); 1046 + pio[1] = 0; 1047 + desc = channel->device->device_prep_slave_sg(channel, 1048 + (struct scatterlist *)pio, 2, DMA_NONE, 1); 1049 + if (!desc) { 1050 + pr_err("step 3 error\n"); 1051 + return -1; 1052 + } 1053 + 1054 + /* [4] submit the DMA */ 1055 + set_dma_type(this, DMA_FOR_READ_ECC_PAGE); 1056 + return start_dma_with_bch_irq(this, desc); 1057 + }
+1619
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
··· 1 + /* 2 + * Freescale GPMI NAND Flash Driver 3 + * 4 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 5 + * Copyright (C) 2008 Embedded Alley Solutions, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along 18 + * with this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 + */ 21 + #include <linux/clk.h> 22 + #include <linux/slab.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/mtd/gpmi-nand.h> 25 + #include <linux/mtd/partitions.h> 26 + 27 + #include "gpmi-nand.h" 28 + 29 + /* add our owner bbt descriptor */ 30 + static uint8_t scan_ff_pattern[] = { 0xff }; 31 + static struct nand_bbt_descr gpmi_bbt_descr = { 32 + .options = 0, 33 + .offs = 0, 34 + .len = 1, 35 + .pattern = scan_ff_pattern 36 + }; 37 + 38 + /* We will use all the (page + OOB). */ 39 + static struct nand_ecclayout gpmi_hw_ecclayout = { 40 + .eccbytes = 0, 41 + .eccpos = { 0, }, 42 + .oobfree = { {.offset = 0, .length = 0} } 43 + }; 44 + 45 + static irqreturn_t bch_irq(int irq, void *cookie) 46 + { 47 + struct gpmi_nand_data *this = cookie; 48 + 49 + gpmi_clear_bch(this); 50 + complete(&this->bch_done); 51 + return IRQ_HANDLED; 52 + } 53 + 54 + /* 55 + * Calculate the ECC strength by hand: 56 + * E : The ECC strength. 57 + * G : the length of Galois Field. 58 + * N : The chunk count of per page. 
59 + * O : the oobsize of the NAND chip. 60 + * M : the metasize of per page. 61 + * 62 + * The formula is : 63 + * E * G * N 64 + * ------------ <= (O - M) 65 + * 8 66 + * 67 + * So, we get E by: 68 + * (O - M) * 8 69 + * E <= ------------- 70 + * G * N 71 + */ 72 + static inline int get_ecc_strength(struct gpmi_nand_data *this) 73 + { 74 + struct bch_geometry *geo = &this->bch_geometry; 75 + struct mtd_info *mtd = &this->mtd; 76 + int ecc_strength; 77 + 78 + ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) 79 + / (geo->gf_len * geo->ecc_chunk_count); 80 + 81 + /* We need the minor even number. */ 82 + return round_down(ecc_strength, 2); 83 + } 84 + 85 + int common_nfc_set_geometry(struct gpmi_nand_data *this) 86 + { 87 + struct bch_geometry *geo = &this->bch_geometry; 88 + struct mtd_info *mtd = &this->mtd; 89 + unsigned int metadata_size; 90 + unsigned int status_size; 91 + unsigned int block_mark_bit_offset; 92 + 93 + /* 94 + * The size of the metadata can be changed, though we set it to 10 95 + * bytes now. But it can't be too large, because we have to save 96 + * enough space for BCH. 97 + */ 98 + geo->metadata_size = 10; 99 + 100 + /* The default for the length of Galois Field. */ 101 + geo->gf_len = 13; 102 + 103 + /* The default for chunk size. There is no oobsize greater then 512. */ 104 + geo->ecc_chunk_size = 512; 105 + while (geo->ecc_chunk_size < mtd->oobsize) 106 + geo->ecc_chunk_size *= 2; /* keep C >= O */ 107 + 108 + geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; 109 + 110 + /* We use the same ECC strength for all chunks. */ 111 + geo->ecc_strength = get_ecc_strength(this); 112 + if (!geo->ecc_strength) { 113 + pr_err("We get a wrong ECC strength.\n"); 114 + return -EINVAL; 115 + } 116 + 117 + geo->page_size = mtd->writesize + mtd->oobsize; 118 + geo->payload_size = mtd->writesize; 119 + 120 + /* 121 + * The auxiliary buffer contains the metadata and the ECC status. 
The 122 + * metadata is padded to the nearest 32-bit boundary. The ECC status 123 + * contains one byte for every ECC chunk, and is also padded to the 124 + * nearest 32-bit boundary. 125 + */ 126 + metadata_size = ALIGN(geo->metadata_size, 4); 127 + status_size = ALIGN(geo->ecc_chunk_count, 4); 128 + 129 + geo->auxiliary_size = metadata_size + status_size; 130 + geo->auxiliary_status_offset = metadata_size; 131 + 132 + if (!this->swap_block_mark) 133 + return 0; 134 + 135 + /* 136 + * We need to compute the byte and bit offsets of 137 + * the physical block mark within the ECC-based view of the page. 138 + * 139 + * NAND chip with 2K page shows below: 140 + * (Block Mark) 141 + * | | 142 + * | D | 143 + * |<---->| 144 + * V V 145 + * +---+----------+-+----------+-+----------+-+----------+-+ 146 + * | M | data |E| data |E| data |E| data |E| 147 + * +---+----------+-+----------+-+----------+-+----------+-+ 148 + * 149 + * The position of block mark moves forward in the ECC-based view 150 + * of page, and the delta is: 151 + * 152 + * E * G * (N - 1) 153 + * D = (---------------- + M) 154 + * 8 155 + * 156 + * With the formula to compute the ECC strength, and the condition 157 + * : C >= O (C is the ecc chunk size) 158 + * 159 + * It's easy to deduce to the following result: 160 + * 161 + * E * G (O - M) C - M C - M 162 + * ----------- <= ------- <= -------- < --------- 163 + * 8 N N (N - 1) 164 + * 165 + * So, we get: 166 + * 167 + * E * G * (N - 1) 168 + * D = (---------------- + M) < C 169 + * 8 170 + * 171 + * The above inequality means the position of block mark 172 + * within the ECC-based view of the page is still in the data chunk, 173 + * and it's NOT in the ECC bits of the chunk. 
174 + * 175 + * Use the following to compute the bit position of the 176 + * physical block mark within the ECC-based view of the page: 177 + * (page_size - D) * 8 178 + * 179 + * --Huang Shijie 180 + */ 181 + block_mark_bit_offset = mtd->writesize * 8 - 182 + (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) 183 + + geo->metadata_size * 8); 184 + 185 + geo->block_mark_byte_offset = block_mark_bit_offset / 8; 186 + geo->block_mark_bit_offset = block_mark_bit_offset % 8; 187 + return 0; 188 + } 189 + 190 + struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 191 + { 192 + int chipnr = this->current_chip; 193 + 194 + return this->dma_chans[chipnr]; 195 + } 196 + 197 + /* Can we use the upper's buffer directly for DMA? */ 198 + void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr) 199 + { 200 + struct scatterlist *sgl = &this->data_sgl; 201 + int ret; 202 + 203 + this->direct_dma_map_ok = true; 204 + 205 + /* first try to map the upper buffer directly */ 206 + sg_init_one(sgl, this->upper_buf, this->upper_len); 207 + ret = dma_map_sg(this->dev, sgl, 1, dr); 208 + if (ret == 0) { 209 + /* We have to use our own DMA buffer. */ 210 + sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE); 211 + 212 + if (dr == DMA_TO_DEVICE) 213 + memcpy(this->data_buffer_dma, this->upper_buf, 214 + this->upper_len); 215 + 216 + ret = dma_map_sg(this->dev, sgl, 1, dr); 217 + if (ret == 0) 218 + pr_err("map failed.\n"); 219 + 220 + this->direct_dma_map_ok = false; 221 + } 222 + } 223 + 224 + /* This will be called after the DMA operation is finished. 
*/ 225 + static void dma_irq_callback(void *param) 226 + { 227 + struct gpmi_nand_data *this = param; 228 + struct completion *dma_c = &this->dma_done; 229 + 230 + complete(dma_c); 231 + 232 + switch (this->dma_type) { 233 + case DMA_FOR_COMMAND: 234 + dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE); 235 + break; 236 + 237 + case DMA_FOR_READ_DATA: 238 + dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); 239 + if (this->direct_dma_map_ok == false) 240 + memcpy(this->upper_buf, this->data_buffer_dma, 241 + this->upper_len); 242 + break; 243 + 244 + case DMA_FOR_WRITE_DATA: 245 + dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); 246 + break; 247 + 248 + case DMA_FOR_READ_ECC_PAGE: 249 + case DMA_FOR_WRITE_ECC_PAGE: 250 + /* We have to wait the BCH interrupt to finish. */ 251 + break; 252 + 253 + default: 254 + pr_err("in wrong DMA operation.\n"); 255 + } 256 + } 257 + 258 + int start_dma_without_bch_irq(struct gpmi_nand_data *this, 259 + struct dma_async_tx_descriptor *desc) 260 + { 261 + struct completion *dma_c = &this->dma_done; 262 + int err; 263 + 264 + init_completion(dma_c); 265 + 266 + desc->callback = dma_irq_callback; 267 + desc->callback_param = this; 268 + dmaengine_submit(desc); 269 + 270 + /* Wait for the interrupt from the DMA block. */ 271 + err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); 272 + if (!err) { 273 + pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type); 274 + gpmi_dump_info(this); 275 + return -ETIMEDOUT; 276 + } 277 + return 0; 278 + } 279 + 280 + /* 281 + * This function is used in BCH reading or BCH writing pages. 282 + * It will wait for the BCH interrupt as long as ONE second. 283 + * Actually, we must wait for two interrupts : 284 + * [1] firstly the DMA interrupt and 285 + * [2] secondly the BCH interrupt. 
286 + */ 287 + int start_dma_with_bch_irq(struct gpmi_nand_data *this, 288 + struct dma_async_tx_descriptor *desc) 289 + { 290 + struct completion *bch_c = &this->bch_done; 291 + int err; 292 + 293 + /* Prepare to receive an interrupt from the BCH block. */ 294 + init_completion(bch_c); 295 + 296 + /* start the DMA */ 297 + start_dma_without_bch_irq(this, desc); 298 + 299 + /* Wait for the interrupt from the BCH block. */ 300 + err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); 301 + if (!err) { 302 + pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); 303 + gpmi_dump_info(this); 304 + return -ETIMEDOUT; 305 + } 306 + return 0; 307 + } 308 + 309 + static int __devinit 310 + acquire_register_block(struct gpmi_nand_data *this, const char *res_name) 311 + { 312 + struct platform_device *pdev = this->pdev; 313 + struct resources *res = &this->resources; 314 + struct resource *r; 315 + void *p; 316 + 317 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); 318 + if (!r) { 319 + pr_err("Can't get resource for %s\n", res_name); 320 + return -ENXIO; 321 + } 322 + 323 + p = ioremap(r->start, resource_size(r)); 324 + if (!p) { 325 + pr_err("Can't remap %s\n", res_name); 326 + return -ENOMEM; 327 + } 328 + 329 + if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) 330 + res->gpmi_regs = p; 331 + else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) 332 + res->bch_regs = p; 333 + else 334 + pr_err("unknown resource name : %s\n", res_name); 335 + 336 + return 0; 337 + } 338 + 339 + static void release_register_block(struct gpmi_nand_data *this) 340 + { 341 + struct resources *res = &this->resources; 342 + if (res->gpmi_regs) 343 + iounmap(res->gpmi_regs); 344 + if (res->bch_regs) 345 + iounmap(res->bch_regs); 346 + res->gpmi_regs = NULL; 347 + res->bch_regs = NULL; 348 + } 349 + 350 + static int __devinit 351 + acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) 352 + { 353 + struct platform_device *pdev = 
this->pdev; 354 + struct resources *res = &this->resources; 355 + const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME; 356 + struct resource *r; 357 + int err; 358 + 359 + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); 360 + if (!r) { 361 + pr_err("Can't get resource for %s\n", res_name); 362 + return -ENXIO; 363 + } 364 + 365 + err = request_irq(r->start, irq_h, 0, res_name, this); 366 + if (err) { 367 + pr_err("Can't own %s\n", res_name); 368 + return err; 369 + } 370 + 371 + res->bch_low_interrupt = r->start; 372 + res->bch_high_interrupt = r->end; 373 + return 0; 374 + } 375 + 376 + static void release_bch_irq(struct gpmi_nand_data *this) 377 + { 378 + struct resources *res = &this->resources; 379 + int i = res->bch_low_interrupt; 380 + 381 + for (; i <= res->bch_high_interrupt; i++) 382 + free_irq(i, this); 383 + } 384 + 385 + static bool gpmi_dma_filter(struct dma_chan *chan, void *param) 386 + { 387 + struct gpmi_nand_data *this = param; 388 + struct resource *r = this->private; 389 + 390 + if (!mxs_dma_is_apbh(chan)) 391 + return false; 392 + /* 393 + * only catch the GPMI dma channels : 394 + * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3 395 + * (These four channels share the same IRQ!) 396 + * 397 + * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7 398 + * (These eight channels share the same IRQ!) 
399 + */ 400 + if (r->start <= chan->chan_id && chan->chan_id <= r->end) { 401 + chan->private = &this->dma_data; 402 + return true; 403 + } 404 + return false; 405 + } 406 + 407 + static void release_dma_channels(struct gpmi_nand_data *this) 408 + { 409 + unsigned int i; 410 + for (i = 0; i < DMA_CHANS; i++) 411 + if (this->dma_chans[i]) { 412 + dma_release_channel(this->dma_chans[i]); 413 + this->dma_chans[i] = NULL; 414 + } 415 + } 416 + 417 + static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) 418 + { 419 + struct platform_device *pdev = this->pdev; 420 + struct gpmi_nand_platform_data *pdata = this->pdata; 421 + struct resources *res = &this->resources; 422 + struct resource *r, *r_dma; 423 + unsigned int i; 424 + 425 + r = platform_get_resource_byname(pdev, IORESOURCE_DMA, 426 + GPMI_NAND_DMA_CHANNELS_RES_NAME); 427 + r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 428 + GPMI_NAND_DMA_INTERRUPT_RES_NAME); 429 + if (!r || !r_dma) { 430 + pr_err("Can't get resource for DMA\n"); 431 + return -ENXIO; 432 + } 433 + 434 + /* used in gpmi_dma_filter() */ 435 + this->private = r; 436 + 437 + for (i = r->start; i <= r->end; i++) { 438 + struct dma_chan *dma_chan; 439 + dma_cap_mask_t mask; 440 + 441 + if (i - r->start >= pdata->max_chip_count) 442 + break; 443 + 444 + dma_cap_zero(mask); 445 + dma_cap_set(DMA_SLAVE, mask); 446 + 447 + /* get the DMA interrupt */ 448 + if (r_dma->start == r_dma->end) { 449 + /* only register the first. 
*/ 450 + if (i == r->start) 451 + this->dma_data.chan_irq = r_dma->start; 452 + else 453 + this->dma_data.chan_irq = NO_IRQ; 454 + } else 455 + this->dma_data.chan_irq = r_dma->start + (i - r->start); 456 + 457 + dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); 458 + if (!dma_chan) 459 + goto acquire_err; 460 + 461 + /* fill the first empty item */ 462 + this->dma_chans[i - r->start] = dma_chan; 463 + } 464 + 465 + res->dma_low_channel = r->start; 466 + res->dma_high_channel = i; 467 + return 0; 468 + 469 + acquire_err: 470 + pr_err("Can't acquire DMA channel %u\n", i); 471 + release_dma_channels(this); 472 + return -EINVAL; 473 + } 474 + 475 + static int __devinit acquire_resources(struct gpmi_nand_data *this) 476 + { 477 + struct resources *res = &this->resources; 478 + int ret; 479 + 480 + ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME); 481 + if (ret) 482 + goto exit_regs; 483 + 484 + ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME); 485 + if (ret) 486 + goto exit_regs; 487 + 488 + ret = acquire_bch_irq(this, bch_irq); 489 + if (ret) 490 + goto exit_regs; 491 + 492 + ret = acquire_dma_channels(this); 493 + if (ret) 494 + goto exit_dma_channels; 495 + 496 + res->clock = clk_get(&this->pdev->dev, NULL); 497 + if (IS_ERR(res->clock)) { 498 + pr_err("can not get the clock\n"); 499 + ret = -ENOENT; 500 + goto exit_clock; 501 + } 502 + return 0; 503 + 504 + exit_clock: 505 + release_dma_channels(this); 506 + exit_dma_channels: 507 + release_bch_irq(this); 508 + exit_regs: 509 + release_register_block(this); 510 + return ret; 511 + } 512 + 513 + static void release_resources(struct gpmi_nand_data *this) 514 + { 515 + struct resources *r = &this->resources; 516 + 517 + clk_put(r->clock); 518 + release_register_block(this); 519 + release_bch_irq(this); 520 + release_dma_channels(this); 521 + } 522 + 523 + static int __devinit init_hardware(struct gpmi_nand_data *this) 524 + { 525 + int ret; 526 + 527 + /* 528 + * 
This structure contains the "safe" GPMI timing that should succeed 529 + * with any NAND Flash device 530 + * (although, with less-than-optimal performance). 531 + */ 532 + struct nand_timing safe_timing = { 533 + .data_setup_in_ns = 80, 534 + .data_hold_in_ns = 60, 535 + .address_setup_in_ns = 25, 536 + .gpmi_sample_delay_in_ns = 6, 537 + .tREA_in_ns = -1, 538 + .tRLOH_in_ns = -1, 539 + .tRHOH_in_ns = -1, 540 + }; 541 + 542 + /* Initialize the hardwares. */ 543 + ret = gpmi_init(this); 544 + if (ret) 545 + return ret; 546 + 547 + this->timing = safe_timing; 548 + return 0; 549 + } 550 + 551 + static int read_page_prepare(struct gpmi_nand_data *this, 552 + void *destination, unsigned length, 553 + void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, 554 + void **use_virt, dma_addr_t *use_phys) 555 + { 556 + struct device *dev = this->dev; 557 + 558 + if (virt_addr_valid(destination)) { 559 + dma_addr_t dest_phys; 560 + 561 + dest_phys = dma_map_single(dev, destination, 562 + length, DMA_FROM_DEVICE); 563 + if (dma_mapping_error(dev, dest_phys)) { 564 + if (alt_size < length) { 565 + pr_err("Alternate buffer is too small\n"); 566 + return -ENOMEM; 567 + } 568 + goto map_failed; 569 + } 570 + *use_virt = destination; 571 + *use_phys = dest_phys; 572 + this->direct_dma_map_ok = true; 573 + return 0; 574 + } 575 + 576 + map_failed: 577 + *use_virt = alt_virt; 578 + *use_phys = alt_phys; 579 + this->direct_dma_map_ok = false; 580 + return 0; 581 + } 582 + 583 + static inline void read_page_end(struct gpmi_nand_data *this, 584 + void *destination, unsigned length, 585 + void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, 586 + void *used_virt, dma_addr_t used_phys) 587 + { 588 + if (this->direct_dma_map_ok) 589 + dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE); 590 + } 591 + 592 + static inline void read_page_swap_end(struct gpmi_nand_data *this, 593 + void *destination, unsigned length, 594 + void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, 
595 + void *used_virt, dma_addr_t used_phys) 596 + { 597 + if (!this->direct_dma_map_ok) 598 + memcpy(destination, alt_virt, length); 599 + } 600 + 601 + static int send_page_prepare(struct gpmi_nand_data *this, 602 + const void *source, unsigned length, 603 + void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, 604 + const void **use_virt, dma_addr_t *use_phys) 605 + { 606 + struct device *dev = this->dev; 607 + 608 + if (virt_addr_valid(source)) { 609 + dma_addr_t source_phys; 610 + 611 + source_phys = dma_map_single(dev, (void *)source, length, 612 + DMA_TO_DEVICE); 613 + if (dma_mapping_error(dev, source_phys)) { 614 + if (alt_size < length) { 615 + pr_err("Alternate buffer is too small\n"); 616 + return -ENOMEM; 617 + } 618 + goto map_failed; 619 + } 620 + *use_virt = source; 621 + *use_phys = source_phys; 622 + return 0; 623 + } 624 + map_failed: 625 + /* 626 + * Copy the content of the source buffer into the alternate 627 + * buffer and set up the return values accordingly. 628 + */ 629 + memcpy(alt_virt, source, length); 630 + 631 + *use_virt = alt_virt; 632 + *use_phys = alt_phys; 633 + return 0; 634 + } 635 + 636 + static void send_page_end(struct gpmi_nand_data *this, 637 + const void *source, unsigned length, 638 + void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, 639 + const void *used_virt, dma_addr_t used_phys) 640 + { 641 + struct device *dev = this->dev; 642 + if (used_virt == source) 643 + dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); 644 + } 645 + 646 + static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) 647 + { 648 + struct device *dev = this->dev; 649 + 650 + if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) 651 + dma_free_coherent(dev, this->page_buffer_size, 652 + this->page_buffer_virt, 653 + this->page_buffer_phys); 654 + kfree(this->cmd_buffer); 655 + kfree(this->data_buffer_dma); 656 + 657 + this->cmd_buffer = NULL; 658 + this->data_buffer_dma = NULL; 659 + this->page_buffer_virt = NULL; 
660 + this->page_buffer_size = 0; 661 + } 662 + 663 + /* Allocate the DMA buffers */ 664 + static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) 665 + { 666 + struct bch_geometry *geo = &this->bch_geometry; 667 + struct device *dev = this->dev; 668 + 669 + /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ 670 + this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA); 671 + if (this->cmd_buffer == NULL) 672 + goto error_alloc; 673 + 674 + /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ 675 + this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA); 676 + if (this->data_buffer_dma == NULL) 677 + goto error_alloc; 678 + 679 + /* 680 + * [3] Allocate the page buffer. 681 + * 682 + * Both the payload buffer and the auxiliary buffer must appear on 683 + * 32-bit boundaries. We presume the size of the payload buffer is a 684 + * power of two and is much larger than four, which guarantees the 685 + * auxiliary buffer will appear on a 32-bit boundary. 686 + */ 687 + this->page_buffer_size = geo->payload_size + geo->auxiliary_size; 688 + this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size, 689 + &this->page_buffer_phys, GFP_DMA); 690 + if (!this->page_buffer_virt) 691 + goto error_alloc; 692 + 693 + 694 + /* Slice up the page buffer. */ 695 + this->payload_virt = this->page_buffer_virt; 696 + this->payload_phys = this->page_buffer_phys; 697 + this->auxiliary_virt = this->payload_virt + geo->payload_size; 698 + this->auxiliary_phys = this->payload_phys + geo->payload_size; 699 + return 0; 700 + 701 + error_alloc: 702 + gpmi_free_dma_buffer(this); 703 + pr_err("allocate DMA buffer ret!!\n"); 704 + return -ENOMEM; 705 + } 706 + 707 + static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) 708 + { 709 + struct nand_chip *chip = mtd->priv; 710 + struct gpmi_nand_data *this = chip->priv; 711 + int ret; 712 + 713 + /* 714 + * Every operation begins with a command byte and a series of zero or 715 + * more address bytes. 
These are distinguished by either the Address 716 + * Latch Enable (ALE) or Command Latch Enable (CLE) signals being 717 + * asserted. When MTD is ready to execute the command, it will deassert 718 + * both latch enables. 719 + * 720 + * Rather than run a separate DMA operation for every single byte, we 721 + * queue them up and run a single DMA operation for the entire series 722 + * of command and data bytes. NAND_CMD_NONE means the END of the queue. 723 + */ 724 + if ((ctrl & (NAND_ALE | NAND_CLE))) { 725 + if (data != NAND_CMD_NONE) 726 + this->cmd_buffer[this->command_length++] = data; 727 + return; 728 + } 729 + 730 + if (!this->command_length) 731 + return; 732 + 733 + ret = gpmi_send_command(this); 734 + if (ret) 735 + pr_err("Chip: %u, Error %d\n", this->current_chip, ret); 736 + 737 + this->command_length = 0; 738 + } 739 + 740 + static int gpmi_dev_ready(struct mtd_info *mtd) 741 + { 742 + struct nand_chip *chip = mtd->priv; 743 + struct gpmi_nand_data *this = chip->priv; 744 + 745 + return gpmi_is_ready(this, this->current_chip); 746 + } 747 + 748 + static void gpmi_select_chip(struct mtd_info *mtd, int chipnr) 749 + { 750 + struct nand_chip *chip = mtd->priv; 751 + struct gpmi_nand_data *this = chip->priv; 752 + 753 + if ((this->current_chip < 0) && (chipnr >= 0)) 754 + gpmi_begin(this); 755 + else if ((this->current_chip >= 0) && (chipnr < 0)) 756 + gpmi_end(this); 757 + 758 + this->current_chip = chipnr; 759 + } 760 + 761 + static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 762 + { 763 + struct nand_chip *chip = mtd->priv; 764 + struct gpmi_nand_data *this = chip->priv; 765 + 766 + pr_debug("len is %d\n", len); 767 + this->upper_buf = buf; 768 + this->upper_len = len; 769 + 770 + gpmi_read_data(this); 771 + } 772 + 773 + static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 774 + { 775 + struct nand_chip *chip = mtd->priv; 776 + struct gpmi_nand_data *this = chip->priv; 777 + 778 + pr_debug("len is %d\n", 
len); 779 + this->upper_buf = (uint8_t *)buf; 780 + this->upper_len = len; 781 + 782 + gpmi_send_data(this); 783 + } 784 + 785 + static uint8_t gpmi_read_byte(struct mtd_info *mtd) 786 + { 787 + struct nand_chip *chip = mtd->priv; 788 + struct gpmi_nand_data *this = chip->priv; 789 + uint8_t *buf = this->data_buffer_dma; 790 + 791 + gpmi_read_buf(mtd, buf, 1); 792 + return buf[0]; 793 + } 794 + 795 + /* 796 + * Handles block mark swapping. 797 + * It can be called in swapping the block mark, or swapping it back, 798 + * because the the operations are the same. 799 + */ 800 + static void block_mark_swapping(struct gpmi_nand_data *this, 801 + void *payload, void *auxiliary) 802 + { 803 + struct bch_geometry *nfc_geo = &this->bch_geometry; 804 + unsigned char *p; 805 + unsigned char *a; 806 + unsigned int bit; 807 + unsigned char mask; 808 + unsigned char from_data; 809 + unsigned char from_oob; 810 + 811 + if (!this->swap_block_mark) 812 + return; 813 + 814 + /* 815 + * If control arrives here, we're swapping. Make some convenience 816 + * variables. 817 + */ 818 + bit = nfc_geo->block_mark_bit_offset; 819 + p = payload + nfc_geo->block_mark_byte_offset; 820 + a = auxiliary; 821 + 822 + /* 823 + * Get the byte from the data area that overlays the block mark. Since 824 + * the ECC engine applies its own view to the bits in the page, the 825 + * physical block mark won't (in general) appear on a byte boundary in 826 + * the data. 827 + */ 828 + from_data = (p[0] >> bit) | (p[1] << (8 - bit)); 829 + 830 + /* Get the byte from the OOB. */ 831 + from_oob = a[0]; 832 + 833 + /* Swap them. 
*/ 834 + a[0] = from_data; 835 + 836 + mask = (0x1 << bit) - 1; 837 + p[0] = (p[0] & mask) | (from_oob << bit); 838 + 839 + mask = ~0 << bit; 840 + p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); 841 + } 842 + 843 + static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 844 + uint8_t *buf, int page) 845 + { 846 + struct gpmi_nand_data *this = chip->priv; 847 + struct bch_geometry *nfc_geo = &this->bch_geometry; 848 + void *payload_virt; 849 + dma_addr_t payload_phys; 850 + void *auxiliary_virt; 851 + dma_addr_t auxiliary_phys; 852 + unsigned int i; 853 + unsigned char *status; 854 + unsigned int failed; 855 + unsigned int corrected; 856 + int ret; 857 + 858 + pr_debug("page number is : %d\n", page); 859 + ret = read_page_prepare(this, buf, mtd->writesize, 860 + this->payload_virt, this->payload_phys, 861 + nfc_geo->payload_size, 862 + &payload_virt, &payload_phys); 863 + if (ret) { 864 + pr_err("Inadequate DMA buffer\n"); 865 + ret = -ENOMEM; 866 + return ret; 867 + } 868 + auxiliary_virt = this->auxiliary_virt; 869 + auxiliary_phys = this->auxiliary_phys; 870 + 871 + /* go! */ 872 + ret = gpmi_read_page(this, payload_phys, auxiliary_phys); 873 + read_page_end(this, buf, mtd->writesize, 874 + this->payload_virt, this->payload_phys, 875 + nfc_geo->payload_size, 876 + payload_virt, payload_phys); 877 + if (ret) { 878 + pr_err("Error in ECC-based read: %d\n", ret); 879 + goto exit_nfc; 880 + } 881 + 882 + /* handle the block mark swapping */ 883 + block_mark_swapping(this, payload_virt, auxiliary_virt); 884 + 885 + /* Loop over status bytes, accumulating ECC status. 
*/ 886 + failed = 0; 887 + corrected = 0; 888 + status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 889 + 890 + for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 891 + if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 892 + continue; 893 + 894 + if (*status == STATUS_UNCORRECTABLE) { 895 + failed++; 896 + continue; 897 + } 898 + corrected += *status; 899 + } 900 + 901 + /* 902 + * Propagate ECC status to the owning MTD only when failed or 903 + * corrected times nearly reaches our ECC correction threshold. 904 + */ 905 + if (failed || corrected >= (nfc_geo->ecc_strength - 1)) { 906 + mtd->ecc_stats.failed += failed; 907 + mtd->ecc_stats.corrected += corrected; 908 + } 909 + 910 + /* 911 + * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for 912 + * details about our policy for delivering the OOB. 913 + * 914 + * We fill the caller's buffer with set bits, and then copy the block 915 + * mark to th caller's buffer. Note that, if block mark swapping was 916 + * necessary, it has already been done, so we can rely on the first 917 + * byte of the auxiliary buffer to contain the block mark. 918 + */ 919 + memset(chip->oob_poi, ~0, mtd->oobsize); 920 + chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; 921 + 922 + read_page_swap_end(this, buf, mtd->writesize, 923 + this->payload_virt, this->payload_phys, 924 + nfc_geo->payload_size, 925 + payload_virt, payload_phys); 926 + exit_nfc: 927 + return ret; 928 + } 929 + 930 + static void gpmi_ecc_write_page(struct mtd_info *mtd, 931 + struct nand_chip *chip, const uint8_t *buf) 932 + { 933 + struct gpmi_nand_data *this = chip->priv; 934 + struct bch_geometry *nfc_geo = &this->bch_geometry; 935 + const void *payload_virt; 936 + dma_addr_t payload_phys; 937 + const void *auxiliary_virt; 938 + dma_addr_t auxiliary_phys; 939 + int ret; 940 + 941 + pr_debug("ecc write page.\n"); 942 + if (this->swap_block_mark) { 943 + /* 944 + * If control arrives here, we're doing block mark swapping. 
945 + * Since we can't modify the caller's buffers, we must copy them 946 + * into our own. 947 + */ 948 + memcpy(this->payload_virt, buf, mtd->writesize); 949 + payload_virt = this->payload_virt; 950 + payload_phys = this->payload_phys; 951 + 952 + memcpy(this->auxiliary_virt, chip->oob_poi, 953 + nfc_geo->auxiliary_size); 954 + auxiliary_virt = this->auxiliary_virt; 955 + auxiliary_phys = this->auxiliary_phys; 956 + 957 + /* Handle block mark swapping. */ 958 + block_mark_swapping(this, 959 + (void *) payload_virt, (void *) auxiliary_virt); 960 + } else { 961 + /* 962 + * If control arrives here, we're not doing block mark swapping, 963 + * so we can to try and use the caller's buffers. 964 + */ 965 + ret = send_page_prepare(this, 966 + buf, mtd->writesize, 967 + this->payload_virt, this->payload_phys, 968 + nfc_geo->payload_size, 969 + &payload_virt, &payload_phys); 970 + if (ret) { 971 + pr_err("Inadequate payload DMA buffer\n"); 972 + return; 973 + } 974 + 975 + ret = send_page_prepare(this, 976 + chip->oob_poi, mtd->oobsize, 977 + this->auxiliary_virt, this->auxiliary_phys, 978 + nfc_geo->auxiliary_size, 979 + &auxiliary_virt, &auxiliary_phys); 980 + if (ret) { 981 + pr_err("Inadequate auxiliary DMA buffer\n"); 982 + goto exit_auxiliary; 983 + } 984 + } 985 + 986 + /* Ask the NFC. */ 987 + ret = gpmi_send_page(this, payload_phys, auxiliary_phys); 988 + if (ret) 989 + pr_err("Error in ECC-based write: %d\n", ret); 990 + 991 + if (!this->swap_block_mark) { 992 + send_page_end(this, chip->oob_poi, mtd->oobsize, 993 + this->auxiliary_virt, this->auxiliary_phys, 994 + nfc_geo->auxiliary_size, 995 + auxiliary_virt, auxiliary_phys); 996 + exit_auxiliary: 997 + send_page_end(this, buf, mtd->writesize, 998 + this->payload_virt, this->payload_phys, 999 + nfc_geo->payload_size, 1000 + payload_virt, payload_phys); 1001 + } 1002 + } 1003 + 1004 + /* 1005 + * There are several places in this driver where we have to handle the OOB and 1006 + * block marks. 
This is the function where things are the most complicated, so 1007 + * this is where we try to explain it all. All the other places refer back to 1008 + * here. 1009 + * 1010 + * These are the rules, in order of decreasing importance: 1011 + * 1012 + * 1) Nothing the caller does can be allowed to imperil the block mark. 1013 + * 1014 + * 2) In read operations, the first byte of the OOB we return must reflect the 1015 + * true state of the block mark, no matter where that block mark appears in 1016 + * the physical page. 1017 + * 1018 + * 3) ECC-based read operations return an OOB full of set bits (since we never 1019 + * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads 1020 + * return). 1021 + * 1022 + * 4) "Raw" read operations return a direct view of the physical bytes in the 1023 + * page, using the conventional definition of which bytes are data and which 1024 + * are OOB. This gives the caller a way to see the actual, physical bytes 1025 + * in the page, without the distortions applied by our ECC engine. 1026 + * 1027 + * 1028 + * What we do for this specific read operation depends on two questions: 1029 + * 1030 + * 1) Are we doing a "raw" read, or an ECC-based read? 1031 + * 1032 + * 2) Are we using block mark swapping or transcription? 1033 + * 1034 + * There are four cases, illustrated by the following Karnaugh map: 1035 + * 1036 + * | Raw | ECC-based | 1037 + * -------------+-------------------------+-------------------------+ 1038 + * | Read the conventional | | 1039 + * | OOB at the end of the | | 1040 + * Swapping | page and return it. It | | 1041 + * | contains exactly what | | 1042 + * | we want. | Read the block mark and | 1043 + * -------------+-------------------------+ return it in a buffer | 1044 + * | Read the conventional | full of set bits. | 1045 + * | OOB at the end of the | | 1046 + * | page and also the block | | 1047 + * Transcribing | mark in the metadata. 
| | 1048 + * | Copy the block mark | | 1049 + * | into the first byte of | | 1050 + * | the OOB. | | 1051 + * -------------+-------------------------+-------------------------+ 1052 + * 1053 + * Note that we break rule #4 in the Transcribing/Raw case because we're not 1054 + * giving an accurate view of the actual, physical bytes in the page (we're 1055 + * overwriting the block mark). That's OK because it's more important to follow 1056 + * rule #2. 1057 + * 1058 + * It turns out that knowing whether we want an "ECC-based" or "raw" read is not 1059 + * easy. When reading a page, for example, the NAND Flash MTD code calls our 1060 + * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 1061 + * ECC-based or raw view of the page is implicit in which function it calls 1062 + * (there is a similar pair of ECC-based/raw functions for writing). 1063 + * 1064 + * Since MTD assumes the OOB is not covered by ECC, there is no pair of 1065 + * ECC-based/raw functions for reading or or writing the OOB. The fact that the 1066 + * caller wants an ECC-based or raw view of the page is not propagated down to 1067 + * this driver. 1068 + */ 1069 + static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1070 + int page, int sndcmd) 1071 + { 1072 + struct gpmi_nand_data *this = chip->priv; 1073 + 1074 + pr_debug("page number is %d\n", page); 1075 + /* clear the OOB buffer */ 1076 + memset(chip->oob_poi, ~0, mtd->oobsize); 1077 + 1078 + /* Read out the conventional OOB. */ 1079 + chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1080 + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1081 + 1082 + /* 1083 + * Now, we want to make sure the block mark is correct. In the 1084 + * Swapping/Raw case, we already have it. Otherwise, we need to 1085 + * explicitly read it. 1086 + */ 1087 + if (!this->swap_block_mark) { 1088 + /* Read the block mark into the first byte of the OOB buffer. 
*/ 1089 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 1090 + chip->oob_poi[0] = chip->read_byte(mtd); 1091 + } 1092 + 1093 + /* 1094 + * Return true, indicating that the next call to this function must send 1095 + * a command. 1096 + */ 1097 + return true; 1098 + } 1099 + 1100 + static int 1101 + gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) 1102 + { 1103 + /* 1104 + * The BCH will use all the (page + oob). 1105 + * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. 1106 + * But it can not stop some ioctls such MEMWRITEOOB which uses 1107 + * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit 1108 + * these ioctls too. 1109 + */ 1110 + return -EPERM; 1111 + } 1112 + 1113 + static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) 1114 + { 1115 + struct nand_chip *chip = mtd->priv; 1116 + struct gpmi_nand_data *this = chip->priv; 1117 + int block, ret = 0; 1118 + uint8_t *block_mark; 1119 + int column, page, status, chipnr; 1120 + 1121 + /* Get block number */ 1122 + block = (int)(ofs >> chip->bbt_erase_shift); 1123 + if (chip->bbt) 1124 + chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 1125 + 1126 + /* Do we have a flash based bad block table ? */ 1127 + if (chip->options & NAND_BBT_USE_FLASH) 1128 + ret = nand_update_bbt(mtd, ofs); 1129 + else { 1130 + chipnr = (int)(ofs >> chip->chip_shift); 1131 + chip->select_chip(mtd, chipnr); 1132 + 1133 + column = this->swap_block_mark ? mtd->writesize : 0; 1134 + 1135 + /* Write the block mark. 
*/ 1136 + block_mark = this->data_buffer_dma; 1137 + block_mark[0] = 0; /* bad block marker */ 1138 + 1139 + /* Shift to get page */ 1140 + page = (int)(ofs >> chip->page_shift); 1141 + 1142 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page); 1143 + chip->write_buf(mtd, block_mark, 1); 1144 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1145 + 1146 + status = chip->waitfunc(mtd, chip); 1147 + if (status & NAND_STATUS_FAIL) 1148 + ret = -EIO; 1149 + 1150 + chip->select_chip(mtd, -1); 1151 + } 1152 + if (!ret) 1153 + mtd->ecc_stats.badblocks++; 1154 + 1155 + return ret; 1156 + } 1157 + 1158 + static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) 1159 + { 1160 + struct boot_rom_geometry *geometry = &this->rom_geometry; 1161 + 1162 + /* 1163 + * Set the boot block stride size. 1164 + * 1165 + * In principle, we should be reading this from the OTP bits, since 1166 + * that's where the ROM is going to get it. In fact, we don't have any 1167 + * way to read the OTP bits, so we go with the default and hope for the 1168 + * best. 1169 + */ 1170 + geometry->stride_size_in_pages = 64; 1171 + 1172 + /* 1173 + * Set the search area stride exponent. 1174 + * 1175 + * In principle, we should be reading this from the OTP bits, since 1176 + * that's where the ROM is going to get it. In fact, we don't have any 1177 + * way to read the OTP bits, so we go with the default and hope for the 1178 + * best. 
1179 + */ 1180 + geometry->search_area_stride_exponent = 2; 1181 + return 0; 1182 + } 1183 + 1184 + static const char *fingerprint = "STMP"; 1185 + static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) 1186 + { 1187 + struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1188 + struct device *dev = this->dev; 1189 + struct mtd_info *mtd = &this->mtd; 1190 + struct nand_chip *chip = &this->nand; 1191 + unsigned int search_area_size_in_strides; 1192 + unsigned int stride; 1193 + unsigned int page; 1194 + loff_t byte; 1195 + uint8_t *buffer = chip->buffers->databuf; 1196 + int saved_chip_number; 1197 + int found_an_ncb_fingerprint = false; 1198 + 1199 + /* Compute the number of strides in a search area. */ 1200 + search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; 1201 + 1202 + saved_chip_number = this->current_chip; 1203 + chip->select_chip(mtd, 0); 1204 + 1205 + /* 1206 + * Loop through the first search area, looking for the NCB fingerprint. 1207 + */ 1208 + dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); 1209 + 1210 + for (stride = 0; stride < search_area_size_in_strides; stride++) { 1211 + /* Compute the page and byte addresses. */ 1212 + page = stride * rom_geo->stride_size_in_pages; 1213 + byte = page * mtd->writesize; 1214 + 1215 + dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); 1216 + 1217 + /* 1218 + * Read the NCB fingerprint. The fingerprint is four bytes long 1219 + * and starts in the 12th byte of the page. 1220 + */ 1221 + chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page); 1222 + chip->read_buf(mtd, buffer, strlen(fingerprint)); 1223 + 1224 + /* Look for the fingerprint. 
*/ 1225 + if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { 1226 + found_an_ncb_fingerprint = true; 1227 + break; 1228 + } 1229 + 1230 + } 1231 + 1232 + chip->select_chip(mtd, saved_chip_number); 1233 + 1234 + if (found_an_ncb_fingerprint) 1235 + dev_dbg(dev, "\tFound a fingerprint\n"); 1236 + else 1237 + dev_dbg(dev, "\tNo fingerprint found\n"); 1238 + return found_an_ncb_fingerprint; 1239 + } 1240 + 1241 + /* Writes a transcription stamp. */ 1242 + static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) 1243 + { 1244 + struct device *dev = this->dev; 1245 + struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1246 + struct mtd_info *mtd = &this->mtd; 1247 + struct nand_chip *chip = &this->nand; 1248 + unsigned int block_size_in_pages; 1249 + unsigned int search_area_size_in_strides; 1250 + unsigned int search_area_size_in_pages; 1251 + unsigned int search_area_size_in_blocks; 1252 + unsigned int block; 1253 + unsigned int stride; 1254 + unsigned int page; 1255 + loff_t byte; 1256 + uint8_t *buffer = chip->buffers->databuf; 1257 + int saved_chip_number; 1258 + int status; 1259 + 1260 + /* Compute the search area geometry. */ 1261 + block_size_in_pages = mtd->erasesize / mtd->writesize; 1262 + search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; 1263 + search_area_size_in_pages = search_area_size_in_strides * 1264 + rom_geo->stride_size_in_pages; 1265 + search_area_size_in_blocks = 1266 + (search_area_size_in_pages + (block_size_in_pages - 1)) / 1267 + block_size_in_pages; 1268 + 1269 + dev_dbg(dev, "Search Area Geometry :\n"); 1270 + dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks); 1271 + dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); 1272 + dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); 1273 + 1274 + /* Select chip 0. 
*/ 1275 + saved_chip_number = this->current_chip; 1276 + chip->select_chip(mtd, 0); 1277 + 1278 + /* Loop over blocks in the first search area, erasing them. */ 1279 + dev_dbg(dev, "Erasing the search area...\n"); 1280 + 1281 + for (block = 0; block < search_area_size_in_blocks; block++) { 1282 + /* Compute the page address. */ 1283 + page = block * block_size_in_pages; 1284 + 1285 + /* Erase this block. */ 1286 + dev_dbg(dev, "\tErasing block 0x%x\n", block); 1287 + chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); 1288 + chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); 1289 + 1290 + /* Wait for the erase to finish. */ 1291 + status = chip->waitfunc(mtd, chip); 1292 + if (status & NAND_STATUS_FAIL) 1293 + dev_err(dev, "[%s] Erase failed.\n", __func__); 1294 + } 1295 + 1296 + /* Write the NCB fingerprint into the page buffer. */ 1297 + memset(buffer, ~0, mtd->writesize); 1298 + memset(chip->oob_poi, ~0, mtd->oobsize); 1299 + memcpy(buffer + 12, fingerprint, strlen(fingerprint)); 1300 + 1301 + /* Loop through the first search area, writing NCB fingerprints. */ 1302 + dev_dbg(dev, "Writing NCB fingerprints...\n"); 1303 + for (stride = 0; stride < search_area_size_in_strides; stride++) { 1304 + /* Compute the page and byte addresses. */ 1305 + page = stride * rom_geo->stride_size_in_pages; 1306 + byte = page * mtd->writesize; 1307 + 1308 + /* Write the first page of the current stride. */ 1309 + dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); 1310 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 1311 + chip->ecc.write_page_raw(mtd, chip, buffer); 1312 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1313 + 1314 + /* Wait for the write to finish. */ 1315 + status = chip->waitfunc(mtd, chip); 1316 + if (status & NAND_STATUS_FAIL) 1317 + dev_err(dev, "[%s] Write failed.\n", __func__); 1318 + } 1319 + 1320 + /* Deselect chip 0. 
*/ 1321 + chip->select_chip(mtd, saved_chip_number); 1322 + return 0; 1323 + } 1324 + 1325 + static int __devinit mx23_boot_init(struct gpmi_nand_data *this) 1326 + { 1327 + struct device *dev = this->dev; 1328 + struct nand_chip *chip = &this->nand; 1329 + struct mtd_info *mtd = &this->mtd; 1330 + unsigned int block_count; 1331 + unsigned int block; 1332 + int chipnr; 1333 + int page; 1334 + loff_t byte; 1335 + uint8_t block_mark; 1336 + int ret = 0; 1337 + 1338 + /* 1339 + * If control arrives here, we can't use block mark swapping, which 1340 + * means we're forced to use transcription. First, scan for the 1341 + * transcription stamp. If we find it, then we don't have to do 1342 + * anything -- the block marks are already transcribed. 1343 + */ 1344 + if (mx23_check_transcription_stamp(this)) 1345 + return 0; 1346 + 1347 + /* 1348 + * If control arrives here, we couldn't find a transcription stamp, so 1349 + * so we presume the block marks are in the conventional location. 1350 + */ 1351 + dev_dbg(dev, "Transcribing bad block marks...\n"); 1352 + 1353 + /* Compute the number of blocks in the entire medium. */ 1354 + block_count = chip->chipsize >> chip->phys_erase_shift; 1355 + 1356 + /* 1357 + * Loop over all the blocks in the medium, transcribing block marks as 1358 + * we go. 1359 + */ 1360 + for (block = 0; block < block_count; block++) { 1361 + /* 1362 + * Compute the chip, page and byte addresses for this block's 1363 + * conventional mark. 1364 + */ 1365 + chipnr = block >> (chip->chip_shift - chip->phys_erase_shift); 1366 + page = block << (chip->phys_erase_shift - chip->page_shift); 1367 + byte = block << chip->phys_erase_shift; 1368 + 1369 + /* Send the command to read the conventional block mark. */ 1370 + chip->select_chip(mtd, chipnr); 1371 + chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1372 + block_mark = chip->read_byte(mtd); 1373 + chip->select_chip(mtd, -1); 1374 + 1375 + /* 1376 + * Check if the block is marked bad. 
If so, we need to mark it 1377 + * again, but this time the result will be a mark in the 1378 + * location where we transcribe block marks. 1379 + */ 1380 + if (block_mark != 0xff) { 1381 + dev_dbg(dev, "Transcribing mark in block %u\n", block); 1382 + ret = chip->block_markbad(mtd, byte); 1383 + if (ret) 1384 + dev_err(dev, "Failed to mark block bad with " 1385 + "ret %d\n", ret); 1386 + } 1387 + } 1388 + 1389 + /* Write the stamp that indicates we've transcribed the block marks. */ 1390 + mx23_write_transcription_stamp(this); 1391 + return 0; 1392 + } 1393 + 1394 + static int __devinit nand_boot_init(struct gpmi_nand_data *this) 1395 + { 1396 + nand_boot_set_geometry(this); 1397 + 1398 + /* This is ROM arch-specific initilization before the BBT scanning. */ 1399 + if (GPMI_IS_MX23(this)) 1400 + return mx23_boot_init(this); 1401 + return 0; 1402 + } 1403 + 1404 + static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this) 1405 + { 1406 + int ret; 1407 + 1408 + /* Free the temporary DMA memory for reading ID. */ 1409 + gpmi_free_dma_buffer(this); 1410 + 1411 + /* Set up the NFC geometry which is used by BCH. */ 1412 + ret = bch_set_geometry(this); 1413 + if (ret) { 1414 + pr_err("set geometry ret : %d\n", ret); 1415 + return ret; 1416 + } 1417 + 1418 + /* Alloc the new DMA buffers according to the pagesize and oobsize */ 1419 + return gpmi_alloc_dma_buffer(this); 1420 + } 1421 + 1422 + static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) 1423 + { 1424 + int ret; 1425 + 1426 + /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ 1427 + if (GPMI_IS_MX23(this)) 1428 + this->swap_block_mark = false; 1429 + else 1430 + this->swap_block_mark = true; 1431 + 1432 + /* Set up the medium geometry */ 1433 + ret = gpmi_set_geometry(this); 1434 + if (ret) 1435 + return ret; 1436 + 1437 + /* NAND boot init, depends on the gpmi_set_geometry(). 
*/ 1438 + return nand_boot_init(this); 1439 + } 1440 + 1441 + static int gpmi_scan_bbt(struct mtd_info *mtd) 1442 + { 1443 + struct nand_chip *chip = mtd->priv; 1444 + struct gpmi_nand_data *this = chip->priv; 1445 + int ret; 1446 + 1447 + /* Prepare for the BBT scan. */ 1448 + ret = gpmi_pre_bbt_scan(this); 1449 + if (ret) 1450 + return ret; 1451 + 1452 + /* use the default BBT implementation */ 1453 + return nand_default_bbt(mtd); 1454 + } 1455 + 1456 + void gpmi_nfc_exit(struct gpmi_nand_data *this) 1457 + { 1458 + nand_release(&this->mtd); 1459 + gpmi_free_dma_buffer(this); 1460 + } 1461 + 1462 + static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) 1463 + { 1464 + struct gpmi_nand_platform_data *pdata = this->pdata; 1465 + struct mtd_info *mtd = &this->mtd; 1466 + struct nand_chip *chip = &this->nand; 1467 + int ret; 1468 + 1469 + /* init current chip */ 1470 + this->current_chip = -1; 1471 + 1472 + /* init the MTD data structures */ 1473 + mtd->priv = chip; 1474 + mtd->name = "gpmi-nand"; 1475 + mtd->owner = THIS_MODULE; 1476 + 1477 + /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. 
*/ 1478 + chip->priv = this; 1479 + chip->select_chip = gpmi_select_chip; 1480 + chip->cmd_ctrl = gpmi_cmd_ctrl; 1481 + chip->dev_ready = gpmi_dev_ready; 1482 + chip->read_byte = gpmi_read_byte; 1483 + chip->read_buf = gpmi_read_buf; 1484 + chip->write_buf = gpmi_write_buf; 1485 + chip->ecc.read_page = gpmi_ecc_read_page; 1486 + chip->ecc.write_page = gpmi_ecc_write_page; 1487 + chip->ecc.read_oob = gpmi_ecc_read_oob; 1488 + chip->ecc.write_oob = gpmi_ecc_write_oob; 1489 + chip->scan_bbt = gpmi_scan_bbt; 1490 + chip->badblock_pattern = &gpmi_bbt_descr; 1491 + chip->block_markbad = gpmi_block_markbad; 1492 + chip->options |= NAND_NO_SUBPAGE_WRITE; 1493 + chip->ecc.mode = NAND_ECC_HW; 1494 + chip->ecc.size = 1; 1495 + chip->ecc.layout = &gpmi_hw_ecclayout; 1496 + 1497 + /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ 1498 + this->bch_geometry.payload_size = 1024; 1499 + this->bch_geometry.auxiliary_size = 128; 1500 + ret = gpmi_alloc_dma_buffer(this); 1501 + if (ret) 1502 + goto err_out; 1503 + 1504 + ret = nand_scan(mtd, pdata->max_chip_count); 1505 + if (ret) { 1506 + pr_err("Chip scan failed\n"); 1507 + goto err_out; 1508 + } 1509 + 1510 + ret = mtd_device_parse_register(mtd, NULL, NULL, 1511 + pdata->partitions, pdata->partition_count); 1512 + if (ret) 1513 + goto err_out; 1514 + return 0; 1515 + 1516 + err_out: 1517 + gpmi_nfc_exit(this); 1518 + return ret; 1519 + } 1520 + 1521 + static int __devinit gpmi_nand_probe(struct platform_device *pdev) 1522 + { 1523 + struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data; 1524 + struct gpmi_nand_data *this; 1525 + int ret; 1526 + 1527 + this = kzalloc(sizeof(*this), GFP_KERNEL); 1528 + if (!this) { 1529 + pr_err("Failed to allocate per-device memory\n"); 1530 + return -ENOMEM; 1531 + } 1532 + 1533 + platform_set_drvdata(pdev, this); 1534 + this->pdev = pdev; 1535 + this->dev = &pdev->dev; 1536 + this->pdata = pdata; 1537 + 1538 + if (pdata->platform_init) { 1539 + ret = 
pdata->platform_init(); 1540 + if (ret) 1541 + goto platform_init_error; 1542 + } 1543 + 1544 + ret = acquire_resources(this); 1545 + if (ret) 1546 + goto exit_acquire_resources; 1547 + 1548 + ret = init_hardware(this); 1549 + if (ret) 1550 + goto exit_nfc_init; 1551 + 1552 + ret = gpmi_nfc_init(this); 1553 + if (ret) 1554 + goto exit_nfc_init; 1555 + 1556 + return 0; 1557 + 1558 + exit_nfc_init: 1559 + release_resources(this); 1560 + platform_init_error: 1561 + exit_acquire_resources: 1562 + platform_set_drvdata(pdev, NULL); 1563 + kfree(this); 1564 + return ret; 1565 + } 1566 + 1567 + static int __exit gpmi_nand_remove(struct platform_device *pdev) 1568 + { 1569 + struct gpmi_nand_data *this = platform_get_drvdata(pdev); 1570 + 1571 + gpmi_nfc_exit(this); 1572 + release_resources(this); 1573 + platform_set_drvdata(pdev, NULL); 1574 + kfree(this); 1575 + return 0; 1576 + } 1577 + 1578 + static const struct platform_device_id gpmi_ids[] = { 1579 + { 1580 + .name = "imx23-gpmi-nand", 1581 + .driver_data = IS_MX23, 1582 + }, { 1583 + .name = "imx28-gpmi-nand", 1584 + .driver_data = IS_MX28, 1585 + }, {}, 1586 + }; 1587 + 1588 + static struct platform_driver gpmi_nand_driver = { 1589 + .driver = { 1590 + .name = "gpmi-nand", 1591 + }, 1592 + .probe = gpmi_nand_probe, 1593 + .remove = __exit_p(gpmi_nand_remove), 1594 + .id_table = gpmi_ids, 1595 + }; 1596 + 1597 + static int __init gpmi_nand_init(void) 1598 + { 1599 + int err; 1600 + 1601 + err = platform_driver_register(&gpmi_nand_driver); 1602 + if (err == 0) 1603 + printk(KERN_INFO "GPMI NAND driver registered. 
(IMX)\n"); 1604 + else 1605 + pr_err("i.MX GPMI NAND driver registration failed\n"); 1606 + return err; 1607 + } 1608 + 1609 + static void __exit gpmi_nand_exit(void) 1610 + { 1611 + platform_driver_unregister(&gpmi_nand_driver); 1612 + } 1613 + 1614 + module_init(gpmi_nand_init); 1615 + module_exit(gpmi_nand_exit); 1616 + 1617 + MODULE_AUTHOR("Freescale Semiconductor, Inc."); 1618 + MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); 1619 + MODULE_LICENSE("GPL");
+273
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
#define __DRIVERS_MTD_NAND_GPMI_NAND_H

#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <mach/dma.h>

/* Hardware resources (register windows, IRQs, DMA channels, clock). */
struct resources {
	void          *gpmi_regs;
	void          *bch_regs;
	unsigned int  bch_low_interrupt;
	unsigned int  bch_high_interrupt;
	unsigned int  dma_low_channel;
	unsigned int  dma_high_channel;
	struct clk    *clock;
};

/**
 * struct bch_geometry - BCH geometry description.
 * @gf_len:                   The length of Galois Field. (e.g., 13 or 14)
 * @ecc_strength:             A number that describes the strength of the ECC
 *                            algorithm.
 * @page_size:                The size, in bytes, of a physical page, including
 *                            both data and OOB.
 * @metadata_size:            The size, in bytes, of the metadata.
 * @ecc_chunk_size:           The size, in bytes, of a single ECC chunk. Note
 *                            the first chunk in the page includes both data and
 *                            metadata, so it's a bit larger than this value.
 * @ecc_chunk_count:          The number of ECC chunks in the page,
 * @payload_size:             The size, in bytes, of the payload buffer.
 * @auxiliary_size:           The size, in bytes, of the auxiliary buffer.
 * @auxiliary_status_offset:  The offset into the auxiliary buffer at which
 *                            the ECC status appears.
 * @block_mark_byte_offset:   The byte offset in the ECC-based page view at
 *                            which the underlying physical block mark appears.
 * @block_mark_bit_offset:    The bit offset into the ECC-based page view at
 *                            which the underlying physical block mark appears.
 */
struct bch_geometry {
	unsigned int  gf_len;
	unsigned int  ecc_strength;
	unsigned int  page_size;
	unsigned int  metadata_size;
	unsigned int  ecc_chunk_size;
	unsigned int  ecc_chunk_count;
	unsigned int  payload_size;
	unsigned int  auxiliary_size;
	unsigned int  auxiliary_status_offset;
	unsigned int  block_mark_byte_offset;
	unsigned int  block_mark_bit_offset;
};

/**
 * struct boot_rom_geometry - Boot ROM geometry description.
 * @stride_size_in_pages:        The size of a boot block stride, in pages.
 * @search_area_stride_exponent: The logarithm to base 2 of the size of a
 *                               search area in boot block strides.
 */
struct boot_rom_geometry {
	unsigned int  stride_size_in_pages;
	unsigned int  search_area_stride_exponent;
};

/* DMA operations types */
enum dma_ops_type {
	DMA_FOR_COMMAND = 1,
	DMA_FOR_READ_DATA,
	DMA_FOR_WRITE_DATA,
	DMA_FOR_READ_ECC_PAGE,
	DMA_FOR_WRITE_ECC_PAGE
};

/**
 * struct nand_timing - Fundamental timing attributes for NAND.
 * @data_setup_in_ns:         The data setup time, in nanoseconds. Usually the
 *                            maximum of tDS and tWP. A negative value
 *                            indicates this characteristic isn't known.
 * @data_hold_in_ns:          The data hold time, in nanoseconds. Usually the
 *                            maximum of tDH, tWH and tREH. A negative value
 *                            indicates this characteristic isn't known.
 * @address_setup_in_ns:      The address setup time, in nanoseconds. Usually
 *                            the maximum of tCLS, tCS and tALS. A negative
 *                            value indicates this characteristic isn't known.
 * @gpmi_sample_delay_in_ns:  A GPMI-specific timing parameter. A negative value
 *                            indicates this characteristic isn't known.
 * @tREA_in_ns:               tREA, in nanoseconds, from the data sheet. A
 *                            negative value indicates this characteristic isn't
 *                            known.
 * @tRLOH_in_ns:              tRLOH, in nanoseconds, from the data sheet. A
 *                            negative value indicates this characteristic isn't
 *                            known.
 * @tRHOH_in_ns:              tRHOH, in nanoseconds, from the data sheet. A
 *                            negative value indicates this characteristic isn't
 *                            known.
 */
struct nand_timing {
	int8_t  data_setup_in_ns;
	int8_t  data_hold_in_ns;
	int8_t  address_setup_in_ns;
	int8_t  gpmi_sample_delay_in_ns;
	int8_t  tREA_in_ns;
	int8_t  tRLOH_in_ns;
	int8_t  tRHOH_in_ns;
};

/* Per-device driver state; one instance per probed GPMI controller. */
struct gpmi_nand_data {
	/* System Interface */
	struct device		*dev;
	struct platform_device	*pdev;
	struct gpmi_nand_platform_data	*pdata;

	/* Resources */
	struct resources	resources;

	/* Flash Hardware */
	struct nand_timing	timing;

	/* BCH */
	struct bch_geometry	bch_geometry;
	struct completion	bch_done;

	/* NAND Boot issue */
	bool			swap_block_mark;
	struct boot_rom_geometry rom_geometry;

	/* MTD / NAND */
	struct nand_chip	nand;
	struct mtd_info		mtd;

	/* General-use Variables */
	int			current_chip;
	unsigned int		command_length;

	/* passed from upper layer */
	uint8_t			*upper_buf;
	int			upper_len;

	/* for DMA operations */
	bool			direct_dma_map_ok;

	struct scatterlist	cmd_sgl;
	char			*cmd_buffer;

	struct scatterlist	data_sgl;
	char			*data_buffer_dma;

	void			*page_buffer_virt;
	dma_addr_t		page_buffer_phys;
	unsigned int		page_buffer_size;

	void			*payload_virt;
	dma_addr_t		payload_phys;

	void			*auxiliary_virt;
	dma_addr_t		auxiliary_phys;

	/* DMA channels */
#define DMA_CHANS		8
	struct dma_chan		*dma_chans[DMA_CHANS];
	struct mxs_dma_data	dma_data;
	enum dma_ops_type	last_dma_type;
	enum dma_ops_type	dma_type;
	struct completion	dma_done;

	/* private */
	void			*private;
};

/**
 * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
 * @data_setup_in_cycles:      The data setup time, in cycles.
 * @data_hold_in_cycles:       The data hold time, in cycles.
 * @address_setup_in_cycles:   The address setup time, in cycles.
 * @use_half_periods:          Indicates the clock is running slowly, so the
 *                             NFC DLL should use half-periods.
 * @sample_delay_factor:       The sample delay factor.
 */
struct gpmi_nfc_hardware_timing {
	uint8_t  data_setup_in_cycles;
	uint8_t  data_hold_in_cycles;
	uint8_t  address_setup_in_cycles;
	bool     use_half_periods;
	uint8_t  sample_delay_factor;
};

/**
 * struct timing_threshod - Timing threshold
 *   (NOTE(review): the 'threshod' spelling is the established struct name;
 *   renaming it would break users elsewhere in the driver.)
 * @max_chip_count:            NOTE(review): undocumented in the original
 *                             kernel-doc; from the name, presumably the
 *                             maximum number of chips supported — confirm.
 * @max_data_setup_cycles:     The maximum number of data setup cycles that
 *                             can be expressed in the hardware.
 * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
 *                             for data read internal setup. In the Reference
 *                             Manual, see the chapter "High-Speed NAND
 *                             Timing" for more details.
 * @max_sample_delay_factor:   The maximum sample delay factor that can be
 *                             expressed in the hardware.
 * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
 *                             sample delay DLL hardware can possibly work
 *                             with (the DLL is unusable with longer periods).
 *                             If the full-cycle period is greater than HALF
 *                             this value, the DLL must be configured to use
 *                             half-periods.
 * @max_dll_delay_in_ns:       The maximum amount of delay, in ns, that the
 *                             DLL can implement.
 * @clock_frequency_in_hz:     The clock frequency, in Hz, during the current
 *                             I/O transaction. If no I/O transaction is in
 *                             progress, this is the clock frequency during
 *                             the most recent I/O transaction.
 */
struct timing_threshod {
	const unsigned int      max_chip_count;
	const unsigned int      max_data_setup_cycles;
	const unsigned int      internal_data_setup_in_ns;
	const unsigned int      max_sample_delay_factor;
	const unsigned int      max_dll_clock_period_in_ns;
	const unsigned int      max_dll_delay_in_ns;
	unsigned long           clock_frequency_in_hz;

};

/* Common Services */
extern int common_nfc_set_geometry(struct gpmi_nand_data *);
extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
extern void prepare_data_dma(struct gpmi_nand_data *,
				enum dma_data_direction dr);
extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
				struct dma_async_tx_descriptor *);
extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
				struct dma_async_tx_descriptor *);

/* GPMI-NAND helper function library */
extern int gpmi_init(struct gpmi_nand_data *);
extern void gpmi_clear_bch(struct gpmi_nand_data *);
extern void gpmi_dump_info(struct gpmi_nand_data *);
extern int bch_set_geometry(struct gpmi_nand_data *);
extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
extern int gpmi_send_command(struct gpmi_nand_data *);
extern void gpmi_begin(struct gpmi_nand_data *);
extern void gpmi_end(struct gpmi_nand_data *);
extern int gpmi_read_data(struct gpmi_nand_data *);
extern int gpmi_send_data(struct gpmi_nand_data *);
extern int gpmi_send_page(struct gpmi_nand_data *,
			dma_addr_t payload, dma_addr_t auxiliary);
extern int gpmi_read_page(struct gpmi_nand_data *,
			dma_addr_t payload, dma_addr_t auxiliary);

/* BCH : Status Block Completion Codes */
#define STATUS_GOOD		0x00
#define STATUS_ERASED		0xff
#define STATUS_UNCORRECTABLE	0xfe

/* Use the platform_id to distinguish different Archs. */
#define IS_MX23			0x1
#define IS_MX28			0x2
#define GPMI_IS_MX23(x)		((x)->pdev->id_entry->driver_data == IS_MX23)
#define GPMI_IS_MX28(x)		((x)->pdev->id_entry->driver_data == IS_MX28)
#endif
+172
drivers/mtd/nand/gpmi-nand/gpmi-regs.h
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef __GPMI_NAND_GPMI_REGS_H
#define __GPMI_NAND_GPMI_REGS_H

/*
 * GPMI block register offsets and bit fields. Naming follows the Freescale
 * reference-manual convention: HW_* = register offset, BP_* = bit position,
 * BM_* = bit mask, BF_*(v) = value shifted/masked into the field,
 * BV_* = named field value. MX23_*/MX28_* variants mark layouts that differ
 * between i.MX23 and i.MX28.
 */
#define HW_GPMI_CTRL0					0x00000000
#define HW_GPMI_CTRL0_SET				0x00000004
#define HW_GPMI_CTRL0_CLR				0x00000008
#define HW_GPMI_CTRL0_TOG				0x0000000c

#define BP_GPMI_CTRL0_COMMAND_MODE			24
#define BM_GPMI_CTRL0_COMMAND_MODE	(3 << BP_GPMI_CTRL0_COMMAND_MODE)
#define BF_GPMI_CTRL0_COMMAND_MODE(v)	\
	(((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE		0x0
#define BV_GPMI_CTRL0_COMMAND_MODE__READ		0x1
#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE	0x2
#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY	0x3

#define BM_GPMI_CTRL0_WORD_LENGTH			(1 << 23)
#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT		0x0
#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT		0x1

/*
 * Difference in LOCK_CS between imx23 and imx28 :
 * This bit may impact the _POWER_ consumption. So some chips
 * do not set it.
 */
#define MX23_BP_GPMI_CTRL0_LOCK_CS			22
#define MX28_BP_GPMI_CTRL0_LOCK_CS			27
#define LOCK_CS_ENABLE					0x1
/* Intentionally expands to 0: this driver never sets LOCK_CS. */
#define BF_GPMI_CTRL0_LOCK_CS(v, x)			0x0

/* Difference in CS between imx23 and imx28 */
#define BP_GPMI_CTRL0_CS				20
#define MX23_BM_GPMI_CTRL0_CS		(3 << BP_GPMI_CTRL0_CS)
#define MX28_BM_GPMI_CTRL0_CS		(7 << BP_GPMI_CTRL0_CS)
#define BF_GPMI_CTRL0_CS(v, x)		(((v) << BP_GPMI_CTRL0_CS) &	\
						(GPMI_IS_MX23((x))	\
						? MX23_BM_GPMI_CTRL0_CS	\
						: MX28_BM_GPMI_CTRL0_CS))

#define BP_GPMI_CTRL0_ADDRESS				17
#define BM_GPMI_CTRL0_ADDRESS		(3 << BP_GPMI_CTRL0_ADDRESS)
#define BF_GPMI_CTRL0_ADDRESS(v)	\
		(((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA		0x0
#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE			0x1
#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE			0x2

#define BM_GPMI_CTRL0_ADDRESS_INCREMENT			(1 << 16)
#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED	0x0
#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED	0x1

#define BP_GPMI_CTRL0_XFER_COUNT			0
#define BM_GPMI_CTRL0_XFER_COUNT	(0xffff << BP_GPMI_CTRL0_XFER_COUNT)
#define BF_GPMI_CTRL0_XFER_COUNT(v)	\
		(((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)

#define HW_GPMI_COMPARE					0x00000010

#define HW_GPMI_ECCCTRL					0x00000020
#define HW_GPMI_ECCCTRL_SET				0x00000024
#define HW_GPMI_ECCCTRL_CLR				0x00000028
#define HW_GPMI_ECCCTRL_TOG				0x0000002c

#define BP_GPMI_ECCCTRL_ECC_CMD				13
#define BM_GPMI_ECCCTRL_ECC_CMD		(3 << BP_GPMI_ECCCTRL_ECC_CMD)
#define BF_GPMI_ECCCTRL_ECC_CMD(v)	\
		(((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE		0x0
#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE		0x1

#define BM_GPMI_ECCCTRL_ENABLE_ECC			(1 << 12)
#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE		0x1
#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE		0x0

#define BP_GPMI_ECCCTRL_BUFFER_MASK			0
#define BM_GPMI_ECCCTRL_BUFFER_MASK	(0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
#define BF_GPMI_ECCCTRL_BUFFER_MASK(v)	\
	(((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY	0x100
#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE		0x1FF

#define HW_GPMI_ECCCOUNT				0x00000030
#define HW_GPMI_PAYLOAD					0x00000040
#define HW_GPMI_AUXILIARY				0x00000050
#define HW_GPMI_CTRL1					0x00000060
#define HW_GPMI_CTRL1_SET				0x00000064
#define HW_GPMI_CTRL1_CLR				0x00000068
#define HW_GPMI_CTRL1_TOG				0x0000006c

#define BM_GPMI_CTRL1_BCH_MODE				(1 << 18)

#define BP_GPMI_CTRL1_DLL_ENABLE			17
#define BM_GPMI_CTRL1_DLL_ENABLE	(1 << BP_GPMI_CTRL1_DLL_ENABLE)

#define BP_GPMI_CTRL1_HALF_PERIOD			16
#define BM_GPMI_CTRL1_HALF_PERIOD	(1 << BP_GPMI_CTRL1_HALF_PERIOD)

#define BP_GPMI_CTRL1_RDN_DELAY				12
#define BM_GPMI_CTRL1_RDN_DELAY		(0xf << BP_GPMI_CTRL1_RDN_DELAY)
#define BF_GPMI_CTRL1_RDN_DELAY(v)	\
		(((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)

#define BM_GPMI_CTRL1_DEV_RESET				(1 << 3)
#define BV_GPMI_CTRL1_DEV_RESET__ENABLED		0x0
#define BV_GPMI_CTRL1_DEV_RESET__DISABLED		0x1

#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY		(1 << 2)
#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW	0x0
#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH	0x1

#define BM_GPMI_CTRL1_CAMERA_MODE			(1 << 1)
#define BV_GPMI_CTRL1_GPMI_MODE__NAND			0x0
#define BV_GPMI_CTRL1_GPMI_MODE__ATA			0x1

#define BM_GPMI_CTRL1_GPMI_MODE				(1 << 0)

#define HW_GPMI_TIMING0					0x00000070

#define BP_GPMI_TIMING0_ADDRESS_SETUP			16
#define BM_GPMI_TIMING0_ADDRESS_SETUP	(0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
#define BF_GPMI_TIMING0_ADDRESS_SETUP(v)	\
	(((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)

#define BP_GPMI_TIMING0_DATA_HOLD			8
#define BM_GPMI_TIMING0_DATA_HOLD	(0xff << BP_GPMI_TIMING0_DATA_HOLD)
#define BF_GPMI_TIMING0_DATA_HOLD(v)	\
		(((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)

#define BP_GPMI_TIMING0_DATA_SETUP			0
#define BM_GPMI_TIMING0_DATA_SETUP	(0xff << BP_GPMI_TIMING0_DATA_SETUP)
#define BF_GPMI_TIMING0_DATA_SETUP(v)	\
	(((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)

#define HW_GPMI_TIMING1					0x00000080
#define BP_GPMI_TIMING1_BUSY_TIMEOUT			16

#define HW_GPMI_TIMING2					0x00000090
#define HW_GPMI_DATA					0x000000a0

/* MX28 uses this to detect READY. */
#define HW_GPMI_STAT					0x000000b0
#define MX28_BP_GPMI_STAT_READY_BUSY			24
#define MX28_BM_GPMI_STAT_READY_BUSY	(0xff << MX28_BP_GPMI_STAT_READY_BUSY)
#define MX28_BF_GPMI_STAT_READY_BUSY(v)		\
	(((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)

/* MX23 uses this to detect READY. */
#define HW_GPMI_DEBUG					0x000000c0
#define MX23_BP_GPMI_DEBUG_READY0			28
#define MX23_BM_GPMI_DEBUG_READY0	(1 << MX23_BP_GPMI_DEBUG_READY0)
#endif
+2 -17
drivers/mtd/nand/h1910.c
··· 81 81 static int __init h1910_init(void) 82 82 { 83 83 struct nand_chip *this; 84 - const char *part_type = 0; 85 - int mtd_parts_nb = 0; 86 - struct mtd_partition *mtd_parts = 0; 87 84 void __iomem *nandaddr; 88 85 89 86 if (!machine_is_h1900()) ··· 133 136 iounmap((void *)nandaddr); 134 137 return -ENXIO; 135 138 } 136 - #ifdef CONFIG_MTD_CMDLINE_PARTS 137 - mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand"); 138 - if (mtd_parts_nb > 0) 139 - part_type = "command line"; 140 - else 141 - mtd_parts_nb = 0; 142 - #endif 143 - if (mtd_parts_nb == 0) { 144 - mtd_parts = partition_info; 145 - mtd_parts_nb = NUM_PARTITIONS; 146 - part_type = "static"; 147 - } 148 139 149 140 /* Register the partitions */ 150 - printk(KERN_NOTICE "Using %s partition definition\n", part_type); 151 - mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb); 141 + mtd_device_parse_register(h1910_nand_mtd, NULL, 0, 142 + partition_info, NUM_PARTITIONS); 152 143 153 144 /* Return happy */ 154 145 return 0;
+3 -15
drivers/mtd/nand/jz4740_nand.c
··· 251 251 return 0; 252 252 } 253 253 254 - #ifdef CONFIG_MTD_CMDLINE_PARTS 255 - static const char *part_probes[] = {"cmdline", NULL}; 256 - #endif 257 - 258 254 static int jz_nand_ioremap_resource(struct platform_device *pdev, 259 255 const char *name, struct resource **res, void __iomem **base) 260 256 { ··· 295 299 struct nand_chip *chip; 296 300 struct mtd_info *mtd; 297 301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 298 - struct mtd_partition *partition_info; 299 - int num_partitions = 0; 300 302 301 303 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 302 304 if (!nand) { ··· 367 373 goto err_gpio_free; 368 374 } 369 375 370 - #ifdef CONFIG_MTD_CMDLINE_PARTS 371 - num_partitions = parse_mtd_partitions(mtd, part_probes, 372 - &partition_info, 0); 373 - #endif 374 - if (num_partitions <= 0 && pdata) { 375 - num_partitions = pdata->num_partitions; 376 - partition_info = pdata->partitions; 377 - } 378 - ret = mtd_device_register(mtd, partition_info, num_partitions); 376 + ret = mtd_device_parse_register(mtd, NULL, 0, 377 + pdata ? pdata->partitions : NULL, 378 + pdata ? pdata->num_partitions : 0); 379 379 380 380 if (ret) { 381 381 dev_err(&pdev->dev, "Failed to add mtd device\n");
+5 -17
drivers/mtd/nand/mpc5121_nfc.c
··· 131 131 132 132 static void mpc5121_nfc_done(struct mtd_info *mtd); 133 133 134 - static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; 135 - 136 134 /* Read NFC register */ 137 135 static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 138 136 { ··· 654 656 struct mpc5121_nfc_prv *prv; 655 657 struct resource res; 656 658 struct mtd_info *mtd; 657 - struct mtd_partition *parts; 658 659 struct nand_chip *chip; 659 660 unsigned long regs_paddr, regs_size; 660 661 const __be32 *chips_no; 661 662 int resettime = 0; 662 663 int retval = 0; 663 664 int rev, len; 665 + struct mtd_part_parser_data ppdata; 664 666 665 667 /* 666 668 * Check SoC revision. This driver supports only NFC ··· 725 727 } 726 728 727 729 mtd->name = "MPC5121 NAND"; 730 + ppdata.of_node = dn; 728 731 chip->dev_ready = mpc5121_nfc_dev_ready; 729 732 chip->cmdfunc = mpc5121_nfc_command; 730 733 chip->read_byte = mpc5121_nfc_read_byte; ··· 734 735 chip->write_buf = mpc5121_nfc_write_buf; 735 736 chip->verify_buf = mpc5121_nfc_verify_buf; 736 737 chip->select_chip = mpc5121_nfc_select_chip; 737 - chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; 738 + chip->options = NAND_NO_AUTOINCR; 739 + chip->bbt_options = NAND_BBT_USE_FLASH; 738 740 chip->ecc.mode = NAND_ECC_SOFT; 739 741 740 742 /* Support external chip-select logic on ADS5121 board */ ··· 837 837 dev_set_drvdata(dev, mtd); 838 838 839 839 /* Register device in MTD */ 840 - retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 841 - #ifdef CONFIG_MTD_OF_PARTS 842 - if (retval == 0) 843 - retval = of_mtd_parse_partitions(dev, dn, &parts); 844 - #endif 845 - if (retval < 0) { 846 - dev_err(dev, "Error parsing MTD partitions!\n"); 847 - devm_free_irq(dev, prv->irq, mtd); 848 - retval = -EINVAL; 849 - goto error; 850 - } 851 - 852 - retval = mtd_device_register(mtd, parts, retval); 840 + retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 853 841 if (retval) { 854 842 dev_err(dev, "Error adding 
MTD device!\n"); 855 843 devm_free_irq(dev, prv->irq, mtd);
+12 -25
drivers/mtd/nand/mxc_nand.c
··· 41 41 42 42 #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) 43 43 #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) 44 - #define nfc_is_v3_2() cpu_is_mx51() 44 + #define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53()) 45 45 #define nfc_is_v3() nfc_is_v3_2() 46 46 47 47 /* Addresses for NFC registers */ ··· 143 143 struct mxc_nand_host { 144 144 struct mtd_info mtd; 145 145 struct nand_chip nand; 146 - struct mtd_partition *parts; 147 146 struct device *dev; 148 147 149 148 void *spare0; ··· 349 350 udelay(1); 350 351 } 351 352 if (max_retries < 0) 352 - DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n", 353 - __func__); 353 + pr_debug("%s: INT not set\n", __func__); 354 354 } 355 355 } 356 356 ··· 369 371 * waits for completion. */ 370 372 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 371 373 { 372 - DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); 374 + pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); 373 375 374 376 writew(cmd, NFC_V1_V2_FLASH_CMD); 375 377 writew(NFC_CMD, NFC_V1_V2_CONFIG2); ··· 385 387 udelay(1); 386 388 } 387 389 if (max_retries < 0) 388 - DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", 389 - __func__); 390 + pr_debug("%s: RESET failed\n", __func__); 390 391 } else { 391 392 /* Wait for operation to complete */ 392 393 wait_op_done(host, useirq); ··· 408 411 * a NAND command. 
*/ 409 412 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 410 413 { 411 - DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); 414 + pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); 412 415 413 416 writew(addr, NFC_V1_V2_FLASH_ADDR); 414 417 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); ··· 558 561 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); 559 562 560 563 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 561 - DEBUG(MTD_DEBUG_LEVEL0, 562 - "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 564 + pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 563 565 return -1; 564 566 } 565 567 ··· 845 849 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); 846 850 } else if (nfc_is_v1()) { 847 851 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); 848 - writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); 852 + writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR); 849 853 } else 850 854 BUG(); 851 855 ··· 928 932 struct nand_chip *nand_chip = mtd->priv; 929 933 struct mxc_nand_host *host = nand_chip->priv; 930 934 931 - DEBUG(MTD_DEBUG_LEVEL3, 932 - "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 935 + pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 933 936 command, column, page_addr); 934 937 935 938 /* Reset command state information */ ··· 1039 1044 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 1040 1045 struct mxc_nand_host *host; 1041 1046 struct resource *res; 1042 - int err = 0, __maybe_unused nr_parts = 0; 1047 + int err = 0; 1043 1048 struct nand_ecclayout *oob_smallpage, *oob_largepage; 1044 1049 1045 1050 /* Allocate memory for MTD device structure and private data */ ··· 1174 1179 this->bbt_td = &bbt_main_descr; 1175 1180 this->bbt_md = &bbt_mirror_descr; 1176 1181 /* update flash based bbt */ 1177 - this->options |= NAND_USE_FLASH_BBT; 1182 + this->bbt_options |= NAND_BBT_USE_FLASH; 1178 1183 } 1179 1184 1180 1185 init_completion(&host->op_completion); ··· 1226 1231 } 
1227 1232 1228 1233 /* Register the partitions */ 1229 - nr_parts = 1230 - parse_mtd_partitions(mtd, part_probes, &host->parts, 0); 1231 - if (nr_parts > 0) 1232 - mtd_device_register(mtd, host->parts, nr_parts); 1233 - else if (pdata->parts) 1234 - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 1235 - else { 1236 - pr_info("Registering %s as whole device\n", mtd->name); 1237 - mtd_device_register(mtd, NULL, 0); 1238 - } 1234 + mtd_device_parse_register(mtd, part_probes, 0, 1235 + pdata->parts, pdata->nr_parts); 1239 1236 1240 1237 platform_set_drvdata(pdev, host); 1241 1238
+558 -551
drivers/mtd/nand/nand_base.c
··· 21 21 * TODO: 22 22 * Enable cached programming for 2k page size chips 23 23 * Check, if mtd->ecctype should be set to MTD_ECC_HW 24 - * if we have HW ecc support. 24 + * if we have HW ECC support. 25 25 * The AG-AND chips have nice features for speed improvement, 26 26 * which are not supported yet. Read / program 4 pages in one go. 27 27 * BBT table is not serialized, has to be fixed ··· 113 113 114 114 /* Start address must align on block boundary */ 115 115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) { 116 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 116 + pr_debug("%s: unaligned address\n", __func__); 117 117 ret = -EINVAL; 118 118 } 119 119 120 120 /* Length must align on block boundary */ 121 121 if (len & ((1 << chip->phys_erase_shift) - 1)) { 122 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 123 - __func__); 122 + pr_debug("%s: length not block aligned\n", __func__); 124 123 ret = -EINVAL; 125 124 } 126 125 127 126 /* Do not allow past end of device */ 128 127 if (ofs + len > mtd->size) { 129 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", 130 - __func__); 128 + pr_debug("%s: past end of device\n", __func__); 131 129 ret = -EINVAL; 132 130 } 133 131 ··· 134 136 135 137 /** 136 138 * nand_release_device - [GENERIC] release chip 137 - * @mtd: MTD device structure 139 + * @mtd: MTD device structure 138 140 * 139 - * Deselect, release chip lock and wake up anyone waiting on the device 141 + * Deselect, release chip lock and wake up anyone waiting on the device. 
140 142 */ 141 143 static void nand_release_device(struct mtd_info *mtd) 142 144 { ··· 155 157 156 158 /** 157 159 * nand_read_byte - [DEFAULT] read one byte from the chip 158 - * @mtd: MTD device structure 160 + * @mtd: MTD device structure 159 161 * 160 - * Default read function for 8bit buswith 162 + * Default read function for 8bit buswidth 161 163 */ 162 164 static uint8_t nand_read_byte(struct mtd_info *mtd) 163 165 { ··· 167 169 168 170 /** 169 171 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip 170 - * @mtd: MTD device structure 172 + * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 173 + * @mtd: MTD device structure 171 174 * 172 - * Default read function for 16bit buswith with 173 - * endianess conversion 175 + * Default read function for 16bit buswidth with endianness conversion. 176 + * 174 177 */ 175 178 static uint8_t nand_read_byte16(struct mtd_info *mtd) 176 179 { ··· 181 182 182 183 /** 183 184 * nand_read_word - [DEFAULT] read one word from the chip 184 - * @mtd: MTD device structure 185 + * @mtd: MTD device structure 185 186 * 186 - * Default read function for 16bit buswith without 187 - * endianess conversion 187 + * Default read function for 16bit buswidth without endianness conversion. 188 188 */ 189 189 static u16 nand_read_word(struct mtd_info *mtd) 190 190 { ··· 193 195 194 196 /** 195 197 * nand_select_chip - [DEFAULT] control CE line 196 - * @mtd: MTD device structure 197 - * @chipnr: chipnumber to select, -1 for deselect 198 + * @mtd: MTD device structure 199 + * @chipnr: chipnumber to select, -1 for deselect 198 200 * 199 201 * Default select function for 1 chip devices. 
200 202 */ ··· 216 218 217 219 /** 218 220 * nand_write_buf - [DEFAULT] write buffer to chip 219 - * @mtd: MTD device structure 220 - * @buf: data buffer 221 - * @len: number of bytes to write 221 + * @mtd: MTD device structure 222 + * @buf: data buffer 223 + * @len: number of bytes to write 222 224 * 223 - * Default write function for 8bit buswith 225 + * Default write function for 8bit buswidth. 224 226 */ 225 227 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 226 228 { ··· 233 235 234 236 /** 235 237 * nand_read_buf - [DEFAULT] read chip data into buffer 236 - * @mtd: MTD device structure 237 - * @buf: buffer to store date 238 - * @len: number of bytes to read 238 + * @mtd: MTD device structure 239 + * @buf: buffer to store date 240 + * @len: number of bytes to read 239 241 * 240 - * Default read function for 8bit buswith 242 + * Default read function for 8bit buswidth. 241 243 */ 242 244 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 243 245 { ··· 250 252 251 253 /** 252 254 * nand_verify_buf - [DEFAULT] Verify chip data against buffer 253 - * @mtd: MTD device structure 254 - * @buf: buffer containing the data to compare 255 - * @len: number of bytes to compare 255 + * @mtd: MTD device structure 256 + * @buf: buffer containing the data to compare 257 + * @len: number of bytes to compare 256 258 * 257 - * Default verify function for 8bit buswith 259 + * Default verify function for 8bit buswidth. 258 260 */ 259 261 static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 260 262 { ··· 269 271 270 272 /** 271 273 * nand_write_buf16 - [DEFAULT] write buffer to chip 272 - * @mtd: MTD device structure 273 - * @buf: data buffer 274 - * @len: number of bytes to write 274 + * @mtd: MTD device structure 275 + * @buf: data buffer 276 + * @len: number of bytes to write 275 277 * 276 - * Default write function for 16bit buswith 278 + * Default write function for 16bit buswidth. 
277 279 */ 278 280 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 279 281 { ··· 289 291 290 292 /** 291 293 * nand_read_buf16 - [DEFAULT] read chip data into buffer 292 - * @mtd: MTD device structure 293 - * @buf: buffer to store date 294 - * @len: number of bytes to read 294 + * @mtd: MTD device structure 295 + * @buf: buffer to store date 296 + * @len: number of bytes to read 295 297 * 296 - * Default read function for 16bit buswith 298 + * Default read function for 16bit buswidth. 297 299 */ 298 300 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 299 301 { ··· 308 310 309 311 /** 310 312 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer 311 - * @mtd: MTD device structure 312 - * @buf: buffer containing the data to compare 313 - * @len: number of bytes to compare 313 + * @mtd: MTD device structure 314 + * @buf: buffer containing the data to compare 315 + * @len: number of bytes to compare 314 316 * 315 - * Default verify function for 16bit buswith 317 + * Default verify function for 16bit buswidth. 316 318 */ 317 319 static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 318 320 { ··· 330 332 331 333 /** 332 334 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 333 - * @mtd: MTD device structure 334 - * @ofs: offset from device start 335 - * @getchip: 0, if the chip is already selected 335 + * @mtd: MTD device structure 336 + * @ofs: offset from device start 337 + * @getchip: 0, if the chip is already selected 336 338 * 337 339 * Check, if the block is bad. 
338 340 */ ··· 342 344 struct nand_chip *chip = mtd->priv; 343 345 u16 bad; 344 346 345 - if (chip->options & NAND_BBT_SCANLASTPAGE) 347 + if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 346 348 ofs += mtd->erasesize - mtd->writesize; 347 349 348 350 page = (int)(ofs >> chip->page_shift) & chip->pagemask; ··· 382 384 383 385 /** 384 386 * nand_default_block_markbad - [DEFAULT] mark a block bad 385 - * @mtd: MTD device structure 386 - * @ofs: offset from device start 387 + * @mtd: MTD device structure 388 + * @ofs: offset from device start 387 389 * 388 - * This is the default implementation, which can be overridden by 389 - * a hardware specific driver. 390 + * This is the default implementation, which can be overridden by a hardware 391 + * specific driver. 390 392 */ 391 393 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 392 394 { ··· 394 396 uint8_t buf[2] = { 0, 0 }; 395 397 int block, ret, i = 0; 396 398 397 - if (chip->options & NAND_BBT_SCANLASTPAGE) 399 + if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 398 400 ofs += mtd->erasesize - mtd->writesize; 399 401 400 402 /* Get block number */ ··· 402 404 if (chip->bbt) 403 405 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 404 406 405 - /* Do we have a flash based bad block table ? */ 406 - if (chip->options & NAND_USE_FLASH_BBT) 407 + /* Do we have a flash based bad block table? */ 408 + if (chip->bbt_options & NAND_BBT_USE_FLASH) 407 409 ret = nand_update_bbt(mtd, ofs); 408 410 else { 411 + struct mtd_oob_ops ops; 412 + 409 413 nand_get_device(chip, mtd, FL_WRITING); 410 414 411 - /* Write to first two pages and to byte 1 and 6 if necessary. 412 - * If we write to more than one location, the first error 413 - * encountered quits the procedure. We write two bytes per 414 - * location, so we dont have to mess with 16 bit access. 415 + /* 416 + * Write to first two pages if necessary. 
If we write to more 417 + * than one location, the first error encountered quits the 418 + * procedure. We write two bytes per location, so we dont have 419 + * to mess with 16 bit access. 415 420 */ 421 + ops.len = ops.ooblen = 2; 422 + ops.datbuf = NULL; 423 + ops.oobbuf = buf; 424 + ops.ooboffs = chip->badblockpos & ~0x01; 425 + ops.mode = MTD_OPS_PLACE_OOB; 416 426 do { 417 - chip->ops.len = chip->ops.ooblen = 2; 418 - chip->ops.datbuf = NULL; 419 - chip->ops.oobbuf = buf; 420 - chip->ops.ooboffs = chip->badblockpos & ~0x01; 427 + ret = nand_do_write_oob(mtd, ofs, &ops); 421 428 422 - ret = nand_do_write_oob(mtd, ofs, &chip->ops); 423 - 424 - if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) { 425 - chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS 426 - & ~0x01; 427 - ret = nand_do_write_oob(mtd, ofs, &chip->ops); 428 - } 429 429 i++; 430 430 ofs += mtd->writesize; 431 - } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) && 431 + } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && 432 432 i < 2); 433 433 434 434 nand_release_device(mtd); ··· 439 443 440 444 /** 441 445 * nand_check_wp - [GENERIC] check if the chip is write protected 442 - * @mtd: MTD device structure 443 - * Check, if the device is write protected 446 + * @mtd: MTD device structure 444 447 * 445 - * The function expects, that the device is already selected 448 + * Check, if the device is write protected. The function expects, that the 449 + * device is already selected. 
446 450 */ 447 451 static int nand_check_wp(struct mtd_info *mtd) 448 452 { 449 453 struct nand_chip *chip = mtd->priv; 450 454 451 - /* broken xD cards report WP despite being writable */ 455 + /* Broken xD cards report WP despite being writable */ 452 456 if (chip->options & NAND_BROKEN_XD) 453 457 return 0; 454 458 ··· 459 463 460 464 /** 461 465 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 462 - * @mtd: MTD device structure 463 - * @ofs: offset from device start 464 - * @getchip: 0, if the chip is already selected 465 - * @allowbbt: 1, if its allowed to access the bbt area 466 + * @mtd: MTD device structure 467 + * @ofs: offset from device start 468 + * @getchip: 0, if the chip is already selected 469 + * @allowbbt: 1, if its allowed to access the bbt area 466 470 * 467 471 * Check, if the block is bad. Either by reading the bad block table or 468 472 * calling of the scan function. ··· 481 485 482 486 /** 483 487 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. 484 - * @mtd: MTD device structure 485 - * @timeo: Timeout 488 + * @mtd: MTD device structure 489 + * @timeo: Timeout 486 490 * 487 491 * Helper function for nand_wait_ready used when needing to wait in interrupt 488 492 * context. ··· 501 505 } 502 506 } 503 507 504 - /* 505 - * Wait for the ready pin, after a command 506 - * The timeout is catched later. 507 - */ 508 + /* Wait for the ready pin, after a command. The timeout is caught later. 
*/ 508 509 void nand_wait_ready(struct mtd_info *mtd) 509 510 { 510 511 struct nand_chip *chip = mtd->priv; ··· 512 519 return panic_nand_wait_ready(mtd, 400); 513 520 514 521 led_trigger_event(nand_led_trigger, LED_FULL); 515 - /* wait until command is processed or timeout occures */ 522 + /* Wait until command is processed or timeout occurs */ 516 523 do { 517 524 if (chip->dev_ready(mtd)) 518 525 break; ··· 524 531 525 532 /** 526 533 * nand_command - [DEFAULT] Send command to NAND device 527 - * @mtd: MTD device structure 528 - * @command: the command to be sent 529 - * @column: the column address for this command, -1 if none 530 - * @page_addr: the page address for this command, -1 if none 534 + * @mtd: MTD device structure 535 + * @command: the command to be sent 536 + * @column: the column address for this command, -1 if none 537 + * @page_addr: the page address for this command, -1 if none 531 538 * 532 - * Send command to NAND device. This function is used for small page 533 - * devices (256/512 Bytes per page) 539 + * Send command to NAND device. This function is used for small page devices 540 + * (256/512 Bytes per page). 534 541 */ 535 542 static void nand_command(struct mtd_info *mtd, unsigned int command, 536 543 int column, int page_addr) ··· 538 545 register struct nand_chip *chip = mtd->priv; 539 546 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; 540 547 541 - /* 542 - * Write out the command to the device. 
543 - */ 548 + /* Write out the command to the device */ 544 549 if (command == NAND_CMD_SEQIN) { 545 550 int readcmd; 546 551 ··· 558 567 } 559 568 chip->cmd_ctrl(mtd, command, ctrl); 560 569 561 - /* 562 - * Address cycle, when necessary 563 - */ 570 + /* Address cycle, when necessary */ 564 571 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 565 572 /* Serially input address */ 566 573 if (column != -1) { ··· 579 590 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 580 591 581 592 /* 582 - * program and erase have their own busy handlers 583 - * status and sequential in needs no delay 593 + * Program and erase have their own busy handlers status and sequential 594 + * in needs no delay 584 595 */ 585 596 switch (command) { 586 597 ··· 614 625 return; 615 626 } 616 627 } 617 - /* Apply this short delay always to ensure that we do wait tWB in 618 - * any case on any machine. */ 628 + /* 629 + * Apply this short delay always to ensure that we do wait tWB in 630 + * any case on any machine. 631 + */ 619 632 ndelay(100); 620 633 621 634 nand_wait_ready(mtd); ··· 625 634 626 635 /** 627 636 * nand_command_lp - [DEFAULT] Send command to NAND large page device 628 - * @mtd: MTD device structure 629 - * @command: the command to be sent 630 - * @column: the column address for this command, -1 if none 631 - * @page_addr: the page address for this command, -1 if none 637 + * @mtd: MTD device structure 638 + * @command: the command to be sent 639 + * @column: the column address for this command, -1 if none 640 + * @page_addr: the page address for this command, -1 if none 632 641 * 633 642 * Send command to NAND device. This is the version for the new large page 634 - * devices We dont have the separate regions as we have in the small page 635 - * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 643 + * devices. We don't have the separate regions as we have in the small page 644 + * devices. 
We must emulate NAND_CMD_READOOB to keep the code compatible. 636 645 */ 637 646 static void nand_command_lp(struct mtd_info *mtd, unsigned int command, 638 647 int column, int page_addr) ··· 674 683 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 675 684 676 685 /* 677 - * program and erase have their own busy handlers 678 - * status, sequential in, and deplete1 need no delay 686 + * Program and erase have their own busy handlers status, sequential 687 + * in, and deplete1 need no delay. 679 688 */ 680 689 switch (command) { 681 690 ··· 689 698 case NAND_CMD_DEPLETE1: 690 699 return; 691 700 692 - /* 693 - * read error status commands require only a short delay 694 - */ 695 701 case NAND_CMD_STATUS_ERROR: 696 702 case NAND_CMD_STATUS_ERROR0: 697 703 case NAND_CMD_STATUS_ERROR1: 698 704 case NAND_CMD_STATUS_ERROR2: 699 705 case NAND_CMD_STATUS_ERROR3: 706 + /* Read error status commands require only a short delay */ 700 707 udelay(chip->chip_delay); 701 708 return; 702 709 ··· 728 739 default: 729 740 /* 730 741 * If we don't have access to the busy pin, we apply the given 731 - * command delay 742 + * command delay. 732 743 */ 733 744 if (!chip->dev_ready) { 734 745 udelay(chip->chip_delay); ··· 736 747 } 737 748 } 738 749 739 - /* Apply this short delay always to ensure that we do wait tWB in 740 - * any case on any machine. */ 750 + /* 751 + * Apply this short delay always to ensure that we do wait tWB in 752 + * any case on any machine. 753 + */ 741 754 ndelay(100); 742 755 743 756 nand_wait_ready(mtd); ··· 747 756 748 757 /** 749 758 * panic_nand_get_device - [GENERIC] Get chip for selected access 750 - * @chip: the nand chip descriptor 751 - * @mtd: MTD device structure 752 - * @new_state: the state which is requested 759 + * @chip: the nand chip descriptor 760 + * @mtd: MTD device structure 761 + * @new_state: the state which is requested 753 762 * 754 763 * Used when in panic, no locks are taken. 
755 764 */ 756 765 static void panic_nand_get_device(struct nand_chip *chip, 757 766 struct mtd_info *mtd, int new_state) 758 767 { 759 - /* Hardware controller shared among independend devices */ 768 + /* Hardware controller shared among independent devices */ 760 769 chip->controller->active = chip; 761 770 chip->state = new_state; 762 771 } 763 772 764 773 /** 765 774 * nand_get_device - [GENERIC] Get chip for selected access 766 - * @chip: the nand chip descriptor 767 - * @mtd: MTD device structure 768 - * @new_state: the state which is requested 775 + * @chip: the nand chip descriptor 776 + * @mtd: MTD device structure 777 + * @new_state: the state which is requested 769 778 * 770 779 * Get the device and lock it for exclusive access 771 780 */ ··· 803 812 } 804 813 805 814 /** 806 - * panic_nand_wait - [GENERIC] wait until the command is done 807 - * @mtd: MTD device structure 808 - * @chip: NAND chip structure 809 - * @timeo: Timeout 815 + * panic_nand_wait - [GENERIC] wait until the command is done 816 + * @mtd: MTD device structure 817 + * @chip: NAND chip structure 818 + * @timeo: timeout 810 819 * 811 820 * Wait for command done. This is a helper function for nand_wait used when 812 821 * we are in interrupt context. May happen when in panic and trying to write ··· 829 838 } 830 839 831 840 /** 832 - * nand_wait - [DEFAULT] wait until the command is done 833 - * @mtd: MTD device structure 834 - * @chip: NAND chip structure 841 + * nand_wait - [DEFAULT] wait until the command is done 842 + * @mtd: MTD device structure 843 + * @chip: NAND chip structure 835 844 * 836 - * Wait for command done. This applies to erase and program only 837 - * Erase can take up to 400ms and program up to 20ms according to 838 - * general NAND and SmartMedia specs 845 + * Wait for command done. This applies to erase and program only. Erase can 846 + * take up to 400ms and program up to 20ms according to general NAND and 847 + * SmartMedia specs. 
839 848 */ 840 849 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 841 850 { ··· 850 859 851 860 led_trigger_event(nand_led_trigger, LED_FULL); 852 861 853 - /* Apply this short delay always to ensure that we do wait tWB in 854 - * any case on any machine. */ 862 + /* 863 + * Apply this short delay always to ensure that we do wait tWB in any 864 + * case on any machine. 865 + */ 855 866 ndelay(100); 856 867 857 868 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) ··· 883 890 884 891 /** 885 892 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks 886 - * 887 893 * @mtd: mtd info 888 894 * @ofs: offset to start unlock from 889 895 * @len: length to unlock 890 - * @invert: when = 0, unlock the range of blocks within the lower and 891 - * upper boundary address 892 - * when = 1, unlock the range of blocks outside the boundaries 893 - * of the lower and upper boundary address 896 + * @invert: when = 0, unlock the range of blocks within the lower and 897 + * upper boundary address 898 + * when = 1, unlock the range of blocks outside the boundaries 899 + * of the lower and upper boundary address 894 900 * 895 - * return - unlock status 901 + * Returs unlock status. 896 902 */ 897 903 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, 898 904 uint64_t len, int invert) ··· 911 919 912 920 /* Call wait ready function */ 913 921 status = chip->waitfunc(mtd, chip); 914 - udelay(1000); 915 922 /* See if device thinks it succeeded */ 916 923 if (status & 0x01) { 917 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 924 + pr_debug("%s: error status = 0x%08x\n", 918 925 __func__, status); 919 926 ret = -EIO; 920 927 } ··· 923 932 924 933 /** 925 934 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks 926 - * 927 935 * @mtd: mtd info 928 936 * @ofs: offset to start unlock from 929 937 * @len: length to unlock 930 938 * 931 - * return - unlock status 939 + * Returns unlock status. 
932 940 */ 933 941 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 934 942 { ··· 935 945 int chipnr; 936 946 struct nand_chip *chip = mtd->priv; 937 947 938 - DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 948 + pr_debug("%s: start = 0x%012llx, len = %llu\n", 939 949 __func__, (unsigned long long)ofs, len); 940 950 941 951 if (check_offs_len(mtd, ofs, len)) ··· 954 964 955 965 /* Check, if it is write protected */ 956 966 if (nand_check_wp(mtd)) { 957 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 967 + pr_debug("%s: device is write protected!\n", 958 968 __func__); 959 969 ret = -EIO; 960 970 goto out; ··· 971 981 972 982 /** 973 983 * nand_lock - [REPLACEABLE] locks all blocks present in the device 974 - * 975 984 * @mtd: mtd info 976 985 * @ofs: offset to start unlock from 977 986 * @len: length to unlock 978 987 * 979 - * return - lock status 988 + * This feature is not supported in many NAND parts. 'Micron' NAND parts do 989 + * have this feature, but it allows only to lock all blocks, not for specified 990 + * range for block. Implementing 'lock' feature by making use of 'unlock', for 991 + * now. 980 992 * 981 - * This feature is not supported in many NAND parts. 'Micron' NAND parts 982 - * do have this feature, but it allows only to lock all blocks, not for 983 - * specified range for block. 984 - * 985 - * Implementing 'lock' feature by making use of 'unlock', for now. 993 + * Returns lock status. 
986 994 */ 987 995 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 988 996 { ··· 988 1000 int chipnr, status, page; 989 1001 struct nand_chip *chip = mtd->priv; 990 1002 991 - DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 1003 + pr_debug("%s: start = 0x%012llx, len = %llu\n", 992 1004 __func__, (unsigned long long)ofs, len); 993 1005 994 1006 if (check_offs_len(mtd, ofs, len)) ··· 1003 1015 1004 1016 /* Check, if it is write protected */ 1005 1017 if (nand_check_wp(mtd)) { 1006 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 1018 + pr_debug("%s: device is write protected!\n", 1007 1019 __func__); 1008 1020 status = MTD_ERASE_FAILED; 1009 1021 ret = -EIO; ··· 1016 1028 1017 1029 /* Call wait ready function */ 1018 1030 status = chip->waitfunc(mtd, chip); 1019 - udelay(1000); 1020 1031 /* See if device thinks it succeeded */ 1021 1032 if (status & 0x01) { 1022 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 1033 + pr_debug("%s: error status = 0x%08x\n", 1023 1034 __func__, status); 1024 1035 ret = -EIO; 1025 1036 goto out; ··· 1034 1047 EXPORT_SYMBOL(nand_lock); 1035 1048 1036 1049 /** 1037 - * nand_read_page_raw - [Intern] read raw page data without ecc 1038 - * @mtd: mtd info structure 1039 - * @chip: nand chip info structure 1040 - * @buf: buffer to store read data 1041 - * @page: page number to read 1050 + * nand_read_page_raw - [INTERN] read raw page data without ecc 1051 + * @mtd: mtd info structure 1052 + * @chip: nand chip info structure 1053 + * @buf: buffer to store read data 1054 + * @page: page number to read 1042 1055 * 1043 - * Not for syndrome calculating ecc controllers, which use a special oob layout 1056 + * Not for syndrome calculating ECC controllers, which use a special oob layout. 
1044 1057 */ 1045 1058 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1046 1059 uint8_t *buf, int page) ··· 1051 1064 } 1052 1065 1053 1066 /** 1054 - * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc 1055 - * @mtd: mtd info structure 1056 - * @chip: nand chip info structure 1057 - * @buf: buffer to store read data 1058 - * @page: page number to read 1067 + * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc 1068 + * @mtd: mtd info structure 1069 + * @chip: nand chip info structure 1070 + * @buf: buffer to store read data 1071 + * @page: page number to read 1059 1072 * 1060 1073 * We need a special oob layout and handling even when OOB isn't used. 1061 1074 */ ··· 1094 1107 } 1095 1108 1096 1109 /** 1097 - * nand_read_page_swecc - [REPLACABLE] software ecc based page read function 1098 - * @mtd: mtd info structure 1099 - * @chip: nand chip info structure 1100 - * @buf: buffer to store read data 1101 - * @page: page number to read 1110 + * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 1111 + * @mtd: mtd info structure 1112 + * @chip: nand chip info structure 1113 + * @buf: buffer to store read data 1114 + * @page: page number to read 1102 1115 */ 1103 1116 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1104 1117 uint8_t *buf, int page) ··· 1135 1148 } 1136 1149 1137 1150 /** 1138 - * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function 1139 - * @mtd: mtd info structure 1140 - * @chip: nand chip info structure 1141 - * @data_offs: offset of requested data within the page 1142 - * @readlen: data length 1143 - * @bufpoi: buffer to store read data 1151 + * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function 1152 + * @mtd: mtd info structure 1153 + * @chip: nand chip info structure 1154 + * @data_offs: offset of requested data within the page 1155 + * @readlen: data length 1156 + * @bufpoi: buffer 
to store read data 1144 1157 */ 1145 1158 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 1146 1159 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) ··· 1153 1166 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1154 1167 int index = 0; 1155 1168 1156 - /* Column address wihin the page aligned to ECC size (256bytes). */ 1169 + /* Column address within the page aligned to ECC size (256bytes) */ 1157 1170 start_step = data_offs / chip->ecc.size; 1158 1171 end_step = (data_offs + readlen - 1) / chip->ecc.size; 1159 1172 num_steps = end_step - start_step + 1; 1160 1173 1161 - /* Data size aligned to ECC ecc.size*/ 1174 + /* Data size aligned to ECC ecc.size */ 1162 1175 datafrag_len = num_steps * chip->ecc.size; 1163 1176 eccfrag_len = num_steps * chip->ecc.bytes; 1164 1177 ··· 1170 1183 p = bufpoi + data_col_addr; 1171 1184 chip->read_buf(mtd, p, datafrag_len); 1172 1185 1173 - /* Calculate ECC */ 1186 + /* Calculate ECC */ 1174 1187 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 1175 1188 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); 1176 1189 1177 - /* The performance is faster if to position offsets 1178 - according to ecc.pos. Let make sure here that 1179 - there are no gaps in ecc positions */ 1190 + /* 1191 + * The performance is faster if we position offsets according to 1192 + * ecc.pos. Let's make sure that there are no gaps in ECC positions. 
1193 + */ 1180 1194 for (i = 0; i < eccfrag_len - 1; i++) { 1181 1195 if (eccpos[i + start_step * chip->ecc.bytes] + 1 != 1182 1196 eccpos[i + start_step * chip->ecc.bytes + 1]) { ··· 1189 1201 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 1190 1202 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1191 1203 } else { 1192 - /* send the command to read the particular ecc bytes */ 1193 - /* take care about buswidth alignment in read_buf */ 1204 + /* 1205 + * Send the command to read the particular ECC bytes take care 1206 + * about buswidth alignment in read_buf. 1207 + */ 1194 1208 index = start_step * chip->ecc.bytes; 1195 1209 1196 1210 aligned_pos = eccpos[index] & ~(busw - 1); ··· 1225 1235 } 1226 1236 1227 1237 /** 1228 - * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function 1229 - * @mtd: mtd info structure 1230 - * @chip: nand chip info structure 1231 - * @buf: buffer to store read data 1232 - * @page: page number to read 1238 + * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 1239 + * @mtd: mtd info structure 1240 + * @chip: nand chip info structure 1241 + * @buf: buffer to store read data 1242 + * @page: page number to read 1233 1243 * 1234 - * Not for syndrome calculating ecc controllers which need a special oob layout 1244 + * Not for syndrome calculating ECC controllers which need a special oob layout. 
1235 1245 */ 1236 1246 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1237 1247 uint8_t *buf, int page) ··· 1270 1280 } 1271 1281 1272 1282 /** 1273 - * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first 1274 - * @mtd: mtd info structure 1275 - * @chip: nand chip info structure 1276 - * @buf: buffer to store read data 1277 - * @page: page number to read 1283 + * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first 1284 + * @mtd: mtd info structure 1285 + * @chip: nand chip info structure 1286 + * @buf: buffer to store read data 1287 + * @page: page number to read 1278 1288 * 1279 - * Hardware ECC for large page chips, require OOB to be read first. 1280 - * For this ECC mode, the write_page method is re-used from ECC_HW. 1281 - * These methods read/write ECC from the OOB area, unlike the 1282 - * ECC_HW_SYNDROME support with multiple ECC steps, follows the 1283 - * "infix ECC" scheme and reads/writes ECC from the data area, by 1284 - * overwriting the NAND manufacturer bad block markings. 1289 + * Hardware ECC for large page chips, require OOB to be read first. For this 1290 + * ECC mode, the write_page method is re-used from ECC_HW. These methods 1291 + * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with 1292 + * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from 1293 + * the data area, by overwriting the NAND manufacturer bad block markings. 
1285 1294 */ 1286 1295 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1287 1296 struct nand_chip *chip, uint8_t *buf, int page) ··· 1318 1329 } 1319 1330 1320 1331 /** 1321 - * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read 1322 - * @mtd: mtd info structure 1323 - * @chip: nand chip info structure 1324 - * @buf: buffer to store read data 1325 - * @page: page number to read 1332 + * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read 1333 + * @mtd: mtd info structure 1334 + * @chip: nand chip info structure 1335 + * @buf: buffer to store read data 1336 + * @page: page number to read 1326 1337 * 1327 - * The hw generator calculates the error syndrome automatically. Therefor 1328 - * we need a special oob layout and handling. 1338 + * The hw generator calculates the error syndrome automatically. Therefore we 1339 + * need a special oob layout and handling. 1329 1340 */ 1330 1341 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1331 1342 uint8_t *buf, int page) ··· 1373 1384 } 1374 1385 1375 1386 /** 1376 - * nand_transfer_oob - [Internal] Transfer oob to client buffer 1377 - * @chip: nand chip structure 1378 - * @oob: oob destination address 1379 - * @ops: oob ops structure 1380 - * @len: size of oob to transfer 1387 + * nand_transfer_oob - [INTERN] Transfer oob to client buffer 1388 + * @chip: nand chip structure 1389 + * @oob: oob destination address 1390 + * @ops: oob ops structure 1391 + * @len: size of oob to transfer 1381 1392 */ 1382 1393 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1383 1394 struct mtd_oob_ops *ops, size_t len) 1384 1395 { 1385 1396 switch (ops->mode) { 1386 1397 1387 - case MTD_OOB_PLACE: 1388 - case MTD_OOB_RAW: 1398 + case MTD_OPS_PLACE_OOB: 1399 + case MTD_OPS_RAW: 1389 1400 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 1390 1401 return oob + len; 1391 1402 1392 - case MTD_OOB_AUTO: { 1403 + case 
MTD_OPS_AUTO_OOB: { 1393 1404 struct nand_oobfree *free = chip->ecc.layout->oobfree; 1394 1405 uint32_t boffs = 0, roffs = ops->ooboffs; 1395 1406 size_t bytes = 0; 1396 1407 1397 1408 for (; free->length && len; free++, len -= bytes) { 1398 - /* Read request not from offset 0 ? */ 1409 + /* Read request not from offset 0? */ 1399 1410 if (unlikely(roffs)) { 1400 1411 if (roffs >= free->length) { 1401 1412 roffs -= free->length; ··· 1421 1432 } 1422 1433 1423 1434 /** 1424 - * nand_do_read_ops - [Internal] Read data with ECC 1425 - * 1426 - * @mtd: MTD device structure 1427 - * @from: offset to read from 1428 - * @ops: oob ops structure 1435 + * nand_do_read_ops - [INTERN] Read data with ECC 1436 + * @mtd: MTD device structure 1437 + * @from: offset to read from 1438 + * @ops: oob ops structure 1429 1439 * 1430 1440 * Internal function. Called with chip held. 1431 1441 */ ··· 1439 1451 int ret = 0; 1440 1452 uint32_t readlen = ops->len; 1441 1453 uint32_t oobreadlen = ops->ooblen; 1442 - uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? 1454 + uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? 1443 1455 mtd->oobavail : mtd->oobsize; 1444 1456 1445 1457 uint8_t *bufpoi, *oob, *buf; ··· 1461 1473 bytes = min(mtd->writesize - col, readlen); 1462 1474 aligned = (bytes == mtd->writesize); 1463 1475 1464 - /* Is the current page in the buffer ? */ 1476 + /* Is the current page in the buffer? */ 1465 1477 if (realpage != chip->pagebuf || oob) { 1466 1478 bufpoi = aligned ? 
buf : chip->buffers->databuf; 1467 1479 ··· 1471 1483 } 1472 1484 1473 1485 /* Now read the page into the buffer */ 1474 - if (unlikely(ops->mode == MTD_OOB_RAW)) 1486 + if (unlikely(ops->mode == MTD_OPS_RAW)) 1475 1487 ret = chip->ecc.read_page_raw(mtd, chip, 1476 1488 bufpoi, page); 1477 1489 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) ··· 1480 1492 else 1481 1493 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1482 1494 page); 1483 - if (ret < 0) 1495 + if (ret < 0) { 1496 + if (!aligned) 1497 + /* Invalidate page cache */ 1498 + chip->pagebuf = -1; 1484 1499 break; 1500 + } 1485 1501 1486 1502 /* Transfer not aligned data */ 1487 1503 if (!aligned) { 1488 1504 if (!NAND_SUBPAGE_READ(chip) && !oob && 1489 - !(mtd->ecc_stats.failed - stats.failed)) 1505 + !(mtd->ecc_stats.failed - stats.failed) && 1506 + (ops->mode != MTD_OPS_RAW)) 1490 1507 chip->pagebuf = realpage; 1508 + else 1509 + /* Invalidate page cache */ 1510 + chip->pagebuf = -1; 1491 1511 memcpy(buf, chip->buffers->databuf + col, bytes); 1492 1512 } 1493 1513 ··· 1535 1539 if (!readlen) 1536 1540 break; 1537 1541 1538 - /* For subsequent reads align to page boundary. */ 1542 + /* For subsequent reads align to page boundary */ 1539 1543 col = 0; 1540 1544 /* Increment page address */ 1541 1545 realpage++; ··· 1548 1552 chip->select_chip(mtd, chipnr); 1549 1553 } 1550 1554 1551 - /* Check, if the chip supports auto page increment 1552 - * or if we have hit a block boundary. 1555 + /* 1556 + * Check, if the chip supports auto page increment or if we 1557 + * have hit a block boundary. 
1553 1558 */ 1554 1559 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1555 1560 sndcmd = 1; ··· 1571 1574 1572 1575 /** 1573 1576 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc 1574 - * @mtd: MTD device structure 1575 - * @from: offset to read from 1576 - * @len: number of bytes to read 1577 - * @retlen: pointer to variable to store the number of read bytes 1578 - * @buf: the databuffer to put data 1577 + * @mtd: MTD device structure 1578 + * @from: offset to read from 1579 + * @len: number of bytes to read 1580 + * @retlen: pointer to variable to store the number of read bytes 1581 + * @buf: the databuffer to put data 1579 1582 * 1580 - * Get hold of the chip and call nand_do_read 1583 + * Get hold of the chip and call nand_do_read. 1581 1584 */ 1582 1585 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1583 1586 size_t *retlen, uint8_t *buf) 1584 1587 { 1585 1588 struct nand_chip *chip = mtd->priv; 1589 + struct mtd_oob_ops ops; 1586 1590 int ret; 1587 1591 1588 1592 /* Do not allow reads past end of device */ ··· 1594 1596 1595 1597 nand_get_device(chip, mtd, FL_READING); 1596 1598 1597 - chip->ops.len = len; 1598 - chip->ops.datbuf = buf; 1599 - chip->ops.oobbuf = NULL; 1599 + ops.len = len; 1600 + ops.datbuf = buf; 1601 + ops.oobbuf = NULL; 1602 + ops.mode = 0; 1600 1603 1601 - ret = nand_do_read_ops(mtd, from, &chip->ops); 1604 + ret = nand_do_read_ops(mtd, from, &ops); 1602 1605 1603 - *retlen = chip->ops.retlen; 1606 + *retlen = ops.retlen; 1604 1607 1605 1608 nand_release_device(mtd); 1606 1609 ··· 1609 1610 } 1610 1611 1611 1612 /** 1612 - * nand_read_oob_std - [REPLACABLE] the most common OOB data read function 1613 - * @mtd: mtd info structure 1614 - * @chip: nand chip info structure 1615 - * @page: page number to read 1616 - * @sndcmd: flag whether to issue read command or not 1613 + * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function 1614 + * @mtd: mtd info structure 1615 + * 
@chip: nand chip info structure 1616 + * @page: page number to read 1617 + * @sndcmd: flag whether to issue read command or not 1617 1618 */ 1618 1619 static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1619 1620 int page, int sndcmd) ··· 1627 1628 } 1628 1629 1629 1630 /** 1630 - * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC 1631 + * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC 1631 1632 * with syndromes 1632 - * @mtd: mtd info structure 1633 - * @chip: nand chip info structure 1634 - * @page: page number to read 1635 - * @sndcmd: flag whether to issue read command or not 1633 + * @mtd: mtd info structure 1634 + * @chip: nand chip info structure 1635 + * @page: page number to read 1636 + * @sndcmd: flag whether to issue read command or not 1636 1637 */ 1637 1638 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1638 1639 int page, int sndcmd) ··· 1666 1667 } 1667 1668 1668 1669 /** 1669 - * nand_write_oob_std - [REPLACABLE] the most common OOB data write function 1670 - * @mtd: mtd info structure 1671 - * @chip: nand chip info structure 1672 - * @page: page number to write 1670 + * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function 1671 + * @mtd: mtd info structure 1672 + * @chip: nand chip info structure 1673 + * @page: page number to write 1673 1674 */ 1674 1675 static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1675 1676 int page) ··· 1689 1690 } 1690 1691 1691 1692 /** 1692 - * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC 1693 - * with syndrome - only for large page flash ! 
1694 - * @mtd: mtd info structure 1695 - * @chip: nand chip info structure 1696 - * @page: page number to write 1693 + * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC 1694 + * with syndrome - only for large page flash 1695 + * @mtd: mtd info structure 1696 + * @chip: nand chip info structure 1697 + * @page: page number to write 1697 1698 */ 1698 1699 static int nand_write_oob_syndrome(struct mtd_info *mtd, 1699 1700 struct nand_chip *chip, int page) ··· 1748 1749 } 1749 1750 1750 1751 /** 1751 - * nand_do_read_oob - [Intern] NAND read out-of-band 1752 - * @mtd: MTD device structure 1753 - * @from: offset to read from 1754 - * @ops: oob operations description structure 1752 + * nand_do_read_oob - [INTERN] NAND read out-of-band 1753 + * @mtd: MTD device structure 1754 + * @from: offset to read from 1755 + * @ops: oob operations description structure 1755 1756 * 1756 - * NAND read out-of-band data from the spare area 1757 + * NAND read out-of-band data from the spare area. 
1757 1758 */ 1758 1759 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1759 1760 struct mtd_oob_ops *ops) 1760 1761 { 1761 1762 int page, realpage, chipnr, sndcmd = 1; 1762 1763 struct nand_chip *chip = mtd->priv; 1764 + struct mtd_ecc_stats stats; 1763 1765 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; 1764 1766 int readlen = ops->ooblen; 1765 1767 int len; 1766 1768 uint8_t *buf = ops->oobbuf; 1767 1769 1768 - DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", 1770 + pr_debug("%s: from = 0x%08Lx, len = %i\n", 1769 1771 __func__, (unsigned long long)from, readlen); 1770 1772 1771 - if (ops->mode == MTD_OOB_AUTO) 1773 + stats = mtd->ecc_stats; 1774 + 1775 + if (ops->mode == MTD_OPS_AUTO_OOB) 1772 1776 len = chip->ecc.layout->oobavail; 1773 1777 else 1774 1778 len = mtd->oobsize; 1775 1779 1776 1780 if (unlikely(ops->ooboffs >= len)) { 1777 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " 1778 - "outside oob\n", __func__); 1781 + pr_debug("%s: attempt to start read outside oob\n", 1782 + __func__); 1779 1783 return -EINVAL; 1780 1784 } 1781 1785 ··· 1786 1784 if (unlikely(from >= mtd->size || 1787 1785 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 1788 1786 (from >> chip->page_shift)) * len)) { 1789 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " 1790 - "of device\n", __func__); 1787 + pr_debug("%s: attempt to read beyond end of device\n", 1788 + __func__); 1791 1789 return -EINVAL; 1792 1790 } 1793 1791 ··· 1799 1797 page = realpage & chip->pagemask; 1800 1798 1801 1799 while (1) { 1802 - sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1800 + if (ops->mode == MTD_OPS_RAW) 1801 + sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd); 1802 + else 1803 + sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1803 1804 1804 1805 len = min(len, readlen); 1805 1806 buf = nand_transfer_oob(chip, buf, ops, len); ··· 1835 1830 chip->select_chip(mtd, chipnr); 1836 1831 } 1837 1832 1838 - /* 
Check, if the chip supports auto page increment 1839 - * or if we have hit a block boundary. 1833 + /* 1834 + * Check, if the chip supports auto page increment or if we 1835 + * have hit a block boundary. 1840 1836 */ 1841 1837 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1842 1838 sndcmd = 1; 1843 1839 } 1844 1840 1845 1841 ops->oobretlen = ops->ooblen; 1846 - return 0; 1842 + 1843 + if (mtd->ecc_stats.failed - stats.failed) 1844 + return -EBADMSG; 1845 + 1846 + return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1847 1847 } 1848 1848 1849 1849 /** 1850 1850 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 1851 - * @mtd: MTD device structure 1852 - * @from: offset to read from 1853 - * @ops: oob operation description structure 1851 + * @mtd: MTD device structure 1852 + * @from: offset to read from 1853 + * @ops: oob operation description structure 1854 1854 * 1855 - * NAND read data and/or out-of-band data 1855 + * NAND read data and/or out-of-band data. 
1856 1856 */ 1857 1857 static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1858 1858 struct mtd_oob_ops *ops) ··· 1869 1859 1870 1860 /* Do not allow reads past end of device */ 1871 1861 if (ops->datbuf && (from + ops->len) > mtd->size) { 1872 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " 1873 - "beyond end of device\n", __func__); 1862 + pr_debug("%s: attempt to read beyond end of device\n", 1863 + __func__); 1874 1864 return -EINVAL; 1875 1865 } 1876 1866 1877 1867 nand_get_device(chip, mtd, FL_READING); 1878 1868 1879 1869 switch (ops->mode) { 1880 - case MTD_OOB_PLACE: 1881 - case MTD_OOB_AUTO: 1882 - case MTD_OOB_RAW: 1870 + case MTD_OPS_PLACE_OOB: 1871 + case MTD_OPS_AUTO_OOB: 1872 + case MTD_OPS_RAW: 1883 1873 break; 1884 1874 1885 1875 default: ··· 1898 1888 1899 1889 1900 1890 /** 1901 - * nand_write_page_raw - [Intern] raw page write function 1902 - * @mtd: mtd info structure 1903 - * @chip: nand chip info structure 1904 - * @buf: data buffer 1891 + * nand_write_page_raw - [INTERN] raw page write function 1892 + * @mtd: mtd info structure 1893 + * @chip: nand chip info structure 1894 + * @buf: data buffer 1905 1895 * 1906 - * Not for syndrome calculating ecc controllers, which use a special oob layout 1896 + * Not for syndrome calculating ECC controllers, which use a special oob layout. 1907 1897 */ 1908 1898 static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1909 1899 const uint8_t *buf) ··· 1913 1903 } 1914 1904 1915 1905 /** 1916 - * nand_write_page_raw_syndrome - [Intern] raw page write function 1917 - * @mtd: mtd info structure 1918 - * @chip: nand chip info structure 1919 - * @buf: data buffer 1906 + * nand_write_page_raw_syndrome - [INTERN] raw page write function 1907 + * @mtd: mtd info structure 1908 + * @chip: nand chip info structure 1909 + * @buf: data buffer 1920 1910 * 1921 1911 * We need a special oob layout and handling even when ECC isn't checked. 
1922 1912 */ ··· 1952 1942 chip->write_buf(mtd, oob, size); 1953 1943 } 1954 1944 /** 1955 - * nand_write_page_swecc - [REPLACABLE] software ecc based page write function 1956 - * @mtd: mtd info structure 1957 - * @chip: nand chip info structure 1958 - * @buf: data buffer 1945 + * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 1946 + * @mtd: mtd info structure 1947 + * @chip: nand chip info structure 1948 + * @buf: data buffer 1959 1949 */ 1960 1950 static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1961 1951 const uint8_t *buf) ··· 1967 1957 const uint8_t *p = buf; 1968 1958 uint32_t *eccpos = chip->ecc.layout->eccpos; 1969 1959 1970 - /* Software ecc calculation */ 1960 + /* Software ECC calculation */ 1971 1961 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1972 1962 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1973 1963 ··· 1978 1968 } 1979 1969 1980 1970 /** 1981 - * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function 1982 - * @mtd: mtd info structure 1983 - * @chip: nand chip info structure 1984 - * @buf: data buffer 1971 + * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function 1972 + * @mtd: mtd info structure 1973 + * @chip: nand chip info structure 1974 + * @buf: data buffer 1985 1975 */ 1986 1976 static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1987 1977 const uint8_t *buf) ··· 2006 1996 } 2007 1997 2008 1998 /** 2009 - * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write 2010 - * @mtd: mtd info structure 2011 - * @chip: nand chip info structure 2012 - * @buf: data buffer 1999 + * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write 2000 + * @mtd: mtd info structure 2001 + * @chip: nand chip info structure 2002 + * @buf: data buffer 2013 2003 * 2014 - * The hw generator calculates the error syndrome automatically. 
Therefor 2015 - * we need a special oob layout and handling. 2004 + * The hw generator calculates the error syndrome automatically. Therefore we 2005 + * need a special oob layout and handling. 2016 2006 */ 2017 2007 static void nand_write_page_syndrome(struct mtd_info *mtd, 2018 2008 struct nand_chip *chip, const uint8_t *buf) ··· 2051 2041 2052 2042 /** 2053 2043 * nand_write_page - [REPLACEABLE] write one page 2054 - * @mtd: MTD device structure 2055 - * @chip: NAND chip descriptor 2056 - * @buf: the data to write 2057 - * @page: page number to write 2058 - * @cached: cached programming 2059 - * @raw: use _raw version of write_page 2044 + * @mtd: MTD device structure 2045 + * @chip: NAND chip descriptor 2046 + * @buf: the data to write 2047 + * @page: page number to write 2048 + * @cached: cached programming 2049 + * @raw: use _raw version of write_page 2060 2050 */ 2061 2051 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2062 2052 const uint8_t *buf, int page, int cached, int raw) ··· 2071 2061 chip->ecc.write_page(mtd, chip, buf); 2072 2062 2073 2063 /* 2074 - * Cached progamming disabled for now, Not sure if its worth the 2075 - * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s) 2064 + * Cached progamming disabled for now. Not sure if it's worth the 2065 + * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s). 2076 2066 */ 2077 2067 cached = 0; 2078 2068 ··· 2082 2072 status = chip->waitfunc(mtd, chip); 2083 2073 /* 2084 2074 * See if operation failed and additional status checks are 2085 - * available 2075 + * available. 
2086 2076 */ 2087 2077 if ((status & NAND_STATUS_FAIL) && (chip->errstat)) 2088 2078 status = chip->errstat(mtd, chip, FL_WRITING, status, ··· 2106 2096 } 2107 2097 2108 2098 /** 2109 - * nand_fill_oob - [Internal] Transfer client buffer to oob 2110 - * @chip: nand chip structure 2111 - * @oob: oob data buffer 2112 - * @len: oob data write length 2113 - * @ops: oob ops structure 2099 + * nand_fill_oob - [INTERN] Transfer client buffer to oob 2100 + * @mtd: MTD device structure 2101 + * @oob: oob data buffer 2102 + * @len: oob data write length 2103 + * @ops: oob ops structure 2114 2104 */ 2115 - static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 2116 - struct mtd_oob_ops *ops) 2105 + static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, 2106 + struct mtd_oob_ops *ops) 2117 2107 { 2108 + struct nand_chip *chip = mtd->priv; 2109 + 2110 + /* 2111 + * Initialise to all 0xFF, to avoid the possibility of left over OOB 2112 + * data from a previous OOB read. 2113 + */ 2114 + memset(chip->oob_poi, 0xff, mtd->oobsize); 2115 + 2118 2116 switch (ops->mode) { 2119 2117 2120 - case MTD_OOB_PLACE: 2121 - case MTD_OOB_RAW: 2118 + case MTD_OPS_PLACE_OOB: 2119 + case MTD_OPS_RAW: 2122 2120 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 2123 2121 return oob + len; 2124 2122 2125 - case MTD_OOB_AUTO: { 2123 + case MTD_OPS_AUTO_OOB: { 2126 2124 struct nand_oobfree *free = chip->ecc.layout->oobfree; 2127 2125 uint32_t boffs = 0, woffs = ops->ooboffs; 2128 2126 size_t bytes = 0; 2129 2127 2130 2128 for (; free->length && len; free++, len -= bytes) { 2131 - /* Write request not from offset 0 ? */ 2129 + /* Write request not from offset 0? 
*/ 2132 2130 if (unlikely(woffs)) { 2133 2131 if (woffs >= free->length) { 2134 2132 woffs -= free->length; ··· 2164 2146 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 2165 2147 2166 2148 /** 2167 - * nand_do_write_ops - [Internal] NAND write with ECC 2168 - * @mtd: MTD device structure 2169 - * @to: offset to write to 2170 - * @ops: oob operations description structure 2149 + * nand_do_write_ops - [INTERN] NAND write with ECC 2150 + * @mtd: MTD device structure 2151 + * @to: offset to write to 2152 + * @ops: oob operations description structure 2171 2153 * 2172 - * NAND write with ECC 2154 + * NAND write with ECC. 2173 2155 */ 2174 2156 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, 2175 2157 struct mtd_oob_ops *ops) ··· 2179 2161 uint32_t writelen = ops->len; 2180 2162 2181 2163 uint32_t oobwritelen = ops->ooblen; 2182 - uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? 2164 + uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ? 2183 2165 mtd->oobavail : mtd->oobsize; 2184 2166 2185 2167 uint8_t *oob = ops->oobbuf; ··· 2190 2172 if (!writelen) 2191 2173 return 0; 2192 2174 2193 - /* reject writes, which are not page aligned */ 2175 + /* Reject writes, which are not page aligned */ 2194 2176 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 2195 - printk(KERN_NOTICE "%s: Attempt to write not " 2196 - "page aligned data\n", __func__); 2177 + pr_notice("%s: attempt to write non page aligned data\n", 2178 + __func__); 2197 2179 return -EINVAL; 2198 2180 } 2199 2181 ··· 2219 2201 (chip->pagebuf << chip->page_shift) < (to + ops->len)) 2220 2202 chip->pagebuf = -1; 2221 2203 2222 - /* If we're not given explicit OOB data, let it be 0xFF */ 2223 - if (likely(!oob)) 2224 - memset(chip->oob_poi, 0xff, mtd->oobsize); 2225 - 2226 2204 /* Don't allow multipage oob writes with offset */ 2227 2205 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2228 2206 return -EINVAL; ··· 2228 2214 int cached = writelen > bytes && page != blockmask; 
2229 2215 uint8_t *wbuf = buf; 2230 2216 2231 - /* Partial page write ? */ 2217 + /* Partial page write? */ 2232 2218 if (unlikely(column || writelen < (mtd->writesize - 1))) { 2233 2219 cached = 0; 2234 2220 bytes = min_t(int, bytes - column, (int) writelen); ··· 2240 2226 2241 2227 if (unlikely(oob)) { 2242 2228 size_t len = min(oobwritelen, oobmaxlen); 2243 - oob = nand_fill_oob(chip, oob, len, ops); 2229 + oob = nand_fill_oob(mtd, oob, len, ops); 2244 2230 oobwritelen -= len; 2231 + } else { 2232 + /* We still need to erase leftover OOB data */ 2233 + memset(chip->oob_poi, 0xff, mtd->oobsize); 2245 2234 } 2246 2235 2247 2236 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2248 - (ops->mode == MTD_OOB_RAW)); 2237 + (ops->mode == MTD_OPS_RAW)); 2249 2238 if (ret) 2250 2239 break; 2251 2240 ··· 2277 2260 2278 2261 /** 2279 2262 * panic_nand_write - [MTD Interface] NAND write with ECC 2280 - * @mtd: MTD device structure 2281 - * @to: offset to write to 2282 - * @len: number of bytes to write 2283 - * @retlen: pointer to variable to store the number of written bytes 2284 - * @buf: the data to write 2263 + * @mtd: MTD device structure 2264 + * @to: offset to write to 2265 + * @len: number of bytes to write 2266 + * @retlen: pointer to variable to store the number of written bytes 2267 + * @buf: the data to write 2285 2268 * 2286 2269 * NAND write with ECC. Used when performing writes in interrupt context, this 2287 2270 * may for example be called by mtdoops when writing an oops while in panic. ··· 2290 2273 size_t *retlen, const uint8_t *buf) 2291 2274 { 2292 2275 struct nand_chip *chip = mtd->priv; 2276 + struct mtd_oob_ops ops; 2293 2277 int ret; 2294 2278 2295 2279 /* Do not allow reads past end of device */ ··· 2299 2281 if (!len) 2300 2282 return 0; 2301 2283 2302 - /* Wait for the device to get ready. */ 2284 + /* Wait for the device to get ready */ 2303 2285 panic_nand_wait(mtd, chip, 400); 2304 2286 2305 - /* Grab the device. 
*/ 2287 + /* Grab the device */ 2306 2288 panic_nand_get_device(chip, mtd, FL_WRITING); 2307 2289 2308 - chip->ops.len = len; 2309 - chip->ops.datbuf = (uint8_t *)buf; 2310 - chip->ops.oobbuf = NULL; 2290 + ops.len = len; 2291 + ops.datbuf = (uint8_t *)buf; 2292 + ops.oobbuf = NULL; 2293 + ops.mode = 0; 2311 2294 2312 - ret = nand_do_write_ops(mtd, to, &chip->ops); 2295 + ret = nand_do_write_ops(mtd, to, &ops); 2313 2296 2314 - *retlen = chip->ops.retlen; 2297 + *retlen = ops.retlen; 2315 2298 return ret; 2316 2299 } 2317 2300 2318 2301 /** 2319 2302 * nand_write - [MTD Interface] NAND write with ECC 2320 - * @mtd: MTD device structure 2321 - * @to: offset to write to 2322 - * @len: number of bytes to write 2323 - * @retlen: pointer to variable to store the number of written bytes 2324 - * @buf: the data to write 2303 + * @mtd: MTD device structure 2304 + * @to: offset to write to 2305 + * @len: number of bytes to write 2306 + * @retlen: pointer to variable to store the number of written bytes 2307 + * @buf: the data to write 2325 2308 * 2326 - * NAND write with ECC 2309 + * NAND write with ECC. 
2327 2310 */ 2328 2311 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2329 2312 size_t *retlen, const uint8_t *buf) 2330 2313 { 2331 2314 struct nand_chip *chip = mtd->priv; 2315 + struct mtd_oob_ops ops; 2332 2316 int ret; 2333 2317 2334 2318 /* Do not allow reads past end of device */ ··· 2341 2321 2342 2322 nand_get_device(chip, mtd, FL_WRITING); 2343 2323 2344 - chip->ops.len = len; 2345 - chip->ops.datbuf = (uint8_t *)buf; 2346 - chip->ops.oobbuf = NULL; 2324 + ops.len = len; 2325 + ops.datbuf = (uint8_t *)buf; 2326 + ops.oobbuf = NULL; 2327 + ops.mode = 0; 2347 2328 2348 - ret = nand_do_write_ops(mtd, to, &chip->ops); 2329 + ret = nand_do_write_ops(mtd, to, &ops); 2349 2330 2350 - *retlen = chip->ops.retlen; 2331 + *retlen = ops.retlen; 2351 2332 2352 2333 nand_release_device(mtd); 2353 2334 ··· 2357 2336 2358 2337 /** 2359 2338 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 2360 - * @mtd: MTD device structure 2361 - * @to: offset to write to 2362 - * @ops: oob operation description structure 2339 + * @mtd: MTD device structure 2340 + * @to: offset to write to 2341 + * @ops: oob operation description structure 2363 2342 * 2364 - * NAND write out-of-band 2343 + * NAND write out-of-band. 
2365 2344 */ 2366 2345 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 2367 2346 struct mtd_oob_ops *ops) ··· 2369 2348 int chipnr, page, status, len; 2370 2349 struct nand_chip *chip = mtd->priv; 2371 2350 2372 - DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 2351 + pr_debug("%s: to = 0x%08x, len = %i\n", 2373 2352 __func__, (unsigned int)to, (int)ops->ooblen); 2374 2353 2375 - if (ops->mode == MTD_OOB_AUTO) 2354 + if (ops->mode == MTD_OPS_AUTO_OOB) 2376 2355 len = chip->ecc.layout->oobavail; 2377 2356 else 2378 2357 len = mtd->oobsize; 2379 2358 2380 2359 /* Do not allow write past end of page */ 2381 2360 if ((ops->ooboffs + ops->ooblen) > len) { 2382 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " 2383 - "past end of page\n", __func__); 2361 + pr_debug("%s: attempt to write past end of page\n", 2362 + __func__); 2384 2363 return -EINVAL; 2385 2364 } 2386 2365 2387 2366 if (unlikely(ops->ooboffs >= len)) { 2388 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " 2389 - "write outside oob\n", __func__); 2367 + pr_debug("%s: attempt to start write outside oob\n", 2368 + __func__); 2390 2369 return -EINVAL; 2391 2370 } 2392 2371 ··· 2395 2374 ops->ooboffs + ops->ooblen > 2396 2375 ((mtd->size >> chip->page_shift) - 2397 2376 (to >> chip->page_shift)) * len)) { 2398 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2399 - "end of device\n", __func__); 2377 + pr_debug("%s: attempt to write beyond end of device\n", 2378 + __func__); 2400 2379 return -EINVAL; 2401 2380 } 2402 2381 ··· 2422 2401 if (page == chip->pagebuf) 2423 2402 chip->pagebuf = -1; 2424 2403 2425 - memset(chip->oob_poi, 0xff, mtd->oobsize); 2426 - nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 2427 - status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2428 - memset(chip->oob_poi, 0xff, mtd->oobsize); 2404 + nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops); 2405 + 2406 + if (ops->mode == MTD_OPS_RAW) 2407 + status = chip->ecc.write_oob_raw(mtd, chip, page & 
chip->pagemask); 2408 + else 2409 + status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2429 2410 2430 2411 if (status) 2431 2412 return status; ··· 2439 2416 2440 2417 /** 2441 2418 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 2442 - * @mtd: MTD device structure 2443 - * @to: offset to write to 2444 - * @ops: oob operation description structure 2419 + * @mtd: MTD device structure 2420 + * @to: offset to write to 2421 + * @ops: oob operation description structure 2445 2422 */ 2446 2423 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2447 2424 struct mtd_oob_ops *ops) ··· 2453 2430 2454 2431 /* Do not allow writes past end of device */ 2455 2432 if (ops->datbuf && (to + ops->len) > mtd->size) { 2456 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2457 - "end of device\n", __func__); 2433 + pr_debug("%s: attempt to write beyond end of device\n", 2434 + __func__); 2458 2435 return -EINVAL; 2459 2436 } 2460 2437 2461 2438 nand_get_device(chip, mtd, FL_WRITING); 2462 2439 2463 2440 switch (ops->mode) { 2464 - case MTD_OOB_PLACE: 2465 - case MTD_OOB_AUTO: 2466 - case MTD_OOB_RAW: 2441 + case MTD_OPS_PLACE_OOB: 2442 + case MTD_OPS_AUTO_OOB: 2443 + case MTD_OPS_RAW: 2467 2444 break; 2468 2445 2469 2446 default: ··· 2481 2458 } 2482 2459 2483 2460 /** 2484 - * single_erease_cmd - [GENERIC] NAND standard block erase command function 2485 - * @mtd: MTD device structure 2486 - * @page: the page address of the block which will be erased 2461 + * single_erase_cmd - [GENERIC] NAND standard block erase command function 2462 + * @mtd: MTD device structure 2463 + * @page: the page address of the block which will be erased 2487 2464 * 2488 - * Standard erase command for NAND chips 2465 + * Standard erase command for NAND chips. 
2489 2466 */ 2490 2467 static void single_erase_cmd(struct mtd_info *mtd, int page) 2491 2468 { ··· 2496 2473 } 2497 2474 2498 2475 /** 2499 - * multi_erease_cmd - [GENERIC] AND specific block erase command function 2500 - * @mtd: MTD device structure 2501 - * @page: the page address of the block which will be erased 2476 + * multi_erase_cmd - [GENERIC] AND specific block erase command function 2477 + * @mtd: MTD device structure 2478 + * @page: the page address of the block which will be erased 2502 2479 * 2503 - * AND multi block erase command function 2504 - * Erase 4 consecutive blocks 2480 + * AND multi block erase command function. Erase 4 consecutive blocks. 2505 2481 */ 2506 2482 static void multi_erase_cmd(struct mtd_info *mtd, int page) 2507 2483 { ··· 2515 2493 2516 2494 /** 2517 2495 * nand_erase - [MTD Interface] erase block(s) 2518 - * @mtd: MTD device structure 2519 - * @instr: erase instruction 2496 + * @mtd: MTD device structure 2497 + * @instr: erase instruction 2520 2498 * 2521 - * Erase one ore more blocks 2499 + * Erase one ore more blocks. 2522 2500 */ 2523 2501 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 2524 2502 { ··· 2527 2505 2528 2506 #define BBT_PAGE_MASK 0xffffff3f 2529 2507 /** 2530 - * nand_erase_nand - [Internal] erase block(s) 2531 - * @mtd: MTD device structure 2532 - * @instr: erase instruction 2533 - * @allowbbt: allow erasing the bbt area 2508 + * nand_erase_nand - [INTERN] erase block(s) 2509 + * @mtd: MTD device structure 2510 + * @instr: erase instruction 2511 + * @allowbbt: allow erasing the bbt area 2534 2512 * 2535 - * Erase one ore more blocks 2513 + * Erase one ore more blocks. 
2536 2514 */ 2537 2515 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2538 2516 int allowbbt) ··· 2543 2521 unsigned int bbt_masked_page = 0xffffffff; 2544 2522 loff_t len; 2545 2523 2546 - DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 2547 - __func__, (unsigned long long)instr->addr, 2548 - (unsigned long long)instr->len); 2524 + pr_debug("%s: start = 0x%012llx, len = %llu\n", 2525 + __func__, (unsigned long long)instr->addr, 2526 + (unsigned long long)instr->len); 2549 2527 2550 2528 if (check_offs_len(mtd, instr->addr, instr->len)) 2551 2529 return -EINVAL; ··· 2567 2545 2568 2546 /* Check, if it is write protected */ 2569 2547 if (nand_check_wp(mtd)) { 2570 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 2571 - __func__); 2548 + pr_debug("%s: device is write protected!\n", 2549 + __func__); 2572 2550 instr->state = MTD_ERASE_FAILED; 2573 2551 goto erase_exit; 2574 2552 } ··· 2577 2555 * If BBT requires refresh, set the BBT page mask to see if the BBT 2578 2556 * should be rewritten. Otherwise the mask is set to 0xffffffff which 2579 2557 * can not be matched. This is also done when the bbt is actually 2580 - * erased to avoid recusrsive updates 2558 + * erased to avoid recursive updates. 2581 2559 */ 2582 2560 if (chip->options & BBT_AUTO_REFRESH && !allowbbt) 2583 2561 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; ··· 2588 2566 instr->state = MTD_ERASING; 2589 2567 2590 2568 while (len) { 2591 - /* 2592 - * heck if we have a bad block, we do not erase bad blocks ! 2593 - */ 2569 + /* Heck if we have a bad block, we do not erase bad blocks! 
*/ 2594 2570 if (nand_block_checkbad(mtd, ((loff_t) page) << 2595 2571 chip->page_shift, 0, allowbbt)) { 2596 - printk(KERN_WARNING "%s: attempt to erase a bad block " 2597 - "at page 0x%08x\n", __func__, page); 2572 + pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", 2573 + __func__, page); 2598 2574 instr->state = MTD_ERASE_FAILED; 2599 2575 goto erase_exit; 2600 2576 } 2601 2577 2602 2578 /* 2603 2579 * Invalidate the page cache, if we erase the block which 2604 - * contains the current cached page 2580 + * contains the current cached page. 2605 2581 */ 2606 2582 if (page <= chip->pagebuf && chip->pagebuf < 2607 2583 (page + pages_per_block)) ··· 2619 2599 2620 2600 /* See if block erase succeeded */ 2621 2601 if (status & NAND_STATUS_FAIL) { 2622 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " 2623 - "page 0x%08x\n", __func__, page); 2602 + pr_debug("%s: failed erase, page 0x%08x\n", 2603 + __func__, page); 2624 2604 instr->state = MTD_ERASE_FAILED; 2625 2605 instr->fail_addr = 2626 2606 ((loff_t)page << chip->page_shift); ··· 2629 2609 2630 2610 /* 2631 2611 * If BBT requires refresh, set the BBT rewrite flag to the 2632 - * page being erased 2612 + * page being erased. 2633 2613 */ 2634 2614 if (bbt_masked_page != 0xffffffff && 2635 2615 (page & BBT_PAGE_MASK) == bbt_masked_page) ··· 2648 2628 2649 2629 /* 2650 2630 * If BBT requires refresh and BBT-PERCHIP, set the BBT 2651 - * page mask to see if this BBT should be rewritten 2631 + * page mask to see if this BBT should be rewritten. 2652 2632 */ 2653 2633 if (bbt_masked_page != 0xffffffff && 2654 2634 (chip->bbt_td->options & NAND_BBT_PERCHIP)) ··· 2671 2651 2672 2652 /* 2673 2653 * If BBT requires refresh and erase was successful, rewrite any 2674 - * selected bad block tables 2654 + * selected bad block tables. 
2675 2655 */ 2676 2656 if (bbt_masked_page == 0xffffffff || ret) 2677 2657 return ret; ··· 2679 2659 for (chipnr = 0; chipnr < chip->numchips; chipnr++) { 2680 2660 if (!rewrite_bbt[chipnr]) 2681 2661 continue; 2682 - /* update the BBT for chip */ 2683 - DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " 2684 - "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, 2685 - rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); 2662 + /* Update the BBT for chip */ 2663 + pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n", 2664 + __func__, chipnr, rewrite_bbt[chipnr], 2665 + chip->bbt_td->pages[chipnr]); 2686 2666 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2687 2667 } 2688 2668 ··· 2692 2672 2693 2673 /** 2694 2674 * nand_sync - [MTD Interface] sync 2695 - * @mtd: MTD device structure 2675 + * @mtd: MTD device structure 2696 2676 * 2697 - * Sync is actually a wait for chip ready function 2677 + * Sync is actually a wait for chip ready function. 2698 2678 */ 2699 2679 static void nand_sync(struct mtd_info *mtd) 2700 2680 { 2701 2681 struct nand_chip *chip = mtd->priv; 2702 2682 2703 - DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2683 + pr_debug("%s: called\n", __func__); 2704 2684 2705 2685 /* Grab the lock and see if the device is available */ 2706 2686 nand_get_device(chip, mtd, FL_SYNCING); ··· 2710 2690 2711 2691 /** 2712 2692 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 2713 - * @mtd: MTD device structure 2714 - * @offs: offset relative to mtd start 2693 + * @mtd: MTD device structure 2694 + * @offs: offset relative to mtd start 2715 2695 */ 2716 2696 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2717 2697 { ··· 2724 2704 2725 2705 /** 2726 2706 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 2727 - * @mtd: MTD device structure 2728 - * @ofs: offset relative to mtd start 2707 + * @mtd: MTD device structure 2708 + * @ofs: offset relative to mtd start 2729 2709 */ 2730 2710 static int nand_block_markbad(struct 
mtd_info *mtd, loff_t ofs) 2731 2711 { ··· 2734 2714 2735 2715 ret = nand_block_isbad(mtd, ofs); 2736 2716 if (ret) { 2737 - /* If it was bad already, return success and do nothing. */ 2717 + /* If it was bad already, return success and do nothing */ 2738 2718 if (ret > 0) 2739 2719 return 0; 2740 2720 return ret; ··· 2745 2725 2746 2726 /** 2747 2727 * nand_suspend - [MTD Interface] Suspend the NAND flash 2748 - * @mtd: MTD device structure 2728 + * @mtd: MTD device structure 2749 2729 */ 2750 2730 static int nand_suspend(struct mtd_info *mtd) 2751 2731 { ··· 2756 2736 2757 2737 /** 2758 2738 * nand_resume - [MTD Interface] Resume the NAND flash 2759 - * @mtd: MTD device structure 2739 + * @mtd: MTD device structure 2760 2740 */ 2761 2741 static void nand_resume(struct mtd_info *mtd) 2762 2742 { ··· 2765 2745 if (chip->state == FL_PM_SUSPENDED) 2766 2746 nand_release_device(mtd); 2767 2747 else 2768 - printk(KERN_ERR "%s called for a chip which is not " 2769 - "in suspended state\n", __func__); 2748 + pr_err("%s called for a chip which is not in suspended state\n", 2749 + __func__); 2770 2750 } 2771 2751 2772 - /* 2773 - * Set default functions 2774 - */ 2752 + /* Set default functions */ 2775 2753 static void nand_set_defaults(struct nand_chip *chip, int busw) 2776 2754 { 2777 2755 /* check for proper chip_delay setup, set 20us if not */ ··· 2811 2793 2812 2794 } 2813 2795 2814 - /* 2815 - * sanitize ONFI strings so we can safely print them 2816 - */ 2796 + /* Sanitize ONFI strings so we can safely print them */ 2817 2797 static void sanitize_string(uint8_t *s, size_t len) 2818 2798 { 2819 2799 ssize_t i; 2820 2800 2821 - /* null terminate */ 2801 + /* Null terminate */ 2822 2802 s[len - 1] = 0; 2823 2803 2824 - /* remove non printable chars */ 2804 + /* Remove non printable chars */ 2825 2805 for (i = 0; i < len - 1; i++) { 2826 2806 if (s[i] < ' ' || s[i] > 127) 2827 2807 s[i] = '?'; 2828 2808 } 2829 2809 2830 - /* remove trailing spaces */ 2810 + /* Remove 
trailing spaces */ 2831 2811 strim(s); 2832 2812 } 2833 2813 ··· 2842 2826 } 2843 2827 2844 2828 /* 2845 - * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise 2829 + * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. 2846 2830 */ 2847 2831 static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, 2848 - int busw) 2832 + int *busw) 2849 2833 { 2850 2834 struct nand_onfi_params *p = &chip->onfi_params; 2851 2835 int i; 2852 2836 int val; 2853 2837 2854 - /* try ONFI for unknow chip or LP */ 2838 + /* Try ONFI for unknown chip or LP */ 2855 2839 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2856 2840 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2857 2841 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 2858 2842 return 0; 2859 2843 2860 - printk(KERN_INFO "ONFI flash detected\n"); 2844 + pr_info("ONFI flash detected\n"); 2861 2845 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2862 2846 for (i = 0; i < 3; i++) { 2863 2847 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); 2864 2848 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == 2865 2849 le16_to_cpu(p->crc)) { 2866 - printk(KERN_INFO "ONFI param page %d valid\n", i); 2850 + pr_info("ONFI param page %d valid\n", i); 2867 2851 break; 2868 2852 } 2869 2853 } ··· 2871 2855 if (i == 3) 2872 2856 return 0; 2873 2857 2874 - /* check version */ 2858 + /* Check version */ 2875 2859 val = le16_to_cpu(p->revision); 2876 2860 if (val & (1 << 5)) 2877 2861 chip->onfi_version = 23; ··· 2887 2871 chip->onfi_version = 0; 2888 2872 2889 2873 if (!chip->onfi_version) { 2890 - printk(KERN_INFO "%s: unsupported ONFI version: %d\n", 2891 - __func__, val); 2874 + pr_info("%s: unsupported ONFI version: %d\n", __func__, val); 2892 2875 return 0; 2893 2876 } 2894 2877 ··· 2899 2884 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2900 2885 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2901 2886 chip->chipsize = 
(uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2902 - busw = 0; 2887 + *busw = 0; 2903 2888 if (le16_to_cpu(p->features) & 1) 2904 - busw = NAND_BUSWIDTH_16; 2889 + *busw = NAND_BUSWIDTH_16; 2905 2890 2906 2891 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2907 2892 chip->options |= (NAND_NO_READRDY | ··· 2911 2896 } 2912 2897 2913 2898 /* 2914 - * Get the flash and manufacturer id and lookup if the type is supported 2899 + * Get the flash and manufacturer id and lookup if the type is supported. 2915 2900 */ 2916 2901 static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2917 2902 struct nand_chip *chip, ··· 2928 2913 2929 2914 /* 2930 2915 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 2931 - * after power-up 2916 + * after power-up. 2932 2917 */ 2933 2918 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2934 2919 ··· 2939 2924 *maf_id = chip->read_byte(mtd); 2940 2925 *dev_id = chip->read_byte(mtd); 2941 2926 2942 - /* Try again to make sure, as some systems the bus-hold or other 2927 + /* 2928 + * Try again to make sure, as some systems the bus-hold or other 2943 2929 * interface concerns can cause random data which looks like a 2944 2930 * possibly credible NAND flash to appear. If the two results do 2945 2931 * not match, ignore the device completely. 
··· 2952 2936 id_data[i] = chip->read_byte(mtd); 2953 2937 2954 2938 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 2955 - printk(KERN_INFO "%s: second ID read did not match " 2956 - "%02x,%02x against %02x,%02x\n", __func__, 2957 - *maf_id, *dev_id, id_data[0], id_data[1]); 2939 + pr_info("%s: second ID read did not match " 2940 + "%02x,%02x against %02x,%02x\n", __func__, 2941 + *maf_id, *dev_id, id_data[0], id_data[1]); 2958 2942 return ERR_PTR(-ENODEV); 2959 2943 } 2960 2944 ··· 2968 2952 chip->onfi_version = 0; 2969 2953 if (!type->name || !type->pagesize) { 2970 2954 /* Check is chip is ONFI compliant */ 2971 - ret = nand_flash_detect_onfi(mtd, chip, busw); 2955 + ret = nand_flash_detect_onfi(mtd, chip, &busw); 2972 2956 if (ret) 2973 2957 goto ident_done; 2974 2958 } ··· 2989 2973 chip->chipsize = (uint64_t)type->chipsize << 20; 2990 2974 2991 2975 if (!type->pagesize && chip->init_size) { 2992 - /* set the pagesize, oobsize, erasesize by the driver*/ 2976 + /* Set the pagesize, oobsize, erasesize by the driver */ 2993 2977 busw = chip->init_size(mtd, chip, id_data); 2994 2978 } else if (!type->pagesize) { 2995 2979 int extid; ··· 3049 3033 } 3050 3034 } else { 3051 3035 /* 3052 - * Old devices have chip data hardcoded in the device id table 3036 + * Old devices have chip data hardcoded in the device id table. 3053 3037 */ 3054 3038 mtd->erasesize = type->erasesize; 3055 3039 mtd->writesize = type->pagesize; ··· 3059 3043 /* 3060 3044 * Check for Spansion/AMD ID + repeating 5th, 6th byte since 3061 3045 * some Spansion chips have erasesize that conflicts with size 3062 - * listed in nand_ids table 3046 + * listed in nand_ids table. 3063 3047 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) 3064 3048 */ 3065 3049 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && ··· 3073 3057 chip->options &= ~NAND_CHIPOPTIONS_MSK; 3074 3058 chip->options |= type->options & NAND_CHIPOPTIONS_MSK; 3075 3059 3076 - /* Check if chip is a not a samsung device. 
Do not clear the 3077 - * options for chips which are not having an extended id. 3060 + /* 3061 + * Check if chip is not a Samsung device. Do not clear the 3062 + * options for chips which do not have an extended id. 3078 3063 */ 3079 3064 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 3080 3065 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3081 3066 ident_done: 3082 3067 3083 3068 /* 3084 - * Set chip as a default. Board drivers can override it, if necessary 3069 + * Set chip as a default. Board drivers can override it, if necessary. 3085 3070 */ 3086 3071 chip->options |= NAND_NO_AUTOINCR; 3087 3072 ··· 3094 3077 3095 3078 /* 3096 3079 * Check, if buswidth is correct. Hardware drivers should set 3097 - * chip correct ! 3080 + * chip correct! 3098 3081 */ 3099 3082 if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3100 - printk(KERN_INFO "NAND device: Manufacturer ID:" 3101 - " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3102 - *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3103 - printk(KERN_WARNING "NAND bus width %d instead %d bit\n", 3104 - (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 3105 - busw ? 16 : 8); 3083 + pr_info("NAND device: Manufacturer ID:" 3084 + " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3085 + *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3086 + pr_warn("NAND bus width %d instead %d bit\n", 3087 + (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 3088 + busw ? 16 : 8); 3106 3089 return ERR_PTR(-EINVAL); 3107 3090 } 3108 3091 3109 3092 /* Calculate the address shift from the page size */ 3110 3093 chip->page_shift = ffs(mtd->writesize) - 1; 3111 - /* Convert chipsize to number of pages per chip -1. 
*/ 3094 + /* Convert chipsize to number of pages per chip -1 */ 3112 3095 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; 3113 3096 3114 3097 chip->bbt_erase_shift = chip->phys_erase_shift = ··· 3138 3121 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3139 3122 (*maf_id == NAND_MFR_SAMSUNG || 3140 3123 *maf_id == NAND_MFR_HYNIX)) 3141 - chip->options |= NAND_BBT_SCANLASTPAGE; 3124 + chip->bbt_options |= NAND_BBT_SCANLASTPAGE; 3142 3125 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3143 3126 (*maf_id == NAND_MFR_SAMSUNG || 3144 3127 *maf_id == NAND_MFR_HYNIX || ··· 3146 3129 *maf_id == NAND_MFR_AMD)) || 3147 3130 (mtd->writesize == 2048 && 3148 3131 *maf_id == NAND_MFR_MICRON)) 3149 - chip->options |= NAND_BBT_SCAN2NDPAGE; 3150 - 3151 - /* 3152 - * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6 3153 - */ 3154 - if (!(busw & NAND_BUSWIDTH_16) && 3155 - *maf_id == NAND_MFR_STMICRO && 3156 - mtd->writesize == 2048) { 3157 - chip->options |= NAND_BBT_SCANBYTE1AND6; 3158 - chip->badblockpos = 0; 3159 - } 3132 + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; 3160 3133 3161 3134 /* Check for AND chips with 4 page planes */ 3162 3135 if (chip->options & NAND_4PAGE_ARRAY) ··· 3154 3147 else 3155 3148 chip->erase_cmd = single_erase_cmd; 3156 3149 3157 - /* Do not replace user supplied command function ! */ 3150 + /* Do not replace user supplied command function! */ 3158 3151 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3159 3152 chip->cmdfunc = nand_command_lp; 3160 3153 3161 - /* TODO onfi flash name */ 3162 - printk(KERN_INFO "NAND device: Manufacturer ID:" 3154 + pr_info("NAND device: Manufacturer ID:" 3163 3155 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3164 3156 nand_manuf_ids[maf_idx].name, 3165 3157 chip->onfi_version ? 
chip->onfi_params.model : type->name); ··· 3168 3162 3169 3163 /** 3170 3164 * nand_scan_ident - [NAND Interface] Scan for the NAND device 3171 - * @mtd: MTD device structure 3172 - * @maxchips: Number of chips to scan for 3173 - * @table: Alternative NAND ID table 3165 + * @mtd: MTD device structure 3166 + * @maxchips: number of chips to scan for 3167 + * @table: alternative NAND ID table 3174 3168 * 3175 - * This is the first phase of the normal nand_scan() function. It 3176 - * reads the flash ID and sets up MTD fields accordingly. 3169 + * This is the first phase of the normal nand_scan() function. It reads the 3170 + * flash ID and sets up MTD fields accordingly. 3177 3171 * 3178 3172 * The mtd->owner field must be set to the module of the caller. 3179 3173 */ ··· 3195 3189 3196 3190 if (IS_ERR(type)) { 3197 3191 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 3198 - printk(KERN_WARNING "No NAND device found.\n"); 3192 + pr_warn("No NAND device found\n"); 3199 3193 chip->select_chip(mtd, -1); 3200 3194 return PTR_ERR(type); 3201 3195 } ··· 3213 3207 break; 3214 3208 } 3215 3209 if (i > 1) 3216 - printk(KERN_INFO "%d NAND chips detected\n", i); 3210 + pr_info("%d NAND chips detected\n", i); 3217 3211 3218 3212 /* Store the number of chips and calc total size for mtd */ 3219 3213 chip->numchips = i; ··· 3226 3220 3227 3221 /** 3228 3222 * nand_scan_tail - [NAND Interface] Scan for the NAND device 3229 - * @mtd: MTD device structure 3223 + * @mtd: MTD device structure 3230 3224 * 3231 - * This is the second phase of the normal nand_scan() function. It 3232 - * fills out all the uninitialized function pointers with the defaults 3233 - * and scans for a bad block table if appropriate. 3225 + * This is the second phase of the normal nand_scan() function. It fills out 3226 + * all the uninitialized function pointers with the defaults and scans for a 3227 + * bad block table if appropriate. 
3234 3228 */ 3235 3229 int nand_scan_tail(struct mtd_info *mtd) 3236 3230 { ··· 3246 3240 chip->oob_poi = chip->buffers->databuf + mtd->writesize; 3247 3241 3248 3242 /* 3249 - * If no default placement scheme is given, select an appropriate one 3243 + * If no default placement scheme is given, select an appropriate one. 3250 3244 */ 3251 3245 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { 3252 3246 switch (mtd->oobsize) { ··· 3263 3257 chip->ecc.layout = &nand_oob_128; 3264 3258 break; 3265 3259 default: 3266 - printk(KERN_WARNING "No oob scheme defined for " 3267 - "oobsize %d\n", mtd->oobsize); 3260 + pr_warn("No oob scheme defined for oobsize %d\n", 3261 + mtd->oobsize); 3268 3262 BUG(); 3269 3263 } 3270 3264 } ··· 3273 3267 chip->write_page = nand_write_page; 3274 3268 3275 3269 /* 3276 - * check ECC mode, default to software if 3byte/512byte hardware ECC is 3270 + * Check ECC mode, default to software if 3byte/512byte hardware ECC is 3277 3271 * selected and we have 256 byte pagesize fallback to software ECC 3278 3272 */ 3279 3273 ··· 3282 3276 /* Similar to NAND_ECC_HW, but a separate read_page handle */ 3283 3277 if (!chip->ecc.calculate || !chip->ecc.correct || 3284 3278 !chip->ecc.hwctl) { 3285 - printk(KERN_WARNING "No ECC functions supplied; " 3286 - "Hardware ECC not possible\n"); 3279 + pr_warn("No ECC functions supplied; " 3280 + "hardware ECC not possible\n"); 3287 3281 BUG(); 3288 3282 } 3289 3283 if (!chip->ecc.read_page) 3290 3284 chip->ecc.read_page = nand_read_page_hwecc_oob_first; 3291 3285 3292 3286 case NAND_ECC_HW: 3293 - /* Use standard hwecc read page function ? */ 3287 + /* Use standard hwecc read page function? 
*/ 3294 3288 if (!chip->ecc.read_page) 3295 3289 chip->ecc.read_page = nand_read_page_hwecc; 3296 3290 if (!chip->ecc.write_page) ··· 3311 3305 chip->ecc.read_page == nand_read_page_hwecc || 3312 3306 !chip->ecc.write_page || 3313 3307 chip->ecc.write_page == nand_write_page_hwecc)) { 3314 - printk(KERN_WARNING "No ECC functions supplied; " 3315 - "Hardware ECC not possible\n"); 3308 + pr_warn("No ECC functions supplied; " 3309 + "hardware ECC not possible\n"); 3316 3310 BUG(); 3317 3311 } 3318 - /* Use standard syndrome read/write page function ? */ 3312 + /* Use standard syndrome read/write page function? */ 3319 3313 if (!chip->ecc.read_page) 3320 3314 chip->ecc.read_page = nand_read_page_syndrome; 3321 3315 if (!chip->ecc.write_page) ··· 3331 3325 3332 3326 if (mtd->writesize >= chip->ecc.size) 3333 3327 break; 3334 - printk(KERN_WARNING "%d byte HW ECC not possible on " 3335 - "%d byte page size, fallback to SW ECC\n", 3336 - chip->ecc.size, mtd->writesize); 3328 + pr_warn("%d byte HW ECC not possible on " 3329 + "%d byte page size, fallback to SW ECC\n", 3330 + chip->ecc.size, mtd->writesize); 3337 3331 chip->ecc.mode = NAND_ECC_SOFT; 3338 3332 3339 3333 case NAND_ECC_SOFT: ··· 3353 3347 3354 3348 case NAND_ECC_SOFT_BCH: 3355 3349 if (!mtd_nand_has_bch()) { 3356 - printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); 3350 + pr_warn("CONFIG_MTD_ECC_BCH not enabled\n"); 3357 3351 BUG(); 3358 3352 } 3359 3353 chip->ecc.calculate = nand_bch_calculate_ecc; ··· 3368 3362 /* 3369 3363 * Board driver should supply ecc.size and ecc.bytes values to 3370 3364 * select how many bits are correctable; see nand_bch_init() 3371 - * for details. 3372 - * Otherwise, default to 4 bits for large page devices 3365 + * for details. Otherwise, default to 4 bits for large page 3366 + * devices. 
3373 3367 */ 3374 3368 if (!chip->ecc.size && (mtd->oobsize >= 64)) { 3375 3369 chip->ecc.size = 512; ··· 3380 3374 chip->ecc.bytes, 3381 3375 &chip->ecc.layout); 3382 3376 if (!chip->ecc.priv) { 3383 - printk(KERN_WARNING "BCH ECC initialization failed!\n"); 3377 + pr_warn("BCH ECC initialization failed!\n"); 3384 3378 BUG(); 3385 3379 } 3386 3380 break; 3387 3381 3388 3382 case NAND_ECC_NONE: 3389 - printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " 3390 - "This is not recommended !!\n"); 3383 + pr_warn("NAND_ECC_NONE selected by board driver. " 3384 + "This is not recommended!\n"); 3391 3385 chip->ecc.read_page = nand_read_page_raw; 3392 3386 chip->ecc.write_page = nand_write_page_raw; 3393 3387 chip->ecc.read_oob = nand_read_oob_std; ··· 3399 3393 break; 3400 3394 3401 3395 default: 3402 - printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n", 3403 - chip->ecc.mode); 3396 + pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode); 3404 3397 BUG(); 3405 3398 } 3406 3399 3400 + /* For many systems, the standard OOB write also works for raw */ 3401 + if (!chip->ecc.read_oob_raw) 3402 + chip->ecc.read_oob_raw = chip->ecc.read_oob; 3403 + if (!chip->ecc.write_oob_raw) 3404 + chip->ecc.write_oob_raw = chip->ecc.write_oob; 3405 + 3407 3406 /* 3408 3407 * The number of bytes available for a client to place data into 3409 - * the out of band area 3408 + * the out of band area. 3410 3409 */ 3411 3410 chip->ecc.layout->oobavail = 0; 3412 3411 for (i = 0; chip->ecc.layout->oobfree[i].length ··· 3422 3411 3423 3412 /* 3424 3413 * Set the number of read / write steps for one page depending on ECC 3425 - * mode 3414 + * mode. 
3426 3415 */ 3427 3416 chip->ecc.steps = mtd->writesize / chip->ecc.size; 3428 3417 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { 3429 - printk(KERN_WARNING "Invalid ecc parameters\n"); 3418 + pr_warn("Invalid ECC parameters\n"); 3430 3419 BUG(); 3431 3420 } 3432 3421 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; 3433 3422 3434 - /* 3435 - * Allow subpage writes up to ecc.steps. Not possible for MLC 3436 - * FLASH. 3437 - */ 3423 + /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ 3438 3424 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 3439 3425 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { 3440 3426 switch (chip->ecc.steps) { ··· 3489 3481 } 3490 3482 EXPORT_SYMBOL(nand_scan_tail); 3491 3483 3492 - /* is_module_text_address() isn't exported, and it's mostly a pointless 3484 + /* 3485 + * is_module_text_address() isn't exported, and it's mostly a pointless 3493 3486 * test if this is a module _anyway_ -- they'd have to try _really_ hard 3494 - * to call us from in-kernel code if the core NAND support is modular. */ 3487 + * to call us from in-kernel code if the core NAND support is modular. 3488 + */ 3495 3489 #ifdef MODULE 3496 3490 #define caller_is_module() (1) 3497 3491 #else ··· 3503 3493 3504 3494 /** 3505 3495 * nand_scan - [NAND Interface] Scan for the NAND device 3506 - * @mtd: MTD device structure 3507 - * @maxchips: Number of chips to scan for 3496 + * @mtd: MTD device structure 3497 + * @maxchips: number of chips to scan for 3508 3498 * 3509 - * This fills out all the uninitialized function pointers 3510 - * with the defaults. 3511 - * The flash ID is read and the mtd/chip structures are 3512 - * filled with the appropriate values. 3513 - * The mtd->owner field must be set to the module of the caller 3514 - * 3499 + * This fills out all the uninitialized function pointers with the defaults. 3500 + * The flash ID is read and the mtd/chip structures are filled with the 3501 + * appropriate values. 
The mtd->owner field must be set to the module of the 3502 + * caller. 3515 3503 */ 3516 3504 int nand_scan(struct mtd_info *mtd, int maxchips) 3517 3505 { ··· 3517 3509 3518 3510 /* Many callers got this wrong, so check for it for a while... */ 3519 3511 if (!mtd->owner && caller_is_module()) { 3520 - printk(KERN_CRIT "%s called with NULL mtd->owner!\n", 3521 - __func__); 3512 + pr_crit("%s called with NULL mtd->owner!\n", __func__); 3522 3513 BUG(); 3523 3514 } 3524 3515 ··· 3530 3523 3531 3524 /** 3532 3525 * nand_release - [NAND Interface] Free resources held by the NAND device 3533 - * @mtd: MTD device structure 3534 - */ 3526 + * @mtd: MTD device structure 3527 + */ 3535 3528 void nand_release(struct mtd_info *mtd) 3536 3529 { 3537 3530 struct nand_chip *chip = mtd->priv;
+322 -366
drivers/mtd/nand/nand_bbt.c
··· 14 14 * 15 15 * When nand_scan_bbt is called, then it tries to find the bad block table 16 16 * depending on the options in the BBT descriptor(s). If no flash based BBT 17 - * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory 17 + * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory 18 18 * marked good / bad blocks. This information is used to create a memory BBT. 19 19 * Once a new bad block is discovered then the "factory" information is updated 20 20 * on the device. ··· 36 36 * The table is marked in the OOB area with an ident pattern and a version 37 37 * number which indicates which of both tables is more up to date. If the NAND 38 38 * controller needs the complete OOB area for the ECC information then the 39 - * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern 40 - * and the version byte into the data area and the OOB area will remain 41 - * untouched. 39 + * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of 40 + * course): it moves the ident pattern and the version byte into the data area 41 + * and the OOB area will remain untouched. 42 42 * 43 43 * The table uses 2 bits per block 44 44 * 11b: block is good ··· 81 81 82 82 /** 83 83 * check_pattern - [GENERIC] check if a pattern is in the buffer 84 - * @buf: the buffer to search 85 - * @len: the length of buffer to search 86 - * @paglen: the pagelength 87 - * @td: search pattern descriptor 84 + * @buf: the buffer to search 85 + * @len: the length of buffer to search 86 + * @paglen: the pagelength 87 + * @td: search pattern descriptor 88 88 * 89 - * Check for a pattern at the given place. Used to search bad block 90 - * tables and good / bad block identifiers. 91 - * If the SCAN_EMPTY option is set then check, if all bytes except the 92 - * pattern area contain 0xff 93 - * 94 - */ 89 + * Check for a pattern at the given place. Used to search bad block tables and 90 + * good / bad block identifiers. 
If the SCAN_EMPTY option is set then check, if 91 + * all bytes except the pattern area contain 0xff. 92 + */ 95 93 static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 96 94 { 97 95 int i, end = 0; ··· 108 110 p += end; 109 111 110 112 /* Compare the pattern */ 111 - for (i = 0; i < td->len; i++) { 112 - if (p[i] != td->pattern[i]) 113 - return -1; 114 - } 115 - 116 - /* Check both positions 1 and 6 for pattern? */ 117 - if (td->options & NAND_BBT_SCANBYTE1AND6) { 118 - if (td->options & NAND_BBT_SCANEMPTY) { 119 - p += td->len; 120 - end += NAND_SMALL_BADBLOCK_POS - td->offs; 121 - /* Check region between positions 1 and 6 */ 122 - for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len; 123 - i++) { 124 - if (*p++ != 0xff) 125 - return -1; 126 - } 127 - } 128 - else { 129 - p += NAND_SMALL_BADBLOCK_POS - td->offs; 130 - } 131 - /* Compare the pattern */ 132 - for (i = 0; i < td->len; i++) { 133 - if (p[i] != td->pattern[i]) 134 - return -1; 135 - } 136 - } 113 + if (memcmp(p, td->pattern, td->len)) 114 + return -1; 137 115 138 116 if (td->options & NAND_BBT_SCANEMPTY) { 139 117 p += td->len; ··· 124 150 125 151 /** 126 152 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 127 - * @buf: the buffer to search 128 - * @td: search pattern descriptor 153 + * @buf: the buffer to search 154 + * @td: search pattern descriptor 129 155 * 130 - * Check for a pattern at the given place. Used to search bad block 131 - * tables and good / bad block identifiers. Same as check_pattern, but 132 - * no optional empty check 133 - * 134 - */ 156 + * Check for a pattern at the given place. Used to search bad block tables and 157 + * good / bad block identifiers. Same as check_pattern, but no optional empty 158 + * check. 
159 + */ 135 160 static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) 136 161 { 137 162 int i; ··· 141 168 if (p[td->offs + i] != td->pattern[i]) 142 169 return -1; 143 170 } 144 - /* Need to check location 1 AND 6? */ 145 - if (td->options & NAND_BBT_SCANBYTE1AND6) { 146 - for (i = 0; i < td->len; i++) { 147 - if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i]) 148 - return -1; 149 - } 150 - } 151 171 return 0; 152 172 } 153 173 154 174 /** 155 175 * add_marker_len - compute the length of the marker in data area 156 - * @td: BBT descriptor used for computation 176 + * @td: BBT descriptor used for computation 157 177 * 158 - * The length will be 0 if the markeris located in OOB area. 178 + * The length will be 0 if the marker is located in OOB area. 159 179 */ 160 180 static u32 add_marker_len(struct nand_bbt_descr *td) 161 181 { ··· 165 199 166 200 /** 167 201 * read_bbt - [GENERIC] Read the bad block table starting from page 168 - * @mtd: MTD device structure 169 - * @buf: temporary buffer 170 - * @page: the starting page 171 - * @num: the number of bbt descriptors to read 172 - * @td: the bbt describtion table 173 - * @offs: offset in the memory table 202 + * @mtd: MTD device structure 203 + * @buf: temporary buffer 204 + * @page: the starting page 205 + * @num: the number of bbt descriptors to read 206 + * @td: the bbt describtion table 207 + * @offs: offset in the memory table 174 208 * 175 209 * Read the bad block table starting from page. 
176 - * 177 210 */ 178 211 static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, 179 212 struct nand_bbt_descr *td, int offs) 180 213 { 181 - int res, i, j, act = 0; 214 + int res, ret = 0, i, j, act = 0; 182 215 struct nand_chip *this = mtd->priv; 183 216 size_t retlen, len, totlen; 184 217 loff_t from; 185 218 int bits = td->options & NAND_BBT_NRBITS_MSK; 186 - uint8_t msk = (uint8_t) ((1 << bits) - 1); 219 + uint8_t msk = (uint8_t)((1 << bits) - 1); 187 220 u32 marker_len; 188 221 int reserved_block_code = td->reserved_block_code; 189 222 190 223 totlen = (num * bits) >> 3; 191 224 marker_len = add_marker_len(td); 192 - from = ((loff_t) page) << this->page_shift; 225 + from = ((loff_t)page) << this->page_shift; 193 226 194 227 while (totlen) { 195 - len = min(totlen, (size_t) (1 << this->bbt_erase_shift)); 228 + len = min(totlen, (size_t)(1 << this->bbt_erase_shift)); 196 229 if (marker_len) { 197 230 /* 198 231 * In case the BBT marker is not in the OOB area it ··· 203 238 } 204 239 res = mtd->read(mtd, from, len, &retlen, buf); 205 240 if (res < 0) { 206 - if (retlen != len) { 207 - printk(KERN_INFO "nand_bbt: Error reading bad block table\n"); 241 + if (mtd_is_eccerr(res)) { 242 + pr_info("nand_bbt: ECC error in BBT at " 243 + "0x%012llx\n", from & ~mtd->writesize); 244 + return res; 245 + } else if (mtd_is_bitflip(res)) { 246 + pr_info("nand_bbt: corrected error in BBT at " 247 + "0x%012llx\n", from & ~mtd->writesize); 248 + ret = res; 249 + } else { 250 + pr_info("nand_bbt: error reading BBT\n"); 208 251 return res; 209 252 } 210 - printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n"); 211 253 } 212 254 213 255 /* Analyse data */ ··· 225 253 if (tmp == msk) 226 254 continue; 227 255 if (reserved_block_code && (tmp == reserved_block_code)) { 228 - printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", 229 - (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 256 + pr_info("nand_read_bbt: reserved 
block at 0x%012llx\n", 257 + (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 230 258 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 231 259 mtd->ecc_stats.bbtblocks++; 232 260 continue; 233 261 } 234 - /* Leave it for now, if its matured we can move this 235 - * message to MTD_DEBUG_LEVEL0 */ 236 - printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", 237 - (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 238 - /* Factory marked bad or worn out ? */ 262 + /* 263 + * Leave it for now, if it's matured we can 264 + * move this message to pr_debug. 265 + */ 266 + pr_info("nand_read_bbt: bad block at 0x%012llx\n", 267 + (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 268 + /* Factory marked bad or worn out? */ 239 269 if (tmp == 0) 240 270 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 241 271 else ··· 248 274 totlen -= len; 249 275 from += len; 250 276 } 251 - return 0; 277 + return ret; 252 278 } 253 279 254 280 /** 255 281 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page 256 - * @mtd: MTD device structure 257 - * @buf: temporary buffer 258 - * @td: descriptor for the bad block table 259 - * @chip: read the table for a specific chip, -1 read all chips. 260 - * Applies only if NAND_BBT_PERCHIP option is set 282 + * @mtd: MTD device structure 283 + * @buf: temporary buffer 284 + * @td: descriptor for the bad block table 285 + * @chip: read the table for a specific chip, -1 read all chips; applies only if 286 + * NAND_BBT_PERCHIP option is set 261 287 * 262 - * Read the bad block table for all chips starting at a given page 263 - * We assume that the bbt bits are in consecutive order. 264 - */ 288 + * Read the bad block table for all chips starting at a given page. We assume 289 + * that the bbt bits are in consecutive order. 
290 + */ 265 291 static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) 266 292 { 267 293 struct nand_chip *this = mtd->priv; ··· 287 313 return 0; 288 314 } 289 315 290 - /* 291 - * BBT marker is in the first page, no OOB. 292 - */ 316 + /* BBT marker is in the first page, no OOB */ 293 317 static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 294 318 struct nand_bbt_descr *td) 295 319 { ··· 301 329 return mtd->read(mtd, offs, len, &retlen, buf); 302 330 } 303 331 304 - /* 305 - * Scan read raw data from flash 306 - */ 332 + /* Scan read raw data from flash */ 307 333 static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 308 334 size_t len) 309 335 { 310 336 struct mtd_oob_ops ops; 311 337 int res; 312 338 313 - ops.mode = MTD_OOB_RAW; 339 + ops.mode = MTD_OPS_RAW; 314 340 ops.ooboffs = 0; 315 341 ops.ooblen = mtd->oobsize; 316 342 317 - 318 343 while (len > 0) { 319 - if (len <= mtd->writesize) { 320 - ops.oobbuf = buf + len; 321 - ops.datbuf = buf; 322 - ops.len = len; 323 - return mtd->read_oob(mtd, offs, &ops); 324 - } else { 325 - ops.oobbuf = buf + mtd->writesize; 326 - ops.datbuf = buf; 327 - ops.len = mtd->writesize; 328 - res = mtd->read_oob(mtd, offs, &ops); 344 + ops.datbuf = buf; 345 + ops.len = min(len, (size_t)mtd->writesize); 346 + ops.oobbuf = buf + ops.len; 329 347 330 - if (res) 331 - return res; 332 - } 348 + res = mtd->read_oob(mtd, offs, &ops); 349 + 350 + if (res) 351 + return res; 333 352 334 353 buf += mtd->oobsize + mtd->writesize; 335 354 len -= mtd->writesize; ··· 337 374 return scan_read_raw_oob(mtd, buf, offs, len); 338 375 } 339 376 340 - /* 341 - * Scan write data with oob to flash 342 - */ 377 + /* Scan write data with oob to flash */ 343 378 static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, 344 379 uint8_t *buf, uint8_t *oob) 345 380 { 346 381 struct mtd_oob_ops ops; 347 382 348 - ops.mode = MTD_OOB_PLACE; 383 + 
ops.mode = MTD_OPS_PLACE_OOB; 349 384 ops.ooboffs = 0; 350 385 ops.ooblen = mtd->oobsize; 351 386 ops.datbuf = buf; ··· 364 403 365 404 /** 366 405 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 367 - * @mtd: MTD device structure 368 - * @buf: temporary buffer 369 - * @td: descriptor for the bad block table 370 - * @md: descriptor for the bad block table mirror 406 + * @mtd: MTD device structure 407 + * @buf: temporary buffer 408 + * @td: descriptor for the bad block table 409 + * @md: descriptor for the bad block table mirror 371 410 * 372 - * Read the bad block table(s) for all chips starting at a given page 373 - * We assume that the bbt bits are in consecutive order. 374 - * 375 - */ 411 + * Read the bad block table(s) for all chips starting at a given page. We 412 + * assume that the bbt bits are in consecutive order. 413 + */ 376 414 static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, 377 415 struct nand_bbt_descr *td, struct nand_bbt_descr *md) 378 416 { ··· 382 422 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 383 423 mtd->writesize, td); 384 424 td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; 385 - printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 386 - td->pages[0], td->version[0]); 425 + pr_info("Bad block table at page %d, version 0x%02X\n", 426 + td->pages[0], td->version[0]); 387 427 } 388 428 389 429 /* Read the mirror version, if available */ ··· 391 431 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 392 432 mtd->writesize, td); 393 433 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; 394 - printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 395 - md->pages[0], md->version[0]); 434 + pr_info("Bad block table at page %d, version 0x%02X\n", 435 + md->pages[0], md->version[0]); 396 436 } 397 437 return 1; 398 438 } 399 439 400 - /* 401 - * Scan a given block full 402 - */ 440 + /* Scan a given block full */ 403 441 static int 
scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, 404 442 loff_t offs, uint8_t *buf, size_t readlen, 405 443 int scanlen, int len) ··· 405 447 int ret, j; 406 448 407 449 ret = scan_read_raw_oob(mtd, buf, offs, readlen); 408 - if (ret) 450 + /* Ignore ECC errors when checking for BBM */ 451 + if (ret && !mtd_is_bitflip_or_eccerr(ret)) 409 452 return ret; 410 453 411 454 for (j = 0; j < len; j++, buf += scanlen) { ··· 416 457 return 0; 417 458 } 418 459 419 - /* 420 - * Scan a given block partially 421 - */ 460 + /* Scan a given block partially */ 422 461 static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, 423 462 loff_t offs, uint8_t *buf, int len) 424 463 { ··· 427 470 ops.oobbuf = buf; 428 471 ops.ooboffs = 0; 429 472 ops.datbuf = NULL; 430 - ops.mode = MTD_OOB_PLACE; 473 + ops.mode = MTD_OPS_PLACE_OOB; 431 474 432 475 for (j = 0; j < len; j++) { 433 476 /* 434 - * Read the full oob until read_oob is fixed to 435 - * handle single byte reads for 16 bit 436 - * buswidth 477 + * Read the full oob until read_oob is fixed to handle single 478 + * byte reads for 16 bit buswidth. 437 479 */ 438 480 ret = mtd->read_oob(mtd, offs, &ops); 439 - if (ret) 481 + /* Ignore ECC errors when checking for BBM */ 482 + if (ret && !mtd_is_bitflip_or_eccerr(ret)) 440 483 return ret; 441 484 442 485 if (check_short_pattern(buf, bd)) ··· 449 492 450 493 /** 451 494 * create_bbt - [GENERIC] Create a bad block table by scanning the device 452 - * @mtd: MTD device structure 453 - * @buf: temporary buffer 454 - * @bd: descriptor for the good/bad block search pattern 455 - * @chip: create the table for a specific chip, -1 read all chips. 
456 - * Applies only if NAND_BBT_PERCHIP option is set 495 + * @mtd: MTD device structure 496 + * @buf: temporary buffer 497 + * @bd: descriptor for the good/bad block search pattern 498 + * @chip: create the table for a specific chip, -1 read all chips; applies only 499 + * if NAND_BBT_PERCHIP option is set 457 500 * 458 - * Create a bad block table by scanning the device 459 - * for the given good/bad block identify pattern 501 + * Create a bad block table by scanning the device for the given good/bad block 502 + * identify pattern. 460 503 */ 461 504 static int create_bbt(struct mtd_info *mtd, uint8_t *buf, 462 505 struct nand_bbt_descr *bd, int chip) ··· 467 510 loff_t from; 468 511 size_t readlen; 469 512 470 - printk(KERN_INFO "Scanning device for bad blocks\n"); 513 + pr_info("Scanning device for bad blocks\n"); 471 514 472 515 if (bd->options & NAND_BBT_SCANALLPAGES) 473 516 len = 1 << (this->bbt_erase_shift - this->page_shift); ··· 487 530 } 488 531 489 532 if (chip == -1) { 490 - /* Note that numblocks is 2 * (real numblocks) here, see i+=2 491 - * below as it makes shifting and masking less painful */ 533 + /* 534 + * Note that numblocks is 2 * (real numblocks) here, see i+=2 535 + * below as it makes shifting and masking less painful 536 + */ 492 537 numblocks = mtd->size >> (this->bbt_erase_shift - 1); 493 538 startblock = 0; 494 539 from = 0; 495 540 } else { 496 541 if (chip >= this->numchips) { 497 - printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", 542 + pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n", 498 543 chip + 1, this->numchips); 499 544 return -EINVAL; 500 545 } ··· 506 547 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 507 548 } 508 549 509 - if (this->options & NAND_BBT_SCANLASTPAGE) 550 + if (this->bbt_options & NAND_BBT_SCANLASTPAGE) 510 551 from += mtd->erasesize - (mtd->writesize * len); 511 552 512 553 for (i = startblock; i < numblocks;) { ··· 525 566 526 567 if (ret) { 527 568 
this->bbt[i >> 3] |= 0x03 << (i & 0x6); 528 - printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", 529 - i >> 1, (unsigned long long)from); 569 + pr_warn("Bad eraseblock %d at 0x%012llx\n", 570 + i >> 1, (unsigned long long)from); 530 571 mtd->ecc_stats.badblocks++; 531 572 } 532 573 ··· 538 579 539 580 /** 540 581 * search_bbt - [GENERIC] scan the device for a specific bad block table 541 - * @mtd: MTD device structure 542 - * @buf: temporary buffer 543 - * @td: descriptor for the bad block table 582 + * @mtd: MTD device structure 583 + * @buf: temporary buffer 584 + * @td: descriptor for the bad block table 544 585 * 545 - * Read the bad block table by searching for a given ident pattern. 546 - * Search is preformed either from the beginning up or from the end of 547 - * the device downwards. The search starts always at the start of a 548 - * block. 549 - * If the option NAND_BBT_PERCHIP is given, each chip is searched 550 - * for a bbt, which contains the bad block information of this chip. 551 - * This is necessary to provide support for certain DOC devices. 586 + * Read the bad block table by searching for a given ident pattern. Search is 587 + * preformed either from the beginning up or from the end of the device 588 + * downwards. The search starts always at the start of a block. If the option 589 + * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains 590 + * the bad block information of this chip. This is necessary to provide support 591 + * for certain DOC devices. 552 592 * 553 - * The bbt ident pattern resides in the oob area of the first page 554 - * in a block. 593 + * The bbt ident pattern resides in the oob area of the first page in a block. 555 594 */ 556 595 static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) 557 596 { ··· 560 603 int bbtblocks; 561 604 int blocktopage = this->bbt_erase_shift - this->page_shift; 562 605 563 - /* Search direction top -> down ? 
*/ 606 + /* Search direction top -> down? */ 564 607 if (td->options & NAND_BBT_LASTBLOCK) { 565 608 startblock = (mtd->size >> this->bbt_erase_shift) - 1; 566 609 dir = -1; ··· 569 612 dir = 1; 570 613 } 571 614 572 - /* Do we have a bbt per chip ? */ 615 + /* Do we have a bbt per chip? */ 573 616 if (td->options & NAND_BBT_PERCHIP) { 574 617 chips = this->numchips; 575 618 bbtblocks = this->chipsize >> this->bbt_erase_shift; ··· 608 651 /* Check, if we found a bbt for each requested chip */ 609 652 for (i = 0; i < chips; i++) { 610 653 if (td->pages[i] == -1) 611 - printk(KERN_WARNING "Bad block table not found for chip %d\n", i); 654 + pr_warn("Bad block table not found for chip %d\n", i); 612 655 else 613 - printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], 614 - td->version[i]); 656 + pr_info("Bad block table found at page %d, version " 657 + "0x%02X\n", td->pages[i], td->version[i]); 615 658 } 616 659 return 0; 617 660 } 618 661 619 662 /** 620 663 * search_read_bbts - [GENERIC] scan the device for bad block table(s) 621 - * @mtd: MTD device structure 622 - * @buf: temporary buffer 623 - * @td: descriptor for the bad block table 624 - * @md: descriptor for the bad block table mirror 664 + * @mtd: MTD device structure 665 + * @buf: temporary buffer 666 + * @td: descriptor for the bad block table 667 + * @md: descriptor for the bad block table mirror 625 668 * 626 - * Search and read the bad block table(s) 627 - */ 669 + * Search and read the bad block table(s). 
670 + */ 628 671 static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) 629 672 { 630 673 /* Search the primary table */ ··· 640 683 641 684 /** 642 685 * write_bbt - [GENERIC] (Re)write the bad block table 686 + * @mtd: MTD device structure 687 + * @buf: temporary buffer 688 + * @td: descriptor for the bad block table 689 + * @md: descriptor for the bad block table mirror 690 + * @chipsel: selector for a specific chip, -1 for all 643 691 * 644 - * @mtd: MTD device structure 645 - * @buf: temporary buffer 646 - * @td: descriptor for the bad block table 647 - * @md: descriptor for the bad block table mirror 648 - * @chipsel: selector for a specific chip, -1 for all 649 - * 650 - * (Re)write the bad block table 651 - * 652 - */ 692 + * (Re)write the bad block table. 693 + */ 653 694 static int write_bbt(struct mtd_info *mtd, uint8_t *buf, 654 695 struct nand_bbt_descr *td, struct nand_bbt_descr *md, 655 696 int chipsel) ··· 666 711 ops.ooblen = mtd->oobsize; 667 712 ops.ooboffs = 0; 668 713 ops.datbuf = NULL; 669 - ops.mode = MTD_OOB_PLACE; 714 + ops.mode = MTD_OPS_PLACE_OOB; 670 715 671 716 if (!rcode) 672 717 rcode = 0xff; 673 - /* Write bad block table per chip rather than per device ? */ 718 + /* Write bad block table per chip rather than per device? */ 674 719 if (td->options & NAND_BBT_PERCHIP) { 675 720 numblocks = (int)(this->chipsize >> this->bbt_erase_shift); 676 - /* Full device write or specific chip ? */ 721 + /* Full device write or specific chip? */ 677 722 if (chipsel == -1) { 678 723 nrchips = this->numchips; 679 724 } else { ··· 687 732 688 733 /* Loop through the chips */ 689 734 for (; chip < nrchips; chip++) { 690 - 691 - /* There was already a version of the table, reuse the page 735 + /* 736 + * There was already a version of the table, reuse the page 692 737 * This applies for absolute placement too, as we have the 693 738 * page nr. in td->pages. 
694 739 */ ··· 697 742 goto write; 698 743 } 699 744 700 - /* Automatic placement of the bad block table */ 701 - /* Search direction top -> down ? */ 745 + /* 746 + * Automatic placement of the bad block table. Search direction 747 + * top -> down? 748 + */ 702 749 if (td->options & NAND_BBT_LASTBLOCK) { 703 750 startblock = numblocks * (chip + 1) - 1; 704 751 dir = -1; ··· 724 767 if (!md || md->pages[chip] != page) 725 768 goto write; 726 769 } 727 - printk(KERN_ERR "No space left to write bad block table\n"); 770 + pr_err("No space left to write bad block table\n"); 728 771 return -ENOSPC; 729 772 write: 730 773 ··· 749 792 750 793 bbtoffs = chip * (numblocks >> 2); 751 794 752 - to = ((loff_t) page) << this->page_shift; 795 + to = ((loff_t)page) << this->page_shift; 753 796 754 - /* Must we save the block contents ? */ 797 + /* Must we save the block contents? */ 755 798 if (td->options & NAND_BBT_SAVECONTENT) { 756 799 /* Make it block aligned */ 757 - to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); 800 + to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1)); 758 801 len = 1 << this->bbt_erase_shift; 759 802 res = mtd->read(mtd, to, len, &retlen, buf); 760 803 if (res < 0) { 761 804 if (retlen != len) { 762 - printk(KERN_INFO "nand_bbt: Error " 763 - "reading block for writing " 764 - "the bad block table\n"); 805 + pr_info("nand_bbt: error reading block " 806 + "for writing the bad block table\n"); 765 807 return res; 766 808 } 767 - printk(KERN_WARNING "nand_bbt: ECC error " 768 - "while reading block for writing " 769 - "bad block table\n"); 809 + pr_warn("nand_bbt: ECC error while reading " 810 + "block for writing bad block table\n"); 770 811 } 771 812 /* Read oob data */ 772 813 ops.ooblen = (len >> this->page_shift) * mtd->oobsize; ··· 777 822 pageoffs = page - (int)(to >> this->page_shift); 778 823 offs = pageoffs << this->page_shift; 779 824 /* Preset the bbt area with 0xff */ 780 - memset(&buf[offs], 0xff, (size_t) (numblocks >> sft)); 825 + 
memset(&buf[offs], 0xff, (size_t)(numblocks >> sft)); 781 826 ooboffs = len + (pageoffs * mtd->oobsize); 782 827 783 828 } else if (td->options & NAND_BBT_NO_OOB) { 784 829 ooboffs = 0; 785 830 offs = td->len; 786 - /* the version byte */ 831 + /* The version byte */ 787 832 if (td->options & NAND_BBT_VERSION) 788 833 offs++; 789 834 /* Calc length */ 790 - len = (size_t) (numblocks >> sft); 835 + len = (size_t)(numblocks >> sft); 791 836 len += offs; 792 - /* Make it page aligned ! */ 837 + /* Make it page aligned! */ 793 838 len = ALIGN(len, mtd->writesize); 794 839 /* Preset the buffer with 0xff */ 795 840 memset(buf, 0xff, len); ··· 797 842 memcpy(buf, td->pattern, td->len); 798 843 } else { 799 844 /* Calc length */ 800 - len = (size_t) (numblocks >> sft); 801 - /* Make it page aligned ! */ 845 + len = (size_t)(numblocks >> sft); 846 + /* Make it page aligned! */ 802 847 len = ALIGN(len, mtd->writesize); 803 848 /* Preset the buffer with 0xff */ 804 849 memset(buf, 0xff, len + ··· 812 857 if (td->options & NAND_BBT_VERSION) 813 858 buf[ooboffs + td->veroffs] = td->version[chip]; 814 859 815 - /* walk through the memory table */ 860 + /* Walk through the memory table */ 816 861 for (i = 0; i < numblocks;) { 817 862 uint8_t dat; 818 863 dat = this->bbt[bbtoffs + (i >> 2)]; 819 864 for (j = 0; j < 4; j++, i++) { 820 865 int sftcnt = (i << (3 - sft)) & sftmsk; 821 - /* Do not store the reserved bbt blocks ! */ 866 + /* Do not store the reserved bbt blocks! 
*/ 822 867 buf[offs + (i >> sft)] &= 823 868 ~(msk[dat & 0x03] << sftcnt); 824 869 dat >>= 2; ··· 839 884 if (res < 0) 840 885 goto outerr; 841 886 842 - printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " 843 - "0x%02X\n", (unsigned long long)to, td->version[chip]); 887 + pr_info("Bad block table written to 0x%012llx, version 0x%02X\n", 888 + (unsigned long long)to, td->version[chip]); 844 889 845 890 /* Mark it as used */ 846 891 td->pages[chip] = page; ··· 848 893 return 0; 849 894 850 895 outerr: 851 - printk(KERN_WARNING 852 - "nand_bbt: Error while writing bad block table %d\n", res); 896 + pr_warn("nand_bbt: error while writing bad block table %d\n", res); 853 897 return res; 854 898 } 855 899 856 900 /** 857 901 * nand_memory_bbt - [GENERIC] create a memory based bad block table 858 - * @mtd: MTD device structure 859 - * @bd: descriptor for the good/bad block search pattern 902 + * @mtd: MTD device structure 903 + * @bd: descriptor for the good/bad block search pattern 860 904 * 861 - * The function creates a memory based bbt by scanning the device 862 - * for manufacturer / software marked good / bad blocks 863 - */ 905 + * The function creates a memory based bbt by scanning the device for 906 + * manufacturer / software marked good / bad blocks. 
907 + */ 864 908 static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 865 909 { 866 910 struct nand_chip *this = mtd->priv; ··· 870 916 871 917 /** 872 918 * check_create - [GENERIC] create and write bbt(s) if necessary 873 - * @mtd: MTD device structure 874 - * @buf: temporary buffer 875 - * @bd: descriptor for the good/bad block search pattern 919 + * @mtd: MTD device structure 920 + * @buf: temporary buffer 921 + * @bd: descriptor for the good/bad block search pattern 876 922 * 877 - * The function checks the results of the previous call to read_bbt 878 - * and creates / updates the bbt(s) if necessary 879 - * Creation is necessary if no bbt was found for the chip/device 880 - * Update is necessary if one of the tables is missing or the 881 - * version nr. of one table is less than the other 882 - */ 923 + * The function checks the results of the previous call to read_bbt and creates 924 + * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found 925 + * for the chip/device. Update is necessary if one of the tables is missing or 926 + * the version nr. of one table is less than the other. 927 + */ 883 928 static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) 884 929 { 885 - int i, chips, writeops, chipsel, res; 930 + int i, chips, writeops, create, chipsel, res, res2; 886 931 struct nand_chip *this = mtd->priv; 887 932 struct nand_bbt_descr *td = this->bbt_td; 888 933 struct nand_bbt_descr *md = this->bbt_md; 889 934 struct nand_bbt_descr *rd, *rd2; 890 935 891 - /* Do we have a bbt per chip ? */ 936 + /* Do we have a bbt per chip? */ 892 937 if (td->options & NAND_BBT_PERCHIP) 893 938 chips = this->numchips; 894 939 else ··· 895 942 896 943 for (i = 0; i < chips; i++) { 897 944 writeops = 0; 945 + create = 0; 898 946 rd = NULL; 899 947 rd2 = NULL; 900 - /* Per chip or per device ? */ 948 + res = res2 = 0; 949 + /* Per chip or per device? 
*/ 901 950 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; 902 - /* Mirrored table available ? */ 951 + /* Mirrored table available? */ 903 952 if (md) { 904 953 if (td->pages[i] == -1 && md->pages[i] == -1) { 954 + create = 1; 905 955 writeops = 0x03; 906 - goto create; 907 - } 908 - 909 - if (td->pages[i] == -1) { 956 + } else if (td->pages[i] == -1) { 910 957 rd = md; 911 - td->version[i] = md->version[i]; 912 - writeops = 1; 913 - goto writecheck; 914 - } 915 - 916 - if (md->pages[i] == -1) { 958 + writeops = 0x01; 959 + } else if (md->pages[i] == -1) { 917 960 rd = td; 918 - md->version[i] = td->version[i]; 919 - writeops = 2; 920 - goto writecheck; 921 - } 922 - 923 - if (td->version[i] == md->version[i]) { 961 + writeops = 0x02; 962 + } else if (td->version[i] == md->version[i]) { 924 963 rd = td; 925 964 if (!(td->options & NAND_BBT_VERSION)) 926 965 rd2 = md; 927 - goto writecheck; 928 - } 929 - 930 - if (((int8_t) (td->version[i] - md->version[i])) > 0) { 966 + } else if (((int8_t)(td->version[i] - md->version[i])) > 0) { 931 967 rd = td; 932 - md->version[i] = td->version[i]; 933 - writeops = 2; 968 + writeops = 0x02; 934 969 } else { 935 970 rd = md; 936 - td->version[i] = md->version[i]; 937 - writeops = 1; 971 + writeops = 0x01; 938 972 } 939 - 940 - goto writecheck; 941 - 942 973 } else { 943 974 if (td->pages[i] == -1) { 975 + create = 1; 944 976 writeops = 0x01; 945 - goto create; 977 + } else { 978 + rd = td; 946 979 } 947 - rd = td; 948 - goto writecheck; 949 980 } 950 - create: 951 - /* Create the bad block table by scanning the device ? */ 952 - if (!(td->options & NAND_BBT_CREATE)) 953 - continue; 954 981 955 - /* Create the table in memory by scanning the chip(s) */ 956 - if (!(this->options & NAND_CREATE_EMPTY_BBT)) 957 - create_bbt(mtd, buf, bd, chipsel); 982 + if (create) { 983 + /* Create the bad block table by scanning the device? 
*/ 984 + if (!(td->options & NAND_BBT_CREATE)) 985 + continue; 958 986 959 - td->version[i] = 1; 960 - if (md) 961 - md->version[i] = 1; 962 - writecheck: 963 - /* read back first ? */ 964 - if (rd) 965 - read_abs_bbt(mtd, buf, rd, chipsel); 966 - /* If they weren't versioned, read both. */ 967 - if (rd2) 968 - read_abs_bbt(mtd, buf, rd2, chipsel); 987 + /* Create the table in memory by scanning the chip(s) */ 988 + if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY)) 989 + create_bbt(mtd, buf, bd, chipsel); 969 990 970 - /* Write the bad block table to the device ? */ 991 + td->version[i] = 1; 992 + if (md) 993 + md->version[i] = 1; 994 + } 995 + 996 + /* Read back first? */ 997 + if (rd) { 998 + res = read_abs_bbt(mtd, buf, rd, chipsel); 999 + if (mtd_is_eccerr(res)) { 1000 + /* Mark table as invalid */ 1001 + rd->pages[i] = -1; 1002 + rd->version[i] = 0; 1003 + i--; 1004 + continue; 1005 + } 1006 + } 1007 + /* If they weren't versioned, read both */ 1008 + if (rd2) { 1009 + res2 = read_abs_bbt(mtd, buf, rd2, chipsel); 1010 + if (mtd_is_eccerr(res2)) { 1011 + /* Mark table as invalid */ 1012 + rd2->pages[i] = -1; 1013 + rd2->version[i] = 0; 1014 + i--; 1015 + continue; 1016 + } 1017 + } 1018 + 1019 + /* Scrub the flash table(s)? */ 1020 + if (mtd_is_bitflip(res) || mtd_is_bitflip(res2)) 1021 + writeops = 0x03; 1022 + 1023 + /* Update version numbers before writing */ 1024 + if (md) { 1025 + td->version[i] = max(td->version[i], md->version[i]); 1026 + md->version[i] = td->version[i]; 1027 + } 1028 + 1029 + /* Write the bad block table to the device? */ 971 1030 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 972 1031 res = write_bbt(mtd, buf, td, md, chipsel); 973 1032 if (res < 0) 974 1033 return res; 975 1034 } 976 1035 977 - /* Write the mirror bad block table to the device ? */ 1036 + /* Write the mirror bad block table to the device? 
*/ 978 1037 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 979 1038 res = write_bbt(mtd, buf, md, td, chipsel); 980 1039 if (res < 0) ··· 998 1033 999 1034 /** 1000 1035 * mark_bbt_regions - [GENERIC] mark the bad block table regions 1001 - * @mtd: MTD device structure 1002 - * @td: bad block table descriptor 1036 + * @mtd: MTD device structure 1037 + * @td: bad block table descriptor 1003 1038 * 1004 - * The bad block table regions are marked as "bad" to prevent 1005 - * accidental erasures / writes. The regions are identified by 1006 - * the mark 0x02. 1007 - */ 1039 + * The bad block table regions are marked as "bad" to prevent accidental 1040 + * erasures / writes. The regions are identified by the mark 0x02. 1041 + */ 1008 1042 static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) 1009 1043 { 1010 1044 struct nand_chip *this = mtd->priv; 1011 1045 int i, j, chips, block, nrblocks, update; 1012 1046 uint8_t oldval, newval; 1013 1047 1014 - /* Do we have a bbt per chip ? */ 1048 + /* Do we have a bbt per chip? */ 1015 1049 if (td->options & NAND_BBT_PERCHIP) { 1016 1050 chips = this->numchips; 1017 1051 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); ··· 1047 1083 update = 1; 1048 1084 block += 2; 1049 1085 } 1050 - /* If we want reserved blocks to be recorded to flash, and some 1051 - new ones have been marked, then we need to update the stored 1052 - bbts. This should only happen once. */ 1086 + /* 1087 + * If we want reserved blocks to be recorded to flash, and some 1088 + * new ones have been marked, then we need to update the stored 1089 + * bbts. This should only happen once. 
1090 + */ 1053 1091 if (update && td->reserved_block_code) 1054 1092 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); 1055 1093 } ··· 1059 1093 1060 1094 /** 1061 1095 * verify_bbt_descr - verify the bad block description 1062 - * @mtd: MTD device structure 1063 - * @bd: the table to verify 1096 + * @mtd: MTD device structure 1097 + * @bd: the table to verify 1064 1098 * 1065 1099 * This functions performs a few sanity checks on the bad block description 1066 1100 * table. ··· 1078 1112 pattern_len = bd->len; 1079 1113 bits = bd->options & NAND_BBT_NRBITS_MSK; 1080 1114 1081 - BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && 1082 - !(this->options & NAND_USE_FLASH_BBT)); 1115 + BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) && 1116 + !(this->bbt_options & NAND_BBT_USE_FLASH)); 1083 1117 BUG_ON(!bits); 1084 1118 1085 1119 if (bd->options & NAND_BBT_VERSION) 1086 1120 pattern_len++; 1087 1121 1088 1122 if (bd->options & NAND_BBT_NO_OOB) { 1089 - BUG_ON(!(this->options & NAND_USE_FLASH_BBT)); 1090 - BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB)); 1123 + BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH)); 1124 + BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB)); 1091 1125 BUG_ON(bd->offs); 1092 1126 if (bd->options & NAND_BBT_VERSION) 1093 1127 BUG_ON(bd->veroffs != bd->len); ··· 1107 1141 1108 1142 /** 1109 1143 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) 1110 - * @mtd: MTD device structure 1111 - * @bd: descriptor for the good/bad block search pattern 1144 + * @mtd: MTD device structure 1145 + * @bd: descriptor for the good/bad block search pattern 1112 1146 * 1113 - * The function checks, if a bad block table(s) is/are already 1114 - * available. If not it scans the device for manufacturer 1115 - * marked good / bad blocks and writes the bad block table(s) to 1116 - * the selected place. 1147 + * The function checks, if a bad block table(s) is/are already available. 
If 1148 + * not it scans the device for manufacturer marked good / bad blocks and writes 1149 + * the bad block table(s) to the selected place. 1117 1150 * 1118 - * The bad block table memory is allocated here. It must be freed 1119 - * by calling the nand_free_bbt function. 1120 - * 1121 - */ 1151 + * The bad block table memory is allocated here. It must be freed by calling 1152 + * the nand_free_bbt function. 1153 + */ 1122 1154 int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 1123 1155 { 1124 1156 struct nand_chip *this = mtd->priv; ··· 1126 1162 struct nand_bbt_descr *md = this->bbt_md; 1127 1163 1128 1164 len = mtd->size >> (this->bbt_erase_shift + 2); 1129 - /* Allocate memory (2bit per block) and clear the memory bad block table */ 1165 + /* 1166 + * Allocate memory (2bit per block) and clear the memory bad block 1167 + * table. 1168 + */ 1130 1169 this->bbt = kzalloc(len, GFP_KERNEL); 1131 - if (!this->bbt) { 1132 - printk(KERN_ERR "nand_scan_bbt: Out of memory\n"); 1170 + if (!this->bbt) 1133 1171 return -ENOMEM; 1134 - } 1135 1172 1136 - /* If no primary table decriptor is given, scan the device 1137 - * to build a memory based bad block table 1173 + /* 1174 + * If no primary table decriptor is given, scan the device to build a 1175 + * memory based bad block table. 1138 1176 */ 1139 1177 if (!td) { 1140 1178 if ((res = nand_memory_bbt(mtd, bd))) { 1141 - printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n"); 1179 + pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n"); 1142 1180 kfree(this->bbt); 1143 1181 this->bbt = NULL; 1144 1182 } ··· 1154 1188 len += (len >> this->page_shift) * mtd->oobsize; 1155 1189 buf = vmalloc(len); 1156 1190 if (!buf) { 1157 - printk(KERN_ERR "nand_bbt: Out of memory\n"); 1158 1191 kfree(this->bbt); 1159 1192 this->bbt = NULL; 1160 1193 return -ENOMEM; 1161 1194 } 1162 1195 1163 - /* Is the bbt at a given page ? */ 1196 + /* Is the bbt at a given page? 
*/ 1164 1197 if (td->options & NAND_BBT_ABSPAGE) { 1165 1198 res = read_abs_bbts(mtd, buf, td, md); 1166 1199 } else { ··· 1181 1216 1182 1217 /** 1183 1218 * nand_update_bbt - [NAND Interface] update bad block table(s) 1184 - * @mtd: MTD device structure 1185 - * @offs: the offset of the newly marked block 1219 + * @mtd: MTD device structure 1220 + * @offs: the offset of the newly marked block 1186 1221 * 1187 - * The function updates the bad block table(s) 1188 - */ 1222 + * The function updates the bad block table(s). 1223 + */ 1189 1224 int nand_update_bbt(struct mtd_info *mtd, loff_t offs) 1190 1225 { 1191 1226 struct nand_chip *this = mtd->priv; 1192 - int len, res = 0, writeops = 0; 1227 + int len, res = 0; 1193 1228 int chip, chipsel; 1194 1229 uint8_t *buf; 1195 1230 struct nand_bbt_descr *td = this->bbt_td; ··· 1202 1237 len = (1 << this->bbt_erase_shift); 1203 1238 len += (len >> this->page_shift) * mtd->oobsize; 1204 1239 buf = kmalloc(len, GFP_KERNEL); 1205 - if (!buf) { 1206 - printk(KERN_ERR "nand_update_bbt: Out of memory\n"); 1240 + if (!buf) 1207 1241 return -ENOMEM; 1208 - } 1209 1242 1210 - writeops = md != NULL ? 0x03 : 0x01; 1211 - 1212 - /* Do we have a bbt per chip ? */ 1243 + /* Do we have a bbt per chip? */ 1213 1244 if (td->options & NAND_BBT_PERCHIP) { 1214 1245 chip = (int)(offs >> this->chip_shift); 1215 1246 chipsel = chip; ··· 1218 1257 if (md) 1219 1258 md->version[chip]++; 1220 1259 1221 - /* Write the bad block table to the device ? */ 1222 - if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 1260 + /* Write the bad block table to the device? */ 1261 + if (td->options & NAND_BBT_WRITE) { 1223 1262 res = write_bbt(mtd, buf, td, md, chipsel); 1224 1263 if (res < 0) 1225 1264 goto out; 1226 1265 } 1227 - /* Write the mirror bad block table to the device ? */ 1228 - if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 1266 + /* Write the mirror bad block table to the device? 
*/ 1267 + if (md && (md->options & NAND_BBT_WRITE)) { 1229 1268 res = write_bbt(mtd, buf, md, td, chipsel); 1230 1269 } 1231 1270 ··· 1234 1273 return res; 1235 1274 } 1236 1275 1237 - /* Define some generic bad / good block scan pattern which are used 1238 - * while scanning a device for factory marked good / bad blocks. */ 1276 + /* 1277 + * Define some generic bad / good block scan pattern which are used 1278 + * while scanning a device for factory marked good / bad blocks. 1279 + */ 1239 1280 static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1240 1281 1241 1282 static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; ··· 1249 1286 .pattern = scan_agand_pattern 1250 1287 }; 1251 1288 1252 - /* Generic flash bbt decriptors 1253 - */ 1289 + /* Generic flash bbt descriptors */ 1254 1290 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 1255 1291 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 1256 1292 ··· 1293 1331 .pattern = mirror_pattern 1294 1332 }; 1295 1333 1296 - #define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ 1297 - NAND_BBT_SCANBYTE1AND6) 1334 + #define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB) 1298 1335 /** 1299 - * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure 1300 - * @this: NAND chip to create descriptor for 1336 + * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure 1337 + * @this: NAND chip to create descriptor for 1301 1338 * 1302 1339 * This function allocates and initializes a nand_bbt_descr for BBM detection 1303 - * based on the properties of "this". The new descriptor is stored in 1340 + * based on the properties of @this. The new descriptor is stored in 1304 1341 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1305 1342 * passed to this function. 
1306 - * 1307 1343 */ 1308 - static int nand_create_default_bbt_descr(struct nand_chip *this) 1344 + static int nand_create_badblock_pattern(struct nand_chip *this) 1309 1345 { 1310 1346 struct nand_bbt_descr *bd; 1311 1347 if (this->badblock_pattern) { 1312 - printk(KERN_WARNING "BBT descr already allocated; not replacing.\n"); 1348 + pr_warn("Bad block pattern already allocated; not replacing\n"); 1313 1349 return -EINVAL; 1314 1350 } 1315 1351 bd = kzalloc(sizeof(*bd), GFP_KERNEL); 1316 - if (!bd) { 1317 - printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n"); 1352 + if (!bd) 1318 1353 return -ENOMEM; 1319 - } 1320 - bd->options = this->options & BBT_SCAN_OPTIONS; 1354 + bd->options = this->bbt_options & BADBLOCK_SCAN_MASK; 1321 1355 bd->offs = this->badblockpos; 1322 1356 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; 1323 1357 bd->pattern = scan_ff_pattern; ··· 1324 1366 1325 1367 /** 1326 1368 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device 1327 - * @mtd: MTD device structure 1369 + * @mtd: MTD device structure 1328 1370 * 1329 - * This function selects the default bad block table 1330 - * support for the device and calls the nand_scan_bbt function 1331 - * 1332 - */ 1371 + * This function selects the default bad block table support for the device and 1372 + * calls the nand_scan_bbt function. 1373 + */ 1333 1374 int nand_default_bbt(struct mtd_info *mtd) 1334 1375 { 1335 1376 struct nand_chip *this = mtd->priv; 1336 1377 1337 - /* Default for AG-AND. We must use a flash based 1338 - * bad block table as the devices have factory marked 1339 - * _good_ blocks. Erasing those blocks leads to loss 1340 - * of the good / bad information, so we _must_ store 1341 - * this information in a good / bad table during 1342 - * startup 1378 + /* 1379 + * Default for AG-AND. We must use a flash based bad block table as the 1380 + * devices have factory marked _good_ blocks. 
Erasing those blocks 1381 + * leads to loss of the good / bad information, so we _must_ store this 1382 + * information in a good / bad table during startup. 1343 1383 */ 1344 1384 if (this->options & NAND_IS_AND) { 1345 1385 /* Use the default pattern descriptors */ ··· 1345 1389 this->bbt_td = &bbt_main_descr; 1346 1390 this->bbt_md = &bbt_mirror_descr; 1347 1391 } 1348 - this->options |= NAND_USE_FLASH_BBT; 1392 + this->bbt_options |= NAND_BBT_USE_FLASH; 1349 1393 return nand_scan_bbt(mtd, &agand_flashbased); 1350 1394 } 1351 1395 1352 - /* Is a flash based bad block table requested ? */ 1353 - if (this->options & NAND_USE_FLASH_BBT) { 1396 + /* Is a flash based bad block table requested? */ 1397 + if (this->bbt_options & NAND_BBT_USE_FLASH) { 1354 1398 /* Use the default pattern descriptors */ 1355 1399 if (!this->bbt_td) { 1356 - if (this->options & NAND_USE_FLASH_BBT_NO_OOB) { 1400 + if (this->bbt_options & NAND_BBT_NO_OOB) { 1357 1401 this->bbt_td = &bbt_main_no_bbt_descr; 1358 1402 this->bbt_md = &bbt_mirror_no_bbt_descr; 1359 1403 } else { ··· 1367 1411 } 1368 1412 1369 1413 if (!this->badblock_pattern) 1370 - nand_create_default_bbt_descr(this); 1414 + nand_create_badblock_pattern(this); 1371 1415 1372 1416 return nand_scan_bbt(mtd, this->badblock_pattern); 1373 1417 } 1374 1418 1375 1419 /** 1376 1420 * nand_isbad_bbt - [NAND Interface] Check if a block is bad 1377 - * @mtd: MTD device structure 1378 - * @offs: offset in the device 1379 - * @allowbbt: allow access to bad block table region 1380 - * 1381 - */ 1421 + * @mtd: MTD device structure 1422 + * @offs: offset in the device 1423 + * @allowbbt: allow access to bad block table region 1424 + */ 1382 1425 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) 1383 1426 { 1384 1427 struct nand_chip *this = mtd->priv; ··· 1388 1433 block = (int)(offs >> (this->bbt_erase_shift - 1)); 1389 1434 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; 1390 1435 1391 - DEBUG(MTD_DEBUG_LEVEL2, 
"nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", 1392 - (unsigned int)offs, block >> 1, res); 1436 + pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: " 1437 + "(block %d) 0x%02x\n", 1438 + (unsigned int)offs, block >> 1, res); 1393 1439 1394 1440 switch ((int)res) { 1395 1441 case 0x00:
+2 -2
drivers/mtd/nand/nand_bch.c
··· 93 93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); 94 94 /* else error in ecc, no action needed */ 95 95 96 - DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", 97 - __func__, errloc[i]); 96 + pr_debug("%s: corrected bitflip %u\n", __func__, 97 + errloc[i]); 98 98 } 99 99 } else if (count < 0) { 100 100 printk(KERN_ERR "ecc unrecoverable error\n");
+5 -5
drivers/mtd/nand/nand_ecc.c
··· 110 110 111 111 /* 112 112 * addressbits is a lookup table to filter out the bits from the xor-ed 113 - * ecc data that identify the faulty location. 113 + * ECC data that identify the faulty location. 114 114 * this is only used for repairing parity 115 115 * see the comments in nand_correct_data for more details 116 116 */ ··· 153 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 154 154 * block 155 155 * @buf: input buffer with raw data 156 - * @eccsize: data bytes per ecc step (256 or 512) 156 + * @eccsize: data bytes per ECC step (256 or 512) 157 157 * @code: output buffer with ECC 158 158 */ 159 159 void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, ··· 348 348 rp17 = (par ^ rp16) & 0xff; 349 349 350 350 /* 351 - * Finally calculate the ecc bits. 351 + * Finally calculate the ECC bits. 352 352 * Again here it might seem that there are performance optimisations 353 353 * possible, but benchmarks showed that on the system this is developed 354 354 * the code below is the fastest ··· 436 436 * @buf: raw data read from the chip 437 437 * @read_ecc: ECC from the chip 438 438 * @calc_ecc: the ECC calculated from raw data 439 - * @eccsize: data bytes per ecc step (256 or 512) 439 + * @eccsize: data bytes per ECC step (256 or 512) 440 440 * 441 441 * Detect and correct a 1 bit error for eccsize byte block 442 442 */ ··· 505 505 } 506 506 /* count nr of bits; use table lookup, faster than calculating it */ 507 507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 508 - return 1; /* error in ecc data; no action needed */ 508 + return 1; /* error in ECC data; no action needed */ 509 509 510 510 printk(KERN_ERR "uncorrectable error : "); 511 511 return -1;
+2 -2
drivers/mtd/nand/nandsim.c
··· 2273 2273 2274 2274 switch (bbt) { 2275 2275 case 2: 2276 - chip->options |= NAND_USE_FLASH_BBT_NO_OOB; 2276 + chip->bbt_options |= NAND_BBT_NO_OOB; 2277 2277 case 1: 2278 - chip->options |= NAND_USE_FLASH_BBT; 2278 + chip->bbt_options |= NAND_BBT_USE_FLASH; 2279 2279 case 0: 2280 2280 break; 2281 2281 default:
+4 -18
drivers/mtd/nand/ndfc.c
··· 42 42 struct nand_chip chip; 43 43 int chip_select; 44 44 struct nand_hw_control ndfc_control; 45 - struct mtd_partition *parts; 46 45 }; 47 46 48 47 static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; ··· 158 159 static int ndfc_chip_init(struct ndfc_controller *ndfc, 159 160 struct device_node *node) 160 161 { 161 - #ifdef CONFIG_MTD_CMDLINE_PARTS 162 - static const char *part_types[] = { "cmdlinepart", NULL }; 163 - #else 164 - static const char *part_types[] = { NULL }; 165 - #endif 166 162 struct device_node *flash_np; 167 163 struct nand_chip *chip = &ndfc->chip; 164 + struct mtd_part_parser_data ppdata; 168 165 int ret; 169 166 170 167 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; ··· 188 193 if (!flash_np) 189 194 return -ENODEV; 190 195 196 + ppdata->of_node = flash_np; 191 197 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", 192 198 dev_name(&ndfc->ofdev->dev), flash_np->name); 193 199 if (!ndfc->mtd.name) { ··· 200 204 if (ret) 201 205 goto err; 202 206 203 - ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); 204 - if (ret < 0) 205 - goto err; 206 - 207 - if (ret == 0) { 208 - ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, 209 - &ndfc->parts); 210 - if (ret < 0) 211 - goto err; 212 - } 213 - 214 - ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret); 207 + ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0); 215 208 216 209 err: 217 210 of_node_put(flash_np); ··· 273 288 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 274 289 275 290 nand_release(&ndfc->mtd); 291 + kfree(ndfc->mtd.name); 276 292 277 293 return 0; 278 294 }
+1
drivers/mtd/nand/nomadik_nand.c
··· 187 187 pdata->exit(); 188 188 189 189 if (host) { 190 + nand_release(&host->mtd); 190 191 iounmap(host->cmd_va); 191 192 iounmap(host->data_va); 192 193 iounmap(host->addr_va);
+1
drivers/mtd/nand/nuc900_nand.c
··· 339 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 340 340 struct resource *res; 341 341 342 + nand_release(&nuc900_nand->mtd); 342 343 iounmap(nuc900_nand->reg); 343 344 344 345 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+7 -15
drivers/mtd/nand/omap2.c
··· 95 95 #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 96 96 #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 97 97 98 - static const char *part_probes[] = { "cmdlinepart", NULL }; 99 - 100 98 /* oob info generated runtime depending on ecc algorithm and layout selected */ 101 99 static struct nand_ecclayout omap_oobinfo; 102 100 /* Define some generic bad / good block scan pattern which are used ··· 113 115 struct nand_hw_control controller; 114 116 struct omap_nand_platform_data *pdata; 115 117 struct mtd_info mtd; 116 - struct mtd_partition *parts; 117 118 struct nand_chip nand; 118 119 struct platform_device *pdev; 119 120 ··· 742 745 743 746 case 1: 744 747 /* Uncorrectable error */ 745 - DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n"); 748 + pr_debug("ECC UNCORRECTED_ERROR 1\n"); 746 749 return -1; 747 750 748 751 case 11: 749 752 /* UN-Correctable error */ 750 - DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n"); 753 + pr_debug("ECC UNCORRECTED_ERROR B\n"); 751 754 return -1; 752 755 753 756 case 12: ··· 764 767 765 768 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; 766 769 767 - DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at " 768 - "offset: %d, bit: %d\n", find_byte, find_bit); 770 + pr_debug("Correcting single bit ECC error at offset: " 771 + "%d, bit: %d\n", find_byte, find_bit); 769 772 770 773 page_data[find_byte] ^= (1 << find_bit); 771 774 ··· 777 780 ecc_data2[2] == 0) 778 781 return 0; 779 782 } 780 - DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n"); 783 + pr_debug("UNCORRECTED_ERROR default\n"); 781 784 return -1; 782 785 } 783 786 } ··· 1101 1104 goto out_release_mem_region; 1102 1105 } 1103 1106 1104 - err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1105 - if (err > 0) 1106 - mtd_device_register(&info->mtd, info->parts, err); 1107 - else if (pdata->parts) 1108 - mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts); 1109 - else 1110 - mtd_device_register(&info->mtd, NULL, 0); 
1107 + mtd_device_parse_register(&info->mtd, NULL, 0, 1108 + pdata->parts, pdata->nr_parts); 1111 1109 1112 1110 platform_set_drvdata(pdev, &info->mtd); 1113 1111
+2 -14
drivers/mtd/nand/orion_nand.c
··· 21 21 #include <mach/hardware.h> 22 22 #include <plat/orion_nand.h> 23 23 24 - static const char *part_probes[] = { "cmdlinepart", NULL }; 25 - 26 24 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 27 25 { 28 26 struct nand_chip *nc = mtd->priv; ··· 79 81 struct resource *res; 80 82 void __iomem *io_base; 81 83 int ret = 0; 82 - struct mtd_partition *partitions = NULL; 83 - int num_part = 0; 84 84 85 85 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 86 86 if (!nc) { ··· 128 132 goto no_dev; 129 133 } 130 134 131 - #ifdef CONFIG_MTD_CMDLINE_PARTS 132 135 mtd->name = "orion_nand"; 133 - num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 134 - #endif 135 - /* If cmdline partitions have been passed, let them be used */ 136 - if (num_part <= 0) { 137 - num_part = board->nr_parts; 138 - partitions = board->parts; 139 - } 140 - 141 - ret = mtd_device_register(mtd, partitions, num_part); 136 + ret = mtd_device_parse_register(mtd, NULL, 0, 137 + board->parts, board->nr_parts); 142 138 if (ret) { 143 139 nand_release(mtd); 144 140 goto no_dev;
+2 -1
drivers/mtd/nand/pasemi_nand.c
··· 155 155 chip->ecc.mode = NAND_ECC_SOFT; 156 156 157 157 /* Enable the following for a flash based bad block table */ 158 - chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 158 + chip->options = NAND_NO_AUTOINCR; 159 + chip->bbt_options = NAND_BBT_USE_FLASH; 159 160 160 161 /* Scan to find existence of the device */ 161 162 if (nand_scan(pasemi_nand_mtd, 1)) {
+4 -21
drivers/mtd/nand/plat_nand.c
··· 21 21 struct nand_chip chip; 22 22 struct mtd_info mtd; 23 23 void __iomem *io_base; 24 - int nr_parts; 25 - struct mtd_partition *parts; 26 24 }; 27 25 28 26 /* ··· 77 79 data->chip.read_buf = pdata->ctrl.read_buf; 78 80 data->chip.chip_delay = pdata->chip.chip_delay; 79 81 data->chip.options |= pdata->chip.options; 82 + data->chip.bbt_options |= pdata->chip.bbt_options; 80 83 81 84 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 82 85 data->chip.ecc.layout = pdata->chip.ecclayout; ··· 98 99 goto out; 99 100 } 100 101 101 - if (pdata->chip.part_probe_types) { 102 - err = parse_mtd_partitions(&data->mtd, 103 - pdata->chip.part_probe_types, 104 - &data->parts, 0); 105 - if (err > 0) { 106 - mtd_device_register(&data->mtd, data->parts, err); 107 - return 0; 108 - } 109 - } 110 - if (pdata->chip.set_parts) 111 - pdata->chip.set_parts(data->mtd.size, &pdata->chip); 112 - if (pdata->chip.partitions) { 113 - data->parts = pdata->chip.partitions; 114 - err = mtd_device_register(&data->mtd, data->parts, 115 - pdata->chip.nr_partitions); 116 - } else 117 - err = mtd_device_register(&data->mtd, NULL, 0); 102 + err = mtd_device_parse_register(&data->mtd, 103 + pdata->chip.part_probe_types, 0, 104 + pdata->chip.partitions, pdata->chip.nr_partitions); 118 105 119 106 if (!err) 120 107 return err; ··· 130 145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 131 146 132 147 nand_release(&data->mtd); 133 - if (data->parts && data->parts != pdata->chip.partitions) 134 - kfree(data->parts); 135 148 if (pdata->ctrl.remove) 136 149 pdata->ctrl.remove(pdev); 137 150 iounmap(data->io_base);
+10 -37
drivers/mtd/nand/ppchameleonevb.c
··· 99 99 100 100 #define NUM_PARTITIONS 1 101 101 102 - extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id); 103 - 104 102 /* 105 103 * hardware specific access to control-lines 106 104 */ ··· 185 187 } 186 188 #endif 187 189 188 - const char *part_probes[] = { "cmdlinepart", NULL }; 189 - const char *part_probes_evb[] = { "cmdlinepart", NULL }; 190 - 191 190 /* 192 191 * Main initialization routine 193 192 */ 194 193 static int __init ppchameleonevb_init(void) 195 194 { 196 195 struct nand_chip *this; 197 - const char *part_type = 0; 198 - int mtd_parts_nb = 0; 199 - struct mtd_partition *mtd_parts = 0; 200 196 void __iomem *ppchameleon_fio_base; 201 197 void __iomem *ppchameleonevb_fio_base; 202 198 ··· 273 281 #endif 274 282 275 283 ppchameleon_mtd->name = "ppchameleon-nand"; 276 - mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0); 277 - if (mtd_parts_nb > 0) 278 - part_type = "command line"; 279 - else 280 - mtd_parts_nb = 0; 281 - 282 - if (mtd_parts_nb == 0) { 283 - if (ppchameleon_mtd->size == NAND_SMALL_SIZE) 284 - mtd_parts = partition_info_me; 285 - else 286 - mtd_parts = partition_info_hi; 287 - mtd_parts_nb = NUM_PARTITIONS; 288 - part_type = "static"; 289 - } 290 284 291 285 /* Register the partitions */ 292 - printk(KERN_NOTICE "Using %s partition definition\n", part_type); 293 - mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb); 286 + mtd_device_parse_register(ppchameleon_mtd, NULL, 0, 287 + ppchameleon_mtd->size == NAND_SMALL_SIZE ? 
288 + partition_info_me : 289 + partition_info_hi, 290 + NUM_PARTITIONS); 294 291 295 292 nand_evb_init: 296 293 /**************************** ··· 363 382 } 364 383 365 384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 366 - mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0); 367 - if (mtd_parts_nb > 0) 368 - part_type = "command line"; 369 - else 370 - mtd_parts_nb = 0; 371 - 372 - if (mtd_parts_nb == 0) { 373 - mtd_parts = partition_info_evb; 374 - mtd_parts_nb = NUM_PARTITIONS; 375 - part_type = "static"; 376 - } 377 385 378 386 /* Register the partitions */ 379 - printk(KERN_NOTICE "Using %s partition definition\n", part_type); 380 - mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); 387 + mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0, 388 + ppchameleon_mtd->size == NAND_SMALL_SIZE ? 389 + partition_info_me : 390 + partition_info_hi, 391 + NUM_PARTITIONS); 381 392 382 393 /* Return happy */ 383 394 return 0;
+291 -180
drivers/mtd/nand/pxa3xx_nand.c
··· 110 110 111 111 enum { 112 112 STATE_IDLE = 0, 113 + STATE_PREPARED, 113 114 STATE_CMD_HANDLE, 114 115 STATE_DMA_READING, 115 116 STATE_DMA_WRITING, ··· 121 120 STATE_READY, 122 121 }; 123 122 124 - struct pxa3xx_nand_info { 125 - struct nand_chip nand_chip; 123 + struct pxa3xx_nand_host { 124 + struct nand_chip chip; 125 + struct pxa3xx_nand_cmdset *cmdset; 126 + struct mtd_info *mtd; 127 + void *info_data; 126 128 129 + /* page size of attached chip */ 130 + unsigned int page_size; 131 + int use_ecc; 132 + int cs; 133 + 134 + /* calculated from pxa3xx_nand_flash data */ 135 + unsigned int col_addr_cycles; 136 + unsigned int row_addr_cycles; 137 + size_t read_id_bytes; 138 + 139 + /* cached register value */ 140 + uint32_t reg_ndcr; 141 + uint32_t ndtr0cs0; 142 + uint32_t ndtr1cs0; 143 + }; 144 + 145 + struct pxa3xx_nand_info { 127 146 struct nand_hw_control controller; 128 147 struct platform_device *pdev; 129 - struct pxa3xx_nand_cmdset *cmdset; 130 148 131 149 struct clk *clk; 132 150 void __iomem *mmio_base; 133 151 unsigned long mmio_phys; 152 + struct completion cmd_complete; 134 153 135 154 unsigned int buf_start; 136 155 unsigned int buf_count; 137 156 138 - struct mtd_info *mtd; 139 157 /* DMA information */ 140 158 int drcmr_dat; 141 159 int drcmr_cmd; ··· 162 142 unsigned char *data_buff; 163 143 unsigned char *oob_buff; 164 144 dma_addr_t data_buff_phys; 165 - size_t data_buff_size; 166 145 int data_dma_ch; 167 146 struct pxa_dma_desc *data_desc; 168 147 dma_addr_t data_desc_addr; 169 148 170 - uint32_t reg_ndcr; 171 - 172 - /* saved column/page_addr during CMD_SEQIN */ 173 - int seqin_column; 174 - int seqin_page_addr; 175 - 176 - /* relate to the command */ 149 + struct pxa3xx_nand_host *host[NUM_CHIP_SELECT]; 177 150 unsigned int state; 178 151 152 + int cs; 179 153 int use_ecc; /* use HW ECC ? */ 180 154 int use_dma; /* use DMA ? 
*/ 181 155 int is_ready; 182 156 183 157 unsigned int page_size; /* page size of attached chip */ 184 158 unsigned int data_size; /* data size in FIFO */ 159 + unsigned int oob_size; 185 160 int retcode; 186 - struct completion cmd_complete; 187 161 188 162 /* generated NDCBx register values */ 189 163 uint32_t ndcb0; 190 164 uint32_t ndcb1; 191 165 uint32_t ndcb2; 192 - 193 - /* timing calcuted from setting */ 194 - uint32_t ndtr0cs0; 195 - uint32_t ndtr1cs0; 196 - 197 - /* calculated from pxa3xx_nand_flash data */ 198 - size_t oob_size; 199 - size_t read_id_bytes; 200 - 201 - unsigned int col_addr_cycles; 202 - unsigned int row_addr_cycles; 203 166 }; 204 167 205 168 static int use_dma = 1; ··· 228 225 /* Define a default flash type setting serve as flash detecting only */ 229 226 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) 230 227 231 - const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; 228 + const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL}; 232 229 233 230 #define NDTR0_tCH(c) (min((c), 7) << 19) 234 231 #define NDTR0_tCS(c) (min((c), 7) << 16) ··· 244 241 /* convert nano-seconds to nand flash controller clock cycles */ 245 242 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 246 243 247 - static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 244 + static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, 248 245 const struct pxa3xx_nand_timing *t) 249 246 { 247 + struct pxa3xx_nand_info *info = host->info_data; 250 248 unsigned long nand_clk = clk_get_rate(info->clk); 251 249 uint32_t ndtr0, ndtr1; 252 250 ··· 262 258 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 263 259 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 264 260 265 - info->ndtr0cs0 = ndtr0; 266 - info->ndtr1cs0 = ndtr1; 261 + host->ndtr0cs0 = ndtr0; 262 + host->ndtr1cs0 = ndtr1; 267 263 nand_writel(info, NDTR0CS0, ndtr0); 268 264 nand_writel(info, NDTR1CS0, ndtr1); 269 265 } 270 266 271 267 static void pxa3xx_set_datasize(struct pxa3xx_nand_info 
*info) 272 268 { 273 - int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; 269 + struct pxa3xx_nand_host *host = info->host[info->cs]; 270 + int oob_enable = host->reg_ndcr & NDCR_SPARE_EN; 274 271 275 - info->data_size = info->page_size; 272 + info->data_size = host->page_size; 276 273 if (!oob_enable) { 277 274 info->oob_size = 0; 278 275 return; 279 276 } 280 277 281 - switch (info->page_size) { 278 + switch (host->page_size) { 282 279 case 2048: 283 280 info->oob_size = (info->use_ecc) ? 40 : 64; 284 281 break; ··· 297 292 */ 298 293 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) 299 294 { 295 + struct pxa3xx_nand_host *host = info->host[info->cs]; 300 296 uint32_t ndcr; 301 297 302 - ndcr = info->reg_ndcr; 298 + ndcr = host->reg_ndcr; 303 299 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; 304 300 ndcr |= info->use_dma ? NDCR_DMA_EN : 0; 305 301 ndcr |= NDCR_ND_RUN; ··· 365 359 DIV_ROUND_UP(info->oob_size, 4)); 366 360 break; 367 361 default: 368 - printk(KERN_ERR "%s: invalid state %d\n", __func__, 362 + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 369 363 info->state); 370 364 BUG(); 371 365 } ··· 391 385 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; 392 386 break; 393 387 default: 394 - printk(KERN_ERR "%s: invalid state %d\n", __func__, 388 + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 395 389 info->state); 396 390 BUG(); 397 391 } ··· 422 416 { 423 417 struct pxa3xx_nand_info *info = devid; 424 418 unsigned int status, is_completed = 0; 419 + unsigned int ready, cmd_done; 420 + 421 + if (info->cs == 0) { 422 + ready = NDSR_FLASH_RDY; 423 + cmd_done = NDSR_CS0_CMDD; 424 + } else { 425 + ready = NDSR_RDY; 426 + cmd_done = NDSR_CS1_CMDD; 427 + } 425 428 426 429 status = nand_readl(info, NDSR); 427 430 ··· 452 437 handle_data_pio(info); 453 438 } 454 439 } 455 - if (status & NDSR_CS0_CMDD) { 440 + if (status & cmd_done) { 456 441 info->state = STATE_CMD_DONE; 457 442 is_completed = 1; 458 443 } 459 - if (status & 
NDSR_FLASH_RDY) { 444 + if (status & ready) { 460 445 info->is_ready = 1; 461 446 info->state = STATE_READY; 462 447 } ··· 478 463 return IRQ_HANDLED; 479 464 } 480 465 481 - static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) 482 - { 483 - struct pxa3xx_nand_info *info = mtd->priv; 484 - return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0; 485 - } 486 - 487 466 static inline int is_buf_blank(uint8_t *buf, size_t len) 488 467 { 489 468 for (; len > 0; len--) ··· 490 481 uint16_t column, int page_addr) 491 482 { 492 483 uint16_t cmd; 493 - int addr_cycle, exec_cmd, ndcb0; 494 - struct mtd_info *mtd = info->mtd; 484 + int addr_cycle, exec_cmd; 485 + struct pxa3xx_nand_host *host; 486 + struct mtd_info *mtd; 495 487 496 - ndcb0 = 0; 488 + host = info->host[info->cs]; 489 + mtd = host->mtd; 497 490 addr_cycle = 0; 498 491 exec_cmd = 1; 499 492 ··· 506 495 info->use_ecc = 0; 507 496 info->is_ready = 0; 508 497 info->retcode = ERR_NONE; 498 + if (info->cs != 0) 499 + info->ndcb0 = NDCB0_CSEL; 500 + else 501 + info->ndcb0 = 0; 509 502 510 503 switch (command) { 511 504 case NAND_CMD_READ0: ··· 527 512 break; 528 513 } 529 514 530 - info->ndcb0 = ndcb0; 531 - addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles 532 - + info->col_addr_cycles); 515 + addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles 516 + + host->col_addr_cycles); 533 517 534 518 switch (command) { 535 519 case NAND_CMD_READOOB: 536 520 case NAND_CMD_READ0: 537 - cmd = info->cmdset->read1; 521 + cmd = host->cmdset->read1; 538 522 if (command == NAND_CMD_READOOB) 539 523 info->buf_start = mtd->writesize + column; 540 524 else 541 525 info->buf_start = column; 542 526 543 - if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) 527 + if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) 544 528 info->ndcb0 |= NDCB0_CMD_TYPE(0) 545 529 | addr_cycle 546 530 | (cmd & NDCB0_CMD1_MASK); ··· 551 537 552 538 case NAND_CMD_SEQIN: 553 539 /* small page addr setting */ 554 - if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { 540 + 
if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { 555 541 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) 556 542 | (column & 0xFF); 557 543 ··· 578 564 break; 579 565 } 580 566 581 - cmd = info->cmdset->program; 567 + cmd = host->cmdset->program; 582 568 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 583 569 | NDCB0_AUTO_RS 584 570 | NDCB0_ST_ROW_EN ··· 588 574 break; 589 575 590 576 case NAND_CMD_READID: 591 - cmd = info->cmdset->read_id; 592 - info->buf_count = info->read_id_bytes; 577 + cmd = host->cmdset->read_id; 578 + info->buf_count = host->read_id_bytes; 593 579 info->ndcb0 |= NDCB0_CMD_TYPE(3) 594 580 | NDCB0_ADDR_CYC(1) 595 581 | cmd; ··· 597 583 info->data_size = 8; 598 584 break; 599 585 case NAND_CMD_STATUS: 600 - cmd = info->cmdset->read_status; 586 + cmd = host->cmdset->read_status; 601 587 info->buf_count = 1; 602 588 info->ndcb0 |= NDCB0_CMD_TYPE(4) 603 589 | NDCB0_ADDR_CYC(1) ··· 607 593 break; 608 594 609 595 case NAND_CMD_ERASE1: 610 - cmd = info->cmdset->erase; 596 + cmd = host->cmdset->erase; 611 597 info->ndcb0 |= NDCB0_CMD_TYPE(2) 612 598 | NDCB0_AUTO_RS 613 599 | NDCB0_ADDR_CYC(3) ··· 618 604 619 605 break; 620 606 case NAND_CMD_RESET: 621 - cmd = info->cmdset->reset; 607 + cmd = host->cmdset->reset; 622 608 info->ndcb0 |= NDCB0_CMD_TYPE(5) 623 609 | cmd; 624 610 ··· 630 616 631 617 default: 632 618 exec_cmd = 0; 633 - printk(KERN_ERR "pxa3xx-nand: non-supported" 634 - " command %x\n", command); 619 + dev_err(&info->pdev->dev, "non-supported command %x\n", 620 + command); 635 621 break; 636 622 } 637 623 ··· 641 627 static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, 642 628 int column, int page_addr) 643 629 { 644 - struct pxa3xx_nand_info *info = mtd->priv; 630 + struct pxa3xx_nand_host *host = mtd->priv; 631 + struct pxa3xx_nand_info *info = host->info_data; 645 632 int ret, exec_cmd; 646 633 647 634 /* ··· 650 635 * "byte" address into a "word" address appropriate 651 636 * for indexing a word-oriented device 652 637 */ 653 - if 
(info->reg_ndcr & NDCR_DWIDTH_M) 638 + if (host->reg_ndcr & NDCR_DWIDTH_M) 654 639 column /= 2; 655 640 641 + /* 642 + * There may be different NAND chip hooked to 643 + * different chip select, so check whether 644 + * chip select has been changed, if yes, reset the timing 645 + */ 646 + if (info->cs != host->cs) { 647 + info->cs = host->cs; 648 + nand_writel(info, NDTR0CS0, host->ndtr0cs0); 649 + nand_writel(info, NDTR1CS0, host->ndtr1cs0); 650 + } 651 + 652 + info->state = STATE_PREPARED; 656 653 exec_cmd = prepare_command_pool(info, command, column, page_addr); 657 654 if (exec_cmd) { 658 655 init_completion(&info->cmd_complete); ··· 673 646 ret = wait_for_completion_timeout(&info->cmd_complete, 674 647 CHIP_DELAY_TIMEOUT); 675 648 if (!ret) { 676 - printk(KERN_ERR "Wait time out!!!\n"); 649 + dev_err(&info->pdev->dev, "Wait time out!!!\n"); 677 650 /* Stop State Machine for next command cycle */ 678 651 pxa3xx_nand_stop(info); 679 652 } 680 - info->state = STATE_IDLE; 681 653 } 654 + info->state = STATE_IDLE; 682 655 } 683 656 684 657 static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, ··· 691 664 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 692 665 struct nand_chip *chip, uint8_t *buf, int page) 693 666 { 694 - struct pxa3xx_nand_info *info = mtd->priv; 667 + struct pxa3xx_nand_host *host = mtd->priv; 668 + struct pxa3xx_nand_info *info = host->info_data; 695 669 696 670 chip->read_buf(mtd, buf, mtd->writesize); 697 671 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); ··· 713 685 * OOB, ignore such double bit errors 714 686 */ 715 687 if (is_buf_blank(buf, mtd->writesize)) 688 + info->retcode = ERR_NONE; 689 + else 716 690 mtd->ecc_stats.failed++; 717 691 } 718 692 ··· 723 693 724 694 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) 725 695 { 726 - struct pxa3xx_nand_info *info = mtd->priv; 696 + struct pxa3xx_nand_host *host = mtd->priv; 697 + struct pxa3xx_nand_info *info = host->info_data; 727 698 char retval = 0xFF; 
728 699 729 700 if (info->buf_start < info->buf_count) ··· 736 705 737 706 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) 738 707 { 739 - struct pxa3xx_nand_info *info = mtd->priv; 708 + struct pxa3xx_nand_host *host = mtd->priv; 709 + struct pxa3xx_nand_info *info = host->info_data; 740 710 u16 retval = 0xFFFF; 741 711 742 712 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { ··· 749 717 750 718 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 751 719 { 752 - struct pxa3xx_nand_info *info = mtd->priv; 720 + struct pxa3xx_nand_host *host = mtd->priv; 721 + struct pxa3xx_nand_info *info = host->info_data; 753 722 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 754 723 755 724 memcpy(buf, info->data_buff + info->buf_start, real_len); ··· 760 727 static void pxa3xx_nand_write_buf(struct mtd_info *mtd, 761 728 const uint8_t *buf, int len) 762 729 { 763 - struct pxa3xx_nand_info *info = mtd->priv; 730 + struct pxa3xx_nand_host *host = mtd->priv; 731 + struct pxa3xx_nand_info *info = host->info_data; 764 732 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 765 733 766 734 memcpy(info->data_buff + info->buf_start, buf, real_len); ··· 781 747 782 748 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) 783 749 { 784 - struct pxa3xx_nand_info *info = mtd->priv; 750 + struct pxa3xx_nand_host *host = mtd->priv; 751 + struct pxa3xx_nand_info *info = host->info_data; 785 752 786 753 /* pxa3xx_nand_send_command has waited for command complete */ 787 754 if (this->state == FL_WRITING || this->state == FL_ERASING) { ··· 805 770 { 806 771 struct platform_device *pdev = info->pdev; 807 772 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 773 + struct pxa3xx_nand_host *host = info->host[info->cs]; 808 774 uint32_t ndcr = 0x0; /* enable all interrupts */ 809 775 810 - if (f->page_size != 2048 && f->page_size != 512) 776 + if (f->page_size != 
2048 && f->page_size != 512) { 777 + dev_err(&pdev->dev, "Current only support 2048 and 512 size\n"); 811 778 return -EINVAL; 779 + } 812 780 813 - if (f->flash_width != 16 && f->flash_width != 8) 781 + if (f->flash_width != 16 && f->flash_width != 8) { 782 + dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n"); 814 783 return -EINVAL; 784 + } 815 785 816 786 /* calculate flash information */ 817 - info->cmdset = &default_cmdset; 818 - info->page_size = f->page_size; 819 - info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 787 + host->cmdset = &default_cmdset; 788 + host->page_size = f->page_size; 789 + host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 820 790 821 791 /* calculate addressing information */ 822 - info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 792 + host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 823 793 824 794 if (f->num_blocks * f->page_per_block > 65536) 825 - info->row_addr_cycles = 3; 795 + host->row_addr_cycles = 3; 826 796 else 827 - info->row_addr_cycles = 2; 797 + host->row_addr_cycles = 2; 828 798 829 799 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 830 - ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0; 800 + ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; 831 801 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 832 802 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 833 803 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 834 804 ndcr |= (f->dfc_width == 16) ? 
NDCR_DWIDTH_C : 0; 835 805 836 - ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes); 806 + ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); 837 807 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 838 808 839 - info->reg_ndcr = ndcr; 809 + host->reg_ndcr = ndcr; 840 810 841 - pxa3xx_nand_set_timing(info, f->timing); 811 + pxa3xx_nand_set_timing(host, f->timing); 842 812 return 0; 843 813 } 844 814 845 815 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 846 816 { 817 + /* 818 + * We set 0 by hard coding here, for we don't support keep_config 819 + * when there is more than one chip attached to the controller 820 + */ 821 + struct pxa3xx_nand_host *host = info->host[0]; 847 822 uint32_t ndcr = nand_readl(info, NDCR); 848 - info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; 849 - /* set info fields needed to read id */ 850 - info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; 851 - info->reg_ndcr = ndcr; 852 - info->cmdset = &default_cmdset; 853 823 854 - info->ndtr0cs0 = nand_readl(info, NDTR0CS0); 855 - info->ndtr1cs0 = nand_readl(info, NDTR1CS0); 824 + if (ndcr & NDCR_PAGE_SZ) { 825 + host->page_size = 2048; 826 + host->read_id_bytes = 4; 827 + } else { 828 + host->page_size = 512; 829 + host->read_id_bytes = 2; 830 + } 831 + 832 + host->reg_ndcr = ndcr & ~NDCR_INT_MASK; 833 + host->cmdset = &default_cmdset; 834 + 835 + host->ndtr0cs0 = nand_readl(info, NDTR0CS0); 836 + host->ndtr1cs0 = nand_readl(info, NDTR1CS0); 856 837 857 838 return 0; 858 839 } ··· 898 847 return -ENOMEM; 899 848 } 900 849 901 - info->data_buff_size = MAX_BUFF_SIZE; 902 850 info->data_desc = (void *)info->data_buff + data_desc_offset; 903 851 info->data_desc_addr = info->data_buff_phys + data_desc_offset; 904 852 ··· 905 855 pxa3xx_nand_data_dma_irq, info); 906 856 if (info->data_dma_ch < 0) { 907 857 dev_err(&pdev->dev, "failed to request data dma\n"); 908 - dma_free_coherent(&pdev->dev, info->data_buff_size, 858 + dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, 909 859 
info->data_buff, info->data_buff_phys); 910 860 return info->data_dma_ch; 911 861 } ··· 915 865 916 866 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) 917 867 { 918 - struct mtd_info *mtd = info->mtd; 919 - struct nand_chip *chip = mtd->priv; 920 - 868 + struct mtd_info *mtd; 869 + int ret; 870 + mtd = info->host[info->cs]->mtd; 921 871 /* use the common timing to make a try */ 922 - pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); 923 - chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 872 + ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); 873 + if (ret) 874 + return ret; 875 + 876 + pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 924 877 if (info->is_ready) 925 - return 1; 926 - else 927 878 return 0; 879 + 880 + return -ENODEV; 928 881 } 929 882 930 883 static int pxa3xx_nand_scan(struct mtd_info *mtd) 931 884 { 932 - struct pxa3xx_nand_info *info = mtd->priv; 885 + struct pxa3xx_nand_host *host = mtd->priv; 886 + struct pxa3xx_nand_info *info = host->info_data; 933 887 struct platform_device *pdev = info->pdev; 934 888 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 935 - struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; 889 + struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; 936 890 const struct pxa3xx_nand_flash *f = NULL; 937 891 struct nand_chip *chip = mtd->priv; 938 892 uint32_t id = -1; ··· 947 893 goto KEEP_CONFIG; 948 894 949 895 ret = pxa3xx_nand_sensing(info); 950 - if (!ret) { 951 - kfree(mtd); 952 - info->mtd = NULL; 953 - printk(KERN_INFO "There is no nand chip on cs 0!\n"); 896 + if (ret) { 897 + dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", 898 + info->cs); 954 899 955 - return -EINVAL; 900 + return ret; 956 901 } 957 902 958 903 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); 959 904 id = *((uint16_t *)(info->data_buff)); 960 905 if (id != 0) 961 - printk(KERN_INFO "Detect a flash id %x\n", id); 906 + dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); 962 907 
else { 963 - kfree(mtd); 964 - info->mtd = NULL; 965 - printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n"); 908 + dev_warn(&info->pdev->dev, 909 + "Read out ID 0, potential timing set wrong!!\n"); 966 910 967 911 return -EINVAL; 968 912 } ··· 978 926 } 979 927 980 928 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { 981 - kfree(mtd); 982 - info->mtd = NULL; 983 - printk(KERN_ERR "ERROR!! flash not defined!!!\n"); 929 + dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); 984 930 985 931 return -EINVAL; 986 932 } 987 933 988 - pxa3xx_nand_config_flash(info, f); 934 + ret = pxa3xx_nand_config_flash(info, f); 935 + if (ret) { 936 + dev_err(&info->pdev->dev, "ERROR! Configure failed\n"); 937 + return ret; 938 + } 939 + 989 940 pxa3xx_flash_ids[0].name = f->name; 990 941 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; 991 942 pxa3xx_flash_ids[0].pagesize = f->page_size; ··· 997 942 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; 998 943 if (f->flash_width == 16) 999 944 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; 945 + pxa3xx_flash_ids[1].name = NULL; 946 + def = pxa3xx_flash_ids; 1000 947 KEEP_CONFIG: 1001 - if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) 948 + chip->ecc.mode = NAND_ECC_HW; 949 + chip->ecc.size = host->page_size; 950 + 951 + chip->options = NAND_NO_AUTOINCR; 952 + chip->options |= NAND_NO_READRDY; 953 + if (host->reg_ndcr & NDCR_DWIDTH_M) 954 + chip->options |= NAND_BUSWIDTH_16; 955 + 956 + if (nand_scan_ident(mtd, 1, def)) 1002 957 return -ENODEV; 1003 958 /* calculate addressing information */ 1004 - info->col_addr_cycles = (mtd->writesize >= 2048) ? 
2 : 1; 959 + if (mtd->writesize >= 2048) 960 + host->col_addr_cycles = 2; 961 + else 962 + host->col_addr_cycles = 1; 963 + 1005 964 info->oob_buff = info->data_buff + mtd->writesize; 1006 965 if ((mtd->size >> chip->page_shift) > 65536) 1007 - info->row_addr_cycles = 3; 966 + host->row_addr_cycles = 3; 1008 967 else 1009 - info->row_addr_cycles = 2; 968 + host->row_addr_cycles = 2; 969 + 1010 970 mtd->name = mtd_names[0]; 1011 - chip->ecc.mode = NAND_ECC_HW; 1012 - chip->ecc.size = f->page_size; 1013 - 1014 - chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0; 1015 - chip->options |= NAND_NO_AUTOINCR; 1016 - chip->options |= NAND_NO_READRDY; 1017 - 1018 971 return nand_scan_tail(mtd); 1019 972 } 1020 973 1021 - static 1022 - struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) 974 + static int alloc_nand_resource(struct platform_device *pdev) 1023 975 { 976 + struct pxa3xx_nand_platform_data *pdata; 1024 977 struct pxa3xx_nand_info *info; 978 + struct pxa3xx_nand_host *host; 1025 979 struct nand_chip *chip; 1026 980 struct mtd_info *mtd; 1027 981 struct resource *r; 1028 - int ret, irq; 982 + int ret, irq, cs; 1029 983 1030 - mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), 1031 - GFP_KERNEL); 1032 - if (!mtd) { 984 + pdata = pdev->dev.platform_data; 985 + info = kzalloc(sizeof(*info) + (sizeof(*mtd) + 986 + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 987 + if (!info) { 1033 988 dev_err(&pdev->dev, "failed to allocate memory\n"); 1034 - return NULL; 989 + return -ENOMEM; 1035 990 } 1036 991 1037 - info = (struct pxa3xx_nand_info *)(&mtd[1]); 1038 - chip = (struct nand_chip *)(&mtd[1]); 1039 992 info->pdev = pdev; 1040 - info->mtd = mtd; 1041 - mtd->priv = info; 1042 - mtd->owner = THIS_MODULE; 993 + for (cs = 0; cs < pdata->num_cs; cs++) { 994 + mtd = (struct mtd_info *)((unsigned int)&info[1] + 995 + (sizeof(*mtd) + sizeof(*host)) * cs); 996 + chip = (struct nand_chip *)(&mtd[1]); 997 + host = (struct 
pxa3xx_nand_host *)chip; 998 + info->host[cs] = host; 999 + host->mtd = mtd; 1000 + host->cs = cs; 1001 + host->info_data = info; 1002 + mtd->priv = host; 1003 + mtd->owner = THIS_MODULE; 1043 1004 1044 - chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1045 - chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1046 - chip->controller = &info->controller; 1047 - chip->waitfunc = pxa3xx_nand_waitfunc; 1048 - chip->select_chip = pxa3xx_nand_select_chip; 1049 - chip->dev_ready = pxa3xx_nand_dev_ready; 1050 - chip->cmdfunc = pxa3xx_nand_cmdfunc; 1051 - chip->read_word = pxa3xx_nand_read_word; 1052 - chip->read_byte = pxa3xx_nand_read_byte; 1053 - chip->read_buf = pxa3xx_nand_read_buf; 1054 - chip->write_buf = pxa3xx_nand_write_buf; 1055 - chip->verify_buf = pxa3xx_nand_verify_buf; 1005 + chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1006 + chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1007 + chip->controller = &info->controller; 1008 + chip->waitfunc = pxa3xx_nand_waitfunc; 1009 + chip->select_chip = pxa3xx_nand_select_chip; 1010 + chip->cmdfunc = pxa3xx_nand_cmdfunc; 1011 + chip->read_word = pxa3xx_nand_read_word; 1012 + chip->read_byte = pxa3xx_nand_read_byte; 1013 + chip->read_buf = pxa3xx_nand_read_buf; 1014 + chip->write_buf = pxa3xx_nand_write_buf; 1015 + chip->verify_buf = pxa3xx_nand_verify_buf; 1016 + } 1056 1017 1057 1018 spin_lock_init(&chip->controller->lock); 1058 1019 init_waitqueue_head(&chip->controller->wq); ··· 1141 1070 1142 1071 platform_set_drvdata(pdev, info); 1143 1072 1144 - return info; 1073 + return 0; 1145 1074 1146 1075 fail_free_buf: 1147 1076 free_irq(irq, info); 1148 1077 if (use_dma) { 1149 1078 pxa_free_dma(info->data_dma_ch); 1150 - dma_free_coherent(&pdev->dev, info->data_buff_size, 1079 + dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, 1151 1080 info->data_buff, info->data_buff_phys); 1152 1081 } else 1153 1082 kfree(info->data_buff); ··· 1159 1088 clk_disable(info->clk); 1160 1089 clk_put(info->clk); 1161 1090 
fail_free_mtd: 1162 - kfree(mtd); 1163 - return NULL; 1091 + kfree(info); 1092 + return ret; 1164 1093 } 1165 1094 1166 1095 static int pxa3xx_nand_remove(struct platform_device *pdev) 1167 1096 { 1168 1097 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1169 - struct mtd_info *mtd = info->mtd; 1098 + struct pxa3xx_nand_platform_data *pdata; 1170 1099 struct resource *r; 1171 - int irq; 1100 + int irq, cs; 1172 1101 1102 + if (!info) 1103 + return 0; 1104 + 1105 + pdata = pdev->dev.platform_data; 1173 1106 platform_set_drvdata(pdev, NULL); 1174 1107 1175 1108 irq = platform_get_irq(pdev, 0); ··· 1181 1106 free_irq(irq, info); 1182 1107 if (use_dma) { 1183 1108 pxa_free_dma(info->data_dma_ch); 1184 - dma_free_writecombine(&pdev->dev, info->data_buff_size, 1109 + dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE, 1185 1110 info->data_buff, info->data_buff_phys); 1186 1111 } else 1187 1112 kfree(info->data_buff); ··· 1193 1118 clk_disable(info->clk); 1194 1119 clk_put(info->clk); 1195 1120 1196 - if (mtd) { 1197 - mtd_device_unregister(mtd); 1198 - kfree(mtd); 1199 - } 1121 + for (cs = 0; cs < pdata->num_cs; cs++) 1122 + nand_release(info->host[cs]->mtd); 1123 + kfree(info); 1200 1124 return 0; 1201 1125 } 1202 1126 ··· 1203 1129 { 1204 1130 struct pxa3xx_nand_platform_data *pdata; 1205 1131 struct pxa3xx_nand_info *info; 1132 + int ret, cs, probe_success; 1206 1133 1207 1134 pdata = pdev->dev.platform_data; 1208 1135 if (!pdata) { ··· 1211 1136 return -ENODEV; 1212 1137 } 1213 1138 1214 - info = alloc_nand_resource(pdev); 1215 - if (info == NULL) 1216 - return -ENOMEM; 1139 + ret = alloc_nand_resource(pdev); 1140 + if (ret) { 1141 + dev_err(&pdev->dev, "alloc nand resource failed\n"); 1142 + return ret; 1143 + } 1217 1144 1218 - if (pxa3xx_nand_scan(info->mtd)) { 1219 - dev_err(&pdev->dev, "failed to scan nand\n"); 1145 + info = platform_get_drvdata(pdev); 1146 + probe_success = 0; 1147 + for (cs = 0; cs < pdata->num_cs; cs++) { 1148 + info->cs = cs; 1149 + 
ret = pxa3xx_nand_scan(info->host[cs]->mtd); 1150 + if (ret) { 1151 + dev_warn(&pdev->dev, "failed to scan nand at cs %d\n", 1152 + cs); 1153 + continue; 1154 + } 1155 + 1156 + ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0, 1157 + pdata->parts[cs], pdata->nr_parts[cs]); 1158 + if (!ret) 1159 + probe_success = 1; 1160 + } 1161 + 1162 + if (!probe_success) { 1220 1163 pxa3xx_nand_remove(pdev); 1221 1164 return -ENODEV; 1222 1165 } 1223 1166 1224 - if (mtd_has_cmdlinepart()) { 1225 - const char *probes[] = { "cmdlinepart", NULL }; 1226 - struct mtd_partition *parts; 1227 - int nr_parts; 1228 - 1229 - nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); 1230 - 1231 - if (nr_parts) 1232 - return mtd_device_register(info->mtd, parts, nr_parts); 1233 - } 1234 - 1235 - return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); 1167 + return 0; 1236 1168 } 1237 1169 1238 1170 #ifdef CONFIG_PM 1239 1171 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) 1240 1172 { 1241 1173 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1242 - struct mtd_info *mtd = info->mtd; 1174 + struct pxa3xx_nand_platform_data *pdata; 1175 + struct mtd_info *mtd; 1176 + int cs; 1243 1177 1178 + pdata = pdev->dev.platform_data; 1244 1179 if (info->state) { 1245 1180 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); 1246 1181 return -EAGAIN; 1182 + } 1183 + 1184 + for (cs = 0; cs < pdata->num_cs; cs++) { 1185 + mtd = info->host[cs]->mtd; 1186 + mtd->suspend(mtd); 1247 1187 } 1248 1188 1249 1189 return 0; ··· 1267 1177 static int pxa3xx_nand_resume(struct platform_device *pdev) 1268 1178 { 1269 1179 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1270 - struct mtd_info *mtd = info->mtd; 1180 + struct pxa3xx_nand_platform_data *pdata; 1181 + struct mtd_info *mtd; 1182 + int cs; 1271 1183 1272 - nand_writel(info, NDTR0CS0, info->ndtr0cs0); 1273 - nand_writel(info, NDTR1CS0, info->ndtr1cs0); 1274 - 
clk_enable(info->clk); 1184 + pdata = pdev->dev.platform_data; 1185 + /* We don't want to handle interrupt without calling mtd routine */ 1186 + disable_int(info, NDCR_INT_MASK); 1187 + 1188 + /* 1189 + * Directly set the chip select to a invalid value, 1190 + * then the driver would reset the timing according 1191 + * to current chip select at the beginning of cmdfunc 1192 + */ 1193 + info->cs = 0xff; 1194 + 1195 + /* 1196 + * As the spec says, the NDSR would be updated to 0x1800 when 1197 + * doing the nand_clk disable/enable. 1198 + * To prevent it damaging state machine of the driver, clear 1199 + * all status before resume 1200 + */ 1201 + nand_writel(info, NDSR, NDSR_MASK); 1202 + for (cs = 0; cs < pdata->num_cs; cs++) { 1203 + mtd = info->host[cs]->mtd; 1204 + mtd->resume(mtd); 1205 + } 1275 1206 1276 1207 return 0; 1277 1208 }
+3 -3
drivers/mtd/nand/r852.c
··· 1027 1027 } 1028 1028 1029 1029 #ifdef CONFIG_PM 1030 - int r852_suspend(struct device *device) 1030 + static int r852_suspend(struct device *device) 1031 1031 { 1032 1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1033 1033 ··· 1048 1048 return 0; 1049 1049 } 1050 1050 1051 - int r852_resume(struct device *device) 1051 + static int r852_resume(struct device *device) 1052 1052 { 1053 1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1054 1054 ··· 1092 1092 1093 1093 MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1094 1094 1095 - SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1095 + static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1096 1096 1097 1097 static struct pci_driver r852_pci_driver = { 1098 1098 .name = DRV_NAME,
+2 -3
drivers/mtd/nand/rtc_from4.c
··· 351 351 return 0; 352 352 } 353 353 354 - /* Read the syndrom pattern from the FPGA and correct the bitorder */ 354 + /* Read the syndrome pattern from the FPGA and correct the bitorder */ 355 355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); 356 356 for (i = 0; i < 8; i++) { 357 357 ecc[i] = bitrev8(*rs_ecc); ··· 380 380 /* Let the library code do its magic. */ 381 381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); 382 382 if (res > 0) { 383 - DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); 383 + pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); 384 384 } 385 385 return res; 386 386 } ··· 444 444 len = mtd->writesize; 445 445 buf = kmalloc(len, GFP_KERNEL); 446 446 if (!buf) { 447 - printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n"); 448 447 er_stat = 1; 449 448 goto out; 450 449 }
+9 -18
drivers/mtd/nand/s3c2410.c
··· 723 723 724 724 /* free the common resources */ 725 725 726 - if (info->clk != NULL && !IS_ERR(info->clk)) { 726 + if (!IS_ERR(info->clk)) { 727 727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 728 728 clk_put(info->clk); 729 729 } ··· 744 744 return 0; 745 745 } 746 746 747 - const char *part_probes[] = { "cmdlinepart", NULL }; 748 747 static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 749 748 struct s3c2410_nand_mtd *mtd, 750 749 struct s3c2410_nand_set *set) 751 750 { 752 - struct mtd_partition *part_info; 753 - int nr_part = 0; 751 + if (set) 752 + mtd->mtd.name = set->name; 754 753 755 - if (set == NULL) 756 - return mtd_device_register(&mtd->mtd, NULL, 0); 757 - 758 - mtd->mtd.name = set->name; 759 - nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0); 760 - 761 - if (nr_part <= 0 && set->nr_partitions > 0) { 762 - nr_part = set->nr_partitions; 763 - part_info = set->partitions; 764 - } 765 - 766 - return mtd_device_register(&mtd->mtd, part_info, nr_part); 754 + return mtd_device_parse_register(&mtd->mtd, NULL, 0, 755 + set->partitions, set->nr_partitions); 767 756 } 768 757 769 758 /** ··· 869 880 /* If you use u-boot BBT creation code, specifying this flag will 870 881 * let the kernel fish out the BBT from the NAND, and also skip the 871 882 * full NAND scan that can take 1/2s or so. Little things... */ 872 - if (set->flash_bbt) 873 - chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 883 + if (set->flash_bbt) { 884 + chip->bbt_options |= NAND_BBT_USE_FLASH; 885 + chip->options |= NAND_SKIP_BBTSCAN; 886 + } 874 887 } 875 888 876 889 /**
+2 -11
drivers/mtd/nand/sharpsl.c
··· 103 103 return readb(sharpsl->io + ECCCNTR) != 0; 104 104 } 105 105 106 - static const char *part_probes[] = { "cmdlinepart", NULL }; 107 - 108 106 /* 109 107 * Main initialization routine 110 108 */ 111 109 static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 112 110 { 113 111 struct nand_chip *this; 114 - struct mtd_partition *sharpsl_partition_info; 115 - int nr_partitions; 116 112 struct resource *r; 117 113 int err = 0; 118 114 struct sharpsl_nand *sharpsl; ··· 180 184 181 185 /* Register the partitions */ 182 186 sharpsl->mtd.name = "sharpsl-nand"; 183 - nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0); 184 - if (nr_partitions <= 0) { 185 - nr_partitions = data->nr_partitions; 186 - sharpsl_partition_info = data->partitions; 187 - } 188 187 189 - err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info, 190 - nr_partitions); 188 + err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0, 189 + data->partitions, data->nr_partitions); 191 190 if (err) 192 191 goto err_add; 193 192
+1 -1
drivers/mtd/nand/sm_common.c
··· 48 48 49 49 /* As long as this function is called on erase block boundaries 50 50 it will work correctly for 256 byte nand */ 51 - ops.mode = MTD_OOB_PLACE; 51 + ops.mode = MTD_OPS_PLACE_OOB; 52 52 ops.ooboffs = 0; 53 53 ops.ooblen = mtd->oobsize; 54 54 ops.oobbuf = (void *)&oob;
+3 -25
drivers/mtd/nand/socrates_nand.c
··· 155 155 return 1; 156 156 } 157 157 158 - static const char *part_probes[] = { "cmdlinepart", NULL }; 159 - 160 158 /* 161 159 * Probe for the NAND device. 162 160 */ ··· 164 166 struct mtd_info *mtd; 165 167 struct nand_chip *nand_chip; 166 168 int res; 167 - struct mtd_partition *partitions = NULL; 168 - int num_partitions = 0; 169 + struct mtd_part_parser_data ppdata; 169 170 170 171 /* Allocate memory for the device structure (and zero it) */ 171 172 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); ··· 190 193 mtd->name = "socrates_nand"; 191 194 mtd->owner = THIS_MODULE; 192 195 mtd->dev.parent = &ofdev->dev; 196 + ppdata.of_node = ofdev->dev.of_node; 193 197 194 198 /*should never be accessed directly */ 195 199 nand_chip->IO_ADDR_R = (void *)0xdeadbeef; ··· 223 225 goto out; 224 226 } 225 227 226 - #ifdef CONFIG_MTD_CMDLINE_PARTS 227 - num_partitions = parse_mtd_partitions(mtd, part_probes, 228 - &partitions, 0); 229 - if (num_partitions < 0) { 230 - res = num_partitions; 231 - goto release; 232 - } 233 - #endif 234 - 235 - if (num_partitions == 0) { 236 - num_partitions = of_mtd_parse_partitions(&ofdev->dev, 237 - ofdev->dev.of_node, 238 - &partitions); 239 - if (num_partitions < 0) { 240 - res = num_partitions; 241 - goto release; 242 - } 243 - } 244 - 245 - res = mtd_device_register(mtd, partitions, num_partitions); 228 + res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 246 229 if (!res) 247 230 return res; 248 231 249 - release: 250 232 nand_release(mtd); 251 233 252 234 out:
+3 -14
drivers/mtd/nand/tmio_nand.c
··· 121 121 122 122 #define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) 123 123 124 - #ifdef CONFIG_MTD_CMDLINE_PARTS 125 - static const char *part_probes[] = { "cmdlinepart", NULL }; 126 - #endif 127 124 128 125 /*--------------------------------------------------------------------------*/ 129 126 ··· 378 381 struct tmio_nand *tmio; 379 382 struct mtd_info *mtd; 380 383 struct nand_chip *nand_chip; 381 - struct mtd_partition *parts; 382 - int nbparts = 0; 383 384 int retval; 384 385 385 386 if (data == NULL) ··· 456 461 goto err_scan; 457 462 } 458 463 /* Register the partitions */ 459 - #ifdef CONFIG_MTD_CMDLINE_PARTS 460 - nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 461 - #endif 462 - if (nbparts <= 0 && data) { 463 - parts = data->partition; 464 - nbparts = data->num_partitions; 465 - } 466 - 467 - retval = mtd_device_register(mtd, parts, nbparts); 464 + retval = mtd_device_parse_register(mtd, NULL, 0, 465 + data ? data->partition : NULL, 466 + data ? data->num_partitions : 0); 468 467 if (!retval) 469 468 return retval; 470 469
+1 -7
drivers/mtd/nand/txx9ndfmc.c
··· 74 74 unsigned char hold; /* in gbusclock */ 75 75 unsigned char spw; /* in gbusclock */ 76 76 struct nand_hw_control hw_control; 77 - struct mtd_partition *parts[MAX_TXX9NDFMC_DEV]; 78 77 }; 79 78 80 79 static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) ··· 286 287 static int __init txx9ndfmc_probe(struct platform_device *dev) 287 288 { 288 289 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; 289 - static const char *probes[] = { "cmdlinepart", NULL }; 290 290 int hold, spw; 291 291 int i; 292 292 struct txx9ndfmc_drvdata *drvdata; ··· 331 333 struct txx9ndfmc_priv *txx9_priv; 332 334 struct nand_chip *chip; 333 335 struct mtd_info *mtd; 334 - int nr_parts; 335 336 336 337 if (!(plat->ch_mask & (1 << i))) 337 338 continue; ··· 390 393 } 391 394 mtd->name = txx9_priv->mtdname; 392 395 393 - nr_parts = parse_mtd_partitions(mtd, probes, 394 - &drvdata->parts[i], 0); 395 - mtd_device_register(mtd, drvdata->parts[i], nr_parts); 396 + mtd_device_parse_register(mtd, NULL, 0, NULL, 0); 396 397 drvdata->mtds[i] = mtd; 397 398 } 398 399 ··· 416 421 txx9_priv = chip->priv; 417 422 418 423 nand_release(mtd); 419 - kfree(drvdata->parts[i]); 420 424 kfree(txx9_priv->mtdname); 421 425 kfree(txx9_priv); 422 426 }
+17 -20
drivers/mtd/nftlcore.c
··· 63 63 return; 64 64 } 65 65 66 - DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name); 66 + pr_debug("NFTL: add_mtd for %s\n", mtd->name); 67 67 68 68 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); 69 69 70 - if (!nftl) { 71 - printk(KERN_WARNING "NFTL: out of memory for data structures\n"); 70 + if (!nftl) 72 71 return; 73 - } 74 72 75 73 nftl->mbd.mtd = mtd; 76 74 nftl->mbd.devnum = -1; ··· 130 132 { 131 133 struct NFTLrecord *nftl = (void *)dev; 132 134 133 - DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum); 135 + pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum); 134 136 135 137 del_mtd_blktrans_dev(dev); 136 138 kfree(nftl->ReplUnitTable); ··· 147 149 struct mtd_oob_ops ops; 148 150 int res; 149 151 150 - ops.mode = MTD_OOB_PLACE; 152 + ops.mode = MTD_OPS_PLACE_OOB; 151 153 ops.ooboffs = offs & mask; 152 154 ops.ooblen = len; 153 155 ops.oobbuf = buf; ··· 168 170 struct mtd_oob_ops ops; 169 171 int res; 170 172 171 - ops.mode = MTD_OOB_PLACE; 173 + ops.mode = MTD_OPS_PLACE_OOB; 172 174 ops.ooboffs = offs & mask; 173 175 ops.ooblen = len; 174 176 ops.oobbuf = buf; ··· 191 193 struct mtd_oob_ops ops; 192 194 int res; 193 195 194 - ops.mode = MTD_OOB_PLACE; 196 + ops.mode = MTD_OPS_PLACE_OOB; 195 197 ops.ooboffs = offs & mask; 196 198 ops.ooblen = mtd->oobsize; 197 199 ops.oobbuf = oob; ··· 218 220 219 221 /* Normally, we force a fold to happen before we run out of free blocks completely */ 220 222 if (!desperate && nftl->numfreeEUNs < 2) { 221 - DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); 223 + pr_debug("NFTL_findfreeblock: there are too few free EUNs\n"); 222 224 return BLOCK_NIL; 223 225 } 224 226 ··· 289 291 if (block == 2) { 290 292 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; 291 293 if (foldmark == FOLD_MARK_IN_PROGRESS) { 292 - DEBUG(MTD_DEBUG_LEVEL1, 293 - "Write Inhibited on EUN %d\n", thisEUN); 294 + pr_debug("Write Inhibited on EUN %d\n", thisEUN); 294 295 inplace = 0; 295 296 
} else { 296 297 /* There's no other reason not to do inplace, ··· 354 357 if (BlockLastState[block] != SECTOR_FREE && 355 358 BlockMap[block] != BLOCK_NIL && 356 359 BlockMap[block] != targetEUN) { 357 - DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, " 360 + pr_debug("Setting inplace to 0. VUC %d, " 358 361 "block %d was %x lastEUN, " 359 362 "and is in EUN %d (%s) %d\n", 360 363 thisVUC, block, BlockLastState[block], ··· 370 373 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && 371 374 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != 372 375 SECTOR_FREE) { 373 - DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. " 376 + pr_debug("Pending write not free in EUN %d. " 374 377 "Folding out of place.\n", targetEUN); 375 378 inplace = 0; 376 379 } 377 380 } 378 381 379 382 if (!inplace) { 380 - DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. " 383 + pr_debug("Cannot fold Virtual Unit Chain %d in place. " 381 384 "Trying out-of-place\n", thisVUC); 382 385 /* We need to find a targetEUN to fold into. */ 383 386 targetEUN = NFTL_findfreeblock(nftl, 1); ··· 407 410 and the Erase Unit into which we are supposed to be copying. 408 411 Go for it. 409 412 */ 410 - DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN); 413 + pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN); 411 414 for (block = 0; block < nftl->EraseSize / 512 ; block++) { 412 415 unsigned char movebuf[512]; 413 416 int ret; ··· 425 428 426 429 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 427 430 512, &retlen, movebuf); 428 - if (ret < 0 && ret != -EUCLEAN) { 431 + if (ret < 0 && !mtd_is_bitflip(ret)) { 429 432 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) 430 433 + (block * 512), 512, &retlen, 431 434 movebuf); ··· 454 457 has duplicate chains, we need to free one of the chains because it's not necessary any more. 
455 458 */ 456 459 thisEUN = nftl->EUNtable[thisVUC]; 457 - DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n"); 460 + pr_debug("Want to erase\n"); 458 461 459 462 /* For each block in the old chain (except the targetEUN of course), 460 463 free it and make it available for future use */ ··· 567 570 (writeEUN * nftl->EraseSize) + blockofs, 568 571 8, &retlen, (char *)&bci); 569 572 570 - DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n", 573 + pr_debug("Status of block %d in EUN %d is %x\n", 571 574 block , writeEUN, le16_to_cpu(bci.Status)); 572 575 573 576 status = bci.Status | bci.Status1; ··· 620 623 but they are reserved for when we're 621 624 desperate. Well, now we're desperate. 622 625 */ 623 - DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); 626 + pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); 624 627 writeEUN = NFTL_findfreeblock(nftl, 1); 625 628 } 626 629 if (writeEUN == BLOCK_NIL) { ··· 773 776 size_t retlen; 774 777 int res = mtd->read(mtd, ptr, 512, &retlen, buffer); 775 778 776 - if (res < 0 && res != -EUCLEAN) 779 + if (res < 0 && !mtd_is_bitflip(res)) 777 780 return -EIO; 778 781 } 779 782 return 0;
+13 -13
drivers/mtd/nftlmount.c
··· 32 32 33 33 /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 34 34 * various device information of the NFTL partition and Bad Unit Table. Update 35 - * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[] 35 + * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] 36 36 * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c 37 37 */ 38 38 static int find_boot_record(struct NFTLrecord *nftl) ··· 297 297 * 298 298 * Return: 0 when succeed, -1 on error. 299 299 * 300 - * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? 300 + * ToDo: 1. Is it necessary to check_free_sector after erasing ?? 301 301 */ 302 302 int NFTL_formatblock(struct NFTLrecord *nftl, int block) 303 303 { ··· 337 337 nb_erases = le32_to_cpu(uci.WearInfo); 338 338 nb_erases++; 339 339 340 - /* wrap (almost impossible with current flashs) or free block */ 340 + /* wrap (almost impossible with current flash) or free block */ 341 341 if (nb_erases == 0) 342 342 nb_erases = 1; 343 343 ··· 363 363 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain 364 364 * was being folded when NFTL was interrupted. 365 365 * 366 - * The check_free_sectors in this function is neceressary. There is a possible 366 + * The check_free_sectors in this function is necessary. There is a possible 367 367 * situation that after writing the Data area, the Block Control Information is 368 368 * not updated according (due to power failure or something) which leaves the block 369 - * in an umconsistent state. So we have to check if a block is really FREE in this 369 + * in an inconsistent state. So we have to check if a block is really FREE in this 370 370 * case. 
*/ 371 371 static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) 372 372 { ··· 428 428 429 429 for (;;) { 430 430 length++; 431 - /* avoid infinite loops, although this is guaranted not to 431 + /* avoid infinite loops, although this is guaranteed not to 432 432 happen because of the previous checks */ 433 433 if (length >= nftl->nb_blocks) { 434 434 printk("nftl: length too long %d !\n", length); ··· 447 447 /* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a 448 448 * Virtual Unit Chain, i.e. all the units are disconnected. 449 449 * 450 - * It is not stricly correct to begin from the first block of the chain because 450 + * It is not strictly correct to begin from the first block of the chain because 451 451 * if we stop the code, we may see again a valid chain if there was a first_block 452 452 * flag in a block inside it. But is it really a problem ? 453 453 * 454 - * FixMe: Figure out what the last statesment means. What if power failure when we are 454 + * FixMe: Figure out what the last statement means. What if power failure when we are 455 455 * in the for (;;) loop formatting blocks ?? 456 456 */ 457 457 static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) ··· 485 485 * totally free (only 0xff). 486 486 * 487 487 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the 488 - * following critia: 488 + * following criteria: 489 489 * 1. */ 490 490 static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) 491 491 { ··· 502 502 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); 503 503 if (erase_mark != ERASE_MARK) { 504 504 /* if no erase mark, the block must be totally free. 
This is 505 - possible in two cases : empty filsystem or interrupted erase (very unlikely) */ 505 + possible in two cases : empty filesystem or interrupted erase (very unlikely) */ 506 506 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0) 507 507 return -1; 508 508 ··· 544 544 /* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS 545 545 * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2 546 546 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted 547 - * for some reason. A clean up/check of the VUC is neceressary in this case. 547 + * for some reason. A clean up/check of the VUC is necessary in this case. 548 548 * 549 549 * WARNING: return 0 if read error 550 550 */ ··· 657 657 printk("Block %d: incorrect logical block: %d expected: %d\n", 658 658 block, logical_block, first_logical_block); 659 659 /* the chain is incorrect : we must format it, 660 - but we need to read it completly */ 660 + but we need to read it completely */ 661 661 do_format_chain = 1; 662 662 } 663 663 if (is_first_block) { ··· 669 669 printk("Block %d: incorrectly marked as first block in chain\n", 670 670 block); 671 671 /* the chain is incorrect : we must format it, 672 - but we need to read it completly */ 672 + but we need to read it completely */ 673 673 do_format_chain = 1; 674 674 } else { 675 675 printk("Block %d: folding in progress - ignoring first block flag\n",
+107 -5
drivers/mtd/ofpart.c
··· 20 20 #include <linux/slab.h> 21 21 #include <linux/mtd/partitions.h> 22 22 23 - int __devinit of_mtd_parse_partitions(struct device *dev, 24 - struct device_node *node, 25 - struct mtd_partition **pparts) 23 + static int parse_ofpart_partitions(struct mtd_info *master, 24 + struct mtd_partition **pparts, 25 + struct mtd_part_parser_data *data) 26 26 { 27 + struct device_node *node; 27 28 const char *partname; 28 29 struct device_node *pp; 29 30 int nr_parts, i; 31 + 32 + 33 + if (!data) 34 + return 0; 35 + 36 + node = data->of_node; 37 + if (!node) 38 + return 0; 30 39 31 40 /* First count the subnodes */ 32 41 pp = NULL; ··· 78 69 79 70 if (!i) { 80 71 of_node_put(pp); 81 - dev_err(dev, "No valid partition found on %s\n", node->full_name); 72 + pr_err("No valid partition found on %s\n", node->full_name); 82 73 kfree(*pparts); 83 74 *pparts = NULL; 84 75 return -EINVAL; ··· 86 77 87 78 return nr_parts; 88 79 } 89 - EXPORT_SYMBOL(of_mtd_parse_partitions); 80 + 81 + static struct mtd_part_parser ofpart_parser = { 82 + .owner = THIS_MODULE, 83 + .parse_fn = parse_ofpart_partitions, 84 + .name = "ofpart", 85 + }; 86 + 87 + static int parse_ofoldpart_partitions(struct mtd_info *master, 88 + struct mtd_partition **pparts, 89 + struct mtd_part_parser_data *data) 90 + { 91 + struct device_node *dp; 92 + int i, plen, nr_parts; 93 + const struct { 94 + __be32 offset, len; 95 + } *part; 96 + const char *names; 97 + 98 + if (!data) 99 + return 0; 100 + 101 + dp = data->of_node; 102 + if (!dp) 103 + return 0; 104 + 105 + part = of_get_property(dp, "partitions", &plen); 106 + if (!part) 107 + return 0; /* No partitions found */ 108 + 109 + pr_warning("Device tree uses obsolete partition map binding: %s\n", 110 + dp->full_name); 111 + 112 + nr_parts = plen / sizeof(part[0]); 113 + 114 + *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); 115 + if (!pparts) 116 + return -ENOMEM; 117 + 118 + names = of_get_property(dp, "partition-names", &plen); 119 + 120 + for (i = 
0; i < nr_parts; i++) { 121 + (*pparts)[i].offset = be32_to_cpu(part->offset); 122 + (*pparts)[i].size = be32_to_cpu(part->len) & ~1; 123 + /* bit 0 set signifies read only partition */ 124 + if (be32_to_cpu(part->len) & 1) 125 + (*pparts)[i].mask_flags = MTD_WRITEABLE; 126 + 127 + if (names && (plen > 0)) { 128 + int len = strlen(names) + 1; 129 + 130 + (*pparts)[i].name = (char *)names; 131 + plen -= len; 132 + names += len; 133 + } else { 134 + (*pparts)[i].name = "unnamed"; 135 + } 136 + 137 + part++; 138 + } 139 + 140 + return nr_parts; 141 + } 142 + 143 + static struct mtd_part_parser ofoldpart_parser = { 144 + .owner = THIS_MODULE, 145 + .parse_fn = parse_ofoldpart_partitions, 146 + .name = "ofoldpart", 147 + }; 148 + 149 + static int __init ofpart_parser_init(void) 150 + { 151 + int rc; 152 + rc = register_mtd_parser(&ofpart_parser); 153 + if (rc) 154 + goto out; 155 + 156 + rc = register_mtd_parser(&ofoldpart_parser); 157 + if (!rc) 158 + return 0; 159 + 160 + deregister_mtd_parser(&ofoldpart_parser); 161 + out: 162 + return rc; 163 + } 164 + 165 + module_init(ofpart_parser_init); 90 166 91 167 MODULE_LICENSE("GPL"); 168 + MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree"); 169 + MODULE_AUTHOR("Vitaly Wool, David Gibson"); 170 + /* 171 + * When MTD core cannot find the requested parser, it tries to load the module 172 + * with the same name. Since we provide the ofoldpart parser, we should have 173 + * the corresponding alias. 174 + */ 175 + MODULE_ALIAS("ofoldpart");
+3 -11
drivers/mtd/onenand/generic.c
··· 30 30 */ 31 31 #define DRIVER_NAME "onenand-flash" 32 32 33 - static const char *part_probes[] = { "cmdlinepart", NULL, }; 34 - 35 33 struct onenand_info { 36 34 struct mtd_info mtd; 37 - struct mtd_partition *parts; 38 35 struct onenand_chip onenand; 39 36 }; 40 37 ··· 70 73 goto out_iounmap; 71 74 } 72 75 73 - err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 74 - if (err > 0) 75 - mtd_device_register(&info->mtd, info->parts, err); 76 - else if (err <= 0 && pdata && pdata->parts) 77 - mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts); 78 - else 79 - err = mtd_device_register(&info->mtd, NULL, 0); 76 + err = mtd_device_parse_register(&info->mtd, NULL, 0, 77 + pdata ? pdata->parts : NULL, 78 + pdata ? pdata->nr_parts : 0); 80 79 81 80 platform_set_drvdata(pdev, info); 82 81 ··· 97 104 platform_set_drvdata(pdev, NULL); 98 105 99 106 if (info) { 100 - mtd_device_unregister(&info->mtd); 101 107 onenand_release(&info->mtd); 102 108 release_mem_region(res->start, size); 103 109 iounmap(info->onenand.base);
+4 -12
drivers/mtd/onenand/omap2.c
··· 57 57 unsigned long phys_base; 58 58 int gpio_irq; 59 59 struct mtd_info mtd; 60 - struct mtd_partition *parts; 61 60 struct onenand_chip onenand; 62 61 struct completion irq_done; 63 62 struct completion dma_done; ··· 65 66 int (*setup)(void __iomem *base, int *freq_ptr); 66 67 struct regulator *regulator; 67 68 }; 68 - 69 - static const char *part_probes[] = { "cmdlinepart", NULL, }; 70 69 71 70 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 72 71 { ··· 738 741 c->regulator = regulator_get(&pdev->dev, "vonenand"); 739 742 if (IS_ERR(c->regulator)) { 740 743 dev_err(&pdev->dev, "Failed to get regulator\n"); 744 + r = PTR_ERR(c->regulator); 741 745 goto err_release_dma; 742 746 } 743 747 c->onenand.enable = omap2_onenand_enable; ··· 751 753 if ((r = onenand_scan(&c->mtd, 1)) < 0) 752 754 goto err_release_regulator; 753 755 754 - r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); 755 - if (r > 0) 756 - r = mtd_device_register(&c->mtd, c->parts, r); 757 - else if (pdata->parts != NULL) 758 - r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts); 759 - else 760 - r = mtd_device_register(&c->mtd, NULL, 0); 756 + r = mtd_device_parse_register(&c->mtd, NULL, 0, 757 + pdata ? pdata->parts : NULL, 758 + pdata ? pdata->nr_parts : 0); 761 759 if (r) 762 760 goto err_release_onenand; 763 761 ··· 780 786 err_free_cs: 781 787 gpmc_cs_free(c->gpmc_cs); 782 788 err_kfree: 783 - kfree(c->parts); 784 789 kfree(c); 785 790 786 791 return r; ··· 802 809 iounmap(c->onenand.base); 803 810 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 804 811 gpmc_cs_free(c->gpmc_cs); 805 - kfree(c->parts); 806 812 kfree(c); 807 813 808 814 return 0;
+65 -49
drivers/mtd/onenand/onenand_base.c
··· 1015 1015 } 1016 1016 1017 1017 /** 1018 - * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer 1018 + * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer 1019 1019 * @param mtd MTD device structure 1020 1020 * @param buf destination address 1021 1021 * @param column oob offset to read from ··· 1079 1079 return status; 1080 1080 1081 1081 /* check if we failed due to uncorrectable error */ 1082 - if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR) 1082 + if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR) 1083 1083 return status; 1084 1084 1085 1085 /* check if address lies in MLC region */ ··· 1122 1122 int ret = 0; 1123 1123 int writesize = this->writesize; 1124 1124 1125 - DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1126 - __func__, (unsigned int) from, (int) len); 1125 + pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1126 + (int)len); 1127 1127 1128 - if (ops->mode == MTD_OOB_AUTO) 1128 + if (ops->mode == MTD_OPS_AUTO_OOB) 1129 1129 oobsize = this->ecclayout->oobavail; 1130 1130 else 1131 1131 oobsize = mtd->oobsize; ··· 1159 1159 if (unlikely(ret)) 1160 1160 ret = onenand_recover_lsb(mtd, from, ret); 1161 1161 onenand_update_bufferram(mtd, from, !ret); 1162 - if (ret == -EBADMSG) 1162 + if (mtd_is_eccerr(ret)) 1163 1163 ret = 0; 1164 1164 if (ret) 1165 1165 break; ··· 1170 1170 thisooblen = oobsize - oobcolumn; 1171 1171 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1172 1172 1173 - if (ops->mode == MTD_OOB_AUTO) 1173 + if (ops->mode == MTD_OPS_AUTO_OOB) 1174 1174 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1175 1175 else 1176 1176 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); ··· 1226 1226 int ret = 0, boundary = 0; 1227 1227 int writesize = this->writesize; 1228 1228 1229 - DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1230 - __func__, (unsigned int) from, (int) len); 1229 + pr_debug("%s: 
from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1230 + (int)len); 1231 1231 1232 - if (ops->mode == MTD_OOB_AUTO) 1232 + if (ops->mode == MTD_OPS_AUTO_OOB) 1233 1233 oobsize = this->ecclayout->oobavail; 1234 1234 else 1235 1235 oobsize = mtd->oobsize; ··· 1255 1255 this->command(mtd, ONENAND_CMD_READ, from, writesize); 1256 1256 ret = this->wait(mtd, FL_READING); 1257 1257 onenand_update_bufferram(mtd, from, !ret); 1258 - if (ret == -EBADMSG) 1258 + if (mtd_is_eccerr(ret)) 1259 1259 ret = 0; 1260 1260 } 1261 1261 } ··· 1291 1291 thisooblen = oobsize - oobcolumn; 1292 1292 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1293 1293 1294 - if (ops->mode == MTD_OOB_AUTO) 1294 + if (ops->mode == MTD_OPS_AUTO_OOB) 1295 1295 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1296 1296 else 1297 1297 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); ··· 1315 1315 /* Now wait for load */ 1316 1316 ret = this->wait(mtd, FL_READING); 1317 1317 onenand_update_bufferram(mtd, from, !ret); 1318 - if (ret == -EBADMSG) 1318 + if (mtd_is_eccerr(ret)) 1319 1319 ret = 0; 1320 1320 } 1321 1321 ··· 1351 1351 struct mtd_ecc_stats stats; 1352 1352 int read = 0, thislen, column, oobsize; 1353 1353 size_t len = ops->ooblen; 1354 - mtd_oob_mode_t mode = ops->mode; 1354 + unsigned int mode = ops->mode; 1355 1355 u_char *buf = ops->oobbuf; 1356 1356 int ret = 0, readcmd; 1357 1357 1358 1358 from += ops->ooboffs; 1359 1359 1360 - DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1361 - __func__, (unsigned int) from, (int) len); 1360 + pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1361 + (int)len); 1362 1362 1363 1363 /* Initialize return length value */ 1364 1364 ops->oobretlen = 0; 1365 1365 1366 - if (mode == MTD_OOB_AUTO) 1366 + if (mode == MTD_OPS_AUTO_OOB) 1367 1367 oobsize = this->ecclayout->oobavail; 1368 1368 else 1369 1369 oobsize = mtd->oobsize; ··· 1403 1403 if (unlikely(ret)) 1404 1404 ret = 
onenand_recover_lsb(mtd, from, ret); 1405 1405 1406 - if (ret && ret != -EBADMSG) { 1406 + if (ret && !mtd_is_eccerr(ret)) { 1407 1407 printk(KERN_ERR "%s: read failed = 0x%x\n", 1408 1408 __func__, ret); 1409 1409 break; 1410 1410 } 1411 1411 1412 - if (mode == MTD_OOB_AUTO) 1412 + if (mode == MTD_OPS_AUTO_OOB) 1413 1413 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1414 1414 else 1415 1415 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); ··· 1487 1487 int ret; 1488 1488 1489 1489 switch (ops->mode) { 1490 - case MTD_OOB_PLACE: 1491 - case MTD_OOB_AUTO: 1490 + case MTD_OPS_PLACE_OOB: 1491 + case MTD_OPS_AUTO_OOB: 1492 1492 break; 1493 - case MTD_OOB_RAW: 1493 + case MTD_OPS_RAW: 1494 1494 /* Not implemented yet */ 1495 1495 default: 1496 1496 return -EINVAL; ··· 1576 1576 size_t len = ops->ooblen; 1577 1577 u_char *buf = ops->oobbuf; 1578 1578 1579 - DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n", 1580 - __func__, (unsigned int) from, len); 1579 + pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from, 1580 + len); 1581 1581 1582 1582 /* Initialize return value */ 1583 1583 ops->oobretlen = 0; ··· 1750 1750 /* Wait for any existing operation to clear */ 1751 1751 onenand_panic_wait(mtd); 1752 1752 1753 - DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 1754 - __func__, (unsigned int) to, (int) len); 1753 + pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1754 + (int)len); 1755 1755 1756 1756 /* Initialize retlen, in case of early exit */ 1757 1757 *retlen = 0; ··· 1821 1821 } 1822 1822 1823 1823 /** 1824 - * onenand_fill_auto_oob - [Internal] oob auto-placement transfer 1824 + * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer 1825 1825 * @param mtd MTD device structure 1826 1826 * @param oob_buf oob buffer 1827 1827 * @param buf source address ··· 1883 1883 u_char *oobbuf; 1884 1884 int ret = 0, cmd; 1885 1885 1886 - DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 
1887 - __func__, (unsigned int) to, (int) len); 1886 + pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1887 + (int)len); 1888 1888 1889 1889 /* Initialize retlen, in case of early exit */ 1890 1890 ops->retlen = 0; ··· 1908 1908 if (!len) 1909 1909 return 0; 1910 1910 1911 - if (ops->mode == MTD_OOB_AUTO) 1911 + if (ops->mode == MTD_OPS_AUTO_OOB) 1912 1912 oobsize = this->ecclayout->oobavail; 1913 1913 else 1914 1914 oobsize = mtd->oobsize; ··· 1945 1945 /* We send data to spare ram with oobsize 1946 1946 * to prevent byte access */ 1947 1947 memset(oobbuf, 0xff, mtd->oobsize); 1948 - if (ops->mode == MTD_OOB_AUTO) 1948 + if (ops->mode == MTD_OPS_AUTO_OOB) 1949 1949 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); 1950 1950 else 1951 1951 memcpy(oobbuf + oobcolumn, oob, thisooblen); ··· 2055 2055 2056 2056 2057 2057 /** 2058 - * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band 2058 + * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band 2059 2059 * @param mtd MTD device structure 2060 2060 * @param to offset to write to 2061 2061 * @param len number of bytes to write ··· 2074 2074 u_char *oobbuf; 2075 2075 size_t len = ops->ooblen; 2076 2076 const u_char *buf = ops->oobbuf; 2077 - mtd_oob_mode_t mode = ops->mode; 2077 + unsigned int mode = ops->mode; 2078 2078 2079 2079 to += ops->ooboffs; 2080 2080 2081 - DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 2082 - __func__, (unsigned int) to, (int) len); 2081 + pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 2082 + (int)len); 2083 2083 2084 2084 /* Initialize retlen, in case of early exit */ 2085 2085 ops->oobretlen = 0; 2086 2086 2087 - if (mode == MTD_OOB_AUTO) 2087 + if (mode == MTD_OPS_AUTO_OOB) 2088 2088 oobsize = this->ecclayout->oobavail; 2089 2089 else 2090 2090 oobsize = mtd->oobsize; ··· 2128 2128 /* We send data to spare ram with oobsize 2129 2129 * to prevent byte access */ 2130 2130 memset(oobbuf, 0xff, mtd->oobsize); 
2131 - if (mode == MTD_OOB_AUTO) 2131 + if (mode == MTD_OPS_AUTO_OOB) 2132 2132 onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); 2133 2133 else 2134 2134 memcpy(oobbuf + column, buf, thislen); ··· 2217 2217 int ret; 2218 2218 2219 2219 switch (ops->mode) { 2220 - case MTD_OOB_PLACE: 2221 - case MTD_OOB_AUTO: 2220 + case MTD_OPS_PLACE_OOB: 2221 + case MTD_OPS_AUTO_OOB: 2222 2222 break; 2223 - case MTD_OOB_RAW: 2223 + case MTD_OPS_RAW: 2224 2224 /* Not implemented yet */ 2225 2225 default: 2226 2226 return -EINVAL; ··· 2281 2281 } 2282 2282 2283 2283 /** 2284 - * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase 2284 + * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase 2285 2285 * @param mtd MTD device structure 2286 2286 * @param instr erase instruction 2287 2287 * @param region erase region ··· 2397 2397 2398 2398 2399 2399 /** 2400 - * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase 2400 + * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase 2401 2401 * @param mtd MTD device structure 2402 2402 * @param instr erase instruction 2403 2403 * @param region erase region ··· 2489 2489 struct mtd_erase_region_info *region = NULL; 2490 2490 loff_t region_offset = 0; 2491 2491 2492 - DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__, 2493 - (unsigned long long) instr->addr, (unsigned long long) instr->len); 2492 + pr_debug("%s: start=0x%012llx, len=%llu\n", __func__, 2493 + (unsigned long long)instr->addr, 2494 + (unsigned long long)instr->len); 2494 2495 2495 2496 /* Do not allow erase past end of device */ 2496 2497 if (unlikely((len + addr) > mtd->size)) { ··· 2559 2558 */ 2560 2559 static void onenand_sync(struct mtd_info *mtd) 2561 2560 { 2562 - DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2561 + pr_debug("%s: called\n", __func__); 2563 2562 2564 2563 /* Grab the lock and see if the device is available */ 2565 2564 
onenand_get_device(mtd, FL_SYNCING); ··· 2603 2602 struct bbm_info *bbm = this->bbm; 2604 2603 u_char buf[2] = {0, 0}; 2605 2604 struct mtd_oob_ops ops = { 2606 - .mode = MTD_OOB_PLACE, 2605 + .mode = MTD_OPS_PLACE_OOB, 2607 2606 .ooblen = 2, 2608 2607 .oobbuf = buf, 2609 2608 .ooboffs = 0, ··· 2923 2922 } 2924 2923 2925 2924 /** 2926 - * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP 2925 + * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP 2927 2926 * @param mtd MTD device structure 2928 2927 * @param to offset to write to 2929 2928 * @param len number of bytes to write ··· 3171 3170 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 3172 3171 this->wait(mtd, FL_RESETING); 3173 3172 } else { 3174 - ops.mode = MTD_OOB_PLACE; 3173 + ops.mode = MTD_OPS_PLACE_OOB; 3175 3174 ops.ooblen = len; 3176 3175 ops.oobbuf = buf; 3177 3176 ops.ooboffs = 0; ··· 3430 3429 else if (numbufs == 1) { 3431 3430 this->options |= ONENAND_HAS_4KB_PAGE; 3432 3431 this->options |= ONENAND_HAS_CACHE_PROGRAM; 3432 + /* 3433 + * There are two different 4KiB pagesize chips 3434 + * and no way to detect it by H/W config values. 3435 + * 3436 + * To detect the correct NOP for each chips, 3437 + * It should check the version ID as workaround. 
3438 + * 3439 + * Now it is as follows 3440 + * KFM4G16Q4M has NOP 4 with version ID 0x0131 3441 + * KFM4G16Q5M has NOP 1 with version ID 0x013e 3442 + */ 3443 + if ((this->version_id & 0xf) == 0xe) 3444 + this->options |= ONENAND_HAS_NOP_1; 3433 3445 } 3434 3446 3435 3447 case ONENAND_DEVICE_DENSITY_2Gb: ··· 3677 3663 int i, ret; 3678 3664 int block; 3679 3665 struct mtd_oob_ops ops = { 3680 - .mode = MTD_OOB_PLACE, 3666 + .mode = MTD_OPS_PLACE_OOB, 3681 3667 .ooboffs = 0, 3682 3668 .ooblen = mtd->oobsize, 3683 3669 .datbuf = NULL, ··· 4068 4054 this->ecclayout = &onenand_oob_128; 4069 4055 mtd->subpage_sft = 2; 4070 4056 } 4057 + if (ONENAND_IS_NOP_1(this)) 4058 + mtd->subpage_sft = 0; 4071 4059 break; 4072 4060 case 64: 4073 4061 this->ecclayout = &onenand_oob_64;
+3 -5
drivers/mtd/onenand/onenand_bbt.c
··· 81 81 startblock = 0; 82 82 from = 0; 83 83 84 - ops.mode = MTD_OOB_PLACE; 84 + ops.mode = MTD_OPS_PLACE_OOB; 85 85 ops.ooblen = readlen; 86 86 ops.oobbuf = buf; 87 87 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ··· 154 154 block = (int) (onenand_block(this, offs) << 1); 155 155 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; 156 156 157 - DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", 157 + pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", 158 158 (unsigned int) offs, block >> 1, res); 159 159 160 160 switch ((int) res) { ··· 189 189 len = this->chipsize >> (this->erase_shift + 2); 190 190 /* Allocate memory (2bit per block) and clear the memory bad block table */ 191 191 bbm->bbt = kzalloc(len, GFP_KERNEL); 192 - if (!bbm->bbt) { 193 - printk(KERN_ERR "onenand_scan_bbt: Out of memory\n"); 192 + if (!bbm->bbt) 194 193 return -ENOMEM; 195 - } 196 194 197 195 /* Set the bad block position */ 198 196 bbm->badblockpos = ONENAND_BADBLOCK_POS;
+3 -10
drivers/mtd/onenand/samsung.c
··· 147 147 struct resource *dma_res; 148 148 unsigned long phys_base; 149 149 struct completion complete; 150 - struct mtd_partition *parts; 151 150 }; 152 151 153 152 #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) ··· 155 156 #define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2))) 156 157 157 158 static struct s3c_onenand *onenand; 158 - 159 - static const char *part_probes[] = { "cmdlinepart", NULL, }; 160 159 161 160 static inline int s3c_read_reg(int offset) 162 161 { ··· 1014 1017 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 1015 1018 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 1016 1019 1017 - err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); 1018 - if (err > 0) 1019 - mtd_device_register(mtd, onenand->parts, err); 1020 - else if (err <= 0 && pdata && pdata->parts) 1021 - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); 1022 - else 1023 - err = mtd_device_register(mtd, NULL, 0); 1020 + err = mtd_device_parse_register(mtd, NULL, 0, 1021 + pdata ? pdata->parts : NULL, 1022 + pdata ? pdata->nr_parts : 0); 1024 1023 1025 1024 platform_set_drvdata(pdev, mtd); 1026 1025
+9 -7
drivers/mtd/redboot.c
··· 57 57 } 58 58 59 59 static int parse_redboot_partitions(struct mtd_info *master, 60 - struct mtd_partition **pparts, 61 - unsigned long fis_origin) 60 + struct mtd_partition **pparts, 61 + struct mtd_part_parser_data *data) 62 62 { 63 63 int nrparts = 0; 64 64 struct fis_image_desc *buf; ··· 198 198 goto out; 199 199 } 200 200 new_fl->img = &buf[i]; 201 - if (fis_origin) { 202 - buf[i].flash_base -= fis_origin; 203 - } else { 204 - buf[i].flash_base &= master->size-1; 205 - } 201 + if (data && data->origin) 202 + buf[i].flash_base -= data->origin; 203 + else 204 + buf[i].flash_base &= master->size-1; 206 205 207 206 /* I'm sure the JFFS2 code has done me permanent damage. 208 207 * I now think the following is _normal_ ··· 296 297 .parse_fn = parse_redboot_partitions, 297 298 .name = "RedBoot", 298 299 }; 300 + 301 + /* mtd parsers will request the module by parser name */ 302 + MODULE_ALIAS("RedBoot"); 299 303 300 304 static int __init redboot_parser_init(void) 301 305 {
+13 -13
drivers/mtd/sm_ftl.c
··· 34 34 MODULE_PARM_DESC(debug, "Debug level (0-2)"); 35 35 36 36 37 - /* ------------------- sysfs attributtes ---------------------------------- */ 37 + /* ------------------- sysfs attributes ---------------------------------- */ 38 38 struct sm_sysfs_attribute { 39 39 struct device_attribute dev_attr; 40 40 char *data; ··· 138 138 if ((lba[0] & 0xF8) != 0x10) 139 139 return -2; 140 140 141 - /* check parity - endianess doesn't matter */ 141 + /* check parity - endianness doesn't matter */ 142 142 if (hweight16(*(uint16_t *)lba) & 1) 143 143 return -2; 144 144 ··· 147 147 148 148 149 149 /* 150 - * Read LBA asscociated with block 150 + * Read LBA associated with block 151 151 * returns -1, if block is erased 152 152 * returns -2 if error happens 153 153 */ ··· 252 252 return 0; 253 253 } 254 254 255 - /* User might not need the oob, but we do for data vertification */ 255 + /* User might not need the oob, but we do for data verification */ 256 256 if (!oob) 257 257 oob = &tmp_oob; 258 258 259 - ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; 259 + ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; 260 260 ops.ooboffs = 0; 261 261 ops.ooblen = SM_OOB_SIZE; 262 262 ops.oobbuf = (void *)oob; ··· 276 276 return ret; 277 277 } 278 278 279 - /* Unfortunelly, oob read will _always_ succeed, 279 + /* Unfortunately, oob read will _always_ succeed, 280 280 despite card removal..... 
*/ 281 281 ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 282 282 283 283 /* Test for unknown errors */ 284 - if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) { 284 + if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) { 285 285 dbg("read of block %d at zone %d, failed due to error (%d)", 286 286 block, zone, ret); 287 287 goto again; ··· 306 306 } 307 307 308 308 /* Test ECC*/ 309 - if (ret == -EBADMSG || 309 + if (mtd_is_eccerr(ret) || 310 310 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { 311 311 312 312 dbg("read of block %d at zone %d, failed due to ECC error", ··· 336 336 if (ftl->unstable) 337 337 return -EIO; 338 338 339 - ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; 339 + ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; 340 340 ops.len = SM_SECTOR_SIZE; 341 341 ops.datbuf = buffer; 342 342 ops.ooboffs = 0; ··· 447 447 448 448 /* We aren't checking the return value, because we don't care */ 449 449 /* This also fails on fake xD cards, but I guess these won't expose 450 - any bad blocks till fail completly */ 450 + any bad blocks till fail completely */ 451 451 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) 452 452 sm_write_sector(ftl, zone, block, boffset, NULL, &oob); 453 453 } 454 454 455 455 /* 456 456 * Erase a block within a zone 457 - * If erase succedes, it updates free block fifo, otherwise marks block as bad 457 + * If erase succeeds, it updates free block fifo, otherwise marks block as bad 458 458 */ 459 459 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 460 460 int put_free) ··· 510 510 complete(&ftl->erase_completion); 511 511 } 512 512 513 - /* Throughtly test that block is valid. */ 513 + /* Thoroughly test that block is valid. 
*/ 514 514 static int sm_check_block(struct sm_ftl *ftl, int zone, int block) 515 515 { 516 516 int boffset; ··· 526 526 for (boffset = 0; boffset < ftl->block_size; 527 527 boffset += SM_SECTOR_SIZE) { 528 528 529 - /* This shoudn't happen anyway */ 529 + /* This shouldn't happen anyway */ 530 530 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) 531 531 return -2; 532 532
+16 -30
drivers/mtd/ssfdc.c
··· 135 135 /* Found */ 136 136 cis_sector = (int)(offset >> SECTOR_SHIFT); 137 137 } else { 138 - DEBUG(MTD_DEBUG_LEVEL1, 139 - "SSFDC_RO: CIS/IDI sector not found" 138 + pr_debug("SSFDC_RO: CIS/IDI sector not found" 140 139 " on %s (mtd%d)\n", mtd->name, 141 140 mtd->index); 142 141 } ··· 169 170 struct mtd_oob_ops ops; 170 171 int ret; 171 172 172 - ops.mode = MTD_OOB_RAW; 173 + ops.mode = MTD_OPS_RAW; 173 174 ops.ooboffs = 0; 174 175 ops.ooblen = OOB_SIZE; 175 176 ops.oobbuf = buf; ··· 220 221 block_address >>= 1; 221 222 222 223 if (get_parity(block_address, 10) != parity) { 223 - DEBUG(MTD_DEBUG_LEVEL0, 224 - "SSFDC_RO: logical address field%d" 224 + pr_debug("SSFDC_RO: logical address field%d" 225 225 "parity error(0x%04X)\n", j+1, 226 226 block_address); 227 227 } else { ··· 233 235 if (!ok) 234 236 block_address = -2; 235 237 236 - DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n", 238 + pr_debug("SSFDC_RO: get_logical_address() %d\n", 237 239 block_address); 238 240 239 241 return block_address; ··· 247 249 int ret, block_address, phys_block; 248 250 struct mtd_info *mtd = ssfdc->mbd.mtd; 249 251 250 - DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n", 252 + pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n", 251 253 ssfdc->map_len, 252 254 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024); 253 255 ··· 260 262 261 263 ret = read_raw_oob(mtd, offset, oob_buf); 262 264 if (ret < 0) { 263 - DEBUG(MTD_DEBUG_LEVEL0, 264 - "SSFDC_RO: mtd read_oob() failed at %lu\n", 265 + pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n", 265 266 offset); 266 267 return -1; 267 268 } ··· 276 279 ssfdc->logic_block_map[block_address] = 277 280 (unsigned short)phys_block; 278 281 279 - DEBUG(MTD_DEBUG_LEVEL2, 280 - "SSFDC_RO: build_block_map() phys_block=%d," 282 + pr_debug("SSFDC_RO: build_block_map() phys_block=%d," 281 283 "logic_block_addr=%d, zone=%d\n", 282 284 phys_block, block_address, zone_index); 283 285 } ··· 300 304 
return; 301 305 302 306 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); 303 - if (!ssfdc) { 304 - printk(KERN_WARNING 305 - "SSFDC_RO: out of memory for data structures\n"); 307 + if (!ssfdc) 306 308 return; 307 - } 308 309 309 310 ssfdc->mbd.mtd = mtd; 310 311 ssfdc->mbd.devnum = -1; ··· 312 319 ssfdc->erase_size = mtd->erasesize; 313 320 ssfdc->map_len = (u32)mtd->size / mtd->erasesize; 314 321 315 - DEBUG(MTD_DEBUG_LEVEL1, 316 - "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", 322 + pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", 317 323 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, 318 324 DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); 319 325 ··· 323 331 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / 324 332 ((long)ssfdc->sectors * (long)ssfdc->heads)); 325 333 326 - DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 334 + pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 327 335 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, 328 336 (long)ssfdc->cylinders * (long)ssfdc->heads * 329 337 (long)ssfdc->sectors); ··· 334 342 /* Allocate logical block map */ 335 343 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) * 336 344 ssfdc->map_len, GFP_KERNEL); 337 - if (!ssfdc->logic_block_map) { 338 - printk(KERN_WARNING 339 - "SSFDC_RO: out of memory for data structures\n"); 345 + if (!ssfdc->logic_block_map) 340 346 goto out_err; 341 - } 342 347 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * 343 348 ssfdc->map_len); 344 349 ··· 360 371 { 361 372 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 362 373 363 - DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); 374 + pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); 364 375 365 376 del_mtd_blktrans_dev(dev); 366 377 kfree(ssfdc->logic_block_map); ··· 376 387 offset = (int)(logic_sect_no % sectors_per_block); 377 388 
block_address = (int)(logic_sect_no / sectors_per_block); 378 389 379 - DEBUG(MTD_DEBUG_LEVEL3, 380 - "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," 390 + pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," 381 391 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, 382 392 block_address); 383 393 ··· 385 397 386 398 block_address = ssfdc->logic_block_map[block_address]; 387 399 388 - DEBUG(MTD_DEBUG_LEVEL3, 389 - "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", 400 + pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", 390 401 block_address); 391 402 392 403 if (block_address < 0xffff) { ··· 394 407 sect_no = (unsigned long)block_address * sectors_per_block + 395 408 offset; 396 409 397 - DEBUG(MTD_DEBUG_LEVEL3, 398 - "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", 410 + pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", 399 411 sect_no); 400 412 401 413 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0) ··· 410 424 { 411 425 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 412 426 413 - DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", 427 + pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", 414 428 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); 415 429 416 430 geo->heads = ssfdc->heads;
+20 -13
drivers/mtd/tests/mtd_oobtest.c
··· 30 30 31 31 #define PRINT_PREF KERN_INFO "mtd_oobtest: " 32 32 33 - static int dev; 33 + static int dev = -EINVAL; 34 34 module_param(dev, int, S_IRUGO); 35 35 MODULE_PARM_DESC(dev, "MTD device number to use"); 36 36 ··· 131 131 132 132 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 133 133 set_random_data(writebuf, use_len); 134 - ops.mode = MTD_OOB_AUTO; 134 + ops.mode = MTD_OPS_AUTO_OOB; 135 135 ops.len = 0; 136 136 ops.retlen = 0; 137 137 ops.ooblen = use_len; ··· 184 184 185 185 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 186 186 set_random_data(writebuf, use_len); 187 - ops.mode = MTD_OOB_AUTO; 187 + ops.mode = MTD_OPS_AUTO_OOB; 188 188 ops.len = 0; 189 189 ops.retlen = 0; 190 190 ops.ooblen = use_len; ··· 211 211 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { 212 212 int k; 213 213 214 - ops.mode = MTD_OOB_AUTO; 214 + ops.mode = MTD_OPS_AUTO_OOB; 215 215 ops.len = 0; 216 216 ops.retlen = 0; 217 217 ops.ooblen = mtd->ecclayout->oobavail; ··· 276 276 size_t len = mtd->ecclayout->oobavail * pgcnt; 277 277 278 278 set_random_data(writebuf, len); 279 - ops.mode = MTD_OOB_AUTO; 279 + ops.mode = MTD_OPS_AUTO_OOB; 280 280 ops.len = 0; 281 281 ops.retlen = 0; 282 282 ops.ooblen = len; ··· 366 366 367 367 printk(KERN_INFO "\n"); 368 368 printk(KERN_INFO "=================================================\n"); 369 + 370 + if (dev < 0) { 371 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 372 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 373 + return -EINVAL; 374 + } 375 + 369 376 printk(PRINT_PREF "MTD device: %d\n", dev); 370 377 371 378 mtd = get_mtd_device(NULL, dev); ··· 514 507 addr0 += mtd->erasesize; 515 508 516 509 /* Attempt to write off end of OOB */ 517 - ops.mode = MTD_OOB_AUTO; 510 + ops.mode = MTD_OPS_AUTO_OOB; 518 511 ops.len = 0; 519 512 ops.retlen = 0; 520 513 ops.ooblen = 1; ··· 534 527 } 535 528 536 529 /* Attempt to read off end of OOB */ 537 
- ops.mode = MTD_OOB_AUTO; 530 + ops.mode = MTD_OPS_AUTO_OOB; 538 531 ops.len = 0; 539 532 ops.retlen = 0; 540 533 ops.ooblen = 1; ··· 558 551 "block is bad\n"); 559 552 else { 560 553 /* Attempt to write off end of device */ 561 - ops.mode = MTD_OOB_AUTO; 554 + ops.mode = MTD_OPS_AUTO_OOB; 562 555 ops.len = 0; 563 556 ops.retlen = 0; 564 557 ops.ooblen = mtd->ecclayout->oobavail + 1; ··· 578 571 } 579 572 580 573 /* Attempt to read off end of device */ 581 - ops.mode = MTD_OOB_AUTO; 574 + ops.mode = MTD_OPS_AUTO_OOB; 582 575 ops.len = 0; 583 576 ops.retlen = 0; 584 577 ops.ooblen = mtd->ecclayout->oobavail + 1; ··· 602 595 goto out; 603 596 604 597 /* Attempt to write off end of device */ 605 - ops.mode = MTD_OOB_AUTO; 598 + ops.mode = MTD_OPS_AUTO_OOB; 606 599 ops.len = 0; 607 600 ops.retlen = 0; 608 601 ops.ooblen = mtd->ecclayout->oobavail; ··· 622 615 } 623 616 624 617 /* Attempt to read off end of device */ 625 - ops.mode = MTD_OOB_AUTO; 618 + ops.mode = MTD_OPS_AUTO_OOB; 626 619 ops.len = 0; 627 620 ops.retlen = 0; 628 621 ops.ooblen = mtd->ecclayout->oobavail; ··· 662 655 addr = (i + 1) * mtd->erasesize - mtd->writesize; 663 656 for (pg = 0; pg < cnt; ++pg) { 664 657 set_random_data(writebuf, sz); 665 - ops.mode = MTD_OOB_AUTO; 658 + ops.mode = MTD_OPS_AUTO_OOB; 666 659 ops.len = 0; 667 660 ops.retlen = 0; 668 661 ops.ooblen = sz; ··· 690 683 continue; 691 684 set_random_data(writebuf, mtd->ecclayout->oobavail * 2); 692 685 addr = (i + 1) * mtd->erasesize - mtd->writesize; 693 - ops.mode = MTD_OOB_AUTO; 686 + ops.mode = MTD_OPS_AUTO_OOB; 694 687 ops.len = 0; 695 688 ops.retlen = 0; 696 689 ops.ooblen = mtd->ecclayout->oobavail * 2;
+22 -15
drivers/mtd/tests/mtd_pagetest.c
··· 30 30 31 31 #define PRINT_PREF KERN_INFO "mtd_pagetest: " 32 32 33 - static int dev; 33 + static int dev = -EINVAL; 34 34 module_param(dev, int, S_IRUGO); 35 35 MODULE_PARM_DESC(dev, "MTD device number to use"); 36 36 ··· 128 128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { 129 129 /* Do a read to set the internal dataRAMs to different data */ 130 130 err = mtd->read(mtd, addr0, bufsize, &read, twopages); 131 - if (err == -EUCLEAN) 131 + if (mtd_is_bitflip(err)) 132 132 err = 0; 133 133 if (err || read != bufsize) { 134 134 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 136 136 return err; 137 137 } 138 138 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 139 - if (err == -EUCLEAN) 139 + if (mtd_is_bitflip(err)) 140 140 err = 0; 141 141 if (err || read != bufsize) { 142 142 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 146 146 memset(twopages, 0, bufsize); 147 147 read = 0; 148 148 err = mtd->read(mtd, addr, bufsize, &read, twopages); 149 - if (err == -EUCLEAN) 149 + if (mtd_is_bitflip(err)) 150 150 err = 0; 151 151 if (err || read != bufsize) { 152 152 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 164 164 unsigned long oldnext = next; 165 165 /* Do a read to set the internal dataRAMs to different data */ 166 166 err = mtd->read(mtd, addr0, bufsize, &read, twopages); 167 - if (err == -EUCLEAN) 167 + if (mtd_is_bitflip(err)) 168 168 err = 0; 169 169 if (err || read != bufsize) { 170 170 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 172 172 return err; 173 173 } 174 174 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 175 - if (err == -EUCLEAN) 175 + if (mtd_is_bitflip(err)) 176 176 err = 0; 177 177 if (err || read != bufsize) { 178 178 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 182 182 memset(twopages, 0, bufsize); 183 183 read = 0; 184 184 err = mtd->read(mtd, addr, bufsize, &read, twopages); 185 - if (err == -EUCLEAN) 185 + if (mtd_is_bitflip(err)) 186 186 err = 0; 187 187 if 
(err || read != bufsize) { 188 188 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 231 231 read = 0; 232 232 addr = addrn - pgsize - pgsize; 233 233 err = mtd->read(mtd, addr, pgsize, &read, pp1); 234 - if (err == -EUCLEAN) 234 + if (mtd_is_bitflip(err)) 235 235 err = 0; 236 236 if (err || read != pgsize) { 237 237 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 244 244 read = 0; 245 245 addr = addrn - pgsize - pgsize - pgsize; 246 246 err = mtd->read(mtd, addr, pgsize, &read, pp1); 247 - if (err == -EUCLEAN) 247 + if (mtd_is_bitflip(err)) 248 248 err = 0; 249 249 if (err || read != pgsize) { 250 250 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 258 258 addr = addr0; 259 259 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 260 260 err = mtd->read(mtd, addr, pgsize, &read, pp2); 261 - if (err == -EUCLEAN) 261 + if (mtd_is_bitflip(err)) 262 262 err = 0; 263 263 if (err || read != pgsize) { 264 264 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 272 272 addr = addrn - pgsize; 273 273 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 274 274 err = mtd->read(mtd, addr, pgsize, &read, pp3); 275 - if (err == -EUCLEAN) 275 + if (mtd_is_bitflip(err)) 276 276 err = 0; 277 277 if (err || read != pgsize) { 278 278 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 286 286 addr = addr0; 287 287 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 288 288 err = mtd->read(mtd, addr, pgsize, &read, pp4); 289 - if (err == -EUCLEAN) 289 + if (mtd_is_bitflip(err)) 290 290 err = 0; 291 291 if (err || read != pgsize) { 292 292 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 345 345 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 346 346 memset(readbuf, 0, pgsize); 347 347 err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 348 - if (err == -EUCLEAN) 348 + if (mtd_is_bitflip(err)) 349 349 err = 0; 350 350 if (err || read != pgsize) { 351 351 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 
383 383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 384 384 memset(readbuf, 0, pgsize); 385 385 err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 386 - if (err == -EUCLEAN) 386 + if (mtd_is_bitflip(err)) 387 387 err = 0; 388 388 if (err || read != pgsize) { 389 389 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 439 439 440 440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 441 441 err = mtd->read(mtd, addr0, pgsize, &read, twopages); 442 - if (err == -EUCLEAN) 442 + if (mtd_is_bitflip(err)) 443 443 err = 0; 444 444 if (err || read != pgsize) { 445 445 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 504 504 505 505 printk(KERN_INFO "\n"); 506 506 printk(KERN_INFO "=================================================\n"); 507 + 508 + if (dev < 0) { 509 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 510 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 511 + return -EINVAL; 512 + } 513 + 507 514 printk(PRINT_PREF "MTD device: %d\n", dev); 508 515 509 516 mtd = get_mtd_device(NULL, dev);
+10 -3
drivers/mtd/tests/mtd_readtest.c
··· 29 29 30 30 #define PRINT_PREF KERN_INFO "mtd_readtest: " 31 31 32 - static int dev; 32 + static int dev = -EINVAL; 33 33 module_param(dev, int, S_IRUGO); 34 34 MODULE_PARM_DESC(dev, "MTD device number to use"); 35 35 ··· 66 66 if (mtd->oobsize) { 67 67 struct mtd_oob_ops ops; 68 68 69 - ops.mode = MTD_OOB_PLACE; 69 + ops.mode = MTD_OPS_PLACE_OOB; 70 70 ops.len = 0; 71 71 ops.retlen = 0; 72 72 ops.ooblen = mtd->oobsize; ··· 75 75 ops.datbuf = NULL; 76 76 ops.oobbuf = oobbuf; 77 77 ret = mtd->read_oob(mtd, addr, &ops); 78 - if (ret || ops.oobretlen != mtd->oobsize) { 78 + if ((ret && !mtd_is_bitflip(ret)) || 79 + ops.oobretlen != mtd->oobsize) { 79 80 printk(PRINT_PREF "error: read oob failed at " 80 81 "%#llx\n", (long long)addr); 81 82 if (!err) ··· 170 169 171 170 printk(KERN_INFO "\n"); 172 171 printk(KERN_INFO "=================================================\n"); 172 + 173 + if (dev < 0) { 174 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 175 + return -EINVAL; 176 + } 177 + 173 178 printk(PRINT_PREF "MTD device: %d\n", dev); 174 179 175 180 mtd = get_mtd_device(NULL, dev);
+12 -5
drivers/mtd/tests/mtd_speedtest.c
··· 29 29 30 30 #define PRINT_PREF KERN_INFO "mtd_speedtest: " 31 31 32 - static int dev; 32 + static int dev = -EINVAL; 33 33 module_param(dev, int, S_IRUGO); 34 34 MODULE_PARM_DESC(dev, "MTD device number to use"); 35 35 ··· 216 216 217 217 err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); 218 218 /* Ignore corrected ECC errors */ 219 - if (err == -EUCLEAN) 219 + if (mtd_is_bitflip(err)) 220 220 err = 0; 221 221 if (err || read != mtd->erasesize) { 222 222 printk(PRINT_PREF "error: read failed at %#llx\n", addr); ··· 237 237 for (i = 0; i < pgcnt; i++) { 238 238 err = mtd->read(mtd, addr, pgsize, &read, buf); 239 239 /* Ignore corrected ECC errors */ 240 - if (err == -EUCLEAN) 240 + if (mtd_is_bitflip(err)) 241 241 err = 0; 242 242 if (err || read != pgsize) { 243 243 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 263 263 for (i = 0; i < n; i++) { 264 264 err = mtd->read(mtd, addr, sz, &read, buf); 265 265 /* Ignore corrected ECC errors */ 266 - if (err == -EUCLEAN) 266 + if (mtd_is_bitflip(err)) 267 267 err = 0; 268 268 if (err || read != sz) { 269 269 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 278 278 if (pgcnt % 2) { 279 279 err = mtd->read(mtd, addr, pgsize, &read, buf); 280 280 /* Ignore corrected ECC errors */ 281 - if (err == -EUCLEAN) 281 + if (mtd_is_bitflip(err)) 282 282 err = 0; 283 283 if (err || read != pgsize) { 284 284 printk(PRINT_PREF "error: read failed at %#llx\n", ··· 361 361 362 362 printk(KERN_INFO "\n"); 363 363 printk(KERN_INFO "=================================================\n"); 364 + 365 + if (dev < 0) { 366 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 367 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 368 + return -EINVAL; 369 + } 370 + 364 371 if (count) 365 372 printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); 366 373 else
+9 -2
drivers/mtd/tests/mtd_stresstest.c
··· 30 30 31 31 #define PRINT_PREF KERN_INFO "mtd_stresstest: " 32 32 33 - static int dev; 33 + static int dev = -EINVAL; 34 34 module_param(dev, int, S_IRUGO); 35 35 MODULE_PARM_DESC(dev, "MTD device number to use"); 36 36 ··· 154 154 } 155 155 addr = eb * mtd->erasesize + offs; 156 156 err = mtd->read(mtd, addr, len, &read, readbuf); 157 - if (err == -EUCLEAN) 157 + if (mtd_is_bitflip(err)) 158 158 err = 0; 159 159 if (unlikely(err || read != len)) { 160 160 printk(PRINT_PREF "error: read failed at 0x%llx\n", ··· 250 250 251 251 printk(KERN_INFO "\n"); 252 252 printk(KERN_INFO "=================================================\n"); 253 + 254 + if (dev < 0) { 255 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 256 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 257 + return -EINVAL; 258 + } 259 + 253 260 printk(PRINT_PREF "MTD device: %d\n", dev); 254 261 255 262 mtd = get_mtd_device(NULL, dev);
+12 -5
drivers/mtd/tests/mtd_subpagetest.c
··· 29 29 30 30 #define PRINT_PREF KERN_INFO "mtd_subpagetest: " 31 31 32 - static int dev; 32 + static int dev = -EINVAL; 33 33 module_param(dev, int, S_IRUGO); 34 34 MODULE_PARM_DESC(dev, "MTD device number to use"); 35 35 ··· 198 198 read = 0; 199 199 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 200 200 if (unlikely(err || read != subpgsize)) { 201 - if (err == -EUCLEAN && read == subpgsize) { 201 + if (mtd_is_bitflip(err) && read == subpgsize) { 202 202 printk(PRINT_PREF "ECC correction at %#llx\n", 203 203 (long long)addr); 204 204 err = 0; ··· 226 226 read = 0; 227 227 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 228 228 if (unlikely(err || read != subpgsize)) { 229 - if (err == -EUCLEAN && read == subpgsize) { 229 + if (mtd_is_bitflip(err) && read == subpgsize) { 230 230 printk(PRINT_PREF "ECC correction at %#llx\n", 231 231 (long long)addr); 232 232 err = 0; ··· 264 264 read = 0; 265 265 err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); 266 266 if (unlikely(err || read != subpgsize * k)) { 267 - if (err == -EUCLEAN && read == subpgsize * k) { 267 + if (mtd_is_bitflip(err) && read == subpgsize * k) { 268 268 printk(PRINT_PREF "ECC correction at %#llx\n", 269 269 (long long)addr); 270 270 err = 0; ··· 298 298 read = 0; 299 299 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 300 300 if (unlikely(err || read != subpgsize)) { 301 - if (err == -EUCLEAN && read == subpgsize) { 301 + if (mtd_is_bitflip(err) && read == subpgsize) { 302 302 printk(PRINT_PREF "ECC correction at %#llx\n", 303 303 (long long)addr); 304 304 err = 0; ··· 379 379 380 380 printk(KERN_INFO "\n"); 381 381 printk(KERN_INFO "=================================================\n"); 382 + 383 + if (dev < 0) { 384 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 385 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 386 + return -EINVAL; 387 + } 388 + 382 389 printk(PRINT_PREF "MTD device: %d\n", 
dev); 383 390 384 391 mtd = get_mtd_device(NULL, dev);
+9 -2
drivers/mtd/tests/mtd_torturetest.c
··· 46 46 module_param(pgcnt, int, S_IRUGO); 47 47 MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); 48 48 49 - static int dev; 49 + static int dev = -EINVAL; 50 50 module_param(dev, int, S_IRUGO); 51 51 MODULE_PARM_DESC(dev, "MTD device number to use"); 52 52 ··· 138 138 139 139 retry: 140 140 err = mtd->read(mtd, addr, len, &read, check_buf); 141 - if (err == -EUCLEAN) 141 + if (mtd_is_bitflip(err)) 142 142 printk(PRINT_PREF "single bit flip occurred at EB %d " 143 143 "MTD reported that it was fixed.\n", ebnum); 144 144 else if (err) { ··· 213 213 printk(KERN_INFO "=================================================\n"); 214 214 printk(PRINT_PREF "Warning: this program is trying to wear out your " 215 215 "flash, stop it if this is not wanted.\n"); 216 + 217 + if (dev < 0) { 218 + printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 219 + printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 220 + return -EINVAL; 221 + } 222 + 216 223 printk(PRINT_PREF "MTD device: %d\n", dev); 217 224 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", 218 225 ebcnt, eb, eb + ebcnt - 1, dev);
+1 -1
drivers/mtd/ubi/eba.c
··· 443 443 if (err == UBI_IO_BITFLIPS) { 444 444 scrub = 1; 445 445 err = 0; 446 - } else if (err == -EBADMSG) { 446 + } else if (mtd_is_eccerr(err)) { 447 447 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 448 448 goto out_unlock; 449 449 scrub = 1;
+12 -12
drivers/mtd/ubi/io.c
··· 172 172 retry: 173 173 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 174 174 if (err) { 175 - const char *errstr = (err == -EBADMSG) ? " (ECC error)" : ""; 175 + const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : ""; 176 176 177 - if (err == -EUCLEAN) { 177 + if (mtd_is_bitflip(err)) { 178 178 /* 179 179 * -EUCLEAN is reported if there was a bit-flip which 180 180 * was corrected, so this is harmless. ··· 205 205 * all the requested data. But some buggy drivers might do 206 206 * this, so we change it to -EIO. 207 207 */ 208 - if (read != len && err == -EBADMSG) { 208 + if (read != len && mtd_is_eccerr(err)) { 209 209 ubi_assert(0); 210 210 err = -EIO; 211 211 } ··· 469 469 470 470 out: 471 471 mutex_unlock(&ubi->buf_mutex); 472 - if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { 472 + if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { 473 473 /* 474 474 * If a bit-flip or data integrity error was detected, the test 475 475 * has not passed because it happened on a freshly erased ··· 760 760 761 761 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 762 762 if (read_err) { 763 - if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) 763 + if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) 764 764 return read_err; 765 765 766 766 /* ··· 776 776 777 777 magic = be32_to_cpu(ec_hdr->magic); 778 778 if (magic != UBI_EC_HDR_MAGIC) { 779 - if (read_err == -EBADMSG) 779 + if (mtd_is_eccerr(read_err)) 780 780 return UBI_IO_BAD_HDR_EBADMSG; 781 781 782 782 /* ··· 1032 1032 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1033 1033 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1034 1034 ubi->vid_hdr_alsize); 1035 - if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) 1035 + if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) 1036 1036 return read_err; 1037 1037 1038 1038 magic = be32_to_cpu(vid_hdr->magic); 1039 1039 if (magic != UBI_VID_HDR_MAGIC) { 1040 - if (read_err == -EBADMSG) 1040 
+ if (mtd_is_eccerr(read_err)) 1041 1041 return UBI_IO_BAD_HDR_EBADMSG; 1042 1042 1043 1043 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { ··· 1219 1219 return -ENOMEM; 1220 1220 1221 1221 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 1222 - if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 1222 + if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1223 1223 goto exit; 1224 1224 1225 1225 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); ··· 1306 1306 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1307 1307 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1308 1308 ubi->vid_hdr_alsize); 1309 - if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 1309 + if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1310 1310 goto exit; 1311 1311 1312 1312 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); ··· 1358 1358 } 1359 1359 1360 1360 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); 1361 - if (err && err != -EUCLEAN) 1361 + if (err && !mtd_is_bitflip(err)) 1362 1362 goto out_free; 1363 1363 1364 1364 for (i = 0; i < len; i++) { ··· 1422 1422 } 1423 1423 1424 1424 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 1425 - if (err && err != -EUCLEAN) { 1425 + if (err && !mtd_is_bitflip(err)) { 1426 1426 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 1427 1427 "read %zd bytes", err, len, pnum, offset, read); 1428 1428 goto error;
+1 -1
drivers/mtd/ubi/kapi.c
··· 410 410 return 0; 411 411 412 412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); 413 - if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 413 + if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { 414 414 ubi_warn("mark volume %d as corrupted", vol_id); 415 415 vol->corrupted = 1; 416 416 }
+1 -1
drivers/mtd/ubi/misc.c
··· 81 81 82 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); 83 83 if (err) { 84 - if (err == -EBADMSG) 84 + if (mtd_is_eccerr(err)) 85 85 err = 1; 86 86 break; 87 87 }
+2 -2
drivers/mtd/ubi/scan.c
··· 395 395 } 396 396 397 397 err = ubi_io_read_data(ubi, buf, pnum, 0, len); 398 - if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 398 + if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 399 399 goto out_free_buf; 400 400 401 401 data_crc = be32_to_cpu(vid_hdr->data_crc); ··· 793 793 794 794 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start, 795 795 ubi->leb_size); 796 - if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { 796 + if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { 797 797 /* 798 798 * Bit-flips or integrity errors while reading the data area. 799 799 * It is difficult to say for sure what type of corruption is
+1 -1
drivers/mtd/ubi/vtbl.c
··· 423 423 424 424 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 425 425 ubi->vtbl_size); 426 - if (err == UBI_IO_BITFLIPS || err == -EBADMSG) 426 + if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) 427 427 /* 428 428 * Scrub the PEB later. Note, -EBADMSG indicates an 429 429 * uncorrectable ECC error, but we have our own CRC and
+3 -3
drivers/staging/spectra/lld_mtd.c
··· 340 340 struct mtd_oob_ops ops; 341 341 int ret; 342 342 343 - ops.mode = MTD_OOB_AUTO; 343 + ops.mode = MTD_OPS_AUTO_OOB; 344 344 ops.datbuf = read_data; 345 345 ops.len = DeviceInfo.wPageDataSize; 346 346 ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; ··· 400 400 struct mtd_oob_ops ops; 401 401 int ret; 402 402 403 - ops.mode = MTD_OOB_AUTO; 403 + ops.mode = MTD_OPS_AUTO_OOB; 404 404 ops.datbuf = write_data; 405 405 ops.len = DeviceInfo.wPageDataSize; 406 406 ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; ··· 473 473 struct mtd_oob_ops ops; 474 474 int ret; 475 475 476 - ops.mode = MTD_OOB_AUTO; 476 + ops.mode = MTD_OPS_AUTO_OOB; 477 477 ops.datbuf = NULL; 478 478 ops.len = 0; 479 479 ops.oobbuf = read_data;
+91 -37
fs/jffs2/compr.c
··· 53 53 return 0; 54 54 } 55 55 56 + /* 57 + * jffs2_selected_compress: 58 + * @compr: Explicit compression type to use (ie, JFFS2_COMPR_ZLIB). 59 + * If 0, just take the first available compression mode. 60 + * @data_in: Pointer to uncompressed data 61 + * @cpage_out: Pointer to returned pointer to buffer for compressed data 62 + * @datalen: On entry, holds the amount of data available for compression. 63 + * On exit, expected to hold the amount of data actually compressed. 64 + * @cdatalen: On entry, holds the amount of space available for compressed 65 + * data. On exit, expected to hold the actual size of the compressed 66 + * data. 67 + * 68 + * Returns: the compression type used. Zero is used to show that the data 69 + * could not be compressed; probably because we couldn't find the requested 70 + * compression mode. 71 + */ 72 + static int jffs2_selected_compress(u8 compr, unsigned char *data_in, 73 + unsigned char **cpage_out, u32 *datalen, u32 *cdatalen) 74 + { 75 + struct jffs2_compressor *this; 76 + int err, ret = JFFS2_COMPR_NONE; 77 + uint32_t orig_slen, orig_dlen; 78 + char *output_buf; 79 + 80 + output_buf = kmalloc(*cdatalen, GFP_KERNEL); 81 + if (!output_buf) { 82 + printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n"); 83 + return ret; 84 + } 85 + orig_slen = *datalen; 86 + orig_dlen = *cdatalen; 87 + spin_lock(&jffs2_compressor_list_lock); 88 + list_for_each_entry(this, &jffs2_compressor_list, list) { 89 + /* Skip decompress-only and disabled modules */ 90 + if (!this->compress || this->disabled) 91 + continue; 92 + 93 + /* Skip if not the desired compression type */ 94 + if (compr && (compr != this->compr)) 95 + continue; 96 + 97 + /* 98 + * Either compression type was unspecified, or we found our 99 + * compressor; either way, we're good to go. 
100 + */ 101 + this->usecount++; 102 + spin_unlock(&jffs2_compressor_list_lock); 103 + 104 + *datalen = orig_slen; 105 + *cdatalen = orig_dlen; 106 + err = this->compress(data_in, output_buf, datalen, cdatalen); 107 + 108 + spin_lock(&jffs2_compressor_list_lock); 109 + this->usecount--; 110 + if (!err) { 111 + /* Success */ 112 + ret = this->compr; 113 + this->stat_compr_blocks++; 114 + this->stat_compr_orig_size += *datalen; 115 + this->stat_compr_new_size += *cdatalen; 116 + break; 117 + } 118 + } 119 + spin_unlock(&jffs2_compressor_list_lock); 120 + if (ret == JFFS2_COMPR_NONE) 121 + kfree(output_buf); 122 + else 123 + *cpage_out = output_buf; 124 + 125 + return ret; 126 + } 127 + 56 128 /* jffs2_compress: 57 129 * @data_in: Pointer to uncompressed data 58 130 * @cpage_out: Pointer to returned pointer to buffer for compressed data ··· 148 76 uint32_t *datalen, uint32_t *cdatalen) 149 77 { 150 78 int ret = JFFS2_COMPR_NONE; 151 - int compr_ret; 79 + int mode, compr_ret; 152 80 struct jffs2_compressor *this, *best=NULL; 153 81 unsigned char *output_buf = NULL, *tmp_buf; 154 82 uint32_t orig_slen, orig_dlen; 155 83 uint32_t best_slen=0, best_dlen=0; 156 84 157 - switch (jffs2_compression_mode) { 85 + if (c->mount_opts.override_compr) 86 + mode = c->mount_opts.compr; 87 + else 88 + mode = jffs2_compression_mode; 89 + 90 + switch (mode) { 158 91 case JFFS2_COMPR_MODE_NONE: 159 92 break; 160 93 case JFFS2_COMPR_MODE_PRIORITY: 161 - output_buf = kmalloc(*cdatalen,GFP_KERNEL); 162 - if (!output_buf) { 163 - printk(KERN_WARNING "JFFS2: No memory for compressor allocation. 
Compression failed.\n"); 164 - goto out; 165 - } 166 - orig_slen = *datalen; 167 - orig_dlen = *cdatalen; 168 - spin_lock(&jffs2_compressor_list_lock); 169 - list_for_each_entry(this, &jffs2_compressor_list, list) { 170 - /* Skip decompress-only backwards-compatibility and disabled modules */ 171 - if ((!this->compress)||(this->disabled)) 172 - continue; 173 - 174 - this->usecount++; 175 - spin_unlock(&jffs2_compressor_list_lock); 176 - *datalen = orig_slen; 177 - *cdatalen = orig_dlen; 178 - compr_ret = this->compress(data_in, output_buf, datalen, cdatalen); 179 - spin_lock(&jffs2_compressor_list_lock); 180 - this->usecount--; 181 - if (!compr_ret) { 182 - ret = this->compr; 183 - this->stat_compr_blocks++; 184 - this->stat_compr_orig_size += *datalen; 185 - this->stat_compr_new_size += *cdatalen; 186 - break; 187 - } 188 - } 189 - spin_unlock(&jffs2_compressor_list_lock); 190 - if (ret == JFFS2_COMPR_NONE) 191 - kfree(output_buf); 94 + ret = jffs2_selected_compress(0, data_in, cpage_out, datalen, 95 + cdatalen); 192 96 break; 193 97 case JFFS2_COMPR_MODE_SIZE: 194 98 case JFFS2_COMPR_MODE_FAVOURLZO: ··· 222 174 best->stat_compr_orig_size += best_slen; 223 175 best->stat_compr_new_size += best_dlen; 224 176 ret = best->compr; 177 + *cpage_out = output_buf; 225 178 } 226 179 spin_unlock(&jffs2_compressor_list_lock); 180 + break; 181 + case JFFS2_COMPR_MODE_FORCELZO: 182 + ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in, 183 + cpage_out, datalen, cdatalen); 184 + break; 185 + case JFFS2_COMPR_MODE_FORCEZLIB: 186 + ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in, 187 + cpage_out, datalen, cdatalen); 227 188 break; 228 189 default: 229 190 printk(KERN_ERR "JFFS2: unknown compression mode.\n"); 230 191 } 231 - out: 192 + 232 193 if (ret == JFFS2_COMPR_NONE) { 233 194 *cpage_out = data_in; 234 195 *datalen = *cdatalen; 235 196 none_stat_compr_blocks++; 236 197 none_stat_compr_size += *datalen; 237 - } 238 - else { 239 - *cpage_out = output_buf; 240 198 } 
241 199 return ret; 242 200 }
+2
fs/jffs2/compr.h
··· 40 40 #define JFFS2_COMPR_MODE_PRIORITY 1 41 41 #define JFFS2_COMPR_MODE_SIZE 2 42 42 #define JFFS2_COMPR_MODE_FAVOURLZO 3 43 + #define JFFS2_COMPR_MODE_FORCELZO 4 44 + #define JFFS2_COMPR_MODE_FORCEZLIB 5 43 45 44 46 #define FAVOUR_LZO_PERCENT 80 45 47
+1 -1
fs/jffs2/fs.c
··· 379 379 jffs2_do_setattr(inode, &iattr); 380 380 } 381 381 382 - int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) 382 + int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data) 383 383 { 384 384 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 385 385
+6
fs/jffs2/jffs2_fs_sb.h
··· 29 29 30 30 struct jffs2_inodirty; 31 31 32 + struct jffs2_mount_opts { 33 + bool override_compr; 34 + unsigned int compr; 35 + }; 36 + 32 37 /* A struct for the overall file system control. Pointers to 33 38 jffs2_sb_info structs are named `c' in the source code. 34 39 Nee jffs_control ··· 131 126 #endif 132 127 133 128 struct jffs2_summary *summary; /* Summary information */ 129 + struct jffs2_mount_opts mount_opts; 134 130 135 131 #ifdef CONFIG_JFFS2_FS_XATTR 136 132 #define XATTRINDEX_HASHSIZE (57)
+1 -1
fs/jffs2/os-linux.h
··· 176 176 struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, 177 177 struct jffs2_raw_inode *ri); 178 178 int jffs2_statfs (struct dentry *, struct kstatfs *); 179 - int jffs2_remount_fs (struct super_block *, int *, char *); 179 + int jffs2_do_remount_fs(struct super_block *, int *, char *); 180 180 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent); 181 181 void jffs2_gc_release_inode(struct jffs2_sb_info *c, 182 182 struct jffs2_inode_info *f);
+1 -3
fs/jffs2/scan.c
··· 275 275 else 276 276 c->mtd->unpoint(c->mtd, 0, c->mtd->size); 277 277 #endif 278 - if (s) 279 - kfree(s); 280 - 278 + kfree(s); 281 279 return ret; 282 280 } 283 281
+119
fs/jffs2/super.c
··· 17 17 #include <linux/fs.h> 18 18 #include <linux/err.h> 19 19 #include <linux/mount.h> 20 + #include <linux/parser.h> 20 21 #include <linux/jffs2.h> 21 22 #include <linux/pagemap.h> 22 23 #include <linux/mtd/super.h> 23 24 #include <linux/ctype.h> 24 25 #include <linux/namei.h> 26 + #include <linux/seq_file.h> 25 27 #include <linux/exportfs.h> 26 28 #include "compr.h" 27 29 #include "nodelist.h" ··· 75 73 } 76 74 77 75 unlock_super(sb); 76 + } 77 + 78 + static const char *jffs2_compr_name(unsigned int compr) 79 + { 80 + switch (compr) { 81 + case JFFS2_COMPR_MODE_NONE: 82 + return "none"; 83 + #ifdef CONFIG_JFFS2_LZO 84 + case JFFS2_COMPR_MODE_FORCELZO: 85 + return "lzo"; 86 + #endif 87 + #ifdef CONFIG_JFFS2_ZLIB 88 + case JFFS2_COMPR_MODE_FORCEZLIB: 89 + return "zlib"; 90 + #endif 91 + default: 92 + /* should never happen; programmer error */ 93 + WARN_ON(1); 94 + return ""; 95 + } 96 + } 97 + 98 + static int jffs2_show_options(struct seq_file *s, struct vfsmount *mnt) 99 + { 100 + struct jffs2_sb_info *c = JFFS2_SB_INFO(mnt->mnt_sb); 101 + struct jffs2_mount_opts *opts = &c->mount_opts; 102 + 103 + if (opts->override_compr) 104 + seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); 105 + 106 + return 0; 78 107 } 79 108 80 109 static int jffs2_sync_fs(struct super_block *sb, int wait) ··· 166 133 .fh_to_parent = jffs2_fh_to_parent, 167 134 }; 168 135 136 + /* 137 + * JFFS2 mount options. 
138 + * 139 + * Opt_override_compr: override default compressor 140 + * Opt_err: just end of array marker 141 + */ 142 + enum { 143 + Opt_override_compr, 144 + Opt_err, 145 + }; 146 + 147 + static const match_table_t tokens = { 148 + {Opt_override_compr, "compr=%s"}, 149 + {Opt_err, NULL}, 150 + }; 151 + 152 + static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) 153 + { 154 + substring_t args[MAX_OPT_ARGS]; 155 + char *p, *name; 156 + 157 + if (!data) 158 + return 0; 159 + 160 + while ((p = strsep(&data, ","))) { 161 + int token; 162 + 163 + if (!*p) 164 + continue; 165 + 166 + token = match_token(p, tokens, args); 167 + switch (token) { 168 + case Opt_override_compr: 169 + name = match_strdup(&args[0]); 170 + 171 + if (!name) 172 + return -ENOMEM; 173 + if (!strcmp(name, "none")) 174 + c->mount_opts.compr = JFFS2_COMPR_MODE_NONE; 175 + #ifdef CONFIG_JFFS2_LZO 176 + else if (!strcmp(name, "lzo")) 177 + c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO; 178 + #endif 179 + #ifdef CONFIG_JFFS2_ZLIB 180 + else if (!strcmp(name, "zlib")) 181 + c->mount_opts.compr = 182 + JFFS2_COMPR_MODE_FORCEZLIB; 183 + #endif 184 + else { 185 + printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"", 186 + name); 187 + kfree(name); 188 + return -EINVAL; 189 + } 190 + kfree(name); 191 + c->mount_opts.override_compr = true; 192 + break; 193 + default: 194 + printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n", 195 + p); 196 + return -EINVAL; 197 + } 198 + } 199 + 200 + return 0; 201 + } 202 + 203 + static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data) 204 + { 205 + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 206 + int err; 207 + 208 + err = jffs2_parse_options(c, data); 209 + if (err) 210 + return -EINVAL; 211 + 212 + return jffs2_do_remount_fs(sb, flags, data); 213 + } 214 + 169 215 static const struct super_operations jffs2_super_operations = 170 216 { 171 217 .alloc_inode = jffs2_alloc_inode, ··· 255 143 .remount_fs = 
jffs2_remount_fs, 256 144 .evict_inode = jffs2_evict_inode, 257 145 .dirty_inode = jffs2_dirty_inode, 146 + .show_options = jffs2_show_options, 258 147 .sync_fs = jffs2_sync_fs, 259 148 }; 260 149 ··· 278 165 c->mtd = sb->s_mtd; 279 166 c->os_priv = sb; 280 167 sb->s_fs_info = c; 168 + 169 + ret = jffs2_parse_options(c, data); 170 + if (ret) { 171 + kfree(c); 172 + return -EINVAL; 173 + } 281 174 282 175 /* Initialize JFFS2 superblock locks, the further initialization will 283 176 * be done later */
+4 -5
fs/jffs2/wbuf.c
··· 578 578 if (!jffs2_is_writebuffered(c)) 579 579 return 0; 580 580 581 - if (mutex_trylock(&c->alloc_sem)) { 582 - mutex_unlock(&c->alloc_sem); 581 + if (!mutex_is_locked(&c->alloc_sem)) { 583 582 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); 584 583 BUG(); 585 584 } ··· 1025 1026 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1026 1027 struct mtd_oob_ops ops; 1027 1028 1028 - ops.mode = MTD_OOB_AUTO; 1029 + ops.mode = MTD_OPS_AUTO_OOB; 1029 1030 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; 1030 1031 ops.oobbuf = c->oobbuf; 1031 1032 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ··· 1068 1069 struct mtd_oob_ops ops; 1069 1070 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1070 1071 1071 - ops.mode = MTD_OOB_AUTO; 1072 + ops.mode = MTD_OPS_AUTO_OOB; 1072 1073 ops.ooblen = cmlen; 1073 1074 ops.oobbuf = c->oobbuf; 1074 1075 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ··· 1094 1095 struct mtd_oob_ops ops; 1095 1096 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1096 1097 1097 - ops.mode = MTD_OOB_AUTO; 1098 + ops.mode = MTD_OPS_AUTO_OOB; 1098 1099 ops.ooblen = cmlen; 1099 1100 ops.oobbuf = (uint8_t *)&oob_cleanmarker; 1100 1101 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+27 -12
include/linux/mtd/bbm.h
··· 86 86 #define NAND_BBT_VERSION 0x00000100 87 87 /* Create a bbt if none exists */ 88 88 #define NAND_BBT_CREATE 0x00000200 89 + /* 90 + * Create an empty BBT with no vendor information. Vendor's information may be 91 + * unavailable, for example, if the NAND controller has a different data and OOB 92 + * layout or if this information is already purged. Must be used in conjunction 93 + * with NAND_BBT_CREATE. 94 + */ 95 + #define NAND_BBT_CREATE_EMPTY 0x00000400 89 96 /* Search good / bad pattern through all pages of a block */ 90 - #define NAND_BBT_SCANALLPAGES 0x00000400 97 + #define NAND_BBT_SCANALLPAGES 0x00000800 91 98 /* Scan block empty during good / bad block scan */ 92 - #define NAND_BBT_SCANEMPTY 0x00000800 99 + #define NAND_BBT_SCANEMPTY 0x00001000 93 100 /* Write bbt if neccecary */ 94 - #define NAND_BBT_WRITE 0x00001000 101 + #define NAND_BBT_WRITE 0x00002000 95 102 /* Read and write back block contents when writing bbt */ 96 - #define NAND_BBT_SAVECONTENT 0x00002000 103 + #define NAND_BBT_SAVECONTENT 0x00004000 97 104 /* Search good / bad pattern on the first and the second page */ 98 - #define NAND_BBT_SCAN2NDPAGE 0x00004000 105 + #define NAND_BBT_SCAN2NDPAGE 0x00008000 99 106 /* Search good / bad pattern on the last page of the eraseblock */ 100 - #define NAND_BBT_SCANLASTPAGE 0x00008000 101 - /* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */ 102 - #define NAND_BBT_SCANBYTE1AND6 0x00100000 103 - /* The nand_bbt_descr was created dynamicaly and must be freed */ 104 - #define NAND_BBT_DYNAMICSTRUCT 0x00200000 105 - /* The bad block table does not OOB for marker */ 106 - #define NAND_BBT_NO_OOB 0x00400000 107 + #define NAND_BBT_SCANLASTPAGE 0x00010000 108 + /* 109 + * Use a flash based bad block table. By default, OOB identifier is saved in 110 + * OOB area. This option is passed to the default bad block table function. 
111 + */ 112 + #define NAND_BBT_USE_FLASH 0x00020000 113 + /* Do not store flash based bad block table in OOB area; store it in-band */ 114 + #define NAND_BBT_NO_OOB 0x00040000 115 + 116 + /* 117 + * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr 118 + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning 119 + * in nand_chip.bbt_options. 120 + */ 121 + #define NAND_BBT_DYNAMICSTRUCT 0x80000000 107 122 108 123 /* The maximum number of blocks to scan for a bbt */ 109 124 #define NAND_BBT_SCAN_MAXBLOCKS 4
+29 -47
include/linux/mtd/mtd.h
··· 32 32 #define MTD_CHAR_MAJOR 90 33 33 #define MTD_BLOCK_MAJOR 31 34 34 35 - #define MTD_ERASE_PENDING 0x01 35 + #define MTD_ERASE_PENDING 0x01 36 36 #define MTD_ERASING 0x02 37 37 #define MTD_ERASE_SUSPEND 0x04 38 - #define MTD_ERASE_DONE 0x08 39 - #define MTD_ERASE_FAILED 0x10 38 + #define MTD_ERASE_DONE 0x08 39 + #define MTD_ERASE_FAILED 0x10 40 40 41 41 #define MTD_FAIL_ADDR_UNKNOWN -1LL 42 42 43 - /* If the erase fails, fail_addr might indicate exactly which block failed. If 44 - fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not 45 - specific to any particular block. */ 43 + /* 44 + * If the erase fails, fail_addr might indicate exactly which block failed. If 45 + * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level 46 + * or was not specific to any particular block. 47 + */ 46 48 struct erase_info { 47 49 struct mtd_info *mtd; 48 50 uint64_t addr; ··· 61 59 }; 62 60 63 61 struct mtd_erase_region_info { 64 - uint64_t offset; /* At which this region starts, from the beginning of the MTD */ 62 + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ 65 63 uint32_t erasesize; /* For this region */ 66 64 uint32_t numblocks; /* Number of blocks of erasesize in this region */ 67 65 unsigned long *lockmap; /* If keeping bitmap of locks */ 68 66 }; 69 - 70 - /* 71 - * oob operation modes 72 - * 73 - * MTD_OOB_PLACE: oob data are placed at the given offset 74 - * MTD_OOB_AUTO: oob data are automatically placed at the free areas 75 - * which are defined by the ecclayout 76 - * MTD_OOB_RAW: mode to read oob and data without doing ECC checking 77 - */ 78 - typedef enum { 79 - MTD_OOB_PLACE, 80 - MTD_OOB_AUTO, 81 - MTD_OOB_RAW, 82 - } mtd_oob_mode_t; 83 67 84 68 /** 85 69 * struct mtd_oob_ops - oob operation operands ··· 78 90 * @ooblen: number of oob bytes to write/read 79 91 * @oobretlen: number of oob bytes written/read 80 92 * @ooboffs: offset of oob data in the oob area (only 
relevant when 81 - * mode = MTD_OOB_PLACE) 93 + * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW) 82 94 * @datbuf: data buffer - if NULL only oob data are read/written 83 95 * @oobbuf: oob data buffer 84 96 * ··· 87 99 * OOB area. 88 100 */ 89 101 struct mtd_oob_ops { 90 - mtd_oob_mode_t mode; 102 + unsigned int mode; 91 103 size_t len; 92 104 size_t retlen; 93 105 size_t ooblen; ··· 161 173 const char *name; 162 174 int index; 163 175 164 - /* ecc layout structure pointer - read only ! */ 176 + /* ECC layout structure pointer - read only! */ 165 177 struct nand_ecclayout *ecclayout; 166 178 167 179 /* Data for variable erase regions. If numeraseregions is zero, ··· 312 324 /* Kernel-side ioctl definitions */ 313 325 314 326 struct mtd_partition; 327 + struct mtd_part_parser_data; 315 328 316 - extern int mtd_device_register(struct mtd_info *master, 317 - const struct mtd_partition *parts, 318 - int nr_parts); 329 + extern int mtd_device_parse_register(struct mtd_info *mtd, 330 + const char **part_probe_types, 331 + struct mtd_part_parser_data *parser_data, 332 + const struct mtd_partition *defparts, 333 + int defnr_parts); 334 + #define mtd_device_register(master, parts, nr_parts) \ 335 + mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) 319 336 extern int mtd_device_unregister(struct mtd_info *master); 320 337 extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); 321 338 extern int __get_mtd_device(struct mtd_info *mtd); ··· 349 356 350 357 void mtd_erase_callback(struct erase_info *instr); 351 358 352 - /* 353 - * Debugging macro and defines 354 - */ 355 - #define MTD_DEBUG_LEVEL0 (0) /* Quiet */ 356 - #define MTD_DEBUG_LEVEL1 (1) /* Audible */ 357 - #define MTD_DEBUG_LEVEL2 (2) /* Loud */ 358 - #define MTD_DEBUG_LEVEL3 (3) /* Noisy */ 359 + static inline int mtd_is_bitflip(int err) { 360 + return err == -EUCLEAN; 361 + } 359 362 360 - #ifdef CONFIG_MTD_DEBUG 361 - #define DEBUG(n, args...) 
\ 362 - do { \ 363 - if (n <= CONFIG_MTD_DEBUG_VERBOSE) \ 364 - printk(KERN_INFO args); \ 365 - } while(0) 366 - #else /* CONFIG_MTD_DEBUG */ 367 - #define DEBUG(n, args...) \ 368 - do { \ 369 - if (0) \ 370 - printk(KERN_INFO args); \ 371 - } while(0) 363 + static inline int mtd_is_eccerr(int err) { 364 + return err == -EBADMSG; 365 + } 372 366 373 - #endif /* CONFIG_MTD_DEBUG */ 367 + static inline int mtd_is_bitflip_or_eccerr(int err) { 368 + return mtd_is_bitflip(err) || mtd_is_eccerr(err); 369 + } 374 370 375 371 #endif /* __MTD_MTD_H__ */
+43 -49
include/linux/mtd/nand.h
··· 42 42 /* Internal helper for board drivers which need to override command function */ 43 43 extern void nand_wait_ready(struct mtd_info *mtd); 44 44 45 - /* locks all blockes present in the device */ 45 + /* locks all blocks present in the device */ 46 46 extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 47 47 48 - /* unlocks specified locked blockes */ 48 + /* unlocks specified locked blocks */ 49 49 extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 50 50 51 51 /* The maximum number of NAND chips in an array */ ··· 150 150 #define NAND_ECC_READ 0 151 151 /* Reset Hardware ECC for write */ 152 152 #define NAND_ECC_WRITE 1 153 - /* Enable Hardware ECC before syndrom is read back from flash */ 153 + /* Enable Hardware ECC before syndrome is read back from flash */ 154 154 #define NAND_ECC_READSYN 2 155 155 156 156 /* Bit mask for flags passed to do_nand_read_ecc */ ··· 163 163 */ 164 164 /* Chip can not auto increment pages */ 165 165 #define NAND_NO_AUTOINCR 0x00000001 166 - /* Buswitdh is 16 bit */ 166 + /* Buswidth is 16 bit */ 167 167 #define NAND_BUSWIDTH_16 0x00000002 168 168 /* Device supports partial programming without padding */ 169 169 #define NAND_NO_PADDING 0x00000004 ··· 219 219 #define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) 220 220 221 221 /* Non chip related options */ 222 - /* 223 - * Use a flash based bad block table. OOB identifier is saved in OOB area. 224 - * This option is passed to the default bad block table function. 225 - */ 226 - #define NAND_USE_FLASH_BBT 0x00010000 227 222 /* This option skips the bbt scan during initialization. */ 228 - #define NAND_SKIP_BBTSCAN 0x00020000 223 + #define NAND_SKIP_BBTSCAN 0x00010000 229 224 /* 230 225 * This option is defined if the board driver allocates its own buffers 231 226 * (e.g. because it needs them DMA-coherent). 
232 227 */ 233 - #define NAND_OWN_BUFFERS 0x00040000 228 + #define NAND_OWN_BUFFERS 0x00020000 234 229 /* Chip may not exist, so silence any errors in scan */ 235 - #define NAND_SCAN_SILENT_NODEV 0x00080000 236 - /* 237 - * If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch 238 - * the OOB area. 239 - */ 240 - #define NAND_USE_FLASH_BBT_NO_OOB 0x00800000 241 - /* Create an empty BBT with no vendor information if the BBT is available */ 242 - #define NAND_CREATE_EMPTY_BBT 0x01000000 230 + #define NAND_SCAN_SILENT_NODEV 0x00040000 243 231 244 232 /* Options set by nand scan */ 245 233 /* Nand scan has allocated controller struct */ ··· 319 331 }; 320 332 321 333 /** 322 - * struct nand_ecc_ctrl - Control structure for ecc 323 - * @mode: ecc mode 324 - * @steps: number of ecc steps per page 325 - * @size: data bytes per ecc step 326 - * @bytes: ecc bytes per step 327 - * @total: total number of ecc bytes per page 328 - * @prepad: padding information for syndrome based ecc generators 329 - * @postpad: padding information for syndrome based ecc generators 334 + * struct nand_ecc_ctrl - Control structure for ECC 335 + * @mode: ECC mode 336 + * @steps: number of ECC steps per page 337 + * @size: data bytes per ECC step 338 + * @bytes: ECC bytes per step 339 + * @total: total number of ECC bytes per page 340 + * @prepad: padding information for syndrome based ECC generators 341 + * @postpad: padding information for syndrome based ECC generators 330 342 * @layout: ECC layout control struct pointer 331 - * @priv: pointer to private ecc control data 332 - * @hwctl: function to control hardware ecc generator. Must only 343 + * @priv: pointer to private ECC control data 344 + * @hwctl: function to control hardware ECC generator. 
Must only 333 345 * be provided if an hardware ECC is available 334 - * @calculate: function for ecc calculation or readback from ecc hardware 335 - * @correct: function for ecc correction, matching to ecc generator (sw/hw) 346 + * @calculate: function for ECC calculation or readback from ECC hardware 347 + * @correct: function for ECC correction, matching to ECC generator (sw/hw) 336 348 * @read_page_raw: function to read a raw page without ECC 337 349 * @write_page_raw: function to write a raw page without ECC 338 - * @read_page: function to read a page according to the ecc generator 350 + * @read_page: function to read a page according to the ECC generator 339 351 * requirements. 340 352 * @read_subpage: function to read parts of the page covered by ECC. 341 - * @write_page: function to write a page according to the ecc generator 353 + * @write_page: function to write a page according to the ECC generator 342 354 * requirements. 355 + * @write_oob_raw: function to write chip OOB data without ECC 356 + * @read_oob_raw: function to read chip OOB data without ECC 343 357 * @read_oob: function to read chip OOB data 344 358 * @write_oob: function to write chip OOB data 345 359 */ ··· 370 380 uint32_t offs, uint32_t len, uint8_t *buf); 371 381 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, 372 382 const uint8_t *buf); 383 + int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 384 + int page); 385 + int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 386 + int page, int sndcmd); 373 387 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page, 374 388 int sndcmd); 375 389 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, ··· 382 388 383 389 /** 384 390 * struct nand_buffers - buffer structure for read/write 385 - * @ecccalc: buffer for calculated ecc 386 - * @ecccode: buffer for ecc read from flash 391 + * @ecccalc: buffer for calculated ECC 392 + * @ecccode: buffer for ECC read from flash 387 393 
* @databuf: buffer for data - dynamically sized 388 394 * 389 395 * Do not change the order of buffers. databuf and oobrbuf must be in ··· 416 422 * mtd->oobsize, mtd->writesize and so on. 417 423 * @id_data contains the 8 bytes values of NAND_CMD_READID. 418 424 * Return with the bus width. 419 - * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accesing 425 + * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing 420 426 * device ready/busy line. If set to NULL no access to 421 427 * ready/busy is available and the ready/busy information 422 428 * is read from the chip status register. ··· 424 430 * commands to the chip. 425 431 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on 426 432 * ready. 427 - * @ecc: [BOARDSPECIFIC] ecc control ctructure 433 + * @ecc: [BOARDSPECIFIC] ECC control structure 428 434 * @buffers: buffer structure for read/write 429 435 * @hwcontrol: platform-specific hardware control structure 430 - * @ops: oob operation operands 431 436 * @erase_cmd: [INTERN] erase command write function, selectable due 432 437 * to AND support. 433 438 * @scan_bbt: [REPLACEABLE] function to scan bad block table 434 439 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring 435 440 * data from array to read regs (tR). 436 441 * @state: [INTERN] the current state of the NAND device 437 - * @oob_poi: poison value buffer 442 + * @oob_poi: "poison value buffer," used for laying out OOB data 443 + * before writing 438 444 * @page_shift: [INTERN] number of address bits in a page (column 439 445 * address bits). 440 446 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock ··· 443 449 * @options: [BOARDSPECIFIC] various chip options. They can partly 444 450 * be set to inform nand_scan about special functionality. 445 451 * See the defines for further explanation. 452 + * @bbt_options: [INTERN] bad block specific options. All options used 453 + * here must come from bbm.h. 
By default, these options 454 + * will be copied to the appropriate nand_bbt_descr's. 446 455 * @badblockpos: [INTERN] position of the bad block marker in the oob 447 456 * area. 448 457 * @badblockbits: [INTERN] number of bits to left-shift the bad block ··· 461 464 * non 0 if ONFI supported. 462 465 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is 463 466 * supported, 0 otherwise. 464 - * @ecclayout: [REPLACEABLE] the default ecc placement scheme 467 + * @ecclayout: [REPLACEABLE] the default ECC placement scheme 465 468 * @bbt: [INTERN] bad block table pointer 466 469 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash 467 470 * lookup. ··· 469 472 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial 470 473 * bad block scan. 471 474 * @controller: [REPLACEABLE] a pointer to a hardware controller 472 - * structure which is shared among multiple independend 475 + * structure which is shared among multiple independent 473 476 * devices. 474 - * @priv: [OPTIONAL] pointer to private chip date 477 + * @priv: [OPTIONAL] pointer to private chip data 475 478 * @errstat: [OPTIONAL] hardware specific function to perform 476 479 * additional error status checks (determine if errors are 477 480 * correctable). ··· 506 509 507 510 int chip_delay; 508 511 unsigned int options; 512 + unsigned int bbt_options; 509 513 510 514 int page_shift; 511 515 int phys_erase_shift; ··· 533 535 struct nand_ecc_ctrl ecc; 534 536 struct nand_buffers *buffers; 535 537 struct nand_hw_control hwcontrol; 536 - 537 - struct mtd_oob_ops ops; 538 538 539 539 uint8_t *bbt; 540 540 struct nand_bbt_descr *bbt_td; ··· 607 611 * @partitions: mtd partition list 608 612 * @chip_delay: R/B delay value in us 609 613 * @options: Option flags, e.g. 16bit buswidth 610 - * @ecclayout: ecc layout info structure 614 + * @bbt_options: BBT option flags, e.g. 
NAND_BBT_USE_FLASH 615 + * @ecclayout: ECC layout info structure 611 616 * @part_probe_types: NULL-terminated array of probe types 612 - * @set_parts: platform specific function to set partitions 613 - * @priv: hardware controller specific settings 614 617 */ 615 618 struct platform_nand_chip { 616 619 int nr_chips; ··· 619 624 struct nand_ecclayout *ecclayout; 620 625 int chip_delay; 621 626 unsigned int options; 627 + unsigned int bbt_options; 622 628 const char **part_probe_types; 623 - void (*set_parts)(uint64_t size, struct platform_nand_chip *chip); 624 - void *priv; 625 629 }; 626 630 627 631 /* Keep gcc happy */
+4
include/linux/mtd/onenand.h
··· 184 184 #define ONENAND_IS_CACHE_PROGRAM(this) \ 185 185 (this->options & ONENAND_HAS_CACHE_PROGRAM) 186 186 187 + #define ONENAND_IS_NOP_1(this) \ 188 + (this->options & ONENAND_HAS_NOP_1) 189 + 187 190 /* Check byte access in OneNAND */ 188 191 #define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) 189 192 ··· 198 195 #define ONENAND_HAS_2PLANE (0x0004) 199 196 #define ONENAND_HAS_4KB_PAGE (0x0008) 200 197 #define ONENAND_HAS_CACHE_PROGRAM (0x0010) 198 + #define ONENAND_HAS_NOP_1 (0x0020) 201 199 #define ONENAND_SKIP_UNLOCK_CHECK (0x0100) 202 200 #define ONENAND_PAGEBUF_ALLOC (0x1000) 203 201 #define ONENAND_OOBBUF_ALLOC (0x2000)
+18 -28
include/linux/mtd/partitions.h
··· 24 24 * will extend to the end of the master MTD device. 25 25 * offset: absolute starting position within the master MTD device; if 26 26 * defined as MTDPART_OFS_APPEND, the partition will start where the 27 - * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block. 27 + * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block; 28 + * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size 29 + * after the end of partition. 28 30 * mask_flags: contains flags that have to be masked (removed) from the 29 31 * master MTD flag set for the corresponding MTD partition. 30 32 * For example, to force a read-only partition, simply adding ··· 44 42 struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ 45 43 }; 46 44 45 + #define MTDPART_OFS_RETAIN (-3) 47 46 #define MTDPART_OFS_NXTBLK (-2) 48 47 #define MTDPART_OFS_APPEND (-1) 49 48 #define MTDPART_SIZ_FULL (0) 50 49 51 50 52 51 struct mtd_info; 52 + struct device_node; 53 + 54 + /** 55 + * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
56 + * @origin: for RedBoot, start address of MTD device 57 + * @of_node: for OF parsers, device node containing partitioning information 58 + */ 59 + struct mtd_part_parser_data { 60 + unsigned long origin; 61 + struct device_node *of_node; 62 + }; 63 + 53 64 54 65 /* 55 66 * Functions dealing with the various ways of partitioning the space ··· 72 57 struct list_head list; 73 58 struct module *owner; 74 59 const char *name; 75 - int (*parse_fn)(struct mtd_info *, struct mtd_partition **, unsigned long); 60 + int (*parse_fn)(struct mtd_info *, struct mtd_partition **, 61 + struct mtd_part_parser_data *); 76 62 }; 77 63 78 64 extern int register_mtd_parser(struct mtd_part_parser *parser); 79 65 extern int deregister_mtd_parser(struct mtd_part_parser *parser); 80 - extern int parse_mtd_partitions(struct mtd_info *master, const char **types, 81 - struct mtd_partition **pparts, unsigned long origin); 82 - 83 - #define put_partition_parser(p) do { module_put((p)->owner); } while(0) 84 - 85 - struct device; 86 - struct device_node; 87 - 88 - #ifdef CONFIG_MTD_OF_PARTS 89 - int __devinit of_mtd_parse_partitions(struct device *dev, 90 - struct device_node *node, 91 - struct mtd_partition **pparts); 92 - #else 93 - static inline int of_mtd_parse_partitions(struct device *dev, 94 - struct device_node *node, 95 - struct mtd_partition **pparts) 96 - { 97 - return 0; 98 - } 99 - #endif 100 - 101 - #ifdef CONFIG_MTD_CMDLINE_PARTS 102 - static inline int mtd_has_cmdlinepart(void) { return 1; } 103 - #else 104 - static inline int mtd_has_cmdlinepart(void) { return 0; } 105 - #endif 106 66 107 67 int mtd_is_partition(struct mtd_info *mtd); 108 68 int mtd_add_partition(struct mtd_info *master, char *name,
-17
include/linux/mtd/physmap.h
··· 32 32 struct mtd_partition *parts; 33 33 }; 34 34 35 - /* 36 - * Board needs to specify the exact mapping during their setup time. 37 - */ 38 - void physmap_configure(unsigned long addr, unsigned long size, 39 - int bankwidth, void (*set_vpp)(struct map_info *, int) ); 40 - 41 - /* 42 - * Machines that wish to do flash partition may want to call this function in 43 - * their setup routine. 44 - * 45 - * physmap_set_partitions(mypartitions, num_parts); 46 - * 47 - * Note that one can always override this hard-coded partition with 48 - * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS). 49 - */ 50 - void physmap_set_partitions(struct mtd_partition *parts, int num_parts); 51 - 52 35 #endif /* __LINUX_MTD_PHYSMAP__ */
+105 -17
include/mtd/mtd-abi.h
··· 45 45 __u64 usr_ptr; 46 46 }; 47 47 48 + /** 49 + * MTD operation modes 50 + * 51 + * @MTD_OPS_PLACE_OOB: OOB data are placed at the given offset (default) 52 + * @MTD_OPS_AUTO_OOB: OOB data are automatically placed at the free areas 53 + * which are defined by the internal ecclayout 54 + * @MTD_OPS_RAW: data are transferred as-is, with no error correction; 55 + * this mode implies %MTD_OPS_PLACE_OOB 56 + * 57 + * These modes can be passed to ioctl(MEMWRITE) and are also used internally. 58 + * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs. 59 + * %MTD_FILE_MODE_RAW. 60 + */ 61 + enum { 62 + MTD_OPS_PLACE_OOB = 0, 63 + MTD_OPS_AUTO_OOB = 1, 64 + MTD_OPS_RAW = 2, 65 + }; 66 + 67 + /** 68 + * struct mtd_write_req - data structure for requesting a write operation 69 + * 70 + * @start: start address 71 + * @len: length of data buffer 72 + * @ooblen: length of OOB buffer 73 + * @usr_data: user-provided data buffer 74 + * @usr_oob: user-provided OOB buffer 75 + * @mode: MTD mode (see "MTD operation modes") 76 + * @padding: reserved, must be set to 0 77 + * 78 + * This structure supports ioctl(MEMWRITE) operations, allowing data and/or OOB 79 + * writes in various modes. To write to OOB-only, set @usr_data == NULL, and to 80 + * write data-only, set @usr_oob == NULL. However, setting both @usr_data and 81 + * @usr_oob to NULL is not allowed. 
82 + */ 83 + struct mtd_write_req { 84 + __u64 start; 85 + __u64 len; 86 + __u64 ooblen; 87 + __u64 usr_data; 88 + __u64 usr_oob; 89 + __u8 mode; 90 + __u8 padding[7]; 91 + }; 92 + 48 93 #define MTD_ABSENT 0 49 94 #define MTD_RAM 1 50 95 #define MTD_ROM 2 ··· 104 59 #define MTD_NO_ERASE 0x1000 /* No erase necessary */ 105 60 #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ 106 61 107 - // Some common devices / combinations of capabilities 62 + /* Some common devices / combinations of capabilities */ 108 63 #define MTD_CAP_ROM 0 109 64 #define MTD_CAP_RAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE) 110 65 #define MTD_CAP_NORFLASH (MTD_WRITEABLE | MTD_BIT_WRITEABLE) 111 66 #define MTD_CAP_NANDFLASH (MTD_WRITEABLE) 112 67 113 - /* ECC byte placement */ 68 + /* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */ 114 69 #define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) 115 70 #define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) 116 71 #define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme ··· 125 80 struct mtd_info_user { 126 81 __u8 type; 127 82 __u32 flags; 128 - __u32 size; // Total size of the MTD 83 + __u32 size; /* Total size of the MTD */ 129 84 __u32 erasesize; 130 85 __u32 writesize; 131 - __u32 oobsize; // Amount of OOB data per block (e.g. 16) 132 - /* The below two fields are obsolete and broken, do not use them 133 - * (TODO: remove at some point) */ 134 - __u32 ecctype; 135 - __u32 eccsize; 86 + __u32 oobsize; /* Amount of OOB data per block (e.g. 
16) */ 87 + __u64 padding; /* Old obsolete field; do not use */ 136 88 }; 137 89 138 90 struct region_info_user { 139 91 __u32 offset; /* At which this region starts, 140 - * from the beginning of the MTD */ 141 - __u32 erasesize; /* For this region */ 142 - __u32 numblocks; /* Number of blocks in this region */ 92 + * from the beginning of the MTD */ 93 + __u32 erasesize; /* For this region */ 94 + __u32 numblocks; /* Number of blocks in this region */ 143 95 __u32 regionindex; 144 96 }; 145 97 ··· 146 104 __u32 locked; 147 105 }; 148 106 107 + /* 108 + * Note, the following ioctl existed in the past and was removed: 109 + * #define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo) 110 + * Try to avoid adding a new ioctl with the same ioctl number. 111 + */ 112 + 113 + /* Get basic MTD characteristics info (better to use sysfs) */ 149 114 #define MEMGETINFO _IOR('M', 1, struct mtd_info_user) 115 + /* Erase segment of MTD */ 150 116 #define MEMERASE _IOW('M', 2, struct erase_info_user) 117 + /* Write out-of-band data from MTD */ 151 118 #define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf) 119 + /* Read out-of-band data from MTD */ 152 120 #define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf) 121 + /* Lock a chip (for MTD that supports it) */ 153 122 #define MEMLOCK _IOW('M', 5, struct erase_info_user) 123 + /* Unlock a chip (for MTD that supports it) */ 154 124 #define MEMUNLOCK _IOW('M', 6, struct erase_info_user) 125 + /* Get the number of different erase regions */ 155 126 #define MEMGETREGIONCOUNT _IOR('M', 7, int) 127 + /* Get information about the erase region for a specific index */ 156 128 #define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user) 157 - #define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo) 129 + /* Get info about OOB modes (e.g., RAW, PLACE, AUTO) - legacy interface */ 158 130 #define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo) 131 + /* Check if an eraseblock is bad */ 159 132 #define MEMGETBADBLOCK _IOW('M', 11, __kernel_loff_t) 133 
+ /* Mark an eraseblock as bad */ 160 134 #define MEMSETBADBLOCK _IOW('M', 12, __kernel_loff_t) 135 + /* Set OTP (One-Time Programmable) mode (factory vs. user) */ 161 136 #define OTPSELECT _IOR('M', 13, int) 137 + /* Get number of OTP (One-Time Programmable) regions */ 162 138 #define OTPGETREGIONCOUNT _IOW('M', 14, int) 139 + /* Get all OTP (One-Time Programmable) info about MTD */ 163 140 #define OTPGETREGIONINFO _IOW('M', 15, struct otp_info) 141 + /* Lock a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */ 164 142 #define OTPLOCK _IOR('M', 16, struct otp_info) 143 + /* Get ECC layout (deprecated) */ 165 144 #define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout_user) 145 + /* Get statistics about corrected/uncorrected errors */ 166 146 #define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats) 147 + /* Set MTD mode on a per-file-descriptor basis (see "MTD file modes") */ 167 148 #define MTDFILEMODE _IO('M', 19) 149 + /* Erase segment of MTD (supports 64-bit address) */ 168 150 #define MEMERASE64 _IOW('M', 20, struct erase_info_user64) 151 + /* Write data to OOB (64-bit version) */ 169 152 #define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64) 153 + /* Read data from OOB (64-bit version) */ 170 154 #define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64) 155 + /* Check if chip is locked (for MTD that supports it) */ 171 156 #define MEMISLOCKED _IOR('M', 23, struct erase_info_user) 157 + /* 158 + * Most generic write interface; can write in-band and/or out-of-band in various 159 + * modes (see "struct mtd_write_req") 160 + */ 161 + #define MEMWRITE _IOWR('M', 24, struct mtd_write_req) 172 162 173 163 /* 174 164 * Obsolete legacy interface. 
Keep it in order not to break userspace ··· 251 177 }; 252 178 253 179 /* 254 - * Read/write file modes for access to MTD 180 + * MTD file modes - for read/write access to MTD 181 + * 182 + * @MTD_FILE_MODE_NORMAL: OTP disabled, ECC enabled 183 + * @MTD_FILE_MODE_OTP_FACTORY: OTP enabled in factory mode 184 + * @MTD_FILE_MODE_OTP_USER: OTP enabled in user mode 185 + * @MTD_FILE_MODE_RAW: OTP disabled, ECC disabled 186 + * 187 + * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained 188 + * separately for each open file descriptor. 189 + * 190 + * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW - 191 + * raw access to the flash, without error correction or autoplacement schemes. 192 + * Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode 193 + * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is 194 + * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)). 255 195 */ 256 196 enum mtd_file_modes { 257 - MTD_MODE_NORMAL = MTD_OTP_OFF, 258 - MTD_MODE_OTP_FACTORY = MTD_OTP_FACTORY, 259 - MTD_MODE_OTP_USER = MTD_OTP_USER, 260 - MTD_MODE_RAW, 197 + MTD_FILE_MODE_NORMAL = MTD_OTP_OFF, 198 + MTD_FILE_MODE_OTP_FACTORY = MTD_OTP_FACTORY, 199 + MTD_FILE_MODE_OTP_USER = MTD_OTP_USER, 200 + MTD_FILE_MODE_RAW, 261 201 }; 262 202 263 203 #endif /* __MTD_ABI_H__ */