/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.54 2005/06/06 23:04:36 tpoynor Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/config.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

static inline int cfi_interleave_supported(int i)
{
        switch (i) {
#ifdef CONFIG_MTD_CFI_I1
        case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
        case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
        case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
        case 8:
#endif
                return 1;

        default:
                return 0;
        }
}


/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). E.g. a 32-bit device is 4 x 8 bits, i.e. 4 bytes.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */
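/*
 * Illustrative sketch (not part of this header): probe code can use
 * cfi_interleave_supported() to reject geometries the kernel was not
 * configured for, e.g.
 *
 *        if (!cfi_interleave_supported(cfi->interleave))
 *                return 0;        // hypothetical bail-out in a probe routine
 *
 * Note that when exactly one CONFIG_MTD_CFI_Ix option is enabled,
 * cfi_interleave(cfi) expands to a compile-time constant, so the geometry
 * arithmetic in the helpers below can be folded by the compiler; with more
 * than one option enabled it reads (cfi)->interleave at run time.
 */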
/* Basic Query Structure */
struct cfi_ident {
        uint8_t  qry[3];
        uint16_t P_ID;
        uint16_t P_ADR;
        uint16_t A_ID;
        uint16_t A_ADR;
        uint8_t  VccMin;
        uint8_t  VccMax;
        uint8_t  VppMin;
        uint8_t  VppMax;
        uint8_t  WordWriteTimeoutTyp;
        uint8_t  BufWriteTimeoutTyp;
        uint8_t  BlockEraseTimeoutTyp;
        uint8_t  ChipEraseTimeoutTyp;
        uint8_t  WordWriteTimeoutMax;
        uint8_t  BufWriteTimeoutMax;
        uint8_t  BlockEraseTimeoutMax;
        uint8_t  ChipEraseTimeoutMax;
        uint8_t  DevSize;
        uint16_t InterfaceDesc;
        uint16_t MaxBufWriteSize;
        uint8_t  NumEraseRegions;
        uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
        uint8_t  pri[3];
        uint8_t  MajorVersion;
        uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
        uint8_t  pri[3];
        uint8_t  MajorVersion;
        uint8_t  MinorVersion;
        uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
                                    block follows - FIXME - not currently supported */
        uint8_t  SuspendCmdSupport;
        uint16_t BlkStatusRegMask;
        uint8_t  VccOptimal;
        uint8_t  VppOptimal;
        uint8_t  NumProtectionFields;
        uint16_t ProtRegAddr;
        uint8_t  FactProtRegSize;
        uint8_t  UserProtRegSize;
        uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
        uint32_t ProtRegAddr;
        uint16_t FactGroups;
        uint8_t  FactProtRegSize;
        uint16_t UserGroups;
        uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
        uint16_t NumIdentBlocks;
        uint16_t BlockSize;
        uint16_t MinBlockEraseCycles;
        uint8_t  BitsPerCell;
        uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
        uint16_t NumIdentPartitions;
        uint8_t  NumOpAllowed;
        uint8_t  NumOpAllowedSimProgMode;
        uint8_t  NumOpAllowedSimEraMode;
        uint8_t  NumBlockTypes;
        struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
        uint8_t  pri[3];
        uint8_t  MajorVersion;
        uint8_t  MinorVersion;
        uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
        uint8_t  EraseSuspend;
        uint8_t  BlkProt;
        uint8_t  TmpBlkUnprotect;
        uint8_t  BlkProtUnprot;
        uint8_t  SimultaneousOps;
        uint8_t  BurstMode;
        uint8_t  PageMode;
        uint8_t  VppMin;
        uint8_t  VppMax;
        uint8_t  TopBottom;
} __attribute__((packed));

struct cfi_pri_query {
        uint8_t  NumFields;
        uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
        uint8_t  PageModeReadCap;
        uint8_t  NumFields;
        uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001
#define P_ID_AMD_STD            0x0002
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff


#define CFI_MODE_CFI    1
#define CFI_MODE_JEDEC  0

struct cfi_private {
        uint16_t cmdset;
        void *cmdset_priv;
        int interleave;
        int device_type;
        int cfi_mode;           /* Are we a JEDEC device pretending to be CFI? */
        int addr_unlock1;
        int addr_unlock2;
        struct mtd_info *(*cmdset_setup)(struct map_info *);
        struct cfi_ident *cfiq; /* For now only one. We insist that all devs
                                   must be of the same type. */
        int mfr, id;
        int numchips;
        unsigned long chipshift; /* Because they're of the same type */
        const char *im_name;     /* inter_module name for cmdset_setup */
        struct flchip chips[0];  /* per-chip data structure for each chip */
};
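/*
 * Illustrative sketch (not part of this header): a chip driver is chosen
 * from the primary vendor command-set ID reported in the CFI query,
 * roughly along these lines:
 *
 *        switch (cfi->cfiq->P_ID) {
 *        case P_ID_INTEL_EXT:
 *        case P_ID_INTEL_STD:
 *                // Intel/Sharp command set (cfi_cmdset_0001)
 *                break;
 *        case P_ID_AMD_STD:
 *                // AMD/Fujitsu command set (cfi_cmdset_0002)
 *                break;
 *        default:
 *                // no matching command-set driver
 *                break;
 *        }
 *
 * The real dispatch and the cfi_cmdset_* probe code live in
 * drivers/mtd/chips/, not in this header.
 */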
/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
        return (cmd_ofs * type) * interleave;
}

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
        map_word val = { {0} };
        int wordwidth, words_per_bus, chip_mode, chips_per_word;
        unsigned long onecmd;
        int i;

        /* We do it this way to give the compiler a fighting chance
           of optimising away all the crap for 'bankwidth' larger than
           an unsigned long, in the common case where that support is
           disabled */
        if (map_bankwidth_is_large(map)) {
                wordwidth = sizeof(unsigned long);
                words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
        } else {
                wordwidth = map_bankwidth(map);
                words_per_bus = 1;
        }

        chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
        chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

        /* First, determine what the bit-pattern should be for a single
           device, according to chip mode and endianness... */
        switch (chip_mode) {
        default: BUG();
        case 1:
                onecmd = cmd;
                break;
        case 2:
                onecmd = cpu_to_cfi16(cmd);
                break;
        case 4:
                onecmd = cpu_to_cfi32(cmd);
                break;
        }

        /* Now replicate it across the size of an unsigned long, or
           just to the bus width as appropriate */
        switch (chips_per_word) {
        default: BUG();
#if BITS_PER_LONG >= 64
        case 8:
                onecmd |= (onecmd << (chip_mode * 32));
#endif
        case 4:
                onecmd |= (onecmd << (chip_mode * 16));
        case 2:
                onecmd |= (onecmd << (chip_mode * 8));
        case 1:
                ;
        }

        /* And finally, for the multi-word case, replicate it
           in all words in the structure */
        for (i=0; i < words_per_bus; i++) {
                val.x[i] = onecmd;
        }

        return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)
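/*
 * Worked example (illustrative, assuming a hypothetical geometry and the
 * default, non-byte-swapped cfi_endian settings): for two x16 chips
 * interleaved on a 32-bit bus, map_bankwidth(map) == 4 and
 * cfi_interleave(cfi) == 2, so chip_mode == 2 and chips_per_word == 2.
 * CMD(0x98) (the CFI query command) then yields the map_word 0x00980098,
 * i.e. one copy of the command per interleaved chip, written in a single
 * bus cycle.  The case labels in the replication switch above fall through
 * deliberately: each step doubles the number of chip-sized copies of the
 * command held in 'onecmd'.
 */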

static inline unsigned char cfi_merge_status(map_word val, struct map_info *map,
                                             struct cfi_private *cfi)
{
        int wordwidth, words_per_bus, chip_mode, chips_per_word;
        unsigned long onestat, res = 0;
        int i;

        /* We do it this way to give the compiler a fighting chance
           of optimising away all the crap for 'bankwidth' larger than
           an unsigned long, in the common case where that support is
           disabled */
        if (map_bankwidth_is_large(map)) {
                wordwidth = sizeof(unsigned long);
                words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
        } else {
                wordwidth = map_bankwidth(map);
                words_per_bus = 1;
        }

        chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
        chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

        onestat = val.x[0];
        /* Or all status words together */
        for (i=1; i < words_per_bus; i++) {
                onestat |= val.x[i];
        }

        res = onestat;
        switch(chips_per_word) {
        default: BUG();
#if BITS_PER_LONG >= 64
        case 8:
                res |= (onestat >> (chip_mode * 32));
#endif
        case 4:
                res |= (onestat >> (chip_mode * 16));
        case 2:
                res |= (onestat >> (chip_mode * 8));
        case 1:
                ;
        }

        /* Last, determine what the bit-pattern should be for a single
           device, according to chip mode and endianness... */
        switch (chip_mode) {
        case 1:
                break;
        case 2:
                res = cfi16_to_cpu(res);
                break;
        case 4:
                res = cfi32_to_cpu(res);
                break;
        default: BUG();
        }
        return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)


/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
                                        struct map_info *map, struct cfi_private *cfi,
                                        int type, map_word *prev_val)
{
        map_word val;
        uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

        val = cfi_build_cmd(cmd, map, cfi);

        if (prev_val)
                *prev_val = map_read(map, addr);

        map_write(map, val, addr);

        return addr - base;
}

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
        map_word val = map_read(map, addr);

        if (map_bankwidth_is_1(map)) {
                return val.x[0];
        } else if (map_bankwidth_is_2(map)) {
                return cfi16_to_cpu(val.x[0]);
        } else {
                /* No point in a 64-bit byteswap since that would just be
                   swapping the responses from different chips, and we are
                   only interested in one chip (a representative sample) */
                return cfi32_to_cpu(val.x[0]);
        }
}

static inline void cfi_udelay(int us)
{
        if (us >= 1000) {
                msleep((us+999)/1000);
        } else {
                udelay(us);
                cond_resched();
        }
}

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
                                  const char* name);

struct cfi_fixup {
        uint16_t mfr;
        uint16_t id;
        void (*fixup)(struct mtd_info *mtd, void* param);
        void* param;
};

#define CFI_MFR_ANY     0xffff
#define CFI_ID_ANY      0xffff

#define CFI_MFR_AMD     0x0001
#define CFI_MFR_ST      0x0020  /* STMicroelectronics */

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
                              unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
                     loff_t ofs, size_t len, void *thunk);


#endif /* __MTD_CFI_H__ */
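/*
 * Usage sketch (illustrative only, not part of this header): command-set
 * drivers issue flash command sequences through cfi_send_gen_cmd().  An
 * AMD-style unlock preamble, for instance, looks roughly like:
 *
 *        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 *                         cfi->device_type, NULL);
 *        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
 *                         cfi->device_type, NULL);
 *
 * Each call scales the command offset by device type and interleave via
 * cfi_build_cmd_addr(), replicates the command across interleaved chips via
 * cfi_build_cmd(), and writes the result through the map driver.  The exact
 * sequences live in the cfi_cmdset_* drivers under drivers/mtd/chips/.
 */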