/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.56 2005/11/07 11:14:54 gleixner Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/config.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bits.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
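
/*
 * Illustrative sketch (not part of the original header): the bus width of a
 * map is the per-chip device type multiplied by the interleave, which is the
 * relationship cfi_build_cmd() below relies on.  For example, two x16 chips
 * wired side by side present a 32-bit bus:
 *
 *	cfi->device_type == CFI_DEVICETYPE_X16;	// 2 bytes per chip
 *	cfi->interleave  == 2;			// two chips in parallel
 *	map_bankwidth(map) == cfi_interleave(cfi) * cfi->device_type;	// == 4
 *
 * cfi_interleave_supported() above simply reports whether support for a given
 * interleave was compiled in via the corresponding CONFIG_MTD_CFI_Ix option.
 */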

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

struct cfi_intelext_programming_regioninfo {
	uint8_t  ProgRegShift;
	uint8_t  Reserved1;
	uint8_t  ControlValid;
	uint8_t  Reserved2;
	uint8_t  ControlInvalid;
	uint8_t  Reserved3;
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));
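
/*
 * Decoding sketch (illustrative only, following the CFI Query specification):
 * in the Basic Query Structure above, DevSize holds the per-chip size as a
 * power of two, and each EraseRegionInfo entry packs the block count and
 * block size of one erase region:
 *
 *	unsigned long devsize = 1UL << cfi->cfiq->DevSize;	// bytes per chip
 *	uint32_t info = cfi->cfiq->EraseRegionInfo[i];
 *	int nblocks = (info & 0xffff) + 1;			// blocks in region i
 *	unsigned long blksize = ((info >> 16) & 0xffff) * 256;	// block size in bytes
 *
 * EraseRegionInfo[] is flagged above as not being host ordered, so mind the
 * byte order before decoding it like this.
 */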

#define P_ID_NONE		0x0000
#define P_ID_INTEL_EXT		0x0001
#define P_ID_AMD_STD		0x0002
#define P_ID_INTEL_STD		0x0003
#define P_ID_AMD_EXT		0x0004
#define P_ID_WINBOND		0x0006
#define P_ID_ST_ADV		0x0020
#define P_ID_MITSUBISHI_STD	0x0100
#define P_ID_MITSUBISHI_EXT	0x0101
#define P_ID_SST_PAGE		0x0102
#define P_ID_INTEL_PERFORMANCE	0x0200
#define P_ID_INTEL_DATA		0x0210
#define P_ID_RESERVED		0xffff


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				   must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];	 /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
	return (cmd_ofs * type) * interleave;
}
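
/*
 * Worked example (illustrative only): for x16 parts (type ==
 * CFI_DEVICETYPE_X16 == 2) in 2-way interleave, the conventional 0x555
 * unlock offset becomes the byte address
 *
 *	cfi_build_cmd_addr(0x555, 2, CFI_DEVICETYPE_X16)
 *		== 0x555 * 2 * 2 == 0x1554
 *
 * i.e. the per-chip command offset is first scaled by the chip's bus width
 * in bytes and then by the number of interleaved chips.
 */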

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)


static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
					     struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}
	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)


/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
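
/*
 * Usage sketch (illustrative, modelled on how the command-set drivers call
 * this helper; chip->start and cfi come from the caller's context): an
 * AMD/Fujitsu style program sequence is simply three geometry-adjusted
 * writes:
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 *
 * Each call builds the interleaved command word with cfi_build_cmd() and
 * places it at the address computed by cfi_build_cmd_addr().
 */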

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
				  const char* name);

struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;
};

#define CFI_MFR_ANY	0xffff
#define CFI_ID_ANY	0xffff

#define CFI_MFR_AMD	0x0001
#define CFI_MFR_ST	0x0020	/* STMicroelectronics */

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);


#endif /* __MTD_CFI_H__ */
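
/*
 * Fixup-table sketch (illustrative only; fixup_example() is a hypothetical
 * driver-local function, not declared here): a chip driver describes its
 * quirks as an array of struct cfi_fixup entries, terminated by a zeroed
 * entry, and hands the table to cfi_fixup() once the MTD is set up:
 *
 *	static struct cfi_fixup fixup_table[] = {
 *		{ CFI_MFR_AMD, CFI_ID_ANY, fixup_example, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 *	...
 *	cfi_fixup(mtd, fixup_table);
 *
 * CFI_MFR_ANY / CFI_ID_ANY match every manufacturer or device id.
 */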