/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 */

/* Overhauled routines for dealing with different mmap regions of flash */

#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__

#include <linux/bug.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>
#include <asm/barrier.h>

struct device_node;
struct module;

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (1)
#define MAX_MAP_BANKWIDTH 1
#else
#define map_bankwidth_is_1(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 2
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 2
#else
#define map_bankwidth_is_2(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 4
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 4
#else
#define map_bankwidth_is_4(map) (0)
#endif

/* ensure we never evaluate anything shorter than an unsigned long
 * to zero, and ensure we'll never miss the end of a comparison (bjd) */

#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  if BITS_PER_LONG < 64
#   undef map_bankwidth_is_large
#   define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#   undef map_words
#   define map_words(map) map_calc_words(map)
#  endif
# else
#  define map_bankwidth(map) 8
#  define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 8
#else
#define map_bankwidth_is_8(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  undef map_bankwidth_is_large
#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#  undef map_words
#  define map_words(map) map_calc_words(map)
# else
#  define map_bankwidth(map) 16
#  define map_bankwidth_is_large(map) (1)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 16
#else
#define map_bankwidth_is_16(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
/* always use indirect access for 256-bit to preserve kernel stack */
# undef map_bankwidth
# define map_bankwidth(map) ((map)->bankwidth)
# undef map_bankwidth_is_large
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
# undef map_words
# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
#else
#define map_bankwidth_is_32(map) (0)
#endif

#ifndef map_bankwidth
#ifdef CONFIG_MTD
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
#endif
static inline int map_bankwidth(void *map)
{
	BUG();
	return 0;
}
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (0)
#define MAX_MAP_BANKWIDTH 1
#endif

static inline int map_bankwidth_supported(int w)
{
	switch (w) {
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
	case 1:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
	case 2:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
	case 4:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
	case 8:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
	case 16:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
	case 32:
#endif
		return 1;

	default:
		return 0;
	}
}

#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef union {
	unsigned long x[MAX_MAP_LONGS];
} map_word;

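/*
 * Worked example: on a 32-bit kernel (BITS_PER_LONG == 32,
 * sizeof(unsigned long) == 4), a map with a 16-byte bankwidth gives
 *
 *	map_calc_words(map) = (16 + 3) / 4       = 4
 *	MAX_MAP_LONGS       = (16 * 8 + 31) / 32 = 4	(MAX_MAP_BANKWIDTH == 16)
 *
 * so a single map_word carries the 16-byte bus word in four unsigned longs,
 * and map_bankwidth_is_large(map) is true because 16 > BITS_PER_LONG/8.
 */
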
/* The map stuff is very simple. You fill in your struct map_info with
   a handful of routines for accessing the device, making sure they handle
   paging etc. correctly if your device needs it. Then you pass it off
   to a chip probe routine -- either JEDEC or CFI probe or both -- via
   do_map_probe(). If a chip is recognised, the probe code will invoke the
   appropriate chip driver (if present) and return a struct mtd_info.
   At which point, you fill in the mtd->module with your own module
   address, and register it with the MTD core code. Or you could partition
   it and register the partitions instead, or keep it for your own private
   use; whatever.

   The mtd->priv field will point to the struct map_info, and any further
   private data required by the chip driver is linked from the
   mtd->priv->fldrv_priv field. This allows the map driver to get at
   the destructor function map->fldrv_destroy() when it's tired
   of living.
*/

struct mtd_chip_driver;
struct map_info {
	const char *name;
	unsigned long size;
	resource_size_t phys;
#define NO_XIP (-1UL)

	void __iomem *virt;
	void *cached;

	int swap; /* this mapping's byte-swapping requirement */
	int bankwidth; /* in octets. This isn't necessarily the width
		       of actual bus cycles -- it's the repeat interval
		       in bytes, before you are talking to the first chip again.
		       */

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
	map_word (*read)(struct map_info *, unsigned long);
	void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);

	void (*write)(struct map_info *, const map_word, unsigned long);
	void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);

	/* We can perhaps put in 'point' and 'unpoint' methods, if we really
	   want to enable XIP for non-linear mappings. Not yet though. */
#endif
	/* It's possible for the map driver to use cached memory in its
	   copy_from implementation (and _only_ with copy_from). However,
	   when the chip driver knows some flash area has changed contents,
	   it will signal it to the map driver through this routine to let
	   the map driver invalidate the corresponding cache as needed.
	   If there is no cache to care about this can be set to NULL. */
	void (*inval_cache)(struct map_info *, unsigned long, ssize_t);

	/* This will be called with 1 as parameter when the first map user
	 * needs VPP, and called with 0 when the last user exits. The map
	 * core maintains a reference counter, and assumes that VPP is a
	 * global resource applying to all mapped flash chips on the system.
	 */
	void (*set_vpp)(struct map_info *, int);

	unsigned long pfow_base;
	unsigned long map_priv_1;
	unsigned long map_priv_2;
	struct device_node *device_node;
	void *fldrv_priv;
	struct mtd_chip_driver *fldrv;
};

struct mtd_chip_driver {
	struct mtd_info *(*probe)(struct map_info *map);
	void (*destroy)(struct mtd_info *);
	struct module *module;
	char *name;
	struct list_head list;
};

void register_mtd_chip_driver(struct mtd_chip_driver *);
void unregister_mtd_chip_driver(struct mtd_chip_driver *);

struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);

#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)

#define INVALIDATE_CACHED_RANGE(map, from, size) \
	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)

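/*
 * Minimal sketch of the probe flow described in the comment above, in the
 * style of the simple physmap drivers.  The names (example_map,
 * example_flash_init, EXAMPLE_FLASH_BASE, EXAMPLE_FLASH_SIZE) are
 * hypothetical; it assumes <linux/io.h> for ioremap(), <linux/module.h>
 * for THIS_MODULE and <linux/mtd/mtd.h> for struct mtd_info and
 * mtd_device_register().  simple_map_init() and the map accessors it
 * relies on are defined at the end of this header.
 *
 *	static struct map_info example_map = {
 *		.name      = "example-nor",
 *		.phys      = EXAMPLE_FLASH_BASE,
 *		.size      = EXAMPLE_FLASH_SIZE,
 *		.bankwidth = 2,
 *	};
 *
 *	static int example_flash_init(void)
 *	{
 *		struct mtd_info *mtd;
 *
 *		example_map.virt = ioremap(example_map.phys, example_map.size);
 *		if (!example_map.virt)
 *			return -ENOMEM;
 *
 *		simple_map_init(&example_map);
 *
 *		mtd = do_map_probe("cfi_probe", &example_map);
 *		if (!mtd) {
 *			iounmap(example_map.virt);
 *			return -ENXIO;
 *		}
 *
 *		mtd->owner = THIS_MODULE;
 *		return mtd_device_register(mtd, NULL, 0);
 *	}
 *
 * Tear-down reverses the steps: mtd_device_unregister(), map_destroy(),
 * then iounmap().
 */
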
#define map_word_equal(map, val1, val2) \
({ \
	int i, ret = 1; \
	for (i = 0; i < map_words(map); i++) \
		if ((val1).x[i] != (val2).x[i]) { \
			ret = 0; \
			break; \
		} \
	ret; \
})

#define map_word_and(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] & (val2).x[i]; \
	r; \
})

#define map_word_clr(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] & ~(val2).x[i]; \
	r; \
})

#define map_word_or(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] | (val2).x[i]; \
	r; \
})

#define map_word_andequal(map, val1, val2, val3) \
({ \
	int i, ret = 1; \
	for (i = 0; i < map_words(map); i++) { \
		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
			ret = 0; \
			break; \
		} \
	} \
	ret; \
})

#define map_word_bitsset(map, val1, val2) \
({ \
	int i, ret = 0; \
	for (i = 0; i < map_words(map); i++) { \
		if ((val1).x[i] & (val2).x[i]) { \
			ret = 1; \
			break; \
		} \
	} \
	ret; \
})

static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = *(unsigned char *)ptr;
	else if (map_bankwidth_is_2(map))
		r.x[0] = get_unaligned((uint16_t *)ptr);
	else if (map_bankwidth_is_4(map))
		r.x[0] = get_unaligned((uint32_t *)ptr);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = get_unaligned((uint64_t *)ptr);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy(r.x, ptr, map->bankwidth);
	else
		BUG();

	return r;
}

static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
{
	int i;

	if (map_bankwidth_is_large(map)) {
		char *dest = (char *)&orig;

		memcpy(dest+start, buf, len);
	} else {
		for (i = start; i < start+len; i++) {
			int bitpos;

#ifdef __LITTLE_ENDIAN
			bitpos = i * 8;
#else /* __BIG_ENDIAN */
			bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
			orig.x[0] &= ~(0xff << bitpos);
			orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
		}
	}
	return orig;
}

#if BITS_PER_LONG < 64
#define MAP_FF_LIMIT 4
#else
#define MAP_FF_LIMIT 8
#endif

static inline map_word map_word_ff(struct map_info *map)
{
	map_word r;
	int i;

	if (map_bankwidth(map) < MAP_FF_LIMIT) {
		int bw = 8 * map_bankwidth(map);

		r.x[0] = (1UL << bw) - 1;
	} else {
		for (i = 0; i < map_words(map); i++)
			r.x[i] = ~0UL;
	}
	return r;
}

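/*
 * Illustrative sketch: chip drivers typically combine map_word_ff() with
 * map_word_equal() to test whether a location reads back as erased flash.
 * 'map' and 'ofs' are assumed to be an already initialised struct map_info
 * pointer and an offset within the mapped window; map_read() is defined at
 * the end of this header (it resolves to inline_map_read() or to the map
 * driver's ->read() hook, depending on CONFIG_MTD_COMPLEX_MAPPINGS).
 *
 *	map_word erased = map_word_ff(map);
 *	map_word datum  = map_read(map, ofs);
 *
 *	if (map_word_equal(map, datum, erased))
 *		pr_debug("%s: 0x%lx reads as erased\n", map->name, ofs);
 */
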
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = __raw_readb(map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		r.x[0] = __raw_readw(map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		r.x[0] = __raw_readl(map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = __raw_readq(map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
	else
		BUG();

	return r;
}

static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
	if (map_bankwidth_is_1(map))
		__raw_writeb(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		__raw_writew(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		__raw_writel(datum.x[0], map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		__raw_writeq(datum.x[0], map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
	else
		BUG();
	mb();
}

static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	if (map->cached)
		memcpy(to, (char *)map->cached + from, len);
	else
		memcpy_fromio(to, map->virt + from, len);
}

static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	memcpy_toio(map->virt + to, from, len);
}

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
#define map_read(map, ofs) (map)->read(map, ofs)
#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)

extern void simple_map_init(struct map_info *);
#define map_is_linear(map) (map->phys != NO_XIP)

#else
#define map_read(map, ofs) inline_map_read(map, ofs)
#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)

#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
#define map_is_linear(map) ({ (void)(map); 1; })

#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */

#endif /* __LINUX_MTD_MAP_H__ */
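
/*
 * Illustrative sketch for CONFIG_MTD_COMPLEX_MAPPINGS: a map driver whose
 * flash is not linearly addressable supplies its own accessors instead of
 * calling simple_map_init().  All names below (sparse_read, sparse_map and
 * friends) are hypothetical; the example assumes a 16-bit device whose data
 * appears on every other halfword of the bus, so each flash offset is
 * doubled before the MMIO access, and it simplifies copy_from/copy_to by
 * assuming 2-byte aligned buffers and even lengths.  It needs <linux/io.h>
 * for readw()/writew().
 *
 *	static map_word sparse_read(struct map_info *map, unsigned long ofs)
 *	{
 *		map_word r;
 *
 *		r.x[0] = readw(map->virt + (ofs << 1));
 *		return r;
 *	}
 *
 *	static void sparse_write(struct map_info *map, const map_word datum,
 *				 unsigned long ofs)
 *	{
 *		writew(datum.x[0], map->virt + (ofs << 1));
 *	}
 *
 *	static void sparse_copy_from(struct map_info *map, void *to,
 *				     unsigned long from, ssize_t len)
 *	{
 *		u16 *dst = to;
 *
 *		for (; len > 0; len -= 2, from += 2)
 *			*dst++ = readw(map->virt + (from << 1));
 *	}
 *
 *	static void sparse_copy_to(struct map_info *map, unsigned long to,
 *				   const void *from, ssize_t len)
 *	{
 *		const u16 *src = from;
 *
 *		for (; len > 0; len -= 2, to += 2)
 *			writew(*src++, map->virt + (to << 1));
 *	}
 *
 *	static struct map_info sparse_map = {
 *		.name      = "example-sparse-nor",
 *		.phys      = NO_XIP,
 *		.size      = 0x400000,
 *		.bankwidth = 2,
 *		.read      = sparse_read,
 *		.write     = sparse_write,
 *		.copy_from = sparse_copy_from,
 *		.copy_to   = sparse_copy_to,
 *	};
 *
 * The ->virt field is still expected to hold the ioremap()ed base of the
 * window; probing then proceeds through do_map_probe() exactly as for a
 * simple mapping.
 */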