Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MTD] update internal API to support 64-bit device size

MTD internal API presently uses 32-bit values to represent
device size. This patch updates them to 64-bits but leaves
the external API unchanged. Extending the external API
is a separate issue for several reasons. First, no one
needs it at the moment. Secondly, whether the implementation
is done with IOCTLs, sysfs or both is still debated. Thirdly,
external API changes require the internal API to be accepted
first.

Note that although the MTD API will be able to support 64-bit
device sizes, existing drivers do not and are not required
to do so, although NAND base has been updated.

In general, changing from 32-bit to 64-bit values causes little
or no change to the majority of the code, with the following
exceptions:
- printk message formats
- division and modulus of 64-bit values
- NAND base support
- 32-bit local variables used by mtdpart and mtdconcat
- naughtily assuming one structure maps to another
in MEMERASE ioctl

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

authored by

Adrian Hunter and committed by
David Woodhouse
69423d99 8a4c2495

+216 -138
+6 -6
drivers/mtd/chips/cfi_cmdset_0001.c
··· 58 58 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *); 59 59 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); 60 60 static void cfi_intelext_sync (struct mtd_info *); 61 - static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 62 - static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 61 + static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 62 + static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 63 63 #ifdef CONFIG_MTD_OTP 64 64 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 65 65 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); ··· 558 558 } 559 559 560 560 for (i=0; i<mtd->numeraseregions;i++){ 561 - printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n", 562 - i,mtd->eraseregions[i].offset, 561 + printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n", 562 + i,(unsigned long long)mtd->eraseregions[i].offset, 563 563 mtd->eraseregions[i].erasesize, 564 564 mtd->eraseregions[i].numblocks); 565 565 } ··· 2058 2058 return ret; 2059 2059 } 2060 2060 2061 - static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 2061 + static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2062 2062 { 2063 2063 int ret; 2064 2064 ··· 2082 2082 return ret; 2083 2083 } 2084 2084 2085 - static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 2085 + static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2086 2086 { 2087 2087 int ret; 2088 2088
+4 -4
drivers/mtd/chips/cfi_cmdset_0002.c
··· 71 71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 72 72 #include "fwh_lock.h" 73 73 74 - static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 75 - static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 74 + static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 75 + static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 76 76 77 77 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 78 78 .probe = NULL, /* Not usable directly */ ··· 1774 1774 return ret; 1775 1775 } 1776 1776 1777 - static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 1777 + static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1778 1778 { 1779 1779 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 1780 1780 } 1781 1781 1782 - static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 1782 + static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1783 1783 { 1784 1784 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 1785 1785 }
+7 -7
drivers/mtd/chips/cfi_cmdset_0020.c
··· 42 42 unsigned long count, loff_t to, size_t *retlen); 43 43 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *); 44 44 static void cfi_staa_sync (struct mtd_info *); 45 - static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 46 - static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 45 + static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 46 + static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 47 47 static int cfi_staa_suspend (struct mtd_info *); 48 48 static void cfi_staa_resume (struct mtd_info *); 49 49 ··· 221 221 } 222 222 223 223 for (i=0; i<mtd->numeraseregions;i++){ 224 - printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", 225 - i,mtd->eraseregions[i].offset, 224 + printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n", 225 + i, (unsigned long long)mtd->eraseregions[i].offset, 226 226 mtd->eraseregions[i].erasesize, 227 227 mtd->eraseregions[i].numblocks); 228 228 } ··· 964 964 adr += regions[i].erasesize; 965 965 len -= regions[i].erasesize; 966 966 967 - if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) 967 + if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) 968 968 i++; 969 969 970 970 if (adr >> cfi->chipshift) { ··· 1135 1135 spin_unlock_bh(chip->mutex); 1136 1136 return 0; 1137 1137 } 1138 - static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 1138 + static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1139 1139 { 1140 1140 struct map_info *map = mtd->priv; 1141 1141 struct cfi_private *cfi = map->fldrv_priv; ··· 1284 1284 spin_unlock_bh(chip->mutex); 1285 1285 return 0; 1286 1286 } 1287 - static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 1287 + static int cfi_staa_unlock(struct mtd_info *mtd, loff_t 
ofs, uint64_t len) 1288 1288 { 1289 1289 struct map_info *map = mtd->priv; 1290 1290 struct cfi_private *cfi = map->fldrv_priv;
+2 -2
drivers/mtd/chips/fwh_lock.h
··· 77 77 } 78 78 79 79 80 - static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) 80 + static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) 81 81 { 82 82 int ret; 83 83 ··· 88 88 } 89 89 90 90 91 - static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) 91 + static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) 92 92 { 93 93 int ret; 94 94
+1 -1
drivers/mtd/inftlcore.c
··· 50 50 struct INFTLrecord *inftl; 51 51 unsigned long temp; 52 52 53 - if (mtd->type != MTD_NANDFLASH) 53 + if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) 54 54 return; 55 55 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 56 56 if (memcmp(mtd->name, "DiskOnChip", 10))
+2 -2
drivers/mtd/inftlmount.c
··· 63 63 * otherwise. 64 64 */ 65 65 inftl->EraseSize = inftl->mbd.mtd->erasesize; 66 - inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 66 + inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; 67 67 68 68 inftl->MediaUnit = BLOCK_NIL; 69 69 ··· 187 187 mh->BlockMultiplierBits); 188 188 inftl->EraseSize = inftl->mbd.mtd->erasesize << 189 189 mh->BlockMultiplierBits; 190 - inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 190 + inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; 191 191 block >>= mh->BlockMultiplierBits; 192 192 } 193 193
+2 -2
drivers/mtd/maps/amd76xrom.c
··· 232 232 /* Trim the size if we are larger than the map */ 233 233 if (map->mtd->size > map->map.size) { 234 234 printk(KERN_WARNING MOD_NAME 235 - " rom(%u) larger than window(%lu). fixing...\n", 236 - map->mtd->size, map->map.size); 235 + " rom(%llu) larger than window(%lu). fixing...\n", 236 + (unsigned long long)map->mtd->size, map->map.size); 237 237 map->mtd->size = map->map.size; 238 238 } 239 239 if (window->rsrc.parent) {
+2 -2
drivers/mtd/maps/ck804xrom.c
··· 263 263 /* Trim the size if we are larger than the map */ 264 264 if (map->mtd->size > map->map.size) { 265 265 printk(KERN_WARNING MOD_NAME 266 - " rom(%u) larger than window(%lu). fixing...\n", 267 - map->mtd->size, map->map.size); 266 + " rom(%llu) larger than window(%lu). fixing...\n", 267 + (unsigned long long)map->mtd->size, map->map.size); 268 268 map->mtd->size = map->map.size; 269 269 } 270 270 if (window->rsrc.parent) {
+2 -2
drivers/mtd/maps/esb2rom.c
··· 324 324 /* Trim the size if we are larger than the map */ 325 325 if (map->mtd->size > map->map.size) { 326 326 printk(KERN_WARNING MOD_NAME 327 - " rom(%u) larger than window(%lu). fixing...\n", 328 - map->mtd->size, map->map.size); 327 + " rom(%llu) larger than window(%lu). fixing...\n", 328 + (unsigned long long)map->mtd->size, map->map.size); 329 329 map->mtd->size = map->map.size; 330 330 } 331 331 if (window->rsrc.parent) {
+2 -2
drivers/mtd/maps/ichxrom.c
··· 258 258 /* Trim the size if we are larger than the map */ 259 259 if (map->mtd->size > map->map.size) { 260 260 printk(KERN_WARNING MOD_NAME 261 - " rom(%u) larger than window(%lu). fixing...\n", 262 - map->mtd->size, map->map.size); 261 + " rom(%llu) larger than window(%lu). fixing...\n", 262 + (unsigned long long)map->mtd->size, map->map.size); 263 263 map->mtd->size = map->map.size; 264 264 } 265 265 if (window->rsrc.parent) {
+1 -1
drivers/mtd/maps/nettel.c
··· 226 226 227 227 if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) { 228 228 printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n", 229 - amd_mtd->size>>10); 229 + (int)(amd_mtd->size>>10)); 230 230 231 231 amd_mtd->owner = THIS_MODULE; 232 232
+5 -3
drivers/mtd/maps/scb2_flash.c
··· 118 118 struct mtd_erase_region_info *region = &mtd->eraseregions[i]; 119 119 120 120 if (region->numblocks * region->erasesize > mtd->size) { 121 - region->numblocks = (mtd->size / region->erasesize); 121 + region->numblocks = ((unsigned long)mtd->size / 122 + region->erasesize); 122 123 done = 1; 123 124 } else { 124 125 region->numblocks = 0; ··· 188 187 return -ENODEV; 189 188 } 190 189 191 - printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n", 192 - scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size); 190 + printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n", 191 + (unsigned long long)scb2_mtd->size, 192 + (unsigned long long)(SCB2_WINDOW - scb2_mtd->size)); 193 193 194 194 add_mtd_device(scb2_mtd); 195 195
+5 -1
drivers/mtd/mtdchar.c
··· 450 450 if (!erase) 451 451 ret = -ENOMEM; 452 452 else { 453 + struct erase_info_user einfo; 454 + 453 455 wait_queue_head_t waitq; 454 456 DECLARE_WAITQUEUE(wait, current); 455 457 456 458 init_waitqueue_head(&waitq); 457 459 458 - if (copy_from_user(&erase->addr, argp, 460 + if (copy_from_user(&einfo, argp, 459 461 sizeof(struct erase_info_user))) { 460 462 kfree(erase); 461 463 return -EFAULT; 462 464 } 465 + erase->addr = einfo.start; 466 + erase->len = einfo.length; 463 467 erase->mtd = mtd; 464 468 erase->callback = mtdchar_erase_callback; 465 469 erase->priv = (unsigned long)&waitq;
+19 -14
drivers/mtd/mtdconcat.c
··· 197 197 continue; 198 198 } 199 199 200 - size = min(total_len, (size_t)(subdev->size - to)); 200 + size = min_t(uint64_t, total_len, subdev->size - to); 201 201 wsize = size; /* store for future use */ 202 202 203 203 entry_high = entry_low; ··· 385 385 struct mtd_concat *concat = CONCAT(mtd); 386 386 struct mtd_info *subdev; 387 387 int i, err; 388 - u_int32_t length, offset = 0; 388 + uint64_t length, offset = 0; 389 389 struct erase_info *erase; 390 390 391 391 if (!(mtd->flags & MTD_WRITEABLE)) ··· 518 518 return 0; 519 519 } 520 520 521 - static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 521 + static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 522 522 { 523 523 struct mtd_concat *concat = CONCAT(mtd); 524 524 int i, err = -EINVAL; ··· 528 528 529 529 for (i = 0; i < concat->num_subdev; i++) { 530 530 struct mtd_info *subdev = concat->subdev[i]; 531 - size_t size; 531 + uint64_t size; 532 532 533 533 if (ofs >= subdev->size) { 534 534 size = 0; ··· 556 556 return err; 557 557 } 558 558 559 - static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 559 + static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 560 560 { 561 561 struct mtd_concat *concat = CONCAT(mtd); 562 562 int i, err = 0; ··· 566 566 567 567 for (i = 0; i < concat->num_subdev; i++) { 568 568 struct mtd_info *subdev = concat->subdev[i]; 569 - size_t size; 569 + uint64_t size; 570 570 571 571 if (ofs >= subdev->size) { 572 572 size = 0; ··· 842 842 concat->mtd.erasesize = curr_erasesize; 843 843 concat->mtd.numeraseregions = 0; 844 844 } else { 845 + uint64_t tmp64; 846 + 845 847 /* 846 848 * erase block size varies across the subdevices: allocate 847 849 * space to store the data describing the variable erase regions 848 850 */ 849 851 struct mtd_erase_region_info *erase_region_p; 850 - u_int32_t begin, position; 852 + uint64_t begin, position; 851 853 852 854 concat->mtd.erasesize = max_erasesize; 853 855 
concat->mtd.numeraseregions = num_erase_region; ··· 881 879 erase_region_p->offset = begin; 882 880 erase_region_p->erasesize = 883 881 curr_erasesize; 884 - erase_region_p->numblocks = 885 - (position - begin) / curr_erasesize; 882 + tmp64 = position - begin; 883 + do_div(tmp64, curr_erasesize); 884 + erase_region_p->numblocks = tmp64; 886 885 begin = position; 887 886 888 887 curr_erasesize = subdev[i]->erasesize; ··· 900 897 erase_region_p->offset = begin; 901 898 erase_region_p->erasesize = 902 899 curr_erasesize; 903 - erase_region_p->numblocks = 904 - (position - 905 - begin) / curr_erasesize; 900 + tmp64 = position - begin; 901 + do_div(tmp64, curr_erasesize); 902 + erase_region_p->numblocks = tmp64; 906 903 begin = position; 907 904 908 905 curr_erasesize = ··· 912 909 } 913 910 position += 914 911 subdev[i]->eraseregions[j]. 915 - numblocks * curr_erasesize; 912 + numblocks * (uint64_t)curr_erasesize; 916 913 } 917 914 } 918 915 } 919 916 /* Now write the final entry */ 920 917 erase_region_p->offset = begin; 921 918 erase_region_p->erasesize = curr_erasesize; 922 - erase_region_p->numblocks = (position - begin) / curr_erasesize; 919 + tmp64 = position - begin; 920 + do_div(tmp64, curr_erasesize); 921 + erase_region_p->numblocks = tmp64; 923 922 } 924 923 925 924 return &concat->mtd;
+15 -1
drivers/mtd/mtdcore.c
··· 57 57 mtd->index = i; 58 58 mtd->usecount = 0; 59 59 60 + if (is_power_of_2(mtd->erasesize)) 61 + mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 62 + else 63 + mtd->erasesize_shift = 0; 64 + 65 + if (is_power_of_2(mtd->writesize)) 66 + mtd->writesize_shift = ffs(mtd->writesize) - 1; 67 + else 68 + mtd->writesize_shift = 0; 69 + 70 + mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; 71 + mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 72 + 60 73 /* Some chips always power up locked. Unlock them now */ 61 74 if ((mtd->flags & MTD_WRITEABLE) 62 75 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { ··· 357 344 if (!this) 358 345 return 0; 359 346 360 - return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size, 347 + return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i, 348 + (unsigned long long)this->size, 361 349 this->erasesize, this->name); 362 350 } 363 351
+6 -3
drivers/mtd/mtdoops.c
··· 80 80 if (ret) { 81 81 set_current_state(TASK_RUNNING); 82 82 remove_wait_queue(&wait_q, &wait); 83 - printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] " 83 + printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " 84 84 "on \"%s\" failed\n", 85 - erase.addr, erase.len, mtd->name); 85 + (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); 86 86 return ret; 87 87 } 88 88 ··· 289 289 } 290 290 291 291 cxt->mtd = mtd; 292 - cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 292 + if (mtd->size > INT_MAX) 293 + cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; 294 + else 295 + cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; 293 296 294 297 find_next_position(cxt); 295 298
+17 -17
drivers/mtd/mtdpart.c
··· 26 26 struct mtd_part { 27 27 struct mtd_info mtd; 28 28 struct mtd_info *master; 29 - u_int32_t offset; 29 + uint64_t offset; 30 30 int index; 31 31 struct list_head list; 32 32 int registered; ··· 235 235 } 236 236 EXPORT_SYMBOL_GPL(mtd_erase_callback); 237 237 238 - static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 238 + static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 239 239 { 240 240 struct mtd_part *part = PART(mtd); 241 241 if ((len + ofs) > mtd->size) ··· 243 243 return part->master->lock(part->master, ofs + part->offset, len); 244 244 } 245 245 246 - static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 246 + static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 247 247 { 248 248 struct mtd_part *part = PART(mtd); 249 249 if ((len + ofs) > mtd->size) ··· 317 317 318 318 static struct mtd_part *add_one_partition(struct mtd_info *master, 319 319 const struct mtd_partition *part, int partno, 320 - u_int32_t cur_offset) 320 + uint64_t cur_offset) 321 321 { 322 322 struct mtd_part *slave; 323 323 ··· 395 395 slave->offset = cur_offset; 396 396 if (slave->offset == MTDPART_OFS_NXTBLK) { 397 397 slave->offset = cur_offset; 398 - if ((cur_offset % master->erasesize) != 0) { 398 + if (mtd_mod_by_eb(cur_offset, master) != 0) { 399 399 /* Round up to next erasesize */ 400 - slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize; 400 + slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; 401 401 printk(KERN_NOTICE "Moving partition %d: " 402 - "0x%08x -> 0x%08x\n", partno, 403 - cur_offset, slave->offset); 402 + "0x%012llx -> 0x%012llx\n", partno, 403 + (unsigned long long)cur_offset, (unsigned long long)slave->offset); 404 404 } 405 405 } 406 406 if (slave->mtd.size == MTDPART_SIZ_FULL) 407 407 slave->mtd.size = master->size - slave->offset; 408 408 409 - printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, 410 - slave->offset + 
slave->mtd.size, slave->mtd.name); 409 + printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, 410 + (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); 411 411 412 412 /* let's do some sanity checks */ 413 413 if (slave->offset >= master->size) { ··· 420 420 } 421 421 if (slave->offset + slave->mtd.size > master->size) { 422 422 slave->mtd.size = master->size - slave->offset; 423 - printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n", 424 - part->name, master->name, slave->mtd.size); 423 + printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", 424 + part->name, master->name, (unsigned long long)slave->mtd.size); 425 425 } 426 426 if (master->numeraseregions > 1) { 427 427 /* Deal with variable erase size stuff */ 428 428 int i, max = master->numeraseregions; 429 - u32 end = slave->offset + slave->mtd.size; 429 + u64 end = slave->offset + slave->mtd.size; 430 430 struct mtd_erase_region_info *regions = master->eraseregions; 431 431 432 432 /* Find the first erase regions which is part of this ··· 449 449 } 450 450 451 451 if ((slave->mtd.flags & MTD_WRITEABLE) && 452 - (slave->offset % slave->mtd.erasesize)) { 452 + mtd_mod_by_eb(slave->offset, &slave->mtd)) { 453 453 /* Doesn't start on a boundary of major erase size */ 454 454 /* FIXME: Let it be writable if it is on a boundary of 455 455 * _minor_ erase size though */ ··· 458 458 part->name); 459 459 } 460 460 if ((slave->mtd.flags & MTD_WRITEABLE) && 461 - (slave->mtd.size % slave->mtd.erasesize)) { 461 + mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { 462 462 slave->mtd.flags &= ~MTD_WRITEABLE; 463 463 printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", 464 464 part->name); ··· 466 466 467 467 slave->mtd.ecclayout = master->ecclayout; 468 468 if (master->block_isbad) { 469 - uint32_t offs = 0; 469 + 
uint64_t offs = 0; 470 470 471 471 while (offs < slave->mtd.size) { 472 472 if (master->block_isbad(master, ··· 501 501 int nbparts) 502 502 { 503 503 struct mtd_part *slave; 504 - u_int32_t cur_offset = 0; 504 + uint64_t cur_offset = 0; 505 505 int i; 506 506 507 507 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+15 -9
drivers/mtd/nand/nand_base.c
··· 2014 2014 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2015 2015 int allowbbt) 2016 2016 { 2017 - int page, len, status, pages_per_block, ret, chipnr; 2017 + int page, status, pages_per_block, ret, chipnr; 2018 2018 struct nand_chip *chip = mtd->priv; 2019 - int rewrite_bbt[NAND_MAX_CHIPS]={0}; 2019 + loff_t rewrite_bbt[NAND_MAX_CHIPS]={0}; 2020 2020 unsigned int bbt_masked_page = 0xffffffff; 2021 + loff_t len; 2021 2022 2022 - DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n", 2023 - (unsigned int)instr->addr, (unsigned int)instr->len); 2023 + DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n", 2024 + (unsigned long long)instr->addr, (unsigned long long)instr->len); 2024 2025 2025 2026 /* Start address must align on block boundary */ 2026 2027 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { ··· 2117 2116 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2118 2117 "Failed erase, page 0x%08x\n", page); 2119 2118 instr->state = MTD_ERASE_FAILED; 2120 - instr->fail_addr = (page << chip->page_shift); 2119 + instr->fail_addr = 2120 + ((loff_t)page << chip->page_shift); 2121 2121 goto erase_exit; 2122 2122 } 2123 2123 ··· 2128 2126 */ 2129 2127 if (bbt_masked_page != 0xffffffff && 2130 2128 (page & BBT_PAGE_MASK) == bbt_masked_page) 2131 - rewrite_bbt[chipnr] = (page << chip->page_shift); 2129 + rewrite_bbt[chipnr] = 2130 + ((loff_t)page << chip->page_shift); 2132 2131 2133 2132 /* Increment page address and decrement length */ 2134 2133 len -= (1 << chip->phys_erase_shift); ··· 2176 2173 continue; 2177 2174 /* update the BBT for chip */ 2178 2175 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " 2179 - "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr], 2176 + "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr], 2180 2177 chip->bbt_td->pages[chipnr]); 2181 2178 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2182 2179 } ··· 2368 2365 if (!mtd->name) 2369 2366 mtd->name = type->name; 2370 2367 2371 - 
chip->chipsize = type->chipsize << 20; 2368 + chip->chipsize = (uint64_t)type->chipsize << 20; 2372 2369 2373 2370 /* Newer devices have all the information in additional id bytes */ 2374 2371 if (!type->pagesize) { ··· 2426 2423 2427 2424 chip->bbt_erase_shift = chip->phys_erase_shift = 2428 2425 ffs(mtd->erasesize) - 1; 2429 - chip->chip_shift = ffs(chip->chipsize) - 1; 2426 + if (chip->chipsize & 0xffffffff) 2427 + chip->chip_shift = ffs((unsigned)chip->chipsize) - 1; 2428 + else 2429 + chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; 2430 2430 2431 2431 /* Set the bad block position */ 2432 2432 chip->badblockpos = mtd->writesize > 512 ?
+15 -16
drivers/mtd/nand/nand_bbt.c
··· 171 171 if (tmp == msk) 172 172 continue; 173 173 if (reserved_block_code && (tmp == reserved_block_code)) { 174 - printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n", 175 - ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 174 + printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", 175 + (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 176 176 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 177 177 mtd->ecc_stats.bbtblocks++; 178 178 continue; 179 179 } 180 180 /* Leave it for now, if its matured we can move this 181 181 * message to MTD_DEBUG_LEVEL0 */ 182 - printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n", 183 - ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 182 + printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", 183 + (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 184 184 /* Factory marked bad or worn out ? */ 185 185 if (tmp == 0) 186 186 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); ··· 284 284 285 285 /* Read the primary version, if available */ 286 286 if (td->options & NAND_BBT_VERSION) { 287 - scan_read_raw(mtd, buf, td->pages[0] << this->page_shift, 287 + scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 288 288 mtd->writesize); 289 289 td->version[0] = buf[mtd->writesize + td->veroffs]; 290 290 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", ··· 293 293 294 294 /* Read the mirror version, if available */ 295 295 if (md && (md->options & NAND_BBT_VERSION)) { 296 - scan_read_raw(mtd, buf, md->pages[0] << this->page_shift, 296 + scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 297 297 mtd->writesize); 298 298 md->version[0] = buf[mtd->writesize + md->veroffs]; 299 299 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", ··· 411 411 numblocks = this->chipsize >> (this->bbt_erase_shift - 1); 412 412 startblock = chip * numblocks; 413 413 numblocks += startblock; 414 - from = startblock << 
(this->bbt_erase_shift - 1); 414 + from = (loff_t)startblock << (this->bbt_erase_shift - 1); 415 415 } 416 416 417 417 for (i = startblock; i < numblocks;) { ··· 428 428 429 429 if (ret) { 430 430 this->bbt[i >> 3] |= 0x03 << (i & 0x6); 431 - printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n", 432 - i >> 1, (unsigned int)from); 431 + printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", 432 + i >> 1, (unsigned long long)from); 433 433 mtd->ecc_stats.badblocks++; 434 434 } 435 435 ··· 495 495 for (block = 0; block < td->maxblocks; block++) { 496 496 497 497 int actblock = startblock + dir * block; 498 - loff_t offs = actblock << this->bbt_erase_shift; 498 + loff_t offs = (loff_t)actblock << this->bbt_erase_shift; 499 499 500 500 /* Read first page */ 501 501 scan_read_raw(mtd, buf, offs, mtd->writesize); ··· 719 719 720 720 memset(&einfo, 0, sizeof(einfo)); 721 721 einfo.mtd = mtd; 722 - einfo.addr = (unsigned long)to; 722 + einfo.addr = to; 723 723 einfo.len = 1 << this->bbt_erase_shift; 724 724 res = nand_erase_nand(mtd, &einfo, 1); 725 725 if (res < 0) ··· 729 729 if (res < 0) 730 730 goto outerr; 731 731 732 - printk(KERN_DEBUG "Bad block table written to 0x%08x, version " 733 - "0x%02X\n", (unsigned int)to, td->version[chip]); 732 + printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " 733 + "0x%02X\n", (unsigned long long)to, td->version[chip]); 734 734 735 735 /* Mark it as used */ 736 736 td->pages[chip] = page; ··· 910 910 newval = oldval | (0x2 << (block & 0x06)); 911 911 this->bbt[(block >> 3)] = newval; 912 912 if ((oldval != newval) && td->reserved_block_code) 913 - nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1)); 913 + nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1)); 914 914 continue; 915 915 } 916 916 update = 0; ··· 931 931 new ones have been marked, then we need to update the stored 932 932 bbts. This should only happen once. 
*/ 933 933 if (update && td->reserved_block_code) 934 - nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1)); 934 + nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); 935 935 } 936 936 } 937 937 ··· 1027 1027 if (!this->bbt || !td) 1028 1028 return -EINVAL; 1029 1029 1030 - len = mtd->size >> (this->bbt_erase_shift + 2); 1031 1030 /* Allocate a temporary buffer for one eraseblock incl. oob */ 1032 1031 len = (1 << this->bbt_erase_shift); 1033 1032 len += (len >> this->page_shift) * mtd->oobsize;
+1 -1
drivers/mtd/nftlcore.c
··· 39 39 struct NFTLrecord *nftl; 40 40 unsigned long temp; 41 41 42 - if (mtd->type != MTD_NANDFLASH) 42 + if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX) 43 43 return; 44 44 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 45 45 if (memcmp(mtd->name, "DiskOnChip", 10))
+2 -2
drivers/mtd/nftlmount.c
··· 51 51 the mtd device accordingly. We could even get rid of 52 52 nftl->EraseSize if there were any point in doing so. */ 53 53 nftl->EraseSize = nftl->mbd.mtd->erasesize; 54 - nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 54 + nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize; 55 55 56 56 nftl->MediaUnit = BLOCK_NIL; 57 57 nftl->SpareMediaUnit = BLOCK_NIL; ··· 168 168 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n", 169 169 mh->UnitSizeFactor); 170 170 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor); 171 - nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 171 + nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize; 172 172 } 173 173 #endif 174 174 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
+4 -4
drivers/mtd/onenand/onenand_base.c
··· 1772 1772 int len; 1773 1773 int ret = 0; 1774 1774 1775 - DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); 1775 + DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); 1776 1776 1777 1777 block_size = (1 << this->erase_shift); 1778 1778 ··· 1810 1810 1811 1811 /* Check if we have a bad block, we do not erase bad blocks */ 1812 1812 if (onenand_block_isbad_nolock(mtd, addr, 0)) { 1813 - printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr); 1813 + printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr); 1814 1814 instr->state = MTD_ERASE_FAILED; 1815 1815 goto erase_exit; 1816 1816 } ··· 2029 2029 * 2030 2030 * Lock one or more blocks 2031 2031 */ 2032 - static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 2032 + static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2033 2033 { 2034 2034 int ret; 2035 2035 ··· 2047 2047 * 2048 2048 * Unlock one or more blocks 2049 2049 */ 2050 - static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 2050 + static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2051 2051 { 2052 2052 int ret; 2053 2053
+12 -11
drivers/mtd/rfd_ftl.c
··· 156 156 size_t retlen; 157 157 158 158 sectors_per_block = part->block_size / SECTOR_SIZE; 159 - part->total_blocks = part->mbd.mtd->size / part->block_size; 159 + part->total_blocks = (u32)part->mbd.mtd->size / part->block_size; 160 160 161 161 if (part->total_blocks < 2) 162 162 return -ENOENT; ··· 276 276 277 277 part = (struct partition*)erase->priv; 278 278 279 - i = erase->addr / part->block_size; 280 - if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) { 281 - printk(KERN_ERR PREFIX "erase callback for unknown offset %x " 282 - "on '%s'\n", erase->addr, part->mbd.mtd->name); 279 + i = (u32)erase->addr / part->block_size; 280 + if (i >= part->total_blocks || part->blocks[i].offset != erase->addr || 281 + erase->addr > UINT_MAX) { 282 + printk(KERN_ERR PREFIX "erase callback for unknown offset %llx " 283 + "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name); 283 284 return; 284 285 } 285 286 286 287 if (erase->state != MTD_ERASE_DONE) { 287 - printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', " 288 - "state %d\n", erase->addr, 288 + printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', " 289 + "state %d\n", (unsigned long long)erase->addr, 289 290 part->mbd.mtd->name, erase->state); 290 291 291 292 part->blocks[i].state = BLOCK_FAILED; ··· 346 345 rc = part->mbd.mtd->erase(part->mbd.mtd, erase); 347 346 348 347 if (rc) { 349 - printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' " 350 - "failed\n", erase->addr, erase->len, 351 - part->mbd.mtd->name); 348 + printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " 349 + "failed\n", (unsigned long long)erase->addr, 350 + (unsigned long long)erase->len, part->mbd.mtd->name); 352 351 kfree(erase); 353 352 } 354 353 ··· 764 763 { 765 764 struct partition *part; 766 765 767 - if (mtd->type != MTD_NORFLASH) 766 + if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX) 768 767 return; 769 768 770 769 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
+4 -3
drivers/mtd/ssfdc.c
··· 294 294 int cis_sector; 295 295 296 296 /* Check for small page NAND flash */ 297 - if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE) 297 + if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE || 298 + mtd->size > UINT_MAX) 298 299 return; 299 300 300 301 /* Check for SSDFC format by reading CIS/IDI sector */ ··· 317 316 318 317 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); 319 318 ssfdc->erase_size = mtd->erasesize; 320 - ssfdc->map_len = mtd->size / mtd->erasesize; 319 + ssfdc->map_len = (u32)mtd->size / mtd->erasesize; 321 320 322 321 DEBUG(MTD_DEBUG_LEVEL1, 323 322 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", ··· 328 327 ssfdc->heads = 16; 329 328 ssfdc->sectors = 32; 330 329 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); 331 - ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / 330 + ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / 332 331 ((long)ssfdc->sectors * (long)ssfdc->heads)); 333 332 334 333 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+1 -1
drivers/mtd/ubi/build.c
··· 561 561 */ 562 562 563 563 ubi->peb_size = ubi->mtd->erasesize; 564 - ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; 564 + ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); 565 565 ubi->flash_size = ubi->mtd->size; 566 566 567 567 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+9 -8
drivers/mtd/ubi/gluebi.c
··· 215 215 struct ubi_volume *vol; 216 216 struct ubi_device *ubi; 217 217 218 - dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); 218 + dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len, 219 + (unsigned long long)instr->addr); 219 220 220 221 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 221 222 return -EINVAL; ··· 224 223 if (instr->len < 0 || instr->addr + instr->len > mtd->size) 225 224 return -EINVAL; 226 225 227 - if (instr->addr % mtd->writesize || instr->len % mtd->writesize) 226 + if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) 228 227 return -EINVAL; 229 228 230 - lnum = instr->addr / mtd->erasesize; 231 - count = instr->len / mtd->erasesize; 229 + lnum = mtd_div_by_eb(instr->addr, mtd); 230 + count = mtd_div_by_eb(instr->len, mtd); 232 231 233 232 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 234 233 ubi = vol->ubi; ··· 256 255 257 256 out_err: 258 257 instr->state = MTD_ERASE_FAILED; 259 - instr->fail_addr = lnum * mtd->erasesize; 258 + instr->fail_addr = (long long)lnum * mtd->erasesize; 260 259 return err; 261 260 } 262 261 ··· 295 294 * bytes. 296 295 */ 297 296 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 298 - mtd->size = vol->usable_leb_size * vol->reserved_pebs; 297 + mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs; 299 298 else 300 299 mtd->size = vol->used_bytes; 301 300 ··· 305 304 return -ENFILE; 306 305 } 307 306 308 - dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", 309 - mtd->index, mtd->name, mtd->size, mtd->erasesize); 307 + dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u", 308 + mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize); 310 309 return 0; 311 310 } 312 311
+3 -2
fs/jffs2/erase.c
··· 175 175 { 176 176 /* For NAND, if the failure did not occur at the device level for a 177 177 specific physical page, don't bother updating the bad block table. */ 178 - if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) { 178 + if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) { 179 179 /* We had a device-level failure to erase. Let's see if we've 180 180 failed too many times. */ 181 181 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { ··· 209 209 struct erase_priv_struct *priv = (void *)instr->priv; 210 210 211 211 if(instr->state != MTD_ERASE_DONE) { 212 - printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); 212 + printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", 213 + (unsigned long long)instr->addr, instr->state); 213 214 jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); 214 215 } else { 215 216 jffs2_erase_succeeded(priv->c, priv->jeb);
+49 -8
include/linux/mtd/mtd.h
··· 15 15 #include <linux/mtd/compatmac.h> 16 16 #include <mtd/mtd-abi.h> 17 17 18 + #include <asm/div64.h> 19 + 18 20 #define MTD_CHAR_MAJOR 90 19 21 #define MTD_BLOCK_MAJOR 31 20 22 #define MAX_MTD_DEVICES 32 ··· 27 25 #define MTD_ERASE_DONE 0x08 28 26 #define MTD_ERASE_FAILED 0x10 29 27 30 - #define MTD_FAIL_ADDR_UNKNOWN 0xffffffff 28 + #define MTD_FAIL_ADDR_UNKNOWN -1LL 31 29 32 30 /* If the erase fails, fail_addr might indicate exactly which block failed. If 33 31 fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not 34 32 specific to any particular block. */ 35 33 struct erase_info { 36 34 struct mtd_info *mtd; 37 - u_int32_t addr; 38 - u_int32_t len; 39 - u_int32_t fail_addr; 35 + uint64_t addr; 36 + uint64_t len; 37 + uint64_t fail_addr; 40 38 u_long time; 41 39 u_long retries; 42 40 u_int dev; ··· 48 46 }; 49 47 50 48 struct mtd_erase_region_info { 51 - u_int32_t offset; /* At which this region starts, from the beginning of the MTD */ 49 + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ 52 50 u_int32_t erasesize; /* For this region */ 53 51 u_int32_t numblocks; /* Number of blocks of erasesize in this region */ 54 52 unsigned long *lockmap; /* If keeping bitmap of locks */ ··· 103 101 struct mtd_info { 104 102 u_char type; 105 103 u_int32_t flags; 106 - u_int32_t size; // Total size of the MTD 104 + uint64_t size; // Total size of the MTD 107 105 108 106 /* "Major" erase size for the device. Naïve users may take this 109 107 * to be the only erase size available, or may use the more detailed ··· 121 119 122 120 u_int32_t oobsize; // Amount of OOB data per block (e.g. 16) 123 121 u_int32_t oobavail; // Available OOB bytes per block 122 + 123 + /* 124 + * If erasesize is a power of 2 then the shift is stored in 125 + * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
126 + */ 127 + unsigned int erasesize_shift; 128 + unsigned int writesize_shift; 129 + /* Masks based on erasesize_shift and writesize_shift */ 130 + unsigned int erasesize_mask; 131 + unsigned int writesize_mask; 124 132 125 133 // Kernel-only stuff starts here. 126 134 const char *name; ··· 202 190 void (*sync) (struct mtd_info *mtd); 203 191 204 192 /* Chip-supported device locking */ 205 - int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len); 206 - int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len); 193 + int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 194 + int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 207 195 208 196 /* Power Management functions */ 209 197 int (*suspend) (struct mtd_info *mtd); ··· 233 221 void (*put_device) (struct mtd_info *mtd); 234 222 }; 235 223 224 + static inline u_int32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 225 + { 226 + if (mtd->erasesize_shift) 227 + return sz >> mtd->erasesize_shift; 228 + do_div(sz, mtd->erasesize); 229 + return sz; 230 + } 231 + 232 + static inline u_int32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) 233 + { 234 + if (mtd->erasesize_shift) 235 + return sz & mtd->erasesize_mask; 236 + return do_div(sz, mtd->erasesize); 237 + } 238 + 239 + static inline u_int32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) 240 + { 241 + if (mtd->writesize_shift) 242 + return sz >> mtd->writesize_shift; 243 + do_div(sz, mtd->writesize); 244 + return sz; 245 + } 246 + 247 + static inline u_int32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) 248 + { 249 + if (mtd->writesize_shift) 250 + return sz & mtd->writesize_mask; 251 + return do_div(sz, mtd->writesize); 252 + } 236 253 237 254 /* Kernel-side ioctl definitions */ 238 255
+1 -1
include/linux/mtd/nand.h
··· 399 399 int bbt_erase_shift; 400 400 int chip_shift; 401 401 int numchips; 402 - unsigned long chipsize; 402 + uint64_t chipsize; 403 403 int pagemask; 404 404 int pagebuf; 405 405 int subpagesize;
+2 -2
include/linux/mtd/partitions.h
··· 36 36 37 37 struct mtd_partition { 38 38 char *name; /* identifier string */ 39 - u_int32_t size; /* partition size */ 40 - u_int32_t offset; /* offset within the master MTD space */ 39 + uint64_t size; /* partition size */ 40 + uint64_t offset; /* offset within the master MTD space */ 41 41 u_int32_t mask_flags; /* master MTD flags to mask out for this partition */ 42 42 struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ 43 43 struct mtd_info **mtdp; /* pointer to store the MTD object */