Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MTD] XIP for AMD CFI flash.

Author: Vitaly Wool <vwool@ru.mvista.com>
Signed-off-by: Todd Poynor <tpoynor@mvista.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Todd Poynor and committed by Thomas Gleixner
02b15e34 0dfc6246

+324 -111
+2 -2
drivers/mtd/chips/Kconfig
··· 1 1 # drivers/mtd/chips/Kconfig 2 - # $Id: Kconfig,v 1.14 2005/02/08 17:11:15 nico Exp $ 2 + # $Id: Kconfig,v 1.15 2005/06/06 23:04:35 tpoynor Exp $ 3 3 4 4 menu "RAM/ROM/Flash chip drivers" 5 5 depends on MTD!=n ··· 300 300 301 301 config MTD_XIP 302 302 bool "XIP aware MTD support" 303 - depends on !SMP && MTD_CFI_INTELEXT && EXPERIMENTAL 303 + depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL 304 304 default y if XIP_KERNEL 305 305 help 306 306 This allows MTD support to work with flash memory which is also
+312 -90
drivers/mtd/chips/cfi_cmdset_0002.c
··· 4 4 * 5 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 + * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 7 8 * 8 9 * 2_by_8 routines added by Simon Munton 9 10 * 10 11 * 4_by_16 work by Carolyn J. Smith 11 12 * 13 + * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 + * by Nicolas Pitre) 15 + * 12 16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 13 17 * 14 18 * This code is GPL 15 19 * 16 - * $Id: cfi_cmdset_0002.c,v 1.116 2005/05/24 13:29:42 gleixner Exp $ 20 + * $Id: cfi_cmdset_0002.c,v 1.117 2005/06/06 23:04:35 tpoynor Exp $ 17 21 * 18 22 */ 19 23 ··· 38 34 #include <linux/mtd/map.h> 39 35 #include <linux/mtd/mtd.h> 40 36 #include <linux/mtd/cfi.h> 37 + #include <linux/mtd/xip.h> 41 38 42 39 #define AMD_BOOTLOC_BUG 43 40 #define FORCE_WORD_WRITE 0 ··· 398 393 * correctly and is therefore not done (particulary with interleaved chips 399 394 * as each chip must be checked independantly of the others). 400 395 */ 401 - static int chip_ready(struct map_info *map, unsigned long addr) 396 + static int __xipram chip_ready(struct map_info *map, unsigned long addr) 402 397 { 403 398 map_word d, t; 404 399 ··· 423 418 * as each chip must be checked independantly of the others). 
424 419 * 425 420 */ 426 - static int chip_good(struct map_info *map, unsigned long addr, map_word expected) 421 + static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) 427 422 { 428 423 map_word oldd, curd; 429 424 ··· 453 448 454 449 if (time_after(jiffies, timeo)) { 455 450 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 456 - cfi_spin_unlock(chip->mutex); 451 + spin_unlock(chip->mutex); 457 452 return -EIO; 458 453 } 459 - cfi_spin_unlock(chip->mutex); 454 + spin_unlock(chip->mutex); 460 455 cfi_udelay(1); 461 - cfi_spin_lock(chip->mutex); 456 + spin_lock(chip->mutex); 462 457 /* Someone else might have been playing with it. */ 463 458 goto retry; 464 459 } ··· 506 501 return -EIO; 507 502 } 508 503 509 - cfi_spin_unlock(chip->mutex); 504 + spin_unlock(chip->mutex); 510 505 cfi_udelay(1); 511 - cfi_spin_lock(chip->mutex); 506 + spin_lock(chip->mutex); 512 507 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 513 508 So we can just loop here. 
*/ 514 509 } 510 + chip->state = FL_READY; 511 + return 0; 512 + 513 + case FL_XIP_WHILE_ERASING: 514 + if (mode != FL_READY && mode != FL_POINT && 515 + (!cfip || !(cfip->EraseSuspend&2))) 516 + goto sleep; 517 + chip->oldstate = chip->state; 515 518 chip->state = FL_READY; 516 519 return 0; 517 520 ··· 532 519 sleep: 533 520 set_current_state(TASK_UNINTERRUPTIBLE); 534 521 add_wait_queue(&chip->wq, &wait); 535 - cfi_spin_unlock(chip->mutex); 522 + spin_unlock(chip->mutex); 536 523 schedule(); 537 524 remove_wait_queue(&chip->wq, &wait); 538 - cfi_spin_lock(chip->mutex); 525 + spin_lock(chip->mutex); 539 526 goto resettime; 540 527 } 541 528 } ··· 553 540 chip->state = FL_ERASING; 554 541 break; 555 542 543 + case FL_XIP_WHILE_ERASING: 544 + chip->state = chip->oldstate; 545 + chip->oldstate = FL_READY; 546 + break; 547 + 556 548 case FL_READY: 557 549 case FL_STATUS: 558 550 /* We should really make set_vpp() count, rather than doing this */ ··· 569 551 wake_up(&chip->wq); 570 552 } 571 553 554 + #ifdef CONFIG_MTD_XIP 555 + 556 + /* 557 + * No interrupt what so ever can be serviced while the flash isn't in array 558 + * mode. This is ensured by the xip_disable() and xip_enable() functions 559 + * enclosing any code path where the flash is known not to be in array mode. 560 + * And within a XIP disabled code path, only functions marked with __xipram 561 + * may be called and nothing else (it's a good thing to inspect generated 562 + * assembly to make sure inline functions were actually inlined and that gcc 563 + * didn't emit calls to its own support functions). Also configuring MTD CFI 564 + * support to a single buswidth and a single interleave is also recommended. 
565 + */ 566 + #include <asm/hardware.h> 567 + static void xip_disable(struct map_info *map, struct flchip *chip, 568 + unsigned long adr) 569 + { 570 + /* TODO: chips with no XIP use should ignore and return */ 571 + (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 572 + local_irq_disable(); 573 + } 574 + 575 + static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 576 + unsigned long adr) 577 + { 578 + struct cfi_private *cfi = map->fldrv_priv; 579 + 580 + if (chip->state != FL_POINT && chip->state != FL_READY) { 581 + map_write(map, CMD(0xf0), adr); 582 + chip->state = FL_READY; 583 + } 584 + (void) map_read(map, adr); 585 + asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */ 586 + local_irq_enable(); 587 + } 588 + 589 + /* 590 + * When a delay is required for the flash operation to complete, the 591 + * xip_udelay() function is polling for both the given timeout and pending 592 + * (but still masked) hardware interrupts. Whenever there is an interrupt 593 + * pending then the flash erase operation is suspended, array mode restored 594 + * and interrupts unmasked. Task scheduling might also happen at that 595 + * point. The CPU eventually returns from the interrupt or the call to 596 + * schedule() and the suspended flash operation is resumed for the remaining 597 + * of the delay period. 598 + * 599 + * Warning: this function _will_ fool interrupt latency tracing tools. 
600 + */ 601 + 602 + static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 603 + unsigned long adr, int usec) 604 + { 605 + struct cfi_private *cfi = map->fldrv_priv; 606 + struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 607 + map_word status, OK = CMD(0x80); 608 + unsigned long suspended, start = xip_currtime(); 609 + flstate_t oldstate; 610 + 611 + do { 612 + cpu_relax(); 613 + if (xip_irqpending() && extp && 614 + ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 615 + (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 616 + /* 617 + * Let's suspend the erase operation when supported. 618 + * Note that we currently don't try to suspend 619 + * interleaved chips if there is already another 620 + * operation suspended (imagine what happens 621 + * when one chip was already done with the current 622 + * operation while another chip suspended it, then 623 + * we resume the whole thing at once). Yes, it 624 + * can happen! 625 + */ 626 + map_write(map, CMD(0xb0), adr); 627 + usec -= xip_elapsed_since(start); 628 + suspended = xip_currtime(); 629 + do { 630 + if (xip_elapsed_since(suspended) > 100000) { 631 + /* 632 + * The chip doesn't want to suspend 633 + * after waiting for 100 msecs. 634 + * This is a critical error but there 635 + * is not much we can do here. 636 + */ 637 + return; 638 + } 639 + status = map_read(map, adr); 640 + } while (!map_word_andequal(map, status, OK, OK)); 641 + 642 + /* Suspend succeeded */ 643 + oldstate = chip->state; 644 + if (!map_word_bitsset(map, status, CMD(0x40))) 645 + break; 646 + chip->state = FL_XIP_WHILE_ERASING; 647 + chip->erase_suspended = 1; 648 + map_write(map, CMD(0xf0), adr); 649 + (void) map_read(map, adr); 650 + asm volatile (".rep 8; nop; .endr"); 651 + local_irq_enable(); 652 + spin_unlock(chip->mutex); 653 + asm volatile (".rep 8; nop; .endr"); 654 + cond_resched(); 655 + 656 + /* 657 + * We're back. 
However someone else might have 658 + * decided to go write to the chip if we are in 659 + * a suspended erase state. If so let's wait 660 + * until it's done. 661 + */ 662 + spin_lock(chip->mutex); 663 + while (chip->state != FL_XIP_WHILE_ERASING) { 664 + DECLARE_WAITQUEUE(wait, current); 665 + set_current_state(TASK_UNINTERRUPTIBLE); 666 + add_wait_queue(&chip->wq, &wait); 667 + spin_unlock(chip->mutex); 668 + schedule(); 669 + remove_wait_queue(&chip->wq, &wait); 670 + spin_lock(chip->mutex); 671 + } 672 + /* Disallow XIP again */ 673 + local_irq_disable(); 674 + 675 + /* Resume the write or erase operation */ 676 + map_write(map, CMD(0x30), adr); 677 + chip->state = oldstate; 678 + start = xip_currtime(); 679 + } else if (usec >= 1000000/HZ) { 680 + /* 681 + * Try to save on CPU power when waiting delay 682 + * is at least a system timer tick period. 683 + * No need to be extremely accurate here. 684 + */ 685 + xip_cpu_idle(); 686 + } 687 + status = map_read(map, adr); 688 + } while (!map_word_andequal(map, status, OK, OK) 689 + && xip_elapsed_since(start) < usec); 690 + } 691 + 692 + #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 693 + 694 + /* 695 + * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 696 + * the flash is actively programming or erasing since we have to poll for 697 + * the operation to complete anyway. We can't do that in a generic way with 698 + * a XIP setup so do it before the actual flash operation in this case 699 + * and stub it out from INVALIDATE_CACHE_UDELAY. 700 + */ 701 + #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 702 + INVALIDATE_CACHED_RANGE(map, from, size) 703 + 704 + #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 705 + UDELAY(map, chip, adr, usec) 706 + 707 + /* 708 + * Extra notes: 709 + * 710 + * Activating this XIP support changes the way the code works a bit. 
For 711 + * example the code to suspend the current process when concurrent access 712 + * happens is never executed because xip_udelay() will always return with the 713 + * same chip state as it was entered with. This is why there is no care for 714 + * the presence of add_wait_queue() or schedule() calls from within a couple 715 + * xip_disable()'d areas of code, like in do_erase_oneblock for example. 716 + * The queueing and scheduling are always happening within xip_udelay(). 717 + * 718 + * Similarly, get_chip() and put_chip() just happen to always be executed 719 + * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 720 + * is in array mode, therefore never executing many cases therein and not 721 + * causing any problem with XIP. 722 + */ 723 + 724 + #else 725 + 726 + #define xip_disable(map, chip, adr) 727 + #define xip_enable(map, chip, adr) 728 + #define XIP_INVAL_CACHED_RANGE(x...) 729 + 730 + #define UDELAY(map, chip, adr, usec) \ 731 + do { \ 732 + spin_unlock(chip->mutex); \ 733 + cfi_udelay(usec); \ 734 + spin_lock(chip->mutex); \ 735 + } while (0) 736 + 737 + #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 738 + do { \ 739 + spin_unlock(chip->mutex); \ 740 + INVALIDATE_CACHED_RANGE(map, adr, len); \ 741 + cfi_udelay(usec); \ 742 + spin_lock(chip->mutex); \ 743 + } while (0) 744 + 745 + #endif 572 746 573 747 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 574 748 { ··· 773 563 /* Ensure cmd read/writes are aligned. 
*/ 774 564 cmd_addr = adr & ~(map_bankwidth(map)-1); 775 565 776 - cfi_spin_lock(chip->mutex); 566 + spin_lock(chip->mutex); 777 567 ret = get_chip(map, chip, cmd_addr, FL_READY); 778 568 if (ret) { 779 - cfi_spin_unlock(chip->mutex); 569 + spin_unlock(chip->mutex); 780 570 return ret; 781 571 } 782 572 ··· 789 579 790 580 put_chip(map, chip, cmd_addr); 791 581 792 - cfi_spin_unlock(chip->mutex); 582 + spin_unlock(chip->mutex); 793 583 return 0; 794 584 } 795 585 ··· 843 633 struct cfi_private *cfi = map->fldrv_priv; 844 634 845 635 retry: 846 - cfi_spin_lock(chip->mutex); 636 + spin_lock(chip->mutex); 847 637 848 638 if (chip->state != FL_READY){ 849 639 #if 0 ··· 852 642 set_current_state(TASK_UNINTERRUPTIBLE); 853 643 add_wait_queue(&chip->wq, &wait); 854 644 855 - cfi_spin_unlock(chip->mutex); 645 + spin_unlock(chip->mutex); 856 646 857 647 schedule(); 858 648 remove_wait_queue(&chip->wq, &wait); ··· 881 671 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 882 672 883 673 wake_up(&chip->wq); 884 - cfi_spin_unlock(chip->mutex); 674 + spin_unlock(chip->mutex); 885 675 886 676 return 0; 887 677 } ··· 930 720 } 931 721 932 722 933 - static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 723 + static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 934 724 { 935 725 struct cfi_private *cfi = map->fldrv_priv; 936 726 unsigned long timeo = jiffies + HZ; ··· 950 740 951 741 adr += chip->start; 952 742 953 - cfi_spin_lock(chip->mutex); 743 + spin_lock(chip->mutex); 954 744 ret = get_chip(map, chip, adr, FL_WRITING); 955 745 if (ret) { 956 - cfi_spin_unlock(chip->mutex); 746 + spin_unlock(chip->mutex); 957 747 return ret; 958 748 } 959 749 ··· 973 763 goto op_done; 974 764 } 975 765 766 + XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 976 767 ENABLE_VPP(map); 768 + xip_disable(map, chip, adr); 977 769 retry: 978 770 
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 979 771 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); ··· 983 771 map_write(map, datum, adr); 984 772 chip->state = FL_WRITING; 985 773 986 - cfi_spin_unlock(chip->mutex); 987 - cfi_udelay(chip->word_write_time); 988 - cfi_spin_lock(chip->mutex); 774 + INVALIDATE_CACHE_UDELAY(map, chip, 775 + adr, map_bankwidth(map), 776 + chip->word_write_time); 989 777 990 778 /* See comment above for timeout value. */ 991 779 timeo = jiffies + uWriteTimeout; ··· 996 784 997 785 set_current_state(TASK_UNINTERRUPTIBLE); 998 786 add_wait_queue(&chip->wq, &wait); 999 - cfi_spin_unlock(chip->mutex); 787 + spin_unlock(chip->mutex); 1000 788 schedule(); 1001 789 remove_wait_queue(&chip->wq, &wait); 1002 790 timeo = jiffies + (HZ / 2); /* FIXME */ 1003 - cfi_spin_lock(chip->mutex); 791 + spin_lock(chip->mutex); 1004 792 continue; 1005 793 } 1006 794 ··· 1008 796 break; 1009 797 1010 798 if (time_after(jiffies, timeo)) { 799 + xip_enable(map, chip, adr); 1011 800 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 801 + xip_disable(map, chip, adr); 1012 802 break; 1013 803 } 1014 804 1015 805 /* Latency issues. Drop the lock, wait a while and retry */ 1016 - cfi_spin_unlock(chip->mutex); 1017 - cfi_udelay(1); 1018 - cfi_spin_lock(chip->mutex); 806 + UDELAY(map, chip, adr, 1); 1019 807 } 1020 808 /* Did we succeed? 
*/ 1021 809 if (!chip_good(map, adr, datum)) { ··· 1028 816 1029 817 ret = -EIO; 1030 818 } 819 + xip_enable(map, chip, adr); 1031 820 op_done: 1032 821 chip->state = FL_READY; 1033 822 put_chip(map, chip, adr); 1034 - cfi_spin_unlock(chip->mutex); 823 + spin_unlock(chip->mutex); 1035 824 1036 825 return ret; 1037 826 } ··· 1064 851 map_word tmp_buf; 1065 852 1066 853 retry: 1067 - cfi_spin_lock(cfi->chips[chipnum].mutex); 854 + spin_lock(cfi->chips[chipnum].mutex); 1068 855 1069 856 if (cfi->chips[chipnum].state != FL_READY) { 1070 857 #if 0 ··· 1073 860 set_current_state(TASK_UNINTERRUPTIBLE); 1074 861 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1075 862 1076 - cfi_spin_unlock(cfi->chips[chipnum].mutex); 863 + spin_unlock(cfi->chips[chipnum].mutex); 1077 864 1078 865 schedule(); 1079 866 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); ··· 1087 874 /* Load 'tmp_buf' with old contents of flash */ 1088 875 tmp_buf = map_read(map, bus_ofs+chipstart); 1089 876 1090 - cfi_spin_unlock(cfi->chips[chipnum].mutex); 877 + spin_unlock(cfi->chips[chipnum].mutex); 1091 878 1092 879 /* Number of bytes to copy from buffer */ 1093 880 n = min_t(int, len, map_bankwidth(map)-i); ··· 1142 929 map_word tmp_buf; 1143 930 1144 931 retry1: 1145 - cfi_spin_lock(cfi->chips[chipnum].mutex); 932 + spin_lock(cfi->chips[chipnum].mutex); 1146 933 1147 934 if (cfi->chips[chipnum].state != FL_READY) { 1148 935 #if 0 ··· 1151 938 set_current_state(TASK_UNINTERRUPTIBLE); 1152 939 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1153 940 1154 - cfi_spin_unlock(cfi->chips[chipnum].mutex); 941 + spin_unlock(cfi->chips[chipnum].mutex); 1155 942 1156 943 schedule(); 1157 944 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); ··· 1164 951 1165 952 tmp_buf = map_read(map, ofs + chipstart); 1166 953 1167 - cfi_spin_unlock(cfi->chips[chipnum].mutex); 954 + spin_unlock(cfi->chips[chipnum].mutex); 1168 955 1169 956 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1170 957 ··· 1183 970 /* 
1184 971 * FIXME: interleaved mode not tested, and probably not supported! 1185 972 */ 1186 - static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 1187 - unsigned long adr, const u_char *buf, int len) 973 + static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 974 + unsigned long adr, const u_char *buf, 975 + int len) 1188 976 { 1189 977 struct cfi_private *cfi = map->fldrv_priv; 1190 978 unsigned long timeo = jiffies + HZ; ··· 1199 985 adr += chip->start; 1200 986 cmd_adr = adr; 1201 987 1202 - cfi_spin_lock(chip->mutex); 988 + spin_lock(chip->mutex); 1203 989 ret = get_chip(map, chip, adr, FL_WRITING); 1204 990 if (ret) { 1205 - cfi_spin_unlock(chip->mutex); 991 + spin_unlock(chip->mutex); 1206 992 return ret; 1207 993 } 1208 994 ··· 1211 997 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1212 998 __func__, adr, datum.x[0] ); 1213 999 1000 + XIP_INVAL_CACHED_RANGE(map, adr, len); 1214 1001 ENABLE_VPP(map); 1002 + xip_disable(map, chip, cmd_adr); 1003 + 1215 1004 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1216 1005 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1217 1006 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); ··· 1244 1027 map_write(map, CMD(0x29), cmd_adr); 1245 1028 chip->state = FL_WRITING; 1246 1029 1247 - cfi_spin_unlock(chip->mutex); 1248 - cfi_udelay(chip->buffer_write_time); 1249 - cfi_spin_lock(chip->mutex); 1030 + INVALIDATE_CACHE_UDELAY(map, chip, 1031 + adr, map_bankwidth(map), 1032 + chip->word_write_time); 1250 1033 1251 1034 timeo = jiffies + uWriteTimeout; 1252 1035 ··· 1257 1040 1258 1041 set_current_state(TASK_UNINTERRUPTIBLE); 1259 1042 add_wait_queue(&chip->wq, &wait); 1260 - cfi_spin_unlock(chip->mutex); 1043 + spin_unlock(chip->mutex); 1261 1044 schedule(); 1262 1045 remove_wait_queue(&chip->wq, &wait); 1263 1046 timeo = jiffies + (HZ / 
2); /* FIXME */ 1264 - cfi_spin_lock(chip->mutex); 1047 + spin_lock(chip->mutex); 1265 1048 continue; 1266 1049 } 1267 1050 1268 - if (chip_ready(map, adr)) 1051 + if (chip_ready(map, adr)) { 1052 + xip_enable(map, chip, adr); 1269 1053 goto op_done; 1054 + } 1270 1055 1271 1056 if( time_after(jiffies, timeo)) 1272 1057 break; 1273 1058 1274 1059 /* Latency issues. Drop the lock, wait a while and retry */ 1275 - cfi_spin_unlock(chip->mutex); 1276 - cfi_udelay(1); 1277 - cfi_spin_lock(chip->mutex); 1060 + UDELAY(map, chip, adr, 1); 1278 1061 } 1279 - 1280 - printk(KERN_WARNING "MTD %s(): software timeout\n", 1281 - __func__ ); 1282 1062 1283 1063 /* reset on all failures. */ 1284 1064 map_write( map, CMD(0xF0), chip->start ); 1065 + xip_enable(map, chip, adr); 1285 1066 /* FIXME - should have reset delay before continuing */ 1067 + 1068 + printk(KERN_WARNING "MTD %s(): software timeout\n", 1069 + __func__ ); 1286 1070 1287 1071 ret = -EIO; 1288 1072 op_done: 1289 1073 chip->state = FL_READY; 1290 1074 put_chip(map, chip, adr); 1291 - cfi_spin_unlock(chip->mutex); 1075 + spin_unlock(chip->mutex); 1292 1076 1293 1077 return ret; 1294 1078 } ··· 1379 1161 * Handle devices with one erase region, that only implement 1380 1162 * the chip erase command. 
1381 1163 */ 1382 - static inline int do_erase_chip(struct map_info *map, struct flchip *chip) 1164 + static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 1383 1165 { 1384 1166 struct cfi_private *cfi = map->fldrv_priv; 1385 1167 unsigned long timeo = jiffies + HZ; ··· 1389 1171 1390 1172 adr = cfi->addr_unlock1; 1391 1173 1392 - cfi_spin_lock(chip->mutex); 1174 + spin_lock(chip->mutex); 1393 1175 ret = get_chip(map, chip, adr, FL_WRITING); 1394 1176 if (ret) { 1395 - cfi_spin_unlock(chip->mutex); 1177 + spin_unlock(chip->mutex); 1396 1178 return ret; 1397 1179 } 1398 1180 1399 1181 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1400 1182 __func__, chip->start ); 1401 1183 1184 + XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1402 1185 ENABLE_VPP(map); 1186 + xip_disable(map, chip, adr); 1187 + 1403 1188 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1404 1189 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1405 1190 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); ··· 1414 1193 chip->erase_suspended = 0; 1415 1194 chip->in_progress_block_addr = adr; 1416 1195 1417 - cfi_spin_unlock(chip->mutex); 1418 - msleep(chip->erase_time/2); 1419 - cfi_spin_lock(chip->mutex); 1196 + INVALIDATE_CACHE_UDELAY(map, chip, 1197 + adr, map->size, 1198 + chip->erase_time*500); 1420 1199 1421 1200 timeo = jiffies + (HZ*20); 1422 1201 ··· 1425 1204 /* Someone's suspended the erase. Sleep */ 1426 1205 set_current_state(TASK_UNINTERRUPTIBLE); 1427 1206 add_wait_queue(&chip->wq, &wait); 1428 - cfi_spin_unlock(chip->mutex); 1207 + spin_unlock(chip->mutex); 1429 1208 schedule(); 1430 1209 remove_wait_queue(&chip->wq, &wait); 1431 - cfi_spin_lock(chip->mutex); 1210 + spin_lock(chip->mutex); 1432 1211 continue; 1433 1212 } 1434 1213 if (chip->erase_suspended) { ··· 1448 1227 } 1449 1228 1450 1229 /* Latency issues. 
Drop the lock, wait a while and retry */ 1451 - cfi_spin_unlock(chip->mutex); 1452 - set_current_state(TASK_UNINTERRUPTIBLE); 1453 - schedule_timeout(1); 1454 - cfi_spin_lock(chip->mutex); 1230 + UDELAY(map, chip, adr, 1000000/HZ); 1455 1231 } 1456 1232 /* Did we succeed? */ 1457 1233 if (!chip_good(map, adr, map_word_ff(map))) { ··· 1460 1242 } 1461 1243 1462 1244 chip->state = FL_READY; 1245 + xip_enable(map, chip, adr); 1463 1246 put_chip(map, chip, adr); 1464 - cfi_spin_unlock(chip->mutex); 1247 + spin_unlock(chip->mutex); 1465 1248 1466 1249 return ret; 1467 1250 } 1468 1251 1469 1252 1470 - static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 1253 + static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 1471 1254 { 1472 1255 struct cfi_private *cfi = map->fldrv_priv; 1473 1256 unsigned long timeo = jiffies + HZ; ··· 1477 1258 1478 1259 adr += chip->start; 1479 1260 1480 - cfi_spin_lock(chip->mutex); 1261 + spin_lock(chip->mutex); 1481 1262 ret = get_chip(map, chip, adr, FL_ERASING); 1482 1263 if (ret) { 1483 - cfi_spin_unlock(chip->mutex); 1264 + spin_unlock(chip->mutex); 1484 1265 return ret; 1485 1266 } 1486 1267 1487 1268 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1488 1269 __func__, adr ); 1489 1270 1271 + XIP_INVAL_CACHED_RANGE(map, adr, len); 1490 1272 ENABLE_VPP(map); 1273 + xip_disable(map, chip, adr); 1274 + 1491 1275 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1492 1276 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1493 1277 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); ··· 1501 1279 chip->state = FL_ERASING; 1502 1280 chip->erase_suspended = 0; 1503 1281 chip->in_progress_block_addr = adr; 1504 - 1505 - cfi_spin_unlock(chip->mutex); 1506 - msleep(chip->erase_time/2); 1507 - 
cfi_spin_lock(chip->mutex); 1282 + 1283 + INVALIDATE_CACHE_UDELAY(map, chip, 1284 + adr, len, 1285 + chip->erase_time*500); 1508 1286 1509 1287 timeo = jiffies + (HZ*20); 1510 1288 ··· 1513 1291 /* Someone's suspended the erase. Sleep */ 1514 1292 set_current_state(TASK_UNINTERRUPTIBLE); 1515 1293 add_wait_queue(&chip->wq, &wait); 1516 - cfi_spin_unlock(chip->mutex); 1294 + spin_unlock(chip->mutex); 1517 1295 schedule(); 1518 1296 remove_wait_queue(&chip->wq, &wait); 1519 - cfi_spin_lock(chip->mutex); 1297 + spin_lock(chip->mutex); 1520 1298 continue; 1521 1299 } 1522 1300 if (chip->erase_suspended) { ··· 1526 1304 chip->erase_suspended = 0; 1527 1305 } 1528 1306 1529 - if (chip_ready(map, adr)) 1307 + if (chip_ready(map, adr)) { 1308 + xip_enable(map, chip, adr); 1530 1309 break; 1310 + } 1531 1311 1532 1312 if (time_after(jiffies, timeo)) { 1313 + xip_enable(map, chip, adr); 1533 1314 printk(KERN_WARNING "MTD %s(): software timeout\n", 1534 1315 __func__ ); 1535 1316 break; 1536 1317 } 1537 1318 1538 1319 /* Latency issues. Drop the lock, wait a while and retry */ 1539 - cfi_spin_unlock(chip->mutex); 1540 - set_current_state(TASK_UNINTERRUPTIBLE); 1541 - schedule_timeout(1); 1542 - cfi_spin_lock(chip->mutex); 1320 + UDELAY(map, chip, adr, 1000000/HZ); 1543 1321 } 1544 1322 /* Did we succeed? */ 1545 1323 if (!chip_good(map, adr, map_word_ff(map))) { ··· 1552 1330 1553 1331 chip->state = FL_READY; 1554 1332 put_chip(map, chip, adr); 1555 - cfi_spin_unlock(chip->mutex); 1333 + spin_unlock(chip->mutex); 1556 1334 return ret; 1557 1335 } 1558 1336 ··· 1612 1390 chip = &cfi->chips[i]; 1613 1391 1614 1392 retry: 1615 - cfi_spin_lock(chip->mutex); 1393 + spin_lock(chip->mutex); 1616 1394 1617 1395 switch(chip->state) { 1618 1396 case FL_READY: ··· 1626 1404 * with the chip now anyway. 
1627 1405 */ 1628 1406 case FL_SYNCING: 1629 - cfi_spin_unlock(chip->mutex); 1407 + spin_unlock(chip->mutex); 1630 1408 break; 1631 1409 1632 1410 default: 1633 1411 /* Not an idle state */ 1634 1412 add_wait_queue(&chip->wq, &wait); 1635 1413 1636 - cfi_spin_unlock(chip->mutex); 1414 + spin_unlock(chip->mutex); 1637 1415 1638 1416 schedule(); 1639 1417 ··· 1648 1426 for (i--; i >=0; i--) { 1649 1427 chip = &cfi->chips[i]; 1650 1428 1651 - cfi_spin_lock(chip->mutex); 1429 + spin_lock(chip->mutex); 1652 1430 1653 1431 if (chip->state == FL_SYNCING) { 1654 1432 chip->state = chip->oldstate; 1655 1433 wake_up(&chip->wq); 1656 1434 } 1657 - cfi_spin_unlock(chip->mutex); 1435 + spin_unlock(chip->mutex); 1658 1436 } 1659 1437 } 1660 1438 ··· 1670 1448 for (i=0; !ret && i<cfi->numchips; i++) { 1671 1449 chip = &cfi->chips[i]; 1672 1450 1673 - cfi_spin_lock(chip->mutex); 1451 + spin_lock(chip->mutex); 1674 1452 1675 1453 switch(chip->state) { 1676 1454 case FL_READY: ··· 1690 1468 ret = -EAGAIN; 1691 1469 break; 1692 1470 } 1693 - cfi_spin_unlock(chip->mutex); 1471 + spin_unlock(chip->mutex); 1694 1472 } 1695 1473 1696 1474 /* Unlock the chips again */ ··· 1699 1477 for (i--; i >=0; i--) { 1700 1478 chip = &cfi->chips[i]; 1701 1479 1702 - cfi_spin_lock(chip->mutex); 1480 + spin_lock(chip->mutex); 1703 1481 1704 1482 if (chip->state == FL_PM_SUSPENDED) { 1705 1483 chip->state = chip->oldstate; 1706 1484 wake_up(&chip->wq); 1707 1485 } 1708 - cfi_spin_unlock(chip->mutex); 1486 + spin_unlock(chip->mutex); 1709 1487 } 1710 1488 } 1711 1489 ··· 1724 1502 1725 1503 chip = &cfi->chips[i]; 1726 1504 1727 - cfi_spin_lock(chip->mutex); 1505 + spin_lock(chip->mutex); 1728 1506 1729 1507 if (chip->state == FL_PM_SUSPENDED) { 1730 1508 chip->state = FL_READY; ··· 1734 1512 else 1735 1513 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 1736 1514 1737 - cfi_spin_unlock(chip->mutex); 1515 + spin_unlock(chip->mutex); 1738 1516 } 1739 1517 } 1740 1518
+3 -3
drivers/mtd/chips/fwh_lock.h
··· 58 58 * to flash memory - that means that we don't have to check status 59 59 * and timeout. 60 60 */ 61 - cfi_spin_lock(chip->mutex); 61 + spin_lock(chip->mutex); 62 62 ret = get_chip(map, chip, adr, FL_LOCKING); 63 63 if (ret) { 64 - cfi_spin_unlock(chip->mutex); 64 + spin_unlock(chip->mutex); 65 65 return ret; 66 66 } 67 67 ··· 71 71 /* Done and happy. */ 72 72 chip->state = FL_READY; 73 73 put_chip(map, chip, adr); 74 - cfi_spin_unlock(chip->mutex); 74 + spin_unlock(chip->mutex); 75 75 return 0; 76 76 } 77 77
+6 -5
drivers/mtd/maps/map_funcs.c
··· 1 1 /* 2 - * $Id: map_funcs.c,v 1.9 2004/07/13 22:33:15 dwmw2 Exp $ 2 + * $Id: map_funcs.c,v 1.10 2005/06/06 23:04:36 tpoynor Exp $ 3 3 * 4 4 * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS 5 5 * is enabled. ··· 9 9 #include <linux/module.h> 10 10 11 11 #include <linux/mtd/map.h> 12 + #include <linux/mtd/xip.h> 12 13 13 - static map_word simple_map_read(struct map_info *map, unsigned long ofs) 14 + static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs) 14 15 { 15 16 return inline_map_read(map, ofs); 16 17 } 17 18 18 - static void simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) 19 + static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) 19 20 { 20 21 inline_map_write(map, datum, ofs); 21 22 } 22 23 23 - static void simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 24 + static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 24 25 { 25 26 inline_map_copy_from(map, to, from, len); 26 27 } 27 28 28 - static void simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 29 + static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 29 30 { 30 31 inline_map_copy_to(map, to, from, len); 31 32 }
+1 -11
include/linux/mtd/cfi.h
··· 1 1 2 2 /* Common Flash Interface structures 3 3 * See http://support.intel.com/design/flash/technote/index.htm 4 - * $Id: cfi.h,v 1.53 2005/03/15 19:03:13 gleixner Exp $ 4 + * $Id: cfi.h,v 1.54 2005/06/06 23:04:36 tpoynor Exp $ 5 5 */ 6 6 7 7 #ifndef __MTD_CFI_H__ ··· 426 426 udelay(us); 427 427 cond_resched(); 428 428 } 429 - } 430 - 431 - static inline void cfi_spin_lock(spinlock_t *mutex) 432 - { 433 - spin_lock_bh(mutex); 434 - } 435 - 436 - static inline void cfi_spin_unlock(spinlock_t *mutex) 437 - { 438 - spin_unlock_bh(mutex); 439 429 } 440 430 441 431 struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,