Automatic merge of master.kernel.org:/home/rmk/linux-2.6-rmk.git

+256 -142
+1 -1
arch/arm/mach-s3c2410/clock.c
···
 {
 	unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
 
-	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate) * 2;
+	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate);
 
 	printk("S3C2440: Clock Support, UPLL %ld.%03ld MHz\n",
 		print_mhz(s3c2440_clk_upll.rate));
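The dropped "* 2" matches the datasheet formulas: on the S3C2440 only the MPLL output is doubled, while the UPLL follows the plain S3C2410 formula that s3c2410_get_pll() already computes. A minimal sketch of that arithmetic, assuming the usual S3C2410 PLLCON field layout (MDIV at bit 12, PDIV at bit 4, SDIV at bit 0) — illustrative only, not the patch's code:

    /* Fpll = (m * Fin) / (p * 2^s), with m = MDIV + 8, p = PDIV + 2 */
    static unsigned long pll_rate_sketch(unsigned int pllcon,
                                         unsigned long xtal)
    {
            unsigned int mdiv = (pllcon >> 12) & 0xff;
            unsigned int pdiv = (pllcon >> 4) & 0x3f;
            unsigned int sdiv = pllcon & 0x3;

            return (xtal * (mdiv + 8)) / ((pdiv + 2) << sdiv);
    }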
+4 -2
arch/arm/mach-s3c2410/s3c2440.c
···
 
 	iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
 	iotable_init(mach_desc, size);
+
 	/* rename any peripherals used differing from the s3c2410 */
 
-	s3c_device_i2c.name = "s3c2440-i2c";
+	s3c_device_i2c.name  = "s3c2440-i2c";
+	s3c_device_nand.name = "s3c2440-nand";
 
 	/* change irq for watchdog */
 
···
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_2:
-		hdiv = 1;
+		hdiv = 2;
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_4_8:
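The hdiv correction is an off-by-one with real consequences: the decoded value is used directly as the divisor, so the divide-by-two setting must yield hdiv = 2 (HCLK = FCLK/2), not 1. Sketched as a hypothetical helper around the switch above:

    /* The divisor feeds straight into the bus clock; the old hdiv = 1
     * made the kernel report HCLK equal to FCLK on /2 configurations.
     */
    static unsigned long s3c2440_hclk(unsigned long fclk, unsigned int hdiv)
    {
            return fclk / hdiv;
    }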
+10 -11
arch/arm/mm/Kconfig
···
 
 config TLS_REG_EMUL
 	bool
-	default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
 	help
-	  We might be running on an ARMv6+ processor which should have the TLS
-	  register but for some reason we can't use it, or maybe an SMP system
-	  using a pre-ARMv6 processor (there are apparently a few prototypes
-	  like that in existence) and therefore access to that register must
-	  be emulated.
+	  An SMP system using a pre-ARMv6 processor (there are apparently
+	  a few prototypes like that in existence) and therefore access to
+	  that required register must be emulated.
 
 config HAS_TLS_REG
 	bool
-	depends on CPU_32v6
-	default y if !TLS_REG_EMUL
+	depends on !TLS_REG_EMUL
+	default y if SMP || CPU_32v7
 	help
 	  This selects support for the CP15 thread register.
-	  It is defined to be available on ARMv6 or later.  If a particular
-	  ARMv6 or later CPU doesn't support it then it must omc;ide "select
-	  TLS_REG_EMUL" along with its other caracteristics.
+	  It is defined to be available on some ARMv6 processors (including
+	  all SMP capable ARMv6's) or later processors.  User space may
+	  assume directly accessing that register and always obtain the
+	  expected value only on ARMv7 and above.
 
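The register at stake is TPIDRURO, the user read-only thread ID register in CP15. A minimal sketch of the access these options back (illustrative, not kernel code): on HAS_TLS_REG hardware the mrc executes natively, while under TLS_REG_EMUL the same instruction traps as undefined and the kernel's emulation supplies the value.

    /* Read the TLS pointer the way user space (e.g. __aeabi_read_tp) does. */
    static inline unsigned long read_tls(void)
    {
            unsigned long tp;

            asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));  /* TPIDRURO */
            return tp;
    }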
-80
arch/arm/mm/copypage-v4mc.S
···
-/*
- *  linux/arch/arm/lib/copy_page-armv4mc.S
- *
- *  Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 mini-dcache optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_mc_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r4, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, #PAGE_SZ/64			@ 1
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-1:	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4+1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmneia	r0!, {r2, r3, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_mc_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4_mc_user_fns, #object
-ENTRY(v4_mc_user_fns)
-	.long	v4_mc_clear_user_page
-	.long	v4_mc_copy_user_page
-	.size	v4_mc_user_fns, . - v4_mc_user_fns
+111
arch/arm/mm/copypage-v4mc.c
···
+/*
+ *  linux/arch/arm/lib/copypage-armv4mc.S
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * ARMv4 mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	asm volatile(
+	"stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r4, %2				@ 1\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r4, r4, #1			@ 1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+}
+
+void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ */
+void __attribute__((naked))
+v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"str	lr, [sp, #-4]!\n\
+	mov	r1, %0				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v4_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
+};
+6 -22
arch/arm/mm/copypage-v6.c
···
 #define to_address	(0xffffc000)
 #define to_pgprot	PAGE_KERNEL
 
-static pte_t *from_pte;
-static pte_t *to_pte;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
 static DEFINE_SPINLOCK(v6_lock);
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
···
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
···
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
···
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		pgd_t *pgd;
-		pmd_t *pmd;
-
-		pgd = pgd_offset_k(from_address);
-		pmd = pmd_alloc(&init_mm, pgd, from_address);
-		if (!pmd)
-			BUG();
-		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-		if (!from_pte)
-			BUG();
-
-		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-		if (!to_pte)
-			BUG();
-
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
 		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
 	}
···
 	return 0;
 }
 
-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
+35 -2
arch/arm/mm/flush.c
···
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
···
 		return;
 
 	/*
+	 * This is a page cache page.  If we have a VIPT cache, we
+	 * only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
+	 */
+	if (cache_is_vipt()) {
+		if (cache_is_vipt_aliasing())
+			flush_pfn_alias(page_to_pfn(page),
+					page->index << PAGE_CACHE_SHIFT);
+		return;
+	}
+
+	/*
 	 * There are possible user space mappings of this page:
 	 * - VIVT cache: we need to also write back and invalidate all user
 	 *   data in the current VM view associated with this page.
···
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
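The two mcrr instructions are the ARMv6 block cache operations: clean and invalidate the D-cache over the aliased page (c14), then invalidate the I-cache range (c5). The "colour" being matched is simply which cache sets a virtual address lands in; a sketch of the arithmetic, assuming CACHE_COLOUR matches the SHMLBA-based DCACHE_COLOUR definition visible in copypage-v6.c above:

    #include <asm/shmparam.h>       /* SHMLBA */

    /* With SHMLBA = 4 * PAGE_SIZE on an aliasing VIPT D-cache there are
     * four colours; a kernel alias only hits the same cache lines as the
     * user mapping when the colours agree.
     */
    #define CACHE_COLOUR_SKETCH(vaddr) \
            (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)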
+19 -8
arch/arm/mm/mm-armv.c
···
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
···
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
···
 		return;
 
 	/* pgd is always present and good */
-	pmd = (pmd_t *)pgd;
+	pmd = pmd_off(pgd, 0);
 	if (pmd_none(*pmd))
 		goto free;
 	if (pmd_bad(*pmd)) {
···
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 	if (virt & (1 << 20))
 		pmdp++;
 
···
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 	pte_t *ptep;
-
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 
 	if (pmd_none(*pmdp)) {
 		unsigned long pmdval;
···
  */
 static inline void clear_mapping(unsigned long virt)
 {
-	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+	pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
···
 			PMD_TYPE_SECT;
 		if (cpu_arch <= CPU_ARCH_ARMv5)
 			pmdval |= PMD_BIT4;
-		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
···
 
 	flush_cache_all();
 	flush_tlb_all();
+
+	top_pmd = pmd_off_k(VECTORS_HIGH);
 }
 
 /*
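top_pmd caches the pmd covering the top of the address space, taken from the high vectors address once paging is up. Since the minicache window (copypage-v4mc.c), the aliasing-flush window (flush.c) and the vectors page all live up there, every TOP_PTE() user in this merge can share it. A sketch of the common pattern, as an illustrative helper rather than patch code:

    #include <asm/page.h>
    #include <asm/pgtable.h>
    #include <asm/tlbflush.h>

    #define TOP_PTE(x)      pte_offset_kernel(top_pmd, x)

    /* Retarget a fixed high kernel address at a physical page, then
     * drop any stale TLB entry for that address.
     */
    static void remap_top_page(unsigned long to, unsigned long pfn,
                               pgprot_t prot)
    {
            set_pte(TOP_PTE(to), pfn_pte(pfn, prot));
            flush_tlb_kernel_page(to);
    }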
+24
include/asm-arm/arch-imx/imx-regs.h
···
 #define PD31_BIN_SPI2_TXD   ( GPIO_PORTD | GPIO_BIN | 31 )
 
 /*
+ * PWM controller
+ */
+#define PWMC	__REG(IMX_PWM_BASE + 0x00)	/* PWM Control Register */
+#define PWMS	__REG(IMX_PWM_BASE + 0x04)	/* PWM Sample Register */
+#define PWMP	__REG(IMX_PWM_BASE + 0x08)	/* PWM Period Register */
+#define PWMCNT	__REG(IMX_PWM_BASE + 0x0C)	/* PWM Counter Register */
+
+#define PWMC_HCTR		(0x01<<18)	/* Halfword FIFO Data Swapping */
+#define PWMC_BCTR		(0x01<<17)	/* Byte FIFO Data Swapping */
+#define PWMC_SWR		(0x01<<16)	/* Software Reset */
+#define PWMC_CLKSRC		(0x01<<15)	/* Clock Source */
+#define PWMC_PRESCALER(x)	(((x-1) & 0x7F) << 8)	/* PRESCALER */
+#define PWMC_IRQ		(0x01<< 7)	/* Interrupt Request */
+#define PWMC_IRQEN		(0x01<< 6)	/* Interrupt Request Enable */
+#define PWMC_FIFOAV		(0x01<< 5)	/* FIFO Available */
+#define PWMC_EN			(0x01<< 4)	/* Enables/Disables the PWM */
+#define PWMC_REPEAT(x)		(((x) & 0x03) << 2)	/* Sample Repeats */
+#define PWMC_CLKSEL(x)		(((x) & 0x03) << 0)	/* Clock Selection */
+
+#define PWMS_SAMPLE(x)		((x) & 0xFFFF)	/* Contains a two-sample word */
+#define PWMP_PERIOD(x)		((x) & 0xFFFF)	/* Represents the PWM's period */
+#define PWMC_COUNTER(x)		((x) & 0xFFFF)	/* Represents the current count value */
+
+/*
  * DMA Controller
  */
 #define DCR	__REG(IMX_DMAC_BASE +0x00)	/* DMA Control Register */
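These definitions alone are enough to sketch a duty-cycle setup. The helper below is hypothetical (no such function exists in this merge) and assumes the PWM clock gating and pad multiplexing are configured elsewhere; a real driver would also wait for the software reset to self-clear:

    /* Program period and sample, then start the counter. */
    static void imx_pwm_start(unsigned int period, unsigned int sample)
    {
            PWMC = PWMC_SWR;                        /* software reset first */
            PWMC = PWMC_PRESCALER(8) | PWMC_CLKSEL(0);
            PWMP = PWMP_PERIOD(period);             /* counter rollover point */
            PWMS = PWMS_SAMPLE(sample);             /* on-time within the period */
            PWMC |= PWMC_EN;                        /* enable the controller */
    }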
+41 -3
include/asm-arm/arch-s3c2410/regs-nand.h
···
 /* linux/include/asm-arm/arch-s3c2410/regs-nand.h
  *
- * Copyright (c) 2004 Simtec Electronics <linux@simtec.co.uk>
+ * Copyright (c) 2004,2005 Simtec Electronics <linux@simtec.co.uk>
  *	http://www.simtec.co.uk/products/SWLINUX/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * S3C2410 clock register definitions
+ * S3C2410 NAND register definitions
  *
  * Changelog:
  *	18-Aug-2004 BJD  Copied file from 2.4 and updated
+ *	01-May-2005 BJD  Added definitions for s3c2440 controller
  */
 
 #ifndef __ASM_ARM_REGS_NAND
···
 #define S3C2410_NFSTAT   S3C2410_NFREG(0x10)
 #define S3C2410_NFECC    S3C2410_NFREG(0x14)
 
+#define S3C2440_NFCONT   S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD    S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR   S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA   S3C2410_NFREG(0x10)
+#define S3C2440_NFECCD0  S3C2410_NFREG(0x14)
+#define S3C2440_NFECCD1  S3C2410_NFREG(0x18)
+#define S3C2440_NFECCD   S3C2410_NFREG(0x1C)
+#define S3C2440_NFSTAT   S3C2410_NFREG(0x20)
+#define S3C2440_NFESTAT0 S3C2410_NFREG(0x24)
+#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
+#define S3C2440_NFMECC0  S3C2410_NFREG(0x2C)
+#define S3C2440_NFMECC1  S3C2410_NFREG(0x30)
+#define S3C2440_NFSECC   S3C2410_NFREG(0x34)
+#define S3C2440_NFSBLK   S3C2410_NFREG(0x38)
+#define S3C2440_NFEBLK   S3C2410_NFREG(0x3C)
+
 #define S3C2410_NFCONF_EN          (1<<15)
 #define S3C2410_NFCONF_512BYTE     (1<<14)
 #define S3C2410_NFCONF_4STEP       (1<<13)
···
 
 #define S3C2410_NFSTAT_BUSY        (1<<0)
 
-/* think ECC can only be 8bit read? */
+#define S3C2440_NFCONF_BUSWIDTH_8    (0<<0)
+#define S3C2440_NFCONF_BUSWIDTH_16   (1<<0)
+#define S3C2440_NFCONF_ADVFLASH      (1<<3)
+#define S3C2440_NFCONF_TACLS(x)      ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x)     ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x)     ((x)<<4)
+
+#define S3C2440_NFCONT_LOCKTIGHT     (1<<13)
+#define S3C2440_NFCONT_SOFTLOCK      (1<<12)
+#define S3C2440_NFCONT_ILLEGALACC_EN (1<<10)
+#define S3C2440_NFCONT_RNBINT_EN     (1<<9)
+#define S3C2440_NFCONT_RN_FALLING    (1<<8)
+#define S3C2440_NFCONT_SPARE_ECCLOCK (1<<6)
+#define S3C2440_NFCONT_MAIN_ECCLOCK  (1<<5)
+#define S3C2440_NFCONT_INITECC       (1<<4)
+#define S3C2440_NFCONT_nFCE          (1<<1)
+#define S3C2440_NFCONT_ENABLE        (1<<0)
+
+#define S3C2440_NFSTAT_READY         (1<<0)
+#define S3C2440_NFSTAT_nCE           (1<<1)
+#define S3C2440_NFSTAT_RnB_CHANGE    (1<<2)
+#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3)
 
 #endif /* __ASM_ARM_REGS_NAND */
 
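A short sketch of how a driver might use the new NFCONT bits. The helper itself is hypothetical, but the __raw_readl()/__raw_writel() accessors are the ones this tree already uses on these registers (see the clock.c hunk above):

    #include <asm/io.h>

    /* nFCE is active low: clearing the bit asserts the chip enable. */
    static void s3c2440_nand_select(int select)
    {
            unsigned long cont = __raw_readl(S3C2440_NFCONT);

            if (select)
                    cont &= ~S3C2440_NFCONT_nFCE;
            else
                    cont |= S3C2440_NFCONT_nFCE;

            __raw_writel(cont, S3C2440_NFCONT);
    }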
+5 -13
include/asm-arm/page.h
···
 				unsigned long user);
 #endif
 
-#define clear_user_page(addr,vaddr,pg)			\
-	do {						\
-		preempt_disable();			\
-		__cpu_clear_user_page(addr, vaddr);	\
-		preempt_enable();			\
-	} while (0)
-
-#define copy_user_page(to,from,vaddr,pg)		\
-	do {						\
-		preempt_disable();			\
-		__cpu_copy_user_page(to, from, vaddr);	\
-		preempt_enable();			\
-	} while (0)
+#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
+#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
 
 #define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
···
 #define __pgprot(x)     (x)
 
 #endif /* STRICT_MM_TYPECHECKS */
+
+/* the upper-most page table pointer */
+extern pmd_t *top_pmd;
 
 /* Pure 2^n version of get_order */
 static inline int get_order(unsigned long size)
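The preempt_disable() pairs could go because the implementations that share a fixed kernel mapping (the minicache and aliasing-V6 copies above) now serialise on their own spinlocks. For context, a sketch of the indirection behind these macros, as assumed from the multi-user-fns configuration of this header:

    struct cpu_user_fns {
            void (*cpu_clear_user_page)(void *p, unsigned long user);
            void (*cpu_copy_user_page)(void *to, const void *from,
                                       unsigned long user);
    };

    extern struct cpu_user_fns cpu_user;

    #define __cpu_clear_user_page   cpu_user.cpu_clear_user_page
    #define __cpu_copy_user_page    cpu_user.cpu_copy_user_page

This is how v4_mc_user_fns in copypage-v4mc.c and the v6 assignments in copypage-v6.c end up behind clear_user_page()/copy_user_page().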