···
 	iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
 	iotable_init(mach_desc, size);
+
 	/* rename any peripherals used differing from the s3c2410 */
 
-	s3c_device_i2c.name = "s3c2440-i2c";
+	s3c_device_i2c.name  = "s3c2440-i2c";
+	s3c_device_nand.name = "s3c2440-nand";
 
 	/* change irq for watchdog */
···
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_2:
-		hdiv = 1;
+		hdiv = 2;
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_4_8:
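The hdiv change is the substantive fix here: the HDIVN field of CLKDIVN selects the FCLK-to-HCLK divisor, and the HDIVN_2 encoding means HCLK = FCLK/2, so returning hdiv = 1 made the kernel report HCLK at twice its real rate. A minimal standalone sketch of the corrected derivation; the constant names and field encodings below are hypothetical, not the real s3c2440 register definitions:

/* Illustrative sketch only: these encodings are assumptions. */
#define CLKDIVN_HDIVN_1		0x0	/* HCLK = FCLK */
#define CLKDIVN_HDIVN_2		0x1	/* HCLK = FCLK / 2 */
#define CLKDIVN_HDIVN_4_8	0x2	/* HCLK = FCLK / 4 or / 8 */

static unsigned long hclk_from_fclk(unsigned long fclk, unsigned int hdivn)
{
	unsigned int hdiv = 1;

	switch (hdivn) {
	case CLKDIVN_HDIVN_1:
		hdiv = 1;
		break;
	case CLKDIVN_HDIVN_2:
		hdiv = 2;	/* the value the patch corrects: was wrongly 1 */
		break;
	case CLKDIVN_HDIVN_4_8:
		hdiv = 4;	/* simplified; the real code also consults CAMDIVN */
		break;
	}

	return fclk / hdiv;
}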
+10-11
arch/arm/mm/Kconfig
···
 
 config TLS_REG_EMUL
 	bool
-	default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
 	help
-	  We might be running on an ARMv6+ processor which should have the TLS
-	  register but for some reason we can't use it, or maybe an SMP system
-	  using a pre-ARMv6 processor (there are apparently a few prototypes
-	  like that in existence) and therefore access to that register must
-	  be emulated.
+	  On an SMP system using a pre-ARMv6 processor (there are
+	  apparently a few prototypes like that in existence), access
+	  to that required register must be emulated.
 
 config HAS_TLS_REG
 	bool
-	depends on CPU_32v6
-	default y if !TLS_REG_EMUL
+	depends on !TLS_REG_EMUL
+	default y if SMP || CPU_32v7
 	help
 	  This selects support for the CP15 thread register.
-	  It is defined to be available on ARMv6 or later. If a particular
-	  ARMv6 or later CPU doesn't support it then it must include "select
-	  TLS_REG_EMUL" along with its other characteristics.
+	  It is defined to be available on some ARMv6 processors (including
+	  all SMP capable ARMv6's) or later processors.  Only on ARMv7 and
+	  above may user space assume that directly accessing the register
+	  always returns the expected value.
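For context on what these options gate: with HAS_TLS_REG the kernel loads the thread pointer into the CP15 TPIDRURO register on context switch and user space can read it directly; with TLS_REG_EMUL that read is trapped and emulated. The portable way for user space to fetch the value in either case is the documented __kuser_get_tls helper at the fixed address 0xffff0fe0. A hedged user-space sketch; the helper address is the documented kuser ABI, everything else is illustrative:

#include <stdio.h>

typedef unsigned long (*kuser_get_tls_t)(void);

/* Documented ARM kuser helper entry point. */
#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

/* Works everywhere: the kernel emulates it when the hardware
 * register is absent (the TLS_REG_EMUL case). */
static unsigned long get_tls_portable(void)
{
	return __kuser_get_tls();
}

/* Direct CP15 read of TPIDRURO; per the help text above, user
 * space may only rely on this on ARMv7 and later. */
static unsigned long get_tls_direct(void)
{
	unsigned long tp;

	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}

int main(void)
{
	printf("TLS = %#lx\n", get_tls_portable());
	return 0;
}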
-80
arch/arm/mm/copypage-v4mc.S
-/*
- * linux/arch/arm/lib/copy_page-armv4mc.S
- *
- * Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 mini-dcache optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address. Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue. The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_mc_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r4, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, #PAGE_SZ/64			@ 1
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-1:	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4+1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmneia	r0!, {r2, r3, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_mc_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4_mc_user_fns, #object
-ENTRY(v4_mc_user_fns)
-	.long	v4_mc_clear_user_page
-	.long	v4_mc_copy_user_page
-	.size	v4_mc_user_fns, . - v4_mc_user_fns
+111
arch/arm/mm/copypage-v4mc.c
+/*
+ * linux/arch/arm/lib/copypage-armv4mc.S
+ *
+ * Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * ARMv4 mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address. Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue. The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction. If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	asm volatile(
+	"stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r4, %2				@ 1\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r4, r4, #1			@ 1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+}
+
+void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ */
+void __attribute__((naked))
+v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"str	lr, [sp, #-4]!\n\
+	mov	r1, %0				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v4_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
+};
arch/arm/mm/flush.c
···
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
···
 		return;
 
 	/*
+	 * This is a page cache page.  If we have a VIPT cache, we
+	 * only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
+	 */
+	if (cache_is_vipt()) {
+		if (cache_is_vipt_aliasing())
+			flush_pfn_alias(page_to_pfn(page),
+					page->index << PAGE_CACHE_SHIFT);
+		return;
+	}
+
+	/*
 	 * There are possible user space mappings of this page:
 	 * - VIVT cache: we need to also write back and invalidate all user
 	 *   data in the current VM view associated with this page.
···
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
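The colour arithmetic behind flush_pfn_alias() is worth unpacking: on an aliasing VIPT cache, two virtual mappings of one physical page occupy the same cache lines only when their addresses are congruent modulo the aliasing span (SHMLBA), so the temporary kernel alias is placed at the same colour as the user mapping before the ranged mcrr operations (c14: clean+invalidate D; c5: invalidate I) run over it. A small sketch, assuming the usual four-page SHMLBA for these caches (the real CACHE_COLOUR definition lives in asm/shmparam.h and may differ in detail):

#define ALIAS_FLUSH_START	0xffff4000		/* from the hunk above */
#define SHMLBA			(4 * PAGE_SIZE)		/* assumed aliasing span */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/* Where the temporary kernel alias for a given user address lands. */
static unsigned long alias_address(unsigned long vaddr)
{
	return ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
}

For example, with 4K pages a user address of 0x40003000 has colour 3, so alias_address() yields 0xffff4000 + 0x3000 = 0xffff7000, and the flush covers exactly that page.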
+19-8
arch/arm/mm/mm-armv.c
···
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
···
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
···
 		return;
 
 	/* pgd is always present and good */
-	pmd = (pmd_t *)pgd;
+	pmd = pmd_off(pgd, 0);
 	if (pmd_none(*pmd))
 		goto free;
 	if (pmd_bad(*pmd)) {
···
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 	if (virt & (1 << 20))
 		pmdp++;
 
···
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 	pte_t *ptep;
-
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 
 	if (pmd_none(*pmdp)) {
 		unsigned long pmdval;
···
  */
 static inline void clear_mapping(unsigned long virt)
 {
-	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+	pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
···
 				 PMD_TYPE_SECT;
 		if (cpu_arch <= CPU_ARCH_ARMv5)
 			pmdval |= PMD_BIT4;
-		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
···
 
 	flush_cache_all();
 	flush_tlb_all();
+
+	top_pmd = pmd_off_k(VECTORS_HIGH);
 }
 
 /*
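Why caching top_pmd pays off: flush.c and copypage-v4mc.c above both build short-lived kernel mappings at fixed addresses near the top of the address space (the VECTORS_HIGH region at 0xffff0000), and saving the pmd that covers that range at boot lets TOP_PTE() replace a full pgd/pmd walk with a single pte_offset_kernel() lookup. A hedged sketch of the resulting idiom; the helper name is hypothetical, and addr must lie within the range top_pmd covers:

#define TOP_PTE(addr)	pte_offset_kernel(top_pmd, addr)

/* Map one page at a fixed high kernel address; addr must fall in
 * the region whose pmd was cached from VECTORS_HIGH at boot. */
static void set_top_pte(unsigned long addr, unsigned long pfn, pgprot_t prot)
{
	set_pte(TOP_PTE(addr), pfn_pte(pfn, prot));
	flush_tlb_kernel_page(addr);
}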