Merge branch 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm:
[ARM] nommu: backtrace code must not reference a discarded section
[ARM] nommu: Initial uCLinux support for MMU-based CPUs
[ARM] nommu: prevent Xscale-based machines being selected
[ARM] nommu: export flush_dcache_page()
[ARM] nommu: remove fault-armv, mmap and mm-armv files from nommu build
[ARM] Remove TABLE_SIZE, and several unused function prototypes
[ARM] nommu: Provide a simple flush_dcache_page implementation
[ARM] nommu: add arch/arm/Kconfig-nommu to Kconfig files
[ARM] nommu: add stubs for ioremap and friends
[ARM] nommu: avoid selecting TLB and CPU specific copy code
[ARM] nommu: uaccess tweaks
[ARM] nommu: adjust headers for !MMU ARM systems
[ARM] nommu: we need the TLS register emulation for nommu mode

+675 -172
+9
arch/arm/Kconfig
···
 config ARCH_IOP3XX
 	bool "IOP3xx-based"
+	depends on MMU
 	select PCI
 	help
 	  Support for Intel's IOP3XX (XScale) family of processors.

 config ARCH_IXP4XX
 	bool "IXP4xx-based"
+	depends on MMU
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.

 config ARCH_IXP2000
 	bool "IXP2400/2800-based"
+	depends on MMU
 	select PCI
 	help
 	  Support for Intel's IXP2400/2800 (XScale) family of processors.

 config ARCH_IXP23XX
 	bool "IXP23XX-based"
+	depends on MMU
 	select PCI
 	help
 	  Support for Intel's IXP23xx (XScale) family of processors.
···
 config ARCH_PXA
 	bool "PXA2xx-based"
+	depends on MMU
 	select ARCH_MTD_XIP
 	help
 	  Support for Intel's PXA2XX processor line.
···
 	bool
 	depends on CPU_XSCALE && !XSCALE_PMU_TIMER
 	default y
+
+if !MMU
+source "arch/arm/Kconfig-nommu"
+endif

 endmenu
+5 -2
arch/arm/kernel/armksyms.c
···
 EXPORT_SYMBOL(__memzero);

 /* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);

 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
···
 EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
+#endif

 /* crypto hash */
 EXPORT_SYMBOL(sha_transform);
+8
arch/arm/kernel/vmlinux.lds.S
···
 		*(.exit.text)
 		*(.exit.data)
 		*(.exitcall.exit)
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
 	}

 	.text : {			/* Real text segment	*/
···
 			*(.text)
 			SCHED_TEXT
 			LOCK_TEXT
+#ifdef CONFIG_MMU
 			*(.fixup)
+#endif
 			*(.gnu.warning)
 			*(.rodata)
 			*(.rodata.*)
···
 		 */
 		. = ALIGN(32);
 		__start___ex_table = .;
+#ifdef CONFIG_MMU
 		*(__ex_table)
+#endif
 		__stop___ex_table = .;

 		/*
+8 -5
arch/arm/lib/Makefile
···
 lib-y	:= backtrace.o changebit.o csumipv6.o csumpartial.o \
 	   csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-	   copy_page.o delay.o findbit.o memchr.o memcpy.o \
+	   delay.o findbit.o memchr.o memcpy.o \
 	   memmove.o memset.o memzero.o setbit.o \
 	   strncpy_from_user.o strnlen_user.o \
 	   strchr.o strrchr.o \
 	   testchangebit.o testclearbit.o testsetbit.o \
-	   getuser.o putuser.o clear_user.o \
 	   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
 	   ucmpdi2.o lib1funcs.o div64.o sha1.o \
 	   io-readsb.o io-writesb.o io-readsl.o io-writesl.o

+mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
+
 # the code in uaccess.S is not preemption safe and
 # probably faster on ARMv3 only
 ifeq ($(CONFIG_PREEMPT),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
 ifneq ($(CONFIG_CPU_32v3),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
-  lib-y	+= uaccess.o
+  mmu-y	+= uaccess.o
 endif
 endif
+
+lib-$(CONFIG_MMU)	+= $(mmu-y)

 ifeq ($(CONFIG_CPU_32v3),y)
   lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
+1 -4
arch/arm/lib/backtrace.S
···
 		b	1007f

 /*
- * Fixup for LDMDB
+ * Fixup for LDMDB.  Note that this must not be in the fixup section.
  */
-		.section .fixup,"ax"
-		.align	0
 1007:		ldr	r0, =.Lbad
 		mov	r1, frame
 		bl	printk
 		ldmfd	sp!, {r4 - r8, pc}
 		.ltorg
-		.previous

 		.section __ex_table,"a"
 		.align	3
+35 -32
arch/arm/mm/Kconfig
···
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V3
-	select CPU_TLB_V3
+	select CPU_COPY_V3 if MMU
+	select CPU_TLB_V3 if MMU
 	help
 	  The ARM610 is the successor to the ARM3 processor
 	  and was produced by VLSI Technology Inc.
···
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V3
-	select CPU_TLB_V3
+	select CPU_COPY_V3 if MMU
+	select CPU_TLB_V3 if MMU
 	help
 	  A 32-bit RISC microprocessor based on the ARM7 processor core
 	  designed by Advanced RISC Machines Ltd.  The ARM710 is the
···
 	select CPU_ABRT_LV4T
 	select CPU_CACHE_V4
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WT
-	select CPU_TLB_V4WT
+	select CPU_COPY_V4WT if MMU
+	select CPU_TLB_V4WT if MMU
 	help
 	  A 32-bit RISC processor with 8kByte Cache, Write Buffer and
 	  MMU built around an ARM7TDMI core.
···
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM920T is licensed to be produced by numerous vendors,
 	  and is used in the Maverick EP9312 and the Samsung S3C2410.
···
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM922T is a version of the ARM920T, but with smaller
 	  instruction and data caches. It is used in Altera's
···
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM925T is a mix between the ARM920T and ARM926T, but with
 	  different instruction and data caches. It is used in TI's OMAP
···
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  This is a variant of the ARM920.  It has slightly different
 	  instruction sequences for cache and TLB operations.  Curiously,
···
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1020 is the 32K cached version of the ARM10 processor,
 	  with an addition of a floating-point unit.
···
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WBI if MMU
 	depends on n

 # ARM1022E
···
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB # can probably do better
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU # can probably do better
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1022E is an implementation of the ARMv5TE architecture
 	  based upon the ARM10 integer core with a 16KiB L1 Harvard cache,
···
 	select CPU_32v5
 	select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB # can probably do better
-	select CPU_TLB_V4WBI
+	select CPU_COPY_V4WB if MMU # can probably do better
+	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
 	  based upon the ARM10 integer core.
···
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
-	select CPU_COPY_V4WB
-	select CPU_TLB_V4WB
+	select CPU_COPY_V4WB if MMU
+	select CPU_TLB_V4WB if MMU
 	help
 	  The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and
 	  is available at five speeds ranging from 100 MHz to 233 MHz.
···
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WB
+	select CPU_TLB_V4WB if MMU

 # XScale
 config CPU_XSCALE
···
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WBI
+	select CPU_TLB_V4WBI if MMU

 # XScale Core Version 3
 config CPU_XSC3
···
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
-	select CPU_TLB_V4WBI
+	select CPU_TLB_V4WBI if MMU
 	select IO_36

 # ARMv6
···
 	select CPU_ABRT_EV6
 	select CPU_CACHE_V6
 	select CPU_CACHE_VIPT
-	select CPU_COPY_V6
-	select CPU_TLB_V6
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU

 # ARMv6k
 config CPU_32v6K
···
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP

 config CPU_32v4
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP

 config CPU_32v5
 	bool
-	select TLS_REG_EMUL if SMP
+	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP

 config CPU_32v6
···
 config CPU_CACHE_VIPT
 	bool

+if MMU
 # The copy-page model
 config CPU_COPY_V3
 	bool
···
 config CPU_TLB_V6
 	bool
+
+endif

 #
 # CPU supports 36-bit I/O
+8 -2
arch/arm/mm/Makefile
···
 # Makefile for the linux arm-specific parts of the memory manager.
 #

-obj-y			:= consistent.o extable.o fault-armv.o \
-			   fault.o flush.o init.o ioremap.o mmap.o \
+obj-y			:= consistent.o extable.o fault.o init.o \
+			   iomap.o
+
+obj-$(CONFIG_MMU)	+= fault-armv.o flush.o ioremap.o mmap.o \
 			   mm-armv.o
+
+ifneq ($(CONFIG_MMU),y)
+obj-y			+= nommu.o
+endif

 obj-$(CONFIG_MODULES)	+= proc-syms.o
-2
arch/arm/mm/init.c
···
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>

-#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
-
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+55
arch/arm/mm/iomap.c
(new file)
+/*
+ *  linux/arch/arm/mm/iomap.c
+ *
+ * Map IO port and PCI memory spaces so that {read,write}[bwl] can
+ * be used to access this memory.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len   = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+	if ((unsigned long)addr >= VMALLOC_START &&
+	    (unsigned long)addr < VMALLOC_END)
+		iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
-47
arch/arm/mm/ioremap.c
···
 	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
 }
 EXPORT_SYMBOL(__iounmap);
-
-#ifdef __io
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-	return __io(port);
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
-#endif
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#include <linux/ioport.h>
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len   = pci_resource_len(dev, bar);
-	unsigned long flags = pci_resource_flags(dev, bar);
-
-	if (!len || !start)
-		return NULL;
-	if (maxlen && len > maxlen)
-		len = maxlen;
-	if (flags & IORESOURCE_IO)
-		return ioport_map(start, len);
-	if (flags & IORESOURCE_MEM) {
-		if (flags & IORESOURCE_CACHEABLE)
-			return ioremap(start, len);
-		return ioremap_nocache(start, len);
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-	if ((unsigned long)addr >= VMALLOC_START &&
-	    (unsigned long)addr < VMALLOC_END)
-		iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
-#endif
+39
arch/arm/mm/nommu.c
(new file)
+/*
+ *  linux/arch/arm/mm/nommu.c
+ *
+ * ARM uCLinux supporting functions.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+void flush_dcache_page(struct page *page)
+{
+	__cpuc_flush_dcache_page(page_address(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset,
+			    size_t size, unsigned long flags)
+{
+	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
+		return NULL;
+	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
+}
+EXPORT_SYMBOL(__ioremap_pfn);
+
+void __iomem *__ioremap(unsigned long phys_addr, size_t size,
+			unsigned long flags)
+{
+	return (void __iomem *)phys_addr;
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(__iounmap);
+9
arch/arm/mm/proc-arm1020.S
···
  *
  * Copyright (C) 2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm1020_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r3, c7, c10, 4
 	mov	r1, #0xF			@ 16 segments
···
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm1020_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1020_cr1_clear
 	bic	r0, r0, r5
+9
arch/arm/mm/proc-arm1020e.S
···
  *
  * Copyright (C) 2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm1020e_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r3, c7, c10, 4
 	mov	r1, #0xF			@ 16 segments
···
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm1020e_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1020e_cr1_clear
 	bic	r0, r0, r5
+9
arch/arm/mm/proc-arm1022.S
···
  *
  * Copyright (C) 2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm1022_switch_mm)
+#ifdef CONFIG_MMU
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
···
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm1022_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm1022_cr1_clear
 	bic	r0, r0, r5
+9
arch/arm/mm/proc-arm1026.S
···
  *
  * Copyright (C) 2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm1026_switch_mm)
+#ifdef CONFIG_MMU
 	mov	r1, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 1:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
···
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm1026_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
+#endif /* CONFIG_MMU */
 	mov	pc, lr

···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
 	mcr	p15, 0, r4, c2, c0		@ load page table pointer
+#endif
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mov	r0, #4				@ explicitly disable writeback
 	mcr	p15, 7, r0, c15, c0, 0
+15
arch/arm/mm/proc-arm6_7.S
···
  * linux/arch/arm/mm/proc-arm6,7.S
  *
  * Copyright (C) 1997-2000 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
···
  */
 ENTRY(cpu_arm6_switch_mm)
 ENTRY(cpu_arm7_switch_mm)
+#ifdef CONFIG_MMU
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c0, 0		@ flush cache
 	mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 	mcr	p15, 0, r1, c5, c0, 0		@ flush TLBs
+#endif
 	mov	pc, lr

 /*
···
 	.align	5
 ENTRY(cpu_arm6_set_pte)
 ENTRY(cpu_arm7_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	movne	r2, #0

 	str	r2, [r0]			@ hardware version
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 /*
···
 ENTRY(cpu_arm7_reset)
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c0, 0		@ flush cache
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r1, c5, c0, 0		@ flush TLB
+#endif
 	mov	r1, #0x30
 	mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
 	mov	pc, r0
···
 	.type	__arm6_setup, #function
 __arm6_setup:	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0		@ flush caches on v3
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
 	mov	r0, #0x3d			@ . ..RS BLDP WCAM
 	orr	r0, r0, #0x100			@ . ..01 0011 1101
+#else
+	mov	r0, #0x3c			@ . ..RS BLDP WCA.
+#endif
 	mov	pc, lr
 	.size	__arm6_setup, . - __arm6_setup

 	.type	__arm7_setup, #function
 __arm7_setup:	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0		@ flush caches on v3
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
 	mcr	p15, 0, r0, c3, c0		@ load domain access register
 	mov	r0, #0x7d			@ . ..RS BLDP WCAM
 	orr	r0, r0, #0x100			@ . ..01 0111 1101
+#else
+	mov	r0, #0x7c			@ . ..RS BLDP WCA.
+#endif
 	mov	pc, lr
 	.size	__arm7_setup, . - __arm7_setup
+12
arch/arm/mm/proc-arm720.S
···
  * Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
  *                    Rob Scott (rscott@mtrob.fdns.net)
  * Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
  *                        out of 'proc-arm6,7.S' per RMK discussion
  * 07-25-2000 SJH	Added idle function.
  * 08-25-2000 DBS	Updated for integration of ARM Ltd version.
+ * 04-20-2004 HSC	modified for non-paged memory management mode.
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
···
  * the new.
  */
 ENTRY(cpu_arm720_switch_mm)
+#ifdef CONFIG_MMU
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
 	mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 	mcr	p15, 0, r1, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm720_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	movne	r2, #0

 	str	r2, [r0]			@ hardware version
+#endif
 	mov	pc, lr

 /*
···
 ENTRY(cpu_arm720_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate cache
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ get ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x2100			@ ..v....s........
···
 __arm710_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register
 	ldr	r5, arm710_cr1_clear
 	bic	r0, r0, r5
···
 __arm720_setup:
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ invalidate caches
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ flush TLB (v4)
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register
 	ldr	r5, arm720_cr1_clear
 	bic	r0, r0, r5
+9
arch/arm/mm/proc-arm920.S
···
  *
  * Copyright (C) 1999,2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm920_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
···
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm920_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm920_cr1_clear
 	bic	r0, r0, r5
+9
arch/arm/mm/proc-arm922.S
···
  * Copyright (C) 1999,2000 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
  * Copyright (C) 2001 Altera Corporation
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm922_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
···
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm922_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, arm922_cr1_clear
 	bic	r0, r0, r5
+10
arch/arm/mm/proc-arm925.S
···
  * Update for Linux-2.6 and cache flush improvements
  * Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
  *
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm925_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
···
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm925_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif /* CONFIG_MMU */
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif

 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mov	r0, #4				@ disable write-back on caches explicitly
+9
arch/arm/mm/proc-arm926.S
···
  *
  * Copyright (C) 1999-2001 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_arm926_switch_mm)
+#ifdef CONFIG_MMU
 	mov	ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
···
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mov	pc, lr

 /*
···
  */
 	.align	5
 ENTRY(cpu_arm926_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 #endif
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif
 	mov	pc, lr

 	__INIT
···
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
+#endif


 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+11
arch/arm/mm/proc-sa110.S
···
  * linux/arch/arm/mm/proc-sa110.S
  *
  * Copyright (C) 1997-2002 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
···
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#ifdef CONFIG_MMU
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+#endif
 	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 	bic	ip, ip, #0x000f			@ ............wcam
 	bic	ip, ip, #0x1100			@ ...i...s........
···
  */
 	.align	5
 ENTRY(cpu_sa110_switch_mm)
+#ifdef CONFIG_MMU
 	str	lr, [sp, #-4]!
 	bl	v4wb_flush_kern_cache_all	@ clears IP
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	ldr	pc, [sp], #4
+#else
+	mov	pc, lr
+#endif

 /*
  * cpu_sa110_set_pte(ptep, pte)
···
  */
 	.align	5
 ENTRY(cpu_sa110_set_pte)
+#ifdef CONFIG_MMU
 	str	r1, [r0], #-2048		@ linux version

 	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
···
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+#endif
 	mov	pc, lr

 	__INIT
···
 	mov	r10, #0
 	mcr	p15, 0, r10, c7, c7		@ invalidate I,D caches on v4
 	mcr	p15, 0, r10, c7, c10, 4		@ drain write buffer on v4
+#ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7		@ invalidate I,D TLBs on v4
+#endif
 	mrc	p15, 0, r0, c1, c0		@ get control register v4
 	ldr	r5, sa110_cr1_clear
 	bic	r0, r0, r5
+11
arch/arm/mm/proc-sa1100.S
··· 2 * linux/arch/arm/mm/proc-sa1100.S 3 * 4 * Copyright (C) 1997-2002 Russell King 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as ··· 78 mov ip, #0 79 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 80 mcr p15, 0, ip, c7, c10, 4 @ drain WB 81 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 82 mrc p15, 0, ip, c1, c0, 0 @ ctrl register 83 bic ip, ip, #0x000f @ ............wcam 84 bic ip, ip, #0x1100 @ ...i...s........ ··· 145 */ 146 .align 5 147 ENTRY(cpu_sa1100_switch_mm) 148 str lr, [sp, #-4]! 149 bl v4wb_flush_kern_cache_all @ clears IP 150 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB 151 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 153 ldr pc, [sp], #4 154 155 /* 156 * cpu_sa1100_set_pte(ptep, pte) ··· 163 */ 164 .align 5 165 ENTRY(cpu_sa1100_set_pte) 166 str r1, [r0], #-2048 @ linux version 167 168 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY ··· 185 mov r0, r0 186 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 187 mcr p15, 0, r0, c7, c10, 4 @ drain WB 188 mov pc, lr 189 190 __INIT ··· 195 mov r0, #0 196 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 197 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 198 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 199 mrc p15, 0, r0, c1, c0 @ get control register v4 200 ldr r5, sa1100_cr1_clear 201 bic r0, r0, r5
··· 2 * linux/arch/arm/mm/proc-sa1100.S 3 * 4 * Copyright (C) 1997-2002 Russell King 5 + * hacked for non-paged-MM by Hyok S. Choi, 2003. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as ··· 77 mov ip, #0 78 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 79 mcr p15, 0, ip, c7, c10, 4 @ drain WB 80 + #ifdef CONFIG_MMU 81 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 82 + #endif 83 mrc p15, 0, ip, c1, c0, 0 @ ctrl register 84 bic ip, ip, #0x000f @ ............wcam 85 bic ip, ip, #0x1100 @ ...i...s........ ··· 142 */ 143 .align 5 144 ENTRY(cpu_sa1100_switch_mm) 145 + #ifdef CONFIG_MMU 146 str lr, [sp, #-4]! 147 bl v4wb_flush_kern_cache_all @ clears IP 148 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB 149 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 150 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 151 ldr pc, [sp], #4 152 + #else 153 + mov pc, lr 154 + #endif 155 156 /* 157 * cpu_sa1100_set_pte(ptep, pte) ··· 156 */ 157 .align 5 158 ENTRY(cpu_sa1100_set_pte) 159 + #ifdef CONFIG_MMU 160 str r1, [r0], #-2048 @ linux version 161 162 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY ··· 177 mov r0, r0 178 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 179 mcr p15, 0, r0, c7, c10, 4 @ drain WB 180 + #endif 181 mov pc, lr 182 183 __INIT ··· 186 mov r0, #0 187 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 188 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 189 + #ifdef CONFIG_MMU 190 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 191 + #endif 192 mrc p15, 0, r0, c1, c0 @ get control register v4 193 ldr r5, sa1100_cr1_clear 194 bic r0, r0, r5
+7
arch/arm/mm/proc-v6.S
··· 2 * linux/arch/arm/mm/proc-v6.S 3 * 4 * Copyright (C) 2001 Deep Blue Solutions Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as ··· 89 * - we are not using split page tables 90 */ 91 ENTRY(cpu_v6_switch_mm) 92 mov r2, #0 93 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 94 #ifdef CONFIG_SMP ··· 99 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer 100 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 101 mcr p15, 0, r1, c13, c0, 1 @ set context ID 102 mov pc, lr 103 104 /* ··· 122 * 1111 0 1 1 r/w r/w 123 */ 124 ENTRY(cpu_v6_set_pte) 125 str r1, [r0], #-2048 @ linux version 126 127 bic r2, r1, #0x000003f0 ··· 149 150 str r2, [r0] 151 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 152 mov pc, lr 153 154 ··· 199 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 200 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 201 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 202 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 203 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 204 #ifdef CONFIG_SMP 205 orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable 206 #endif 207 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 208 #ifdef CONFIG_VFP 209 mrc p15, 0, r0, c1, c0, 2 210 orr r0, r0, #(0xf << 20)
··· 2 * linux/arch/arm/mm/proc-v6.S 3 * 4 * Copyright (C) 2001 Deep Blue Solutions Ltd. 5 + * Modified by Catalin Marinas for noMMU support 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as ··· 88 * - we are not using split page tables 89 */ 90 ENTRY(cpu_v6_switch_mm) 91 + #ifdef CONFIG_MMU 92 mov r2, #0 93 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 94 #ifdef CONFIG_SMP ··· 97 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer 98 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 99 mcr p15, 0, r1, c13, c0, 1 @ set context ID 100 + #endif 101 mov pc, lr 102 103 /* ··· 119 * 1111 0 1 1 r/w r/w 120 */ 121 ENTRY(cpu_v6_set_pte) 122 + #ifdef CONFIG_MMU 123 str r1, [r0], #-2048 @ linux version 124 125 bic r2, r1, #0x000003f0 ··· 145 146 str r2, [r0] 147 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 148 + #endif 149 mov pc, lr 150 151 ··· 194 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 195 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 196 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 197 + #ifdef CONFIG_MMU 198 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 199 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 200 #ifdef CONFIG_SMP 201 orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable 202 #endif 203 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 204 + #endif /* CONFIG_MMU */ 205 #ifdef CONFIG_VFP 206 mrc p15, 0, r0, c1, c0, 2 207 orr r0, r0, #(0xf << 20)
+4
include/asm-arm/bugs.h
··· 10 #ifndef __ASM_BUGS_H 11 #define __ASM_BUGS_H 12 13 extern void check_writebuffer_bugs(void); 14 15 #define check_bugs() check_writebuffer_bugs() 16 17 #endif
··· 10 #ifndef __ASM_BUGS_H 11 #define __ASM_BUGS_H 12 13 + #ifdef CONFIG_MMU 14 extern void check_writebuffer_bugs(void); 15 16 #define check_bugs() check_writebuffer_bugs() 17 + #else 18 + #define check_bugs() do { } while (0) 19 + #endif 20 21 #endif
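Note: check_bugs() is invoked from generic start-up code, so a !MMU build needs the empty definition above rather than losing the symbol. A host-side sketch of the two expansions (the check_writebuffer_bugs() body here is a placeholder, not the kernel's):

#include <stdio.h>

#ifdef CONFIG_MMU
static void check_writebuffer_bugs(void) { puts("checking write buffer"); }	/* placeholder body */
#define check_bugs() check_writebuffer_bugs()
#else
#define check_bugs() do { } while (0)		/* !MMU: nothing to check */
#endif

int main(void)
{
	check_bugs();	/* same call site works for both configurations */
	return 0;
}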
+7
include/asm-arm/domain.h
··· 50 #define domain_val(dom,type) ((type) << (2*(dom))) 51 52 #ifndef __ASSEMBLY__ 53 #define set_domain(x) \ 54 do { \ 55 __asm__ __volatile__( \ ··· 67 thread->cpu_domain = domain | domain_val(dom, type); \ 68 set_domain(thread->cpu_domain); \ 69 } while (0) 70 71 #endif 72 #endif /* !__ASSEMBLY__ */
··· 50 #define domain_val(dom,type) ((type) << (2*(dom))) 51 52 #ifndef __ASSEMBLY__ 53 + 54 + #ifdef CONFIG_MMU 55 #define set_domain(x) \ 56 do { \ 57 __asm__ __volatile__( \ ··· 65 thread->cpu_domain = domain | domain_val(dom, type); \ 66 set_domain(thread->cpu_domain); \ 67 } while (0) 68 + 69 + #else 70 + #define set_domain(x) do { } while (0) 71 + #define modify_domain(dom,type) do { } while (0) 72 + #endif 73 74 #endif 75 #endif /* !__ASSEMBLY__ */
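Note: stubbing set_domain()/modify_domain() keeps MMU-only callers such as set_fs() unconditional in both configurations. A minimal host-side sketch of that caller pattern, with a printf standing in for the real domain-register write and the DOMAIN_* constants mirroring this header:

#include <stdio.h>

/* constants as defined earlier in this header */
#define DOMAIN_KERNEL	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

#ifdef CONFIG_MMU
/* stand-in for the inline asm that rewrites the domain access control register */
#define modify_domain(dom, type) \
	printf("domain %d set to type %d\n", (dom), (type))
#else
#define modify_domain(dom, type) do { } while (0)	/* no domains without an MMU */
#endif

/* caller pattern, cf. set_fs() in uaccess.h: identical source for both builds */
static void set_fs_like(unsigned long fs)
{
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

int main(void)
{
	set_fs_like(0);		/* KERNEL_DS */
	return 0;
}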
+4 -5
include/asm-arm/mach/map.h
··· 16 unsigned int type; 17 }; 18 19 - struct meminfo; 20 - 21 #define MT_DEVICE 0 22 #define MT_CACHECLEAN 1 23 #define MT_MINICLEAN 2 ··· 26 #define MT_IXP2000_DEVICE 7 27 #define MT_NONSHARED_DEVICE 8 28 29 - extern void create_memmap_holes(struct meminfo *); 30 - extern void memtable_init(struct meminfo *); 31 extern void iotable_init(struct map_desc *, int); 32 - extern void setup_io_desc(void);
··· 16 unsigned int type; 17 }; 18 19 #define MT_DEVICE 0 20 #define MT_CACHECLEAN 1 21 #define MT_MINICLEAN 2 ··· 28 #define MT_IXP2000_DEVICE 7 29 #define MT_NONSHARED_DEVICE 8 30 31 + #ifdef CONFIG_MMU 32 extern void iotable_init(struct map_desc *, int); 33 + #else 34 + #define iotable_init(map,num) do { } while (0) 35 + #endif
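Note: with iotable_init() reduced to an empty statement on !MMU, a machine's ->map_io code compiles unchanged. A standalone sketch of that calling convention; the board mapping table below is hypothetical:

#include <stdio.h>

struct map_desc {
	unsigned long virtual;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
};

#ifdef CONFIG_MMU
static void iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)	/* the real function builds section mappings here */
		printf("map pfn %#lx at va %#lx\n", io_desc[i].pfn, io_desc[i].virtual);
}
#else
#define iotable_init(map, num) do { } while (0)	/* nothing to map without an MMU */
#endif

/* hypothetical static I/O mapping, as a machine's ->map_io callback would pass */
static struct map_desc board_io_desc[] = {
	{ .virtual = 0xf8000000, .pfn = 0x80000, .length = 0x00100000, .type = 0 },
};

int main(void)
{
	iotable_init(board_io_desc, 1);
	return 0;
}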
+57 -18
include/asm-arm/memory.h
··· 2 * linux/include/asm-arm/memory.h 3 * 4 * Copyright (C) 2000-2002 Russell King 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as ··· 27 #include <asm/arch/memory.h> 28 #include <asm/sizes.h> 29 30 #ifndef TASK_SIZE 31 /* 32 * TASK_SIZE - the maximum size of a user space task. ··· 49 #ifndef PAGE_OFFSET 50 #define PAGE_OFFSET UL(0xc0000000) 51 #endif 52 53 /* 54 * Size of DMA-consistent memory region. Must be multiple of 2M, ··· 127 */ 128 #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) 129 #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 130 - 131 - /* 132 - * The module space lives between the addresses given by TASK_SIZE 133 - * and PAGE_OFFSET - it must be within 32MB of the kernel text. 134 - */ 135 - #define MODULE_END (PAGE_OFFSET) 136 - #define MODULE_START (MODULE_END - 16*1048576) 137 - 138 - #if TASK_SIZE > MODULE_START 139 - #error Top of user space clashes with start of module space 140 - #endif 141 - 142 - /* 143 - * The XIP kernel gets mapped at the bottom of the module vm area. 144 - * Since we use sections to map it, this macro replaces the physical address 145 - * with its virtual address while keeping offset from the base section. 146 - */ 147 - #define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff)) 148 149 #ifndef __ASSEMBLY__ 150
··· 2 * linux/include/asm-arm/memory.h 3 * 4 * Copyright (C) 2000-2002 Russell King 5 + * modification for nommu, Hyok S. Choi, 2004 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as ··· 26 #include <asm/arch/memory.h> 27 #include <asm/sizes.h> 28 29 + #ifdef CONFIG_MMU 30 + 31 #ifndef TASK_SIZE 32 /* 33 * TASK_SIZE - the maximum size of a user space task. ··· 46 #ifndef PAGE_OFFSET 47 #define PAGE_OFFSET UL(0xc0000000) 48 #endif 49 + 50 + /* 51 + * The module space lives between the addresses given by TASK_SIZE 52 + * and PAGE_OFFSET - it must be within 32MB of the kernel text. 53 + */ 54 + #define MODULE_END (PAGE_OFFSET) 55 + #define MODULE_START (MODULE_END - 16*1048576) 56 + 57 + #if TASK_SIZE > MODULE_START 58 + #error Top of user space clashes with start of module space 59 + #endif 60 + 61 + /* 62 + * The XIP kernel gets mapped at the bottom of the module vm area. 63 + * Since we use sections to map it, this macro replaces the physical address 64 + * with its virtual address while keeping offset from the base section. 65 + */ 66 + #define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff)) 67 + 68 + #else /* CONFIG_MMU */ 69 + 70 + /* 71 + * The limitation of user task size can grow up to the end of free ram region. 72 + * It is difficult to define and perhaps will never meet the original meaning 73 + * of this define that was meant to. 74 + * Fortunately, there is no reference for this in noMMU mode, for now. 75 + */ 76 + #ifndef TASK_SIZE 77 + #define TASK_SIZE (CONFIG_DRAM_SIZE) 78 + #endif 79 + 80 + #ifndef TASK_UNMAPPED_BASE 81 + #define TASK_UNMAPPED_BASE UL(0x00000000) 82 + #endif 83 + 84 + #ifndef PHYS_OFFSET 85 + #define PHYS_OFFSET (CONFIG_DRAM_BASE) 86 + #endif 87 + 88 + #ifndef END_MEM 89 + #define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) 90 + #endif 91 + 92 + #ifndef PAGE_OFFSET 93 + #define PAGE_OFFSET (PHYS_OFFSET) 94 + #endif 95 + 96 + /* 97 + * The module can be at any place in ram in nommu mode. 98 + */ 99 + #define MODULE_END (END_MEM) 100 + #define MODULE_START (PHYS_OFFSET) 101 + 102 + #endif /* !CONFIG_MMU */ 103 104 /* 105 * Size of DMA-consistent memory region. Must be multiple of 2M, ··· 70 */ 71 #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) 72 #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 73 74 #ifndef __ASSEMBLY__ 75
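Note: a worked evaluation of the new !CONFIG_MMU layout, assuming a hypothetical board with DRAM at 0x08000000 and 64MB of it (CONFIG_DRAM_BASE/CONFIG_DRAM_SIZE come from arch/arm/Kconfig-nommu):

#include <stdio.h>

/* hypothetical platform values; a real board sets these through Kconfig */
#define CONFIG_DRAM_BASE	0x08000000UL
#define CONFIG_DRAM_SIZE	0x04000000UL	/* 64MB */

/* the !CONFIG_MMU definitions from the hunk above, reproduced */
#define TASK_SIZE	(CONFIG_DRAM_SIZE)
#define PHYS_OFFSET	(CONFIG_DRAM_BASE)
#define END_MEM		(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
#define PAGE_OFFSET	(PHYS_OFFSET)
#define MODULE_END	(END_MEM)
#define MODULE_START	(PHYS_OFFSET)

int main(void)
{
	printf("PHYS_OFFSET  = %#lx\n", PHYS_OFFSET);	/* 0x8000000: kernel lives at the DRAM base */
	printf("PAGE_OFFSET  = %#lx\n", PAGE_OFFSET);	/* identical to PHYS_OFFSET, no remapping */
	printf("END_MEM      = %#lx\n", END_MEM);	/* 0xc000000 */
	printf("TASK_SIZE    = %#lx\n", TASK_SIZE);	/* bounded only by the amount of DRAM */
	printf("MODULE_START = %#lx ... MODULE_END = %#lx\n", MODULE_START, MODULE_END);
	return 0;
}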
+16
include/asm-arm/mmu.h
··· 1 #ifndef __ARM_MMU_H 2 #define __ARM_MMU_H 3 4 typedef struct { 5 #if __LINUX_ARM_ARCH__ >= 6 6 unsigned int id; ··· 13 #define ASID(mm) ((mm)->context.id & 255) 14 #else 15 #define ASID(mm) (0) 16 #endif 17 18 #endif
··· 1 #ifndef __ARM_MMU_H 2 #define __ARM_MMU_H 3 4 + #ifdef CONFIG_MMU 5 + 6 typedef struct { 7 #if __LINUX_ARM_ARCH__ >= 6 8 unsigned int id; ··· 11 #define ASID(mm) ((mm)->context.id & 255) 12 #else 13 #define ASID(mm) (0) 14 + #endif 15 + 16 + #else 17 + 18 + /* 19 + * From nommu.h: 20 + * Copyright (C) 2002, David McCullough <davidm@snapgear.com> 21 + * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> 22 + */ 23 + typedef struct { 24 + struct vm_list_struct *vmlist; 25 + unsigned long end_brk; 26 + } mm_context_t; 27 + 28 #endif 29 30 #endif
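Note: without page tables the per-mm context is just the region list and brk limit kept by the nommu core. The sketch below only illustrates the shape of those fields; the vm_area/vm_list definitions and the walk are simplified stand-ins, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct vm_area_struct { unsigned long vm_start, vm_end; };	/* simplified stand-in */

struct vm_list_struct {
	struct vm_list_struct	*next;
	struct vm_area_struct	*vma;
};

typedef struct {
	struct vm_list_struct	*vmlist;	/* per-mm list of regions */
	unsigned long		end_brk;	/* upper bound for brk() */
} mm_context_t;

static void dump_regions(const mm_context_t *ctx)
{
	const struct vm_list_struct *vml;

	for (vml = ctx->vmlist; vml; vml = vml->next)
		printf("region %#lx-%#lx\n", vml->vma->vm_start, vml->vma->vm_end);
}

int main(void)
{
	struct vm_area_struct text = { 0x08008000UL, 0x08020000UL };
	struct vm_list_struct node = { NULL, &text };
	mm_context_t ctx = { &node, 0x08100000UL };

	dump_regions(&ctx);
	printf("brk limit: %#lx\n", ctx.end_brk);
	return 0;
}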
+2
include/asm-arm/mmu_context.h
··· 82 switch_mm(struct mm_struct *prev, struct mm_struct *next, 83 struct task_struct *tsk) 84 { 85 unsigned int cpu = smp_processor_id(); 86 87 if (prev != next) { ··· 92 if (cache_is_vivt()) 93 cpu_clear(cpu, prev->cpu_vm_mask); 94 } 95 } 96 97 #define deactivate_mm(tsk,mm) do { } while (0)
··· 82 switch_mm(struct mm_struct *prev, struct mm_struct *next, 83 struct task_struct *tsk) 84 { 85 + #ifdef CONFIG_MMU 86 unsigned int cpu = smp_processor_id(); 87 88 if (prev != next) { ··· 91 if (cache_is_vivt()) 92 cpu_clear(cpu, prev->cpu_vm_mask); 93 } 94 + #endif 95 } 96 97 #define deactivate_mm(tsk,mm) do { } while (0)
+51
include/asm-arm/page-nommu.h
···
··· 1 + /* 2 + * linux/include/asm-arm/page-nommu.h 3 + * 4 + * Copyright (C) 2004 Hyok S. Choi 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef _ASMARM_PAGE_NOMMU_H 11 + #define _ASMARM_PAGE_NOMMU_H 12 + 13 + #if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13 14 + #define KTHREAD_SIZE (8192) 15 + #else 16 + #define KTHREAD_SIZE PAGE_SIZE 17 + #endif 18 + 19 + #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 20 + #define free_user_page(page, addr) free_page(addr) 21 + 22 + #define clear_page(page) memset((page), 0, PAGE_SIZE) 23 + #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) 24 + 25 + #define clear_user_page(page, vaddr, pg) clear_page(page) 26 + #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 27 + 28 + /* 29 + * These are used to make use of C type-checking.. 30 + */ 31 + typedef unsigned long pte_t; 32 + typedef unsigned long pmd_t; 33 + typedef unsigned long pgd_t[2]; 34 + typedef unsigned long pgprot_t; 35 + 36 + #define pte_val(x) (x) 37 + #define pmd_val(x) (x) 38 + #define pgd_val(x) ((x)[0]) 39 + #define pgprot_val(x) (x) 40 + 41 + #define __pte(x) (x) 42 + #define __pmd(x) (x) 43 + #define __pgprot(x) (x) 44 + 45 + /* to align the pointer to the (next) page boundary */ 46 + #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 47 + 48 + extern unsigned long memory_start; 49 + extern unsigned long memory_end; 50 + 51 + #endif
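Note: the nommu page-table types collapse to plain integers with identity accessors. A short host-side check of the accessors and of PAGE_ALIGN(), assuming 4K pages (PAGE_SHIFT 12):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* nommu versions from the new header: plain integers, identity accessors */
typedef unsigned long pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)
#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	pte_t pte = __pte(0x08048061UL);		/* arbitrary example value */

	printf("pte_val(pte) = %#lx\n", pte_val(pte));	/* round-trips unchanged */
	printf("PAGE_ALIGN(%#lx) = %#lx\n", 0x08048061UL, PAGE_ALIGN(0x08048061UL));	/* 0x8049000 */
	return 0;
}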
+8
include/asm-arm/page.h
··· 23 24 #ifndef __ASSEMBLY__ 25 26 #include <asm/glue.h> 27 28 /* ··· 176 177 /* the upper-most page table pointer */ 178 extern pmd_t *top_pmd; 179 180 #include <asm/memory.h> 181
··· 23 24 #ifndef __ASSEMBLY__ 25 26 + #ifndef CONFIG_MMU 27 + 28 + #include "page-nommu.h" 29 + 30 + #else 31 + 32 #include <asm/glue.h> 33 34 /* ··· 170 171 /* the upper-most page table pointer */ 172 extern pmd_t *top_pmd; 173 + 174 + #endif /* CONFIG_MMU */ 175 176 #include <asm/memory.h> 177
+6 -2
include/asm-arm/pgalloc.h
··· 16 #include <asm/cacheflush.h> 17 #include <asm/tlbflush.h> 18 19 #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) 20 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) 21 ··· 35 36 #define pgd_alloc(mm) get_pgd_slow(mm) 37 #define pgd_free(pgd) free_pgd_slow(pgd) 38 - 39 - #define check_pgt_cache() do { } while (0) 40 41 /* 42 * Allocate one PTE table. ··· 127 { 128 __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); 129 } 130 131 #endif
··· 16 #include <asm/cacheflush.h> 17 #include <asm/tlbflush.h> 18 19 + #define check_pgt_cache() do { } while (0) 20 + 21 + #ifdef CONFIG_MMU 22 + 23 #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) 24 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) 25 ··· 31 32 #define pgd_alloc(mm) get_pgd_slow(mm) 33 #define pgd_free(pgd) free_pgd_slow(pgd) 34 35 /* 36 * Allocate one PTE table. ··· 125 { 126 __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); 127 } 128 + 129 + #endif /* CONFIG_MMU */ 130 131 #endif
+123
include/asm-arm/pgtable-nommu.h
···
··· 1 + /* 2 + * linux/include/asm-arm/pgtable-nommu.h 3 + * 4 + * Copyright (C) 1995-2002 Russell King 5 + * Copyright (C) 2004 Hyok S. Choi 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + #ifndef _ASMARM_PGTABLE_NOMMU_H 12 + #define _ASMARM_PGTABLE_NOMMU_H 13 + 14 + #ifndef __ASSEMBLY__ 15 + 16 + #include <linux/config.h> 17 + #include <linux/slab.h> 18 + #include <asm/processor.h> 19 + #include <asm/page.h> 20 + #include <asm/io.h> 21 + 22 + /* 23 + * Trivial page table functions. 24 + */ 25 + #define pgd_present(pgd) (1) 26 + #define pgd_none(pgd) (0) 27 + #define pgd_bad(pgd) (0) 28 + #define pgd_clear(pgdp) 29 + #define kern_addr_valid(addr) (1) 30 + #define pmd_offset(a, b) ((void *)0) 31 + /* FIXME */ 32 + /* 33 + * PMD_SHIFT determines the size of the area a second-level page table can map 34 + * PGDIR_SHIFT determines what a third-level page table entry can map 35 + */ 36 + #define PGDIR_SHIFT 21 37 + 38 + #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 39 + #define PGDIR_MASK (~(PGDIR_SIZE-1)) 40 + /* FIXME */ 41 + 42 + #define PAGE_NONE __pgprot(0) 43 + #define PAGE_SHARED __pgprot(0) 44 + #define PAGE_COPY __pgprot(0) 45 + #define PAGE_READONLY __pgprot(0) 46 + #define PAGE_KERNEL __pgprot(0) 47 + 48 + //extern void paging_init(struct meminfo *, struct machine_desc *); 49 + #define swapper_pg_dir ((pgd_t *) 0) 50 + 51 + #define __swp_type(x) (0) 52 + #define __swp_offset(x) (0) 53 + #define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) 54 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 55 + #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 56 + 57 + 58 + typedef pte_t *pte_addr_t; 59 + 60 + static inline int pte_file(pte_t pte) { return 0; } 61 + 62 + /* 63 + * ZERO_PAGE is a global shared page that is always zero: used 64 + * for zero-mapped memory areas etc.. 65 + */ 66 + #define ZERO_PAGE(vaddr) (virt_to_page(0)) 67 + 68 + /* 69 + * Mark the prot value as uncacheable and unbufferable. 70 + */ 71 + #define pgprot_noncached(prot) __pgprot(0) 72 + #define pgprot_writecombine(prot) __pgprot(0) 73 + 74 + 75 + /* 76 + * These would be in other places but having them here reduces the diffs. 77 + */ 78 + extern unsigned int kobjsize(const void *objp); 79 + extern int is_in_rom(unsigned long); 80 + 81 + /* 82 + * No page table caches to initialise. 83 + */ 84 + #define pgtable_cache_init() do { } while (0) 85 + #define io_remap_page_range remap_page_range 86 + #define io_remap_pfn_range remap_pfn_range 87 + 88 + #define MK_IOSPACE_PFN(space, pfn) (pfn) 89 + #define GET_IOSPACE(pfn) 0 90 + #define GET_PFN(pfn) (pfn) 91 + 92 + 93 + /* 94 + * All 32bit addresses are effectively valid for vmalloc... 95 + * Sort of meaningless for non-VM targets. 96 + */ 97 + #define VMALLOC_START 0 98 + #define VMALLOC_END 0xffffffff 99 + 100 + #define FIRST_USER_ADDRESS (0) 101 + 102 + #else 103 + 104 + /* 105 + * dummy tlb and user structures. 106 + */ 107 + #define v3_tlb_fns (0) 108 + #define v4_tlb_fns (0) 109 + #define v4wb_tlb_fns (0) 110 + #define v4wbi_tlb_fns (0) 111 + #define v6_tlb_fns (0) 112 + 113 + #define v3_user_fns (0) 114 + #define v4_user_fns (0) 115 + #define v4_mc_user_fns (0) 116 + #define v4wb_user_fns (0) 117 + #define v4wt_user_fns (0) 118 + #define v6_user_fns (0) 119 + #define xscale_mc_user_fns (0) 120 + 121 + #endif /*__ASSEMBLY__*/ 122 + 123 + #endif /* _ASMARM_PGTABLE_H */
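Note: with every pgd reported present and every kernel address valid, generic page-table walkers degenerate to straight-line code. A minimal sketch of the always-true predicates and the flat vmalloc range (the if-chain is illustrative, not taken from a particular caller):

#include <stdio.h>

/* nommu stubs from the new header: there is exactly one flat address space */
#define pgd_none(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_bad(pgd)		(0)
#define kern_addr_valid(addr)	(1)
#define VMALLOC_START		0
#define VMALLOC_END		0xffffffff

int main(void)
{
	unsigned long addr = 0x08000000UL;	/* arbitrary kernel address */

	/* generic walkers that test these predicates simply fall through */
	if (kern_addr_valid(addr) && !pgd_none(0) && pgd_present(0) && !pgd_bad(0))
		printf("%#lx is \"mapped\" - every address is, without an MMU\n", addr);

	printf("vmalloc range: %#lx-%#lx\n",
	       (unsigned long)VMALLOC_START, (unsigned long)VMALLOC_END);
	return 0;
}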
+9 -1
include/asm-arm/pgtable.h
··· 11 #define _ASMARM_PGTABLE_H 12 13 #include <asm-generic/4level-fixup.h> 14 15 #include <asm/memory.h> 16 - #include <asm/proc-fns.h> 17 #include <asm/arch/vmalloc.h> 18 19 /* ··· 383 #define pgtable_cache_init() do { } while (0) 384 385 #endif /* !__ASSEMBLY__ */ 386 387 #endif /* _ASMARM_PGTABLE_H */
··· 11 #define _ASMARM_PGTABLE_H 12 13 #include <asm-generic/4level-fixup.h> 14 + #include <asm/proc-fns.h> 15 + 16 + #ifndef CONFIG_MMU 17 + 18 + #include "pgtable-nommu.h" 19 + 20 + #else 21 22 #include <asm/memory.h> 23 #include <asm/arch/vmalloc.h> 24 25 /* ··· 377 #define pgtable_cache_init() do { } while (0) 378 379 #endif /* !__ASSEMBLY__ */ 380 + 381 + #endif /* CONFIG_MMU */ 382 383 #endif /* _ASMARM_PGTABLE_H */
+4
include/asm-arm/proc-fns.h
··· 165 166 #include <asm/memory.h> 167 168 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) 169 170 #define cpu_get_pgd() \ ··· 177 pg &= ~0x3fff; \ 178 (pgd_t *)phys_to_virt(pg); \ 179 }) 180 181 #endif /* __ASSEMBLY__ */ 182 #endif /* __KERNEL__ */
··· 165 166 #include <asm/memory.h> 167 168 + #ifdef CONFIG_MMU 169 + 170 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) 171 172 #define cpu_get_pgd() \ ··· 175 pg &= ~0x3fff; \ 176 (pgd_t *)phys_to_virt(pg); \ 177 }) 178 + 179 + #endif 180 181 #endif /* __ASSEMBLY__ */ 182 #endif /* __KERNEL__ */
+87 -52
include/asm-arm/uaccess.h
··· 41 extern int fixup_exception(struct pt_regs *regs); 42 43 /* 44 * Note that this is actually 0x1,0000,0000 45 */ 46 #define KERNEL_DS 0x00000000 47 - #define USER_DS TASK_SIZE 48 - 49 #define get_ds() (KERNEL_DS) 50 #define get_fs() (current_thread_info()->addr_limit) 51 52 - static inline void set_fs (mm_segment_t fs) 53 { 54 current_thread_info()->addr_limit = fs; 55 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); ··· 84 : "cc"); \ 85 flag; }) 86 87 - #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 88 - 89 /* 90 * Single-value transfer routines. They automatically use the right 91 * size if we just have the right pointer type. Note that the functions ··· 94 * fixup code, but there are a few places where it intrudes on the 95 * main code path. When we only write to user space, there is no 96 * problem. 97 - * 98 - * The "__xxx" versions of the user access functions do not verify the 99 - * address space - it must have been done previously with a separate 100 - * "access_ok()" call. 101 - * 102 - * The "xxx_error" versions set the third argument to EFAULT if an 103 - * error occurs, and leave it unchanged on success. Note that these 104 - * versions are void (ie, don't return a value as such). 105 */ 106 - 107 extern int __get_user_1(void *); 108 extern int __get_user_2(void *); 109 extern int __get_user_4(void *); 110 - extern int __get_user_bad(void); 111 112 #define __get_user_x(__r2,__p,__e,__s,__i...) \ 113 __asm__ __volatile__ ( \ ··· 128 __e; \ 129 }) 130 131 #define __get_user(x,ptr) \ 132 ({ \ 133 long __gu_err = 0; \ ··· 276 : "+r" (err), "=&r" (x) \ 277 : "r" (addr), "i" (-EFAULT) \ 278 : "cc") 279 - 280 - extern int __put_user_1(void *, unsigned int); 281 - extern int __put_user_2(void *, unsigned int); 282 - extern int __put_user_4(void *, unsigned int); 283 - extern int __put_user_8(void *, unsigned long long); 284 - extern int __put_user_bad(void); 285 - 286 - #define __put_user_x(__r2,__p,__e,__s) \ 287 - __asm__ __volatile__ ( \ 288 - __asmeq("%0", "r0") __asmeq("%2", "r2") \ 289 - "bl __put_user_" #__s \ 290 - : "=&r" (__e) \ 291 - : "0" (__p), "r" (__r2) \ 292 - : "ip", "lr", "cc") 293 - 294 - #define put_user(x,p) \ 295 - ({ \ 296 - const register typeof(*(p)) __r2 asm("r2") = (x); \ 297 - const register typeof(*(p)) __user *__p asm("r0") = (p);\ 298 - register int __e asm("r0"); \ 299 - switch (sizeof(*(__p))) { \ 300 - case 1: \ 301 - __put_user_x(__r2, __p, __e, 1); \ 302 - break; \ 303 - case 2: \ 304 - __put_user_x(__r2, __p, __e, 2); \ 305 - break; \ 306 - case 4: \ 307 - __put_user_x(__r2, __p, __e, 4); \ 308 - break; \ 309 - case 8: \ 310 - __put_user_x(__r2, __p, __e, 8); \ 311 - break; \ 312 - default: __e = __put_user_bad(); break; \ 313 - } \ 314 - __e; \ 315 - }) 316 317 #define __put_user(x,ptr) \ 318 ({ \ ··· 382 : "cc") 383 384 385 extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n); 386 extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n); 387 extern unsigned long __clear_user(void __user *addr, unsigned long n); 388 extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count); 389 extern unsigned long __strnlen_user(const char __user *s, long n); 390
··· 41 extern int fixup_exception(struct pt_regs *regs); 42 43 /* 44 + * These two are intentionally not defined anywhere - if the kernel 45 + * code generates any references to them, that's a bug. 46 + */ 47 + extern int __get_user_bad(void); 48 + extern int __put_user_bad(void); 49 + 50 + /* 51 * Note that this is actually 0x1,0000,0000 52 */ 53 #define KERNEL_DS 0x00000000 54 #define get_ds() (KERNEL_DS) 55 + 56 + #ifdef CONFIG_MMU 57 + 58 + #define USER_DS TASK_SIZE 59 #define get_fs() (current_thread_info()->addr_limit) 60 61 + static inline void set_fs(mm_segment_t fs) 62 { 63 current_thread_info()->addr_limit = fs; 64 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); ··· 75 : "cc"); \ 76 flag; }) 77 78 /* 79 * Single-value transfer routines. They automatically use the right 80 * size if we just have the right pointer type. Note that the functions ··· 87 * fixup code, but there are a few places where it intrudes on the 88 * main code path. When we only write to user space, there is no 89 * problem. 90 */ 91 extern int __get_user_1(void *); 92 extern int __get_user_2(void *); 93 extern int __get_user_4(void *); 94 95 #define __get_user_x(__r2,__p,__e,__s,__i...) \ 96 __asm__ __volatile__ ( \ ··· 131 __e; \ 132 }) 133 134 + extern int __put_user_1(void *, unsigned int); 135 + extern int __put_user_2(void *, unsigned int); 136 + extern int __put_user_4(void *, unsigned int); 137 + extern int __put_user_8(void *, unsigned long long); 138 + 139 + #define __put_user_x(__r2,__p,__e,__s) \ 140 + __asm__ __volatile__ ( \ 141 + __asmeq("%0", "r0") __asmeq("%2", "r2") \ 142 + "bl __put_user_" #__s \ 143 + : "=&r" (__e) \ 144 + : "0" (__p), "r" (__r2) \ 145 + : "ip", "lr", "cc") 146 + 147 + #define put_user(x,p) \ 148 + ({ \ 149 + const register typeof(*(p)) __r2 asm("r2") = (x); \ 150 + const register typeof(*(p)) __user *__p asm("r0") = (p);\ 151 + register int __e asm("r0"); \ 152 + switch (sizeof(*(__p))) { \ 153 + case 1: \ 154 + __put_user_x(__r2, __p, __e, 1); \ 155 + break; \ 156 + case 2: \ 157 + __put_user_x(__r2, __p, __e, 2); \ 158 + break; \ 159 + case 4: \ 160 + __put_user_x(__r2, __p, __e, 4); \ 161 + break; \ 162 + case 8: \ 163 + __put_user_x(__r2, __p, __e, 8); \ 164 + break; \ 165 + default: __e = __put_user_bad(); break; \ 166 + } \ 167 + __e; \ 168 + }) 169 + 170 + #else /* CONFIG_MMU */ 171 + 172 + /* 173 + * uClinux has only one addr space, so has simplified address limits. 174 + */ 175 + #define USER_DS KERNEL_DS 176 + 177 + #define segment_eq(a,b) (1) 178 + #define __addr_ok(addr) (1) 179 + #define __range_ok(addr,size) (0) 180 + #define get_fs() (KERNEL_DS) 181 + 182 + static inline void set_fs(mm_segment_t fs) 183 + { 184 + } 185 + 186 + #define get_user(x,p) __get_user(x,p) 187 + #define put_user(x,p) __put_user(x,p) 188 + 189 + #endif /* CONFIG_MMU */ 190 + 191 + #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 192 + 193 + /* 194 + * The "__xxx" versions of the user access functions do not verify the 195 + * address space - it must have been done previously with a separate 196 + * "access_ok()" call. 197 + * 198 + * The "xxx_error" versions set the third argument to EFAULT if an 199 + * error occurs, and leave it unchanged on success. Note that these 200 + * versions are void (ie, don't return a value as such). 
201 + */ 202 #define __get_user(x,ptr) \ 203 ({ \ 204 long __gu_err = 0; \ ··· 211 : "+r" (err), "=&r" (x) \ 212 : "r" (addr), "i" (-EFAULT) \ 213 : "cc") 214 215 #define __put_user(x,ptr) \ 216 ({ \ ··· 354 : "cc") 355 356 357 + #ifdef CONFIG_MMU 358 extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n); 359 extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n); 360 extern unsigned long __clear_user(void __user *addr, unsigned long n); 361 + #else 362 + #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) 363 + #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) 364 + #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) 365 + #endif 366 + 367 extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count); 368 extern unsigned long __strnlen_user(const char __user *s, long n); 369
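Note: the net effect of the uaccess split on uClinux is that kernel and user share one address space, so access_ok() always passes and the copy helpers become plain memcpy()/memset() reporting zero bytes left. A host-side sketch with the __user/__force annotations dropped:

#include <stdio.h>
#include <string.h>

/* nommu definitions from this hunk (host-side sketch, __user/__force dropped) */
#define __range_ok(addr, size)		(0)
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
#define __copy_from_user(to, from, n)	(memcpy(to, from, n), 0)
#define __copy_to_user(to, from, n)	(memcpy(to, from, n), 0)
#define __clear_user(addr, n)		(memset(addr, 0, n), 0)

int main(void)
{
	char user_buf[16] = "hello, uclinux";
	char kbuf[16];

	/* kernel and "user" share one address space: the check always succeeds... */
	if (access_ok(0, user_buf, sizeof(user_buf)))
		__copy_from_user(kbuf, user_buf, sizeof(kbuf));	/* ...and the copy is a plain memcpy */

	printf("%s\n", kbuf);

	__clear_user(user_buf, sizeof(user_buf));	/* memset, returns 0 bytes "not cleared" */
	return 0;
}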