Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: nommu support

Add support for !CONFIG_MMU setups.

Signed-off-by: Johannes Weiner <jw@emlix.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

Authored by Johannes Weiner and committed by Chris Zankel.
e5083a63 7789f89a

+169 -75
+7 -3
arch/xtensa/include/asm/cacheflush.h
··· 65 65 # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s) 66 66 #endif 67 67 68 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) 68 + #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) 69 69 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); 70 + #else 71 + static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, 72 + unsigned long phys) { } 70 73 #endif 71 - #if (ICACHE_WAY_SIZE > PAGE_SIZE) 74 + #if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE) 72 75 extern void __invalidate_icache_page_alias(unsigned long, unsigned long); 73 76 #else 74 - # define __invalidate_icache_page_alias(v,p) do { } while(0) 77 + static inline void __invalidate_icache_page_alias(unsigned long virt, 78 + unsigned long phys) { } 75 79 #endif 76 80 77 81 /*
+2 -1
arch/xtensa/include/asm/dma.h
··· 44 44 * the value desired). 45 45 */ 46 46 47 + #ifndef MAX_DMA_ADDRESS 47 48 #define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1) 48 - 49 + #endif 49 50 50 51 /* Reserve and release a DMA channel */ 51 52 extern int request_dma(unsigned int dmanr, const char * device_id);
+8 -1
arch/xtensa/include/asm/io.h
··· 69 69 70 70 static inline void *ioremap(unsigned long offset, unsigned long size) 71 71 { 72 + #ifdef CONFIG_MMU 72 73 if (offset >= XCHAL_KIO_PADDR 73 74 && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE) 74 75 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR); 75 - 76 76 else 77 77 BUG(); 78 + #else 79 + return (void *)offset; 80 + #endif 78 81 } 79 82 80 83 static inline void *ioremap_nocache(unsigned long offset, unsigned long size) 81 84 { 85 + #ifdef CONFIG_MMU 82 86 if (offset >= XCHAL_KIO_PADDR 83 87 && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE) 84 88 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR); 85 89 else 86 90 BUG(); 91 + #else 92 + return (void *)offset; 93 + #endif 87 94 } 88 95 89 96 static inline void iounmap(void *addr)
+5
arch/xtensa/include/asm/mmu.h
··· 11 11 #ifndef _XTENSA_MMU_H 12 12 #define _XTENSA_MMU_H 13 13 14 + #ifndef CONFIG_MMU 15 + #include <asm/nommu.h> 16 + #else 17 + 14 18 /* Default "unsigned long" context */ 15 19 typedef unsigned long mm_context_t; 16 20 21 + #endif /* CONFIG_MMU */ 17 22 #endif /* _XTENSA_MMU_H */
+5
arch/xtensa/include/asm/mmu_context.h
··· 13 13 #ifndef _XTENSA_MMU_CONTEXT_H 14 14 #define _XTENSA_MMU_CONTEXT_H 15 15 16 + #ifndef CONFIG_MMU 17 + #include <asm/nommu_context.h> 18 + #else 19 + 16 20 #include <linux/stringify.h> 17 21 #include <linux/sched.h> 18 22 ··· 137 133 138 134 } 139 135 136 + #endif /* CONFIG_MMU */ 140 137 #endif /* _XTENSA_MMU_CONTEXT_H */
+3
arch/xtensa/include/asm/nommu.h
··· 1 + typedef struct { 2 + unsigned long end_brk; 3 + } mm_context_t;
+25
arch/xtensa/include/asm/nommu_context.h
··· 1 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 2 + { 3 + } 4 + 5 + static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 6 + { 7 + return 0; 8 + } 9 + 10 + static inline void destroy_context(struct mm_struct *mm) 11 + { 12 + } 13 + 14 + static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) 15 + { 16 + } 17 + 18 + static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 19 + struct task_struct *tsk) 20 + { 21 + } 22 + 23 + static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm) 24 + { 25 + }
+8 -1
arch/xtensa/include/asm/page.h
··· 33 33 #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) 34 34 #define PAGE_MASK (~(PAGE_SIZE-1)) 35 35 36 + #ifdef CONFIG_MMU 36 37 #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 37 38 #define MAX_MEM_PFN XCHAL_KSEG_SIZE 39 + #else 40 + #define PAGE_OFFSET 0 41 + #define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) 42 + #endif 43 + 38 44 #define PGTABLE_START 0x80000000 39 45 40 46 /* ··· 171 165 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 172 166 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 173 167 168 + #ifdef CONFIG_MMU 174 169 #define WANT_PAGE_VIRTUAL 175 - 170 + #endif 176 171 177 172 #endif /* __ASSEMBLY__ */ 178 173
+8 -5
arch/xtensa/include/asm/pgtable.h
··· 183 183 184 184 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 185 185 186 + #ifdef CONFIG_MMU 186 187 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; 188 + extern void paging_init(void); 189 + extern void pgtable_cache_init(void); 190 + #else 191 + # define swapper_pg_dir NULL 192 + static inline void paging_init(void) { } 193 + static inline void pgtable_cache_init(void) { } 194 + #endif 187 195 188 196 /* 189 197 * The pmd contains the kernel virtual address of the pte page. ··· 391 383 392 384 #else 393 385 394 - extern void paging_init(void); 395 - 396 386 #define kern_addr_valid(addr) (1) 397 387 398 388 extern void update_mmu_cache(struct vm_area_struct * vma, ··· 403 397 404 398 #define io_remap_pfn_range(vma,from,pfn,size,prot) \ 405 399 remap_pfn_range(vma, from, pfn, size, prot) 406 - 407 - 408 - extern void pgtable_cache_init(void); 409 400 410 401 typedef pte_t *pte_addr_t; 411 402
+6
arch/xtensa/include/asm/processor.h
··· 13 13 14 14 #include <variant/core.h> 15 15 #include <asm/coprocessor.h> 16 + #include <platform/hardware.h> 16 17 17 18 #include <linux/compiler.h> 18 19 #include <asm/ptrace.h> ··· 36 35 * the 1 GB requirement applies to the stack as well. 37 36 */ 38 37 38 + #ifdef CONFIG_MMU 39 39 #define TASK_SIZE __XTENSA_UL_CONST(0x40000000) 40 + #else 41 + #define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) 42 + #endif 43 + 40 44 #define STACK_TOP TASK_SIZE 41 45 #define STACK_TOP_MAX STACK_TOP 42 46
+2 -1
arch/xtensa/kernel/entry.S
··· 1463 1463 callx0 a0 # should not return 1464 1464 1: j 1b 1465 1465 1466 + #ifdef CONFIG_MMU 1466 1467 /* 1467 1468 * We should never get here. Bail out! 1468 1469 */ ··· 1776 1775 bbsi.l a2, PS_UM_BIT, 1f 1777 1776 j _kernel_exception 1778 1777 1: j _user_exception 1779 - 1778 + #endif /* CONFIG_MMU */ 1780 1779 1781 1780 /* 1782 1781 * System Calls.
+2 -1
arch/xtensa/kernel/head.S
··· 235 235 */ 236 236 237 237 .section ".bss.page_aligned", "w" 238 + #ifdef CONFIG_MMU 238 239 ENTRY(swapper_pg_dir) 239 240 .fill PAGE_SIZE, 1, 0 241 + #endif 240 242 ENTRY(empty_zero_page) 241 243 .fill PAGE_SIZE, 1, 0 242 -
+7
arch/xtensa/kernel/setup.c
··· 84 84 int initrd_is_mapped; 85 85 #endif 86 86 87 + #ifdef CONFIG_MMU 87 88 extern void init_mmu(void); 89 + #else 90 + static inline void init_mmu(void) { } 91 + #endif 92 + 93 + extern void zones_init(void); 88 94 89 95 /* 90 96 * Boot parameter parsing. ··· 292 286 293 287 294 288 paging_init(); 289 + zones_init(); 295 290 296 291 #ifdef CONFIG_VT 297 292 # if defined(CONFIG_VGA_CONSOLE)
+2
arch/xtensa/kernel/traps.c
··· 104 104 #endif 105 105 { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, 106 106 #endif 107 + #ifdef CONFIG_MMU 107 108 { EXCCAUSE_ITLB_MISS, 0, do_page_fault }, 108 109 { EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss}, 109 110 { EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit }, ··· 119 118 { EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited }, 120 119 { EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, 121 120 { EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, 121 + #endif /* CONFIG_MMU */ 122 122 /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ 123 123 #if XTENSA_HAVE_COPROCESSOR(0) 124 124 COPROCESSOR(0),
+4
arch/xtensa/kernel/vectors.S
··· 309 309 * All other exceptions are unexpected and thus unrecoverable! 310 310 */ 311 311 312 + #ifdef CONFIG_MMU 312 313 .extern fast_second_level_miss_double_kernel 313 314 314 315 .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ ··· 320 319 bnez a3, .Lunrecoverable 321 320 1: movi a3, fast_second_level_miss_double_kernel 322 321 jx a3 322 + #else 323 + .equ .Lksp, .Lunrecoverable 324 + #endif 323 325 324 326 /* Critical! We can't handle this situation. PANIC! */ 325 327
+2 -1
arch/xtensa/mm/Makefile
··· 2 2 # Makefile for the Linux/Xtensa-specific parts of the memory manager. 3 3 # 4 4 5 - obj-y := init.o fault.o tlb.o misc.o cache.o 5 + obj-y := init.o cache.o misc.o 6 + obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+1 -61
arch/xtensa/mm/init.c
··· 24 24 #include <linux/mm.h> 25 25 #include <linux/slab.h> 26 26 27 - #include <asm/pgtable.h> 28 27 #include <asm/bootparam.h> 29 - #include <asm/mmu_context.h> 30 - #include <asm/tlb.h> 31 28 #include <asm/page.h> 32 - #include <asm/pgalloc.h> 33 - 34 - 35 - DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 36 29 37 30 /* References to section boundaries */ 38 31 ··· 153 160 } 154 161 155 162 156 - void __init paging_init(void) 163 + void __init zones_init(void) 157 164 { 158 165 unsigned long zones_size[MAX_NR_ZONES]; 159 166 int i; ··· 168 175 zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; 169 176 #endif 170 177 171 - /* Initialize the kernel's page tables. */ 172 - 173 - memset(swapper_pg_dir, 0, PAGE_SIZE); 174 - 175 178 free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); 176 - } 177 - 178 - /* 179 - * Flush the mmu and reset associated register to default values. 180 - */ 181 - 182 - void __init init_mmu (void) 183 - { 184 - /* Writing zeros to the <t>TLBCFG special registers ensure 185 - * that valid values exist in the register. For existing 186 - * PGSZID<w> fields, zero selects the first element of the 187 - * page-size array. For nonexistent PGSZID<w> fields, zero is 188 - * the best value to write. Also, when changing PGSZID<w> 189 - * fields, the corresponding TLB must be flushed. 190 - */ 191 - set_itlbcfg_register (0); 192 - set_dtlbcfg_register (0); 193 - flush_tlb_all (); 194 - 195 - /* Set rasid register to a known value. */ 196 - 197 - set_rasid_register (ASID_USER_FIRST); 198 - 199 - /* Set PTEVADDR special register to the start of the page 200 - * table, which is in kernel mappable space (ie. not 201 - * statically mapped). This register's value is undefined on 202 - * reset. 
203 - */ 204 - set_ptevaddr_register (PGTABLE_START); 205 179 } 206 180 207 181 /* ··· 240 280 free_reserved_mem(&__init_begin, &__init_end); 241 281 printk("Freeing unused kernel memory: %dk freed\n", 242 282 (&__init_end - &__init_begin) >> 10); 243 - } 244 - 245 - struct kmem_cache *pgtable_cache __read_mostly; 246 - 247 - static void pgd_ctor(void* addr) 248 - { 249 - pte_t* ptep = (pte_t*)addr; 250 - int i; 251 - 252 - for (i = 0; i < 1024; i++, ptep++) 253 - pte_clear(NULL, 0, ptep); 254 - 255 - } 256 - 257 - void __init pgtable_cache_init(void) 258 - { 259 - pgtable_cache = kmem_cache_create("pgd", 260 - PAGE_SIZE, PAGE_SIZE, 261 - SLAB_HWCACHE_ALIGN, 262 - pgd_ctor); 263 283 }
+2
arch/xtensa/mm/misc.S
··· 84 84 85 85 retw 86 86 87 + #ifdef CONFIG_MMU 87 88 /* 88 89 * If we have to deal with cache aliasing, we use temporary memory mappings 89 90 * to ensure that the source and destination pages have the same color as ··· 312 311 /* End of special treatment in tlb miss exception */ 313 312 314 313 ENTRY(__tlbtemp_mapping_end) 314 + #endif /* CONFIG_MMU */ 315 315 316 316 /* 317 317 * void __invalidate_icache_page(ulong start)
+70
arch/xtensa/mm/mmu.c
··· 1 + /* 2 + * xtensa mmu stuff 3 + * 4 + * Extracted from init.c 5 + */ 6 + #include <linux/percpu.h> 7 + #include <linux/init.h> 8 + #include <linux/string.h> 9 + #include <linux/slab.h> 10 + #include <linux/cache.h> 11 + 12 + #include <asm/tlb.h> 13 + #include <asm/tlbflush.h> 14 + #include <asm/mmu_context.h> 15 + #include <asm/page.h> 16 + 17 + DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 18 + 19 + void __init paging_init(void) 20 + { 21 + memset(swapper_pg_dir, 0, PAGE_SIZE); 22 + } 23 + 24 + /* 25 + * Flush the mmu and reset associated register to default values. 26 + */ 27 + void __init init_mmu(void) 28 + { 29 + /* Writing zeros to the <t>TLBCFG special registers ensure 30 + * that valid values exist in the register. For existing 31 + * PGSZID<w> fields, zero selects the first element of the 32 + * page-size array. For nonexistent PGSZID<w> fields, zero is 33 + * the best value to write. Also, when changing PGSZID<w> 34 + * fields, the corresponding TLB must be flushed. 35 + */ 36 + set_itlbcfg_register(0); 37 + set_dtlbcfg_register(0); 38 + flush_tlb_all(); 39 + 40 + /* Set rasid register to a known value. */ 41 + 42 + set_rasid_register(ASID_USER_FIRST); 43 + 44 + /* Set PTEVADDR special register to the start of the page 45 + * table, which is in kernel mappable space (ie. not 46 + * statically mapped). This register's value is undefined on 47 + * reset. 48 + */ 49 + set_ptevaddr_register(PGTABLE_START); 50 + } 51 + 52 + struct kmem_cache *pgtable_cache __read_mostly; 53 + 54 + static void pgd_ctor(void *addr) 55 + { 56 + pte_t *ptep = (pte_t *)addr; 57 + int i; 58 + 59 + for (i = 0; i < 1024; i++, ptep++) 60 + pte_clear(NULL, 0, ptep); 61 + 62 + } 63 + 64 + void __init pgtable_cache_init(void) 65 + { 66 + pgtable_cache = kmem_cache_create("pgd", 67 + PAGE_SIZE, PAGE_SIZE, 68 + SLAB_HWCACHE_ALIGN, 69 + pgd_ctor); 70 + }