Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: add HIGHMEM support

Introduce a fixmap area just below the vmalloc region. Use it for atomic
mapping of high-memory pages.
High memory on cores with cache aliasing is not yet supported and remains
to be implemented. Fail the build for such configurations for now.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+270 -16
+18
arch/xtensa/Kconfig
··· 190 190 191 191 If in doubt, say Y. 192 192 193 + config HIGHMEM 194 + bool "High Memory Support" 195 + help 196 + Linux can use the full amount of RAM in the system by 197 + default. However, the default MMUv2 setup only maps the 198 + lowermost 128 MB of memory linearly to the areas starting 199 + at 0xd0000000 (cached) and 0xd8000000 (uncached). 200 + When there are more than 128 MB memory in the system not 201 + all of it can be "permanently mapped" by the kernel. 202 + The physical memory that's not permanently mapped is called 203 + "high memory". 204 + 205 + If you are compiling a kernel which will never run on a 206 + machine with more than 128 MB total physical RAM, answer 207 + N here. 208 + 209 + If unsure, say Y. 210 + 193 211 endmenu 194 212 195 213 config XTENSA_CALIBRATE_CCOUNT
+58
arch/xtensa/include/asm/fixmap.h
··· 1 + /* 2 + * fixmap.h: compile-time virtual memory allocation 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 1998 Ingo Molnar 9 + * 10 + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 + */ 12 + 13 + #ifndef _ASM_FIXMAP_H 14 + #define _ASM_FIXMAP_H 15 + 16 + #include <asm/pgtable.h> 17 + #ifdef CONFIG_HIGHMEM 18 + #include <linux/threads.h> 19 + #include <asm/kmap_types.h> 20 + #endif 21 + 22 + /* 23 + * Here we define all the compile-time 'special' virtual 24 + * addresses. The point is to have a constant address at 25 + * compile time, but to set the physical address only 26 + * in the boot process. We allocate these special addresses 27 + * from the end of the consistent memory region backwards. 28 + * Also this lets us do fail-safe vmalloc(), we 29 + * can guarantee that these special addresses and 30 + * vmalloc()-ed addresses never overlap. 31 + * 32 + * these 'compile-time allocated' memory buffers are 33 + * fixed-size 4k pages. (or larger if used with an increment 34 + * higher than 1) use fixmap_set(idx,phys) to associate 35 + * physical memory with fixmap indices. 36 + */ 37 + enum fixed_addresses { 38 + #ifdef CONFIG_HIGHMEM 39 + /* reserved pte's for temporary kernel mappings */ 40 + FIX_KMAP_BEGIN, 41 + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 42 + #endif 43 + __end_of_fixed_addresses 44 + }; 45 + 46 + #define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) 47 + #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 48 + #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 49 + 50 + #include <asm-generic/fixmap.h> 51 + 52 + #define kmap_get_fixmap_pte(vaddr) \ 53 + pte_offset_kernel( \ 54 + pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \ 55 + (vaddr) \ 56 + ) 57 + 58 + #endif
+44 -1
arch/xtensa/include/asm/highmem.h
··· 6 6 * this archive for more details. 7 7 * 8 8 * Copyright (C) 2003 - 2005 Tensilica Inc. 9 + * Copyright (C) 2014 Cadence Design Systems Inc. 9 10 */ 10 11 11 12 #ifndef _XTENSA_HIGHMEM_H 12 13 #define _XTENSA_HIGHMEM_H 13 14 14 - extern void flush_cache_kmaps(void); 15 + #include <asm/cacheflush.h> 16 + #include <asm/fixmap.h> 17 + #include <asm/kmap_types.h> 18 + #include <asm/pgtable.h> 19 + 20 + #define PKMAP_BASE (FIXADDR_START - PMD_SIZE) 21 + #define LAST_PKMAP PTRS_PER_PTE 22 + #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 23 + #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) 24 + #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 25 + 26 + #define kmap_prot PAGE_KERNEL 27 + 28 + extern pte_t *pkmap_page_table; 29 + 30 + void *kmap_high(struct page *page); 31 + void kunmap_high(struct page *page); 32 + 33 + static inline void *kmap(struct page *page) 34 + { 35 + BUG_ON(in_interrupt()); 36 + if (!PageHighMem(page)) 37 + return page_address(page); 38 + return kmap_high(page); 39 + } 40 + 41 + static inline void kunmap(struct page *page) 42 + { 43 + BUG_ON(in_interrupt()); 44 + if (!PageHighMem(page)) 45 + return; 46 + kunmap_high(page); 47 + } 48 + 49 + static inline void flush_cache_kmaps(void) 50 + { 51 + flush_cache_all(); 52 + } 53 + 54 + void *kmap_atomic(struct page *page); 55 + void __kunmap_atomic(void *kvaddr); 56 + 57 + void kmap_init(void); 15 58 16 59 #endif
+4
arch/xtensa/include/asm/pgtable.h
··· 310 310 update_pte(ptep, pteval); 311 311 } 312 312 313 + static inline void set_pte(pte_t *ptep, pte_t pteval) 314 + { 315 + update_pte(ptep, pteval); 316 + } 313 317 314 318 static inline void 315 319 set_pmd(pmd_t *pmdp, pmd_t pmdval)
+1
arch/xtensa/mm/Makefile
··· 4 4 5 5 obj-y := init.o cache.o misc.o 6 6 obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o 7 + obj-$(CONFIG_HIGHMEM) += highmem.o
+6 -1
arch/xtensa/mm/cache.c
··· 59 59 * 60 60 */ 61 61 62 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) 63 + #error "HIGHMEM is not supported on cores with aliasing cache." 64 + #endif 65 + 62 66 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 63 67 64 68 /* ··· 183 179 #else 184 180 if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags) 185 181 && (vma->vm_flags & VM_EXEC) != 0) { 186 - unsigned long paddr = (unsigned long) page_address(page); 182 + unsigned long paddr = (unsigned long)kmap_atomic(page); 187 183 __flush_dcache_page(paddr); 188 184 __invalidate_icache_page(paddr); 189 185 set_bit(PG_arch_1, &page->flags); 186 + kunmap_atomic((void *)paddr); 190 187 } 191 188 #endif 192 189 }
+72
arch/xtensa/mm/highmem.c
··· 1 + /* 2 + * High memory support for Xtensa architecture 3 + * 4 + * This file is subject to the terms and conditions of the GNU General 5 + * Public License. See the file "COPYING" in the main directory of 6 + * this archive for more details. 7 + * 8 + * Copyright (C) 2014 Cadence Design Systems Inc. 9 + */ 10 + 11 + #include <linux/export.h> 12 + #include <linux/highmem.h> 13 + #include <asm/tlbflush.h> 14 + 15 + static pte_t *kmap_pte; 16 + 17 + void *kmap_atomic(struct page *page) 18 + { 19 + enum fixed_addresses idx; 20 + unsigned long vaddr; 21 + int type; 22 + 23 + pagefault_disable(); 24 + if (!PageHighMem(page)) 25 + return page_address(page); 26 + 27 + type = kmap_atomic_idx_push(); 28 + idx = type + KM_TYPE_NR * smp_processor_id(); 29 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 30 + #ifdef CONFIG_DEBUG_HIGHMEM 31 + BUG_ON(!pte_none(*(kmap_pte - idx))); 32 + #endif 33 + set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); 34 + 35 + return (void *)vaddr; 36 + } 37 + EXPORT_SYMBOL(kmap_atomic); 38 + 39 + void __kunmap_atomic(void *kvaddr) 40 + { 41 + int idx, type; 42 + 43 + if (kvaddr >= (void *)FIXADDR_START && 44 + kvaddr < (void *)FIXADDR_TOP) { 45 + type = kmap_atomic_idx(); 46 + idx = type + KM_TYPE_NR * smp_processor_id(); 47 + 48 + /* 49 + * Force other mappings to Oops if they'll try to access this 50 + * pte without first remap it. Keeping stale mappings around 51 + * is a bad idea also, in case the page changes cacheability 52 + * attributes or becomes a protected page in a hypervisor. 
53 + */ 54 + pte_clear(&init_mm, kvaddr, kmap_pte - idx); 55 + local_flush_tlb_kernel_range((unsigned long)kvaddr, 56 + (unsigned long)kvaddr + PAGE_SIZE); 57 + 58 + kmap_atomic_idx_pop(); 59 + } 60 + 61 + pagefault_enable(); 62 + } 63 + EXPORT_SYMBOL(__kunmap_atomic); 64 + 65 + void __init kmap_init(void) 66 + { 67 + unsigned long kmap_vstart; 68 + 69 + /* cache the first kmap pte */ 70 + kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); 71 + kmap_pte = kmap_get_fixmap_pte(kmap_vstart); 72 + }
+31 -14
arch/xtensa/mm/init.c
··· 20 20 #include <linux/errno.h> 21 21 #include <linux/bootmem.h> 22 22 #include <linux/gfp.h> 23 + #include <linux/highmem.h> 23 24 #include <linux/swap.h> 24 25 #include <linux/mman.h> 25 26 #include <linux/nodemask.h> ··· 297 296 298 297 void __init zones_init(void) 299 298 { 300 - unsigned long zones_size[MAX_NR_ZONES]; 301 - int i; 302 - 303 299 /* All pages are DMA-able, so we put them all in the DMA zone. */ 304 - 305 - zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET; 306 - for (i = 1; i < MAX_NR_ZONES; i++) 307 - zones_size[i] = 0; 308 - 300 + unsigned long zones_size[MAX_NR_ZONES] = { 301 + [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET, 309 302 #ifdef CONFIG_HIGHMEM 310 - zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; 303 + [ZONE_HIGHMEM] = max_pfn - max_low_pfn, 311 304 #endif 312 - 305 + }; 313 306 free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); 314 307 } 315 308 ··· 313 318 314 319 void __init mem_init(void) 315 320 { 316 - max_mapnr = max_low_pfn - ARCH_PFN_OFFSET; 317 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 318 - 319 321 #ifdef CONFIG_HIGHMEM 320 - #error HIGHGMEM not implemented in init.c 322 + unsigned long tmp; 323 + 324 + reset_all_zones_managed_pages(); 325 + for (tmp = max_low_pfn; tmp < max_pfn; tmp++) 326 + free_highmem_page(pfn_to_page(tmp)); 321 327 #endif 328 + 329 + max_mapnr = max_pfn - ARCH_PFN_OFFSET; 330 + high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); 322 331 323 332 free_all_bootmem(); 324 333 325 334 mem_init_print_info(NULL); 335 + pr_info("virtual kernel memory layout:\n" 336 + #ifdef CONFIG_HIGHMEM 337 + " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 338 + " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 339 + #endif 340 + " vmalloc : 0x%08x - 0x%08x (%5u MB)\n" 341 + " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n", 342 + #ifdef CONFIG_HIGHMEM 343 + PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, 344 + (LAST_PKMAP*PAGE_SIZE) >> 10, 345 + FIXADDR_START, FIXADDR_TOP, 346 + (FIXADDR_TOP - FIXADDR_START) >> 
10, 347 + #endif 348 + VMALLOC_START, VMALLOC_END, 349 + (VMALLOC_END - VMALLOC_START) >> 20, 350 + PAGE_OFFSET, PAGE_OFFSET + 351 + (max_low_pfn - min_low_pfn) * PAGE_SIZE, 352 + ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); 326 353 } 327 354 328 355 #ifdef CONFIG_BLK_DEV_INITRD
+36
arch/xtensa/mm/mmu.c
··· 3 3 * 4 4 * Extracted from init.c 5 5 */ 6 + #include <linux/bootmem.h> 6 7 #include <linux/percpu.h> 7 8 #include <linux/init.h> 8 9 #include <linux/string.h> ··· 17 16 #include <asm/initialize_mmu.h> 18 17 #include <asm/io.h> 19 18 19 + #if defined(CONFIG_HIGHMEM) 20 + static void * __init init_pmd(unsigned long vaddr) 21 + { 22 + pgd_t *pgd = pgd_offset_k(vaddr); 23 + pmd_t *pmd = pmd_offset(pgd, vaddr); 24 + 25 + if (pmd_none(*pmd)) { 26 + unsigned i; 27 + pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); 28 + 29 + for (i = 0; i < 1024; i++) 30 + pte_clear(NULL, 0, pte + i); 31 + 32 + set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); 33 + BUG_ON(pte != pte_offset_kernel(pmd, 0)); 34 + pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", 35 + __func__, vaddr, pmd, pte); 36 + return pte; 37 + } else { 38 + return pte_offset_kernel(pmd, 0); 39 + } 40 + } 41 + 42 + static void __init fixedrange_init(void) 43 + { 44 + BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); 45 + init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); 46 + } 47 + #endif 48 + 20 49 void __init paging_init(void) 21 50 { 22 51 memset(swapper_pg_dir, 0, PAGE_SIZE); 52 + #ifdef CONFIG_HIGHMEM 53 + fixedrange_init(); 54 + pkmap_page_table = init_pmd(PKMAP_BASE); 55 + kmap_init(); 56 + #endif 23 57 } 24 58 25 59 /*