Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM] kmap support

The kmap virtual area borrows a 2 MB range at the top of the 16 MB area
below PAGE_OFFSET currently reserved for kernel modules and/or the
XIP kernel. This 2 MB range corresponds to the span covered by 2
consecutive second-level page tables, or a single pmd entry as seen by
the Linux page table abstraction. Because XIP kernels are unlikely to be
deployed on systems that need highmem support, there shouldn't be any
shortage of VM space for modules: the remaining 14 MB is still more than
twice the typical module usage.

Because the virtual mapping of a highmem page can go away at any moment
after kunmap() is called on it, the delayed cache flushing normally
provided by flush_dcache_page() must be bypassed for such pages.

The atomic kmap versions are based on fixmaps, and
__cpuc_flush_dcache_page() is used directly in that case.

Signed-off-by: Nicolas Pitre <nico@marvell.com>

+169 -4
+28
arch/arm/include/asm/highmem.h
··· 1 + #ifndef _ASM_HIGHMEM_H 2 + #define _ASM_HIGHMEM_H 3 + 4 + #include <asm/kmap_types.h> 5 + 6 + #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE) 7 + #define LAST_PKMAP PTRS_PER_PTE 8 + #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 9 + #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) 10 + #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 11 + 12 + #define kmap_prot PAGE_KERNEL 13 + 14 + #define flush_cache_kmaps() flush_cache_all() 15 + 16 + extern pte_t *pkmap_page_table; 17 + 18 + extern void *kmap_high(struct page *page); 19 + extern void kunmap_high(struct page *page); 20 + 21 + extern void *kmap(struct page *page); 22 + extern void kunmap(struct page *page); 23 + extern void *kmap_atomic(struct page *page, enum km_type type); 24 + extern void kunmap_atomic(void *kvaddr, enum km_type type); 25 + extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); 26 + extern struct page *kmap_atomic_to_page(const void *ptr); 27 + 28 + #endif
+10 -3
arch/arm/include/asm/memory.h
··· 44 44 * The module space lives between the addresses given by TASK_SIZE 45 45 * and PAGE_OFFSET - it must be within 32MB of the kernel text. 46 46 */ 47 - #define MODULES_END (PAGE_OFFSET) 48 - #define MODULES_VADDR (MODULES_END - 16*1048576) 49 - 47 + #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) 50 48 #if TASK_SIZE > MODULES_VADDR 51 49 #error Top of user space clashes with start of module space 50 + #endif 51 + 52 + /* 53 + * The highmem pkmap virtual space shares the end of the module area. 54 + */ 55 + #ifdef CONFIG_HIGHMEM 56 + #define MODULES_END (PAGE_OFFSET - PMD_SIZE) 57 + #else 58 + #define MODULES_END (PAGE_OFFSET) 52 59 #endif 53 60 54 61 /*
+1
arch/arm/mm/Makefile
··· 16 16 17 17 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 18 18 obj-$(CONFIG_DISCONTIGMEM) += discontig.o 19 + obj-$(CONFIG_HIGHMEM) += highmem.o 19 20 20 21 obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o 21 22 obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
+1 -1
arch/arm/mm/flush.c
··· 192 192 struct address_space *mapping = page_mapping(page); 193 193 194 194 #ifndef CONFIG_SMP 195 - if (mapping && !mapping_mapped(mapping)) 195 + if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) 196 196 set_bit(PG_dcache_dirty, &page->flags); 197 197 else 198 198 #endif
+116
arch/arm/mm/highmem.c
··· 1 + /* 2 + * arch/arm/mm/highmem.c -- ARM highmem support 3 + * 4 + * Author: Nicolas Pitre 5 + * Created: september 8, 2008 6 + * Copyright: Marvell Semiconductors Inc. 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/highmem.h> 15 + #include <linux/interrupt.h> 16 + #include <asm/fixmap.h> 17 + #include <asm/cacheflush.h> 18 + #include <asm/tlbflush.h> 19 + #include "mm.h" 20 + 21 + void *kmap(struct page *page) 22 + { 23 + might_sleep(); 24 + if (!PageHighMem(page)) 25 + return page_address(page); 26 + return kmap_high(page); 27 + } 28 + EXPORT_SYMBOL(kmap); 29 + 30 + void kunmap(struct page *page) 31 + { 32 + BUG_ON(in_interrupt()); 33 + if (!PageHighMem(page)) 34 + return; 35 + kunmap_high(page); 36 + } 37 + EXPORT_SYMBOL(kunmap); 38 + 39 + void *kmap_atomic(struct page *page, enum km_type type) 40 + { 41 + unsigned int idx; 42 + unsigned long vaddr; 43 + 44 + pagefault_disable(); 45 + if (!PageHighMem(page)) 46 + return page_address(page); 47 + 48 + idx = type + KM_TYPE_NR * smp_processor_id(); 49 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 50 + #ifdef CONFIG_DEBUG_HIGHMEM 51 + /* 52 + * With debugging enabled, kunmap_atomic forces that entry to 0. 53 + * Make sure it was indeed properly unmapped. 54 + */ 55 + BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 56 + #endif 57 + set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0); 58 + /* 59 + * When debugging is off, kunmap_atomic leaves the previous mapping 60 + * in place, so this TLB flush ensures the TLB is updated with the 61 + * new mapping. 
62 + */ 63 + local_flush_tlb_kernel_page(vaddr); 64 + 65 + return (void *)vaddr; 66 + } 67 + EXPORT_SYMBOL(kmap_atomic); 68 + 69 + void kunmap_atomic(void *kvaddr, enum km_type type) 70 + { 71 + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 72 + unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); 73 + 74 + if (kvaddr >= (void *)FIXADDR_START) { 75 + __cpuc_flush_dcache_page((void *)vaddr); 76 + #ifdef CONFIG_DEBUG_HIGHMEM 77 + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 78 + set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 79 + local_flush_tlb_kernel_page(vaddr); 80 + #else 81 + (void) idx; /* to kill a warning */ 82 + #endif 83 + } 84 + pagefault_enable(); 85 + } 86 + EXPORT_SYMBOL(kunmap_atomic); 87 + 88 + void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) 89 + { 90 + unsigned int idx; 91 + unsigned long vaddr; 92 + 93 + pagefault_disable(); 94 + 95 + idx = type + KM_TYPE_NR * smp_processor_id(); 96 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 97 + #ifdef CONFIG_DEBUG_HIGHMEM 98 + BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 99 + #endif 100 + set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); 101 + local_flush_tlb_kernel_page(vaddr); 102 + 103 + return (void *)vaddr; 104 + } 105 + 106 + struct page *kmap_atomic_to_page(const void *ptr) 107 + { 108 + unsigned long vaddr = (unsigned long)ptr; 109 + pte_t *pte; 110 + 111 + if (vaddr < FIXADDR_START) 112 + return virt_to_page(ptr); 113 + 114 + pte = TOP_PTE(vaddr); 115 + return pte_page(*pte); 116 + }
+13
arch/arm/mm/mmu.c
··· 21 21 #include <asm/setup.h> 22 22 #include <asm/sizes.h> 23 23 #include <asm/tlb.h> 24 + #include <asm/highmem.h> 24 25 25 26 #include <asm/mach/arch.h> 26 27 #include <asm/mach/map.h> ··· 896 895 flush_cache_all(); 897 896 } 898 897 898 + static void __init kmap_init(void) 899 + { 900 + #ifdef CONFIG_HIGHMEM 901 + pmd_t *pmd = pmd_off_k(PKMAP_BASE); 902 + pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); 903 + BUG_ON(!pmd_none(*pmd) || !pte); 904 + __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE); 905 + pkmap_page_table = pte + PTRS_PER_PTE; 906 + #endif 907 + } 908 + 899 909 /* 900 910 * paging_init() sets up the page tables, initialises the zone memory 901 911 * maps, and sets up the zero page, bad page and bad page tables. ··· 920 908 prepare_page_table(); 921 909 bootmem_init(); 922 910 devicemaps_init(mdesc); 911 + kmap_init(); 923 912 924 913 top_pmd = pmd_off_k(0xffff0000); 925 914