Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

microblaze: Highmem support

The first highmem implementation.

Signed-off-by: Michal Simek <monstr@monstr.eu>

+272 -13 (commit total across all files)
+11 -13
arch/microblaze/Kconfig
··· 159 159 The feature requires the design to define the RAM memory controller 160 160 window to be twice as large as the actual physical memory. 161 161 162 - config HIGHMEM_START_BOOL 163 - bool "Set high memory pool address" 164 - depends on ADVANCED_OPTIONS && HIGHMEM 165 - help 166 - This option allows you to set the base address of the kernel virtual 167 - area used to map high memory pages. This can be useful in 168 - optimizing the layout of kernel virtual memory. 169 - 170 - Say N here unless you know what you are doing. 171 - 172 - config HIGHMEM_START 173 - hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL 162 + config HIGHMEM 163 + bool "High memory support" 174 164 depends on MMU 175 - default "0xfe000000" 165 + help 166 + The address space of Microblaze processors is only 4 Gigabytes large 167 + and it has to accommodate user address space, kernel address 168 + space as well as some memory mapped IO. That means that, if you 169 + have a large amount of physical memory and/or IO, not all of the 170 + memory can be "permanently mapped" by the kernel. The physical 171 + memory that is not permanently mapped is called "high memory". 172 + 173 + If unsure, say n. 176 174 177 175 config LOWMEM_SIZE_BOOL 178 176 bool "Set maximum low memory"
+8
arch/microblaze/include/asm/fixmap.h
··· 21 21 #ifndef __ASSEMBLY__ 22 22 #include <linux/kernel.h> 23 23 #include <asm/page.h> 24 + #ifdef CONFIG_HIGHMEM 25 + #include <linux/threads.h> 26 + #include <asm/kmap_types.h> 27 + #endif 24 28 25 29 #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) 26 30 ··· 48 44 */ 49 45 enum fixed_addresses { 50 46 FIX_HOLE, 47 + #ifdef CONFIG_HIGHMEM 48 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 49 + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1, 50 + #endif 51 51 __end_of_fixed_addresses 52 52 }; 53 53
+96
arch/microblaze/include/asm/highmem.h
··· 1 + /* 2 + * highmem.h: virtual kernel memory mappings for high memory 3 + * 4 + * Used in CONFIG_HIGHMEM systems for memory pages which 5 + * are not addressable by direct kernel virtual addresses. 6 + * 7 + * Copyright (C) 1999 Gerhard Wichert, Siemens AG 8 + * Gerhard.Wichert@pdb.siemens.de 9 + * 10 + * 11 + * Redesigned the x86 32-bit VM architecture to deal with 12 + * up to 16 Terabyte physical memory. With current x86 CPUs 13 + * we now support up to 64 Gigabytes physical RAM. 14 + * 15 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 16 + */ 17 + #ifndef _ASM_HIGHMEM_H 18 + #define _ASM_HIGHMEM_H 19 + 20 + #ifdef __KERNEL__ 21 + 22 + #include <linux/init.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/uaccess.h> 25 + #include <asm/fixmap.h> 26 + 27 + extern pte_t *kmap_pte; 28 + extern pgprot_t kmap_prot; 29 + extern pte_t *pkmap_page_table; 30 + 31 + /* 32 + * Right now we initialize only a single pte table. It can be extended 33 + * easily, subsequent pte tables have to be allocated in one physical 34 + * chunk of RAM. 35 + */ 36 + /* 37 + * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte 38 + * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP 39 + * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP 40 + * in case of 16K/64K/256K page sizes. 
41 + */ 42 + 43 + #define PKMAP_ORDER PTE_SHIFT 44 + #define LAST_PKMAP (1 << PKMAP_ORDER) 45 + 46 + #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ 47 + & PMD_MASK) 48 + 49 + #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 50 + #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT) 51 + #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 52 + 53 + extern void *kmap_high(struct page *page); 54 + extern void kunmap_high(struct page *page); 55 + extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); 56 + extern void __kunmap_atomic(void *kvaddr); 57 + 58 + static inline void *kmap(struct page *page) 59 + { 60 + might_sleep(); 61 + if (!PageHighMem(page)) 62 + return page_address(page); 63 + return kmap_high(page); 64 + } 65 + 66 + static inline void kunmap(struct page *page) 67 + { 68 + BUG_ON(in_interrupt()); 69 + if (!PageHighMem(page)) 70 + return; 71 + kunmap_high(page); 72 + } 73 + 74 + static inline void *__kmap_atomic(struct page *page) 75 + { 76 + return kmap_atomic_prot(page, kmap_prot); 77 + } 78 + 79 + static inline struct page *kmap_atomic_to_page(void *ptr) 80 + { 81 + unsigned long idx, vaddr = (unsigned long) ptr; 82 + pte_t *pte; 83 + 84 + if (vaddr < FIXADDR_START) 85 + return virt_to_page(ptr); 86 + 87 + idx = virt_to_fix(vaddr); 88 + pte = kmap_pte - (idx - FIX_KMAP_BEGIN); 89 + return pte_page(*pte); 90 + } 91 + 92 + #define flush_cache_kmaps() { flush_icache(); flush_dcache(); } 93 + 94 + #endif /* __KERNEL__ */ 95 + 96 + #endif /* _ASM_HIGHMEM_H */
+1
arch/microblaze/mm/Makefile
··· 5 5 obj-y := consistent.o init.o 6 6 7 7 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 8 + obj-$(CONFIG_HIGHMEM) += highmem.o
+88
arch/microblaze/mm/highmem.c
··· 1 + /* 2 + * highmem.c: virtual kernel memory mappings for high memory 3 + * 4 + * PowerPC version, stolen from the i386 version. 5 + * 6 + * Used in CONFIG_HIGHMEM systems for memory pages which 7 + * are not addressable by direct kernel virtual addresses. 8 + * 9 + * Copyright (C) 1999 Gerhard Wichert, Siemens AG 10 + * Gerhard.Wichert@pdb.siemens.de 11 + * 12 + * 13 + * Redesigned the x86 32-bit VM architecture to deal with 14 + * up to 16 Terabyte physical memory. With current x86 CPUs 15 + * we now support up to 64 Gigabytes physical RAM. 16 + * 17 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 18 + * 19 + * Reworked for PowerPC by various contributors. Moved from 20 + * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. 21 + */ 22 + 23 + #include <linux/highmem.h> 24 + #include <linux/module.h> 25 + 26 + /* 27 + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap 28 + * gives a more generic (and caching) interface. But kmap_atomic can 29 + * be used in IRQ contexts, so in some (very limited) cases we need 30 + * it. 
31 + */ 32 + #include <asm/tlbflush.h> 33 + 34 + void *kmap_atomic_prot(struct page *page, pgprot_t prot) 35 + { 36 + 37 + unsigned long vaddr; 38 + int idx, type; 39 + 40 + /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ 41 + pagefault_disable(); 42 + if (!PageHighMem(page)) 43 + return page_address(page); 44 + 45 + 46 + type = kmap_atomic_idx_push(); 47 + idx = type + KM_TYPE_NR*smp_processor_id(); 48 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 49 + #ifdef CONFIG_DEBUG_HIGHMEM 50 + BUG_ON(!pte_none(*(kmap_pte-idx))); 51 + #endif 52 + set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); 53 + local_flush_tlb_page(NULL, vaddr); 54 + 55 + return (void *) vaddr; 56 + } 57 + EXPORT_SYMBOL(kmap_atomic_prot); 58 + 59 + void __kunmap_atomic(void *kvaddr) 60 + { 61 + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 62 + int type; 63 + 64 + if (vaddr < __fix_to_virt(FIX_KMAP_END)) { 65 + pagefault_enable(); 66 + return; 67 + } 68 + 69 + type = kmap_atomic_idx(); 70 + #ifdef CONFIG_DEBUG_HIGHMEM 71 + { 72 + unsigned int idx; 73 + 74 + idx = type + KM_TYPE_NR * smp_processor_id(); 75 + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 76 + 77 + /* 78 + * force other mappings to Oops if they'll try to access 79 + * this pte without first remap it 80 + */ 81 + pte_clear(&init_mm, vaddr, kmap_pte-idx); 82 + local_flush_tlb_page(NULL, vaddr); 83 + } 84 + #endif 85 + kmap_atomic_idx_pop(); 86 + pagefault_enable(); 87 + } 88 + EXPORT_SYMBOL(__kunmap_atomic);
+68
arch/microblaze/mm/init.c
··· 49 49 EXPORT_SYMBOL(memory_size); 50 50 unsigned long lowmem_size; 51 51 52 + #ifdef CONFIG_HIGHMEM 53 + pte_t *kmap_pte; 54 + EXPORT_SYMBOL(kmap_pte); 55 + pgprot_t kmap_prot; 56 + EXPORT_SYMBOL(kmap_prot); 57 + 58 + static inline pte_t *virt_to_kpte(unsigned long vaddr) 59 + { 60 + return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), 61 + vaddr), vaddr); 62 + } 63 + 64 + static void __init highmem_init(void) 65 + { 66 + pr_debug("%x\n", (u32)PKMAP_BASE); 67 + map_page(PKMAP_BASE, 0, 0); /* XXX gross */ 68 + pkmap_page_table = virt_to_kpte(PKMAP_BASE); 69 + 70 + kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); 71 + kmap_prot = PAGE_KERNEL; 72 + } 73 + 74 + static unsigned long highmem_setup(void) 75 + { 76 + unsigned long pfn; 77 + unsigned long reservedpages = 0; 78 + 79 + for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { 80 + struct page *page = pfn_to_page(pfn); 81 + 82 + /* FIXME not sure about */ 83 + if (memblock_is_reserved(pfn << PAGE_SHIFT)) 84 + continue; 85 + ClearPageReserved(page); 86 + init_page_count(page); 87 + __free_page(page); 88 + totalhigh_pages++; 89 + reservedpages++; 90 + } 91 + totalram_pages += totalhigh_pages; 92 + printk(KERN_INFO "High memory: %luk\n", 93 + totalhigh_pages << (PAGE_SHIFT-10)); 94 + 95 + return reservedpages; 96 + } 97 + #endif /* CONFIG_HIGHMEM */ 98 + 52 99 /* 53 100 * paging_init() sets up the page tables - in fact we've already done this. 
54 101 */ ··· 113 66 /* Clean every zones */ 114 67 memset(zones_size, 0, sizeof(zones_size)); 115 68 69 + #ifdef CONFIG_HIGHMEM 70 + highmem_init(); 71 + 72 + zones_size[ZONE_DMA] = max_low_pfn; 73 + zones_size[ZONE_HIGHMEM] = max_pfn; 74 + #else 116 75 zones_size[ZONE_DMA] = max_pfn; 76 + #endif 117 77 118 78 /* We don't have holes in memory map */ 119 79 free_area_init_nodes(zones_size); ··· 295 241 } 296 242 } 297 243 244 + #ifdef CONFIG_HIGHMEM 245 + reservedpages -= highmem_setup(); 246 + #endif 247 + 298 248 codesize = (unsigned long)&_sdata - (unsigned long)&_stext; 299 249 datasize = (unsigned long)&_edata - (unsigned long)&_sdata; 300 250 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; ··· 317 259 #ifdef CONFIG_MMU 318 260 pr_info("Kernel virtual memory layout:\n"); 319 261 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); 262 + #ifdef CONFIG_HIGHMEM 263 + pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", 264 + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); 265 + #endif /* CONFIG_HIGHMEM */ 320 266 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", 321 267 ioremap_bot, ioremap_base); 322 268 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", ··· 408 346 409 347 if (lowmem_size > CONFIG_LOWMEM_SIZE) { 410 348 lowmem_size = CONFIG_LOWMEM_SIZE; 349 + #ifndef CONFIG_HIGHMEM 411 350 memory_size = lowmem_size; 351 + #endif 412 352 } 413 353 414 354 mm_cmdline_setup(); /* FIXME parse args from command line - not used */ ··· 439 375 mapin_ram(); 440 376 441 377 /* Extend vmalloc and ioremap area as big as possible */ 378 + #ifdef CONFIG_HIGHMEM 379 + ioremap_base = ioremap_bot = PKMAP_BASE; 380 + #else 442 381 ioremap_base = ioremap_bot = FIXADDR_START; 382 + #endif 443 383 444 384 /* Initialize the context management stuff */ 445 385 mmu_context_init();