···
 	  The feature requires the design to define the RAM memory controller
 	  window to be twice as large as the actual physical memory.
 
-config HIGHMEM_START_BOOL
-	bool "Set high memory pool address"
-	depends on ADVANCED_OPTIONS && HIGHMEM
-	help
-	  This option allows you to set the base address of the kernel virtual
-	  area used to map high memory pages. This can be useful in
-	  optimizing the layout of kernel virtual memory.
-
-	  Say N here unless you know what you are doing.
-
-config HIGHMEM_START
-	hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
+config HIGHMEM
+	bool "High memory support"
 	depends on MMU
-	default "0xfe000000"
+	help
+	  The address space of Microblaze processors is only 4 Gigabytes large
+	  and it has to accommodate user address space, kernel address
+	  space as well as some memory mapped IO. That means that, if you
+	  have a large amount of physical memory and/or IO, not all of the
+	  memory can be "permanently mapped" by the kernel. The physical
+	  memory that is not permanently mapped is called "high memory".
+
+	  If unsure, say n.
 
 config LOWMEM_SIZE_BOOL
 	bool "Set maximum low memory"
···11+/*22+ * highmem.h: virtual kernel memory mappings for high memory33+ *44+ * Used in CONFIG_HIGHMEM systems for memory pages which55+ * are not addressable by direct kernel virtual addresses.66+ *77+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG88+ * Gerhard.Wichert@pdb.siemens.de99+ *1010+ *1111+ * Redesigned the x86 32-bit VM architecture to deal with1212+ * up to 16 Terabyte physical memory. With current x86 CPUs1313+ * we now support up to 64 Gigabytes physical RAM.1414+ *1515+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>1616+ */1717+#ifndef _ASM_HIGHMEM_H1818+#define _ASM_HIGHMEM_H1919+2020+#ifdef __KERNEL__2121+2222+#include <linux/init.h>2323+#include <linux/interrupt.h>2424+#include <linux/uaccess.h>2525+#include <asm/fixmap.h>2626+2727+extern pte_t *kmap_pte;2828+extern pgprot_t kmap_prot;2929+extern pte_t *pkmap_page_table;3030+3131+/*3232+ * Right now we initialize only a single pte table. It can be extended3333+ * easily, subsequent pte tables have to be allocated in one physical3434+ * chunk of RAM.3535+ */3636+/*3737+ * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte3838+ * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP3939+ * and PKMAP can be placed in a single pte table. 
We use 512 pages for PKMAP4040+ * in case of 16K/64K/256K page sizes.4141+ */4242+4343+#define PKMAP_ORDER PTE_SHIFT4444+#define LAST_PKMAP (1 << PKMAP_ORDER)4545+4646+#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \4747+ & PMD_MASK)4848+4949+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)5050+#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)5151+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))5252+5353+extern void *kmap_high(struct page *page);5454+extern void kunmap_high(struct page *page);5555+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);5656+extern void __kunmap_atomic(void *kvaddr);5757+5858+static inline void *kmap(struct page *page)5959+{6060+ might_sleep();6161+ if (!PageHighMem(page))6262+ return page_address(page);6363+ return kmap_high(page);6464+}6565+6666+static inline void kunmap(struct page *page)6767+{6868+ BUG_ON(in_interrupt());6969+ if (!PageHighMem(page))7070+ return;7171+ kunmap_high(page);7272+}7373+7474+static inline void *__kmap_atomic(struct page *page)7575+{7676+ return kmap_atomic_prot(page, kmap_prot);7777+}7878+7979+static inline struct page *kmap_atomic_to_page(void *ptr)8080+{8181+ unsigned long idx, vaddr = (unsigned long) ptr;8282+ pte_t *pte;8383+8484+ if (vaddr < FIXADDR_START)8585+ return virt_to_page(ptr);8686+8787+ idx = virt_to_fix(vaddr);8888+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);8989+ return pte_page(*pte);9090+}9191+9292+#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }9393+9494+#endif /* __KERNEL__ */9595+9696+#endif /* _ASM_HIGHMEM_H */
···11+/*22+ * highmem.c: virtual kernel memory mappings for high memory33+ *44+ * PowerPC version, stolen from the i386 version.55+ *66+ * Used in CONFIG_HIGHMEM systems for memory pages which77+ * are not addressable by direct kernel virtual addresses.88+ *99+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG1010+ * Gerhard.Wichert@pdb.siemens.de1111+ *1212+ *1313+ * Redesigned the x86 32-bit VM architecture to deal with1414+ * up to 16 Terrabyte physical memory. With current x86 CPUs1515+ * we now support up to 64 Gigabytes physical RAM.1616+ *1717+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>1818+ *1919+ * Reworked for PowerPC by various contributors. Moved from2020+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.2121+ */2222+2323+#include <linux/highmem.h>2424+#include <linux/module.h>2525+2626+/*2727+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap2828+ * gives a more generic (and caching) interface. But kmap_atomic can2929+ * be used in IRQ contexts, so in some (very limited) cases we need3030+ * it.3131+ */3232+#include <asm/tlbflush.h>3333+3434+void *kmap_atomic_prot(struct page *page, pgprot_t prot)3535+{3636+3737+ unsigned long vaddr;3838+ int idx, type;3939+4040+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */4141+ pagefault_disable();4242+ if (!PageHighMem(page))4343+ return page_address(page);4444+4545+4646+ type = kmap_atomic_idx_push();4747+ idx = type + KM_TYPE_NR*smp_processor_id();4848+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);4949+#ifdef CONFIG_DEBUG_HIGHMEM5050+ BUG_ON(!pte_none(*(kmap_pte-idx)));5151+#endif5252+ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));5353+ local_flush_tlb_page(NULL, vaddr);5454+5555+ return (void *) vaddr;5656+}5757+EXPORT_SYMBOL(kmap_atomic_prot);5858+5959+void __kunmap_atomic(void *kvaddr)6060+{6161+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;6262+ int type;6363+6464+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {6565+ 
pagefault_enable();6666+ return;6767+ }6868+6969+ type = kmap_atomic_idx();7070+#ifdef CONFIG_DEBUG_HIGHMEM7171+ {7272+ unsigned int idx;7373+7474+ idx = type + KM_TYPE_NR * smp_processor_id();7575+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));7676+7777+ /*7878+ * force other mappings to Oops if they'll try to access7979+ * this pte without first remap it8080+ */8181+ pte_clear(&init_mm, vaddr, kmap_pte-idx);8282+ local_flush_tlb_page(NULL, vaddr);8383+ }8484+#endif8585+ kmap_atomic_idx_pop();8686+ pagefault_enable();8787+}8888+EXPORT_SYMBOL(__kunmap_atomic);
+68
arch/microblaze/mm/init.c
···
 EXPORT_SYMBOL(memory_size);
 unsigned long lowmem_size;
 
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
+pgprot_t kmap_prot;
+EXPORT_SYMBOL(kmap_prot);
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr);
+}
+
+static void __init highmem_init(void)
+{
+	pr_debug("%x\n", (u32)PKMAP_BASE);
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+	kmap_prot = PAGE_KERNEL;
+}
+
+static unsigned long highmem_setup(void)
+{
+	unsigned long pfn;
+	unsigned long reservedpages = 0;
+
+	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		/* FIXME not sure about */
+		if (memblock_is_reserved(pfn << PAGE_SHIFT))
+			continue;
+		ClearPageReserved(page);
+		init_page_count(page);
+		__free_page(page);
+		totalhigh_pages++;
+		reservedpages++;
+	}
+	totalram_pages += totalhigh_pages;
+	printk(KERN_INFO "High memory: %luk\n",
+					totalhigh_pages << (PAGE_SHIFT-10));
+
+	return reservedpages;
+}
+#endif /* CONFIG_HIGHMEM */
+
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
···
 	/* Clean every zones */
 	memset(zones_size, 0, sizeof(zones_size));
 
+#ifdef CONFIG_HIGHMEM
+	highmem_init();
+
+	zones_size[ZONE_DMA] = max_low_pfn;
+	zones_size[ZONE_HIGHMEM] = max_pfn;
+#else
 	zones_size[ZONE_DMA] = max_pfn;
+#endif
 
 	/* We don't have holes in memory map */
 	free_area_init_nodes(zones_size);
···
 	}
 	}
 
+#ifdef CONFIG_HIGHMEM
+	reservedpages -= highmem_setup();
+#endif
+
 	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
 	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
 	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
···
 #ifdef CONFIG_MMU
 	pr_info("Kernel virtual memory layout:\n");
 	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
+			PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
 	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
 			ioremap_bot, ioremap_base);
 	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
···
 
 	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
 		lowmem_size = CONFIG_LOWMEM_SIZE;
+#ifndef CONFIG_HIGHMEM
 		memory_size = lowmem_size;
+#endif
 	}
 
 	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
···
 	mapin_ram();
 
 	/* Extend vmalloc and ioremap area as big as possible */
+#ifdef CONFIG_HIGHMEM
+	ioremap_base = ioremap_bot = PKMAP_BASE;
+#else
 	ioremap_base = ioremap_bot = FIXADDR_START;
+#endif
 
 	/* Initialize the context management stuff */
 	mmu_context_init();