/*
 * linux/arch/unicore32/include/asm/cache.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_CACHE_H__
#define __UNICORE_CACHE_H__

/* L1 cache line is 2^5 = 32 bytes on UniCore-II. */
#define L1_CACHE_SHIFT		(5)
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#endif
+46
arch/unicore32/include/asm/memblock.h
/*
 * linux/arch/unicore32/include/asm/memblock.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __UNICORE_MEMBLOCK_H__
#define __UNICORE_MEMBLOCK_H__

/*
 * Memory map description: at most NR_BANKS discontiguous RAM banks.
 */
#define NR_BANKS	8

/* One physically contiguous bank of RAM. */
struct membank {
	unsigned long start;	/* physical base address */
	unsigned long size;	/* bank length in bytes */
	unsigned int highmem;	/* nonzero if bank is highmem */
};

struct meminfo {
	int nr_banks;		/* number of valid entries in bank[] */
	struct membank bank[NR_BANKS];
};

extern struct meminfo meminfo;

/* Iterate over all registered banks of (mi). */
#define for_each_bank(iter, mi)				\
	for (iter = 0; iter < (mi)->nr_banks; iter++)

#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
#define bank_phys_start(bank)	((bank)->start)
#define bank_phys_end(bank)	((bank)->start + (bank)->size)
#define bank_phys_size(bank)	((bank)->size)

extern void uc32_memblock_init(struct meminfo *);

#endif
+123
arch/unicore32/include/asm/memory.h
···11+/*22+ * linux/arch/unicore32/include/asm/memory.h33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ *1212+ * Note: this file should not be included by non-asm/.h files1313+ */1414+#ifndef __UNICORE_MEMORY_H__1515+#define __UNICORE_MEMORY_H__1616+1717+#include <linux/compiler.h>1818+#include <linux/const.h>1919+#include <asm/sizes.h>2020+#include <mach/memory.h>2121+2222+/*2323+ * Allow for constants defined here to be used from assembly code2424+ * by prepending the UL suffix only with actual C code compilation.2525+ */2626+#define UL(x) _AC(x, UL)2727+2828+/*2929+ * PAGE_OFFSET - the virtual address of the start of the kernel image3030+ * TASK_SIZE - the maximum size of a user space task.3131+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area3232+ */3333+#define PAGE_OFFSET UL(0xC0000000)3434+#define TASK_SIZE (PAGE_OFFSET - UL(0x41000000))3535+#define TASK_UNMAPPED_BASE (PAGE_OFFSET / 3)3636+3737+/*3838+ * The module space lives between the addresses given by TASK_SIZE3939+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.4040+ */4141+#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)4242+#if TASK_SIZE > MODULES_VADDR4343+#error Top of user space clashes with start of module space4444+#endif4545+4646+#define MODULES_END (PAGE_OFFSET)4747+4848+/*4949+ * Allow 16MB-aligned ioremap pages5050+ */5151+#define IOREMAP_MAX_ORDER 245252+5353+/*5454+ * Physical vs virtual RAM address space conversion. These are5555+ * private definitions which should NOT be used outside memory.h5656+ * files. 
Use virt_to_phys/phys_to_virt/__pa/__va instead.5757+ */5858+#ifndef __virt_to_phys5959+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)6060+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)6161+#endif6262+6363+/*6464+ * Convert a physical address to a Page Frame Number and back6565+ */6666+#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)6767+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)6868+6969+/*7070+ * Convert a page to/from a physical address7171+ */7272+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))7373+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))7474+7575+#ifndef __ASSEMBLY__7676+7777+#ifndef arch_adjust_zones7878+#define arch_adjust_zones(size, holes) do { } while (0)7979+#endif8080+8181+/*8282+ * PFNs are used to describe any physical page; this means8383+ * PFN 0 == physical address 0.8484+ *8585+ * This is the PFN of the first RAM page in the kernel8686+ * direct-mapped view. We assume this is the first page8787+ * of RAM in the mem_map as well.8888+ */8989+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)9090+9191+/*9292+ * Drivers should NOT use these either.9393+ */9494+#define __pa(x) __virt_to_phys((unsigned long)(x))9595+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))9696+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)9797+9898+/*9999+ * Conversion between a struct page and a physical address.100100+ *101101+ * Note: when converting an unknown physical address to a102102+ * struct page, the resulting pointer must be validated103103+ * using VALID_PAGE(). 
It must return an invalid struct page104104+ * for any physical address not corresponding to a system105105+ * RAM address.106106+ *107107+ * page_to_pfn(page) convert a struct page * to a PFN number108108+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *109109+ *110110+ * virt_to_page(k) convert a _valid_ virtual address to struct page *111111+ * virt_addr_valid(k) indicates whether a virtual address is valid112112+ */113113+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET114114+115115+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)116116+#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && \117117+ (unsigned long)(kaddr) < (unsigned long)high_memory)118118+119119+#endif120120+121121+#include <asm-generic/memory_model.h>122122+123123+#endif
+80
arch/unicore32/include/asm/page.h
···11+/*22+ * linux/arch/unicore32/include/asm/page.h33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#ifndef __UNICORE_PAGE_H__1313+#define __UNICORE_PAGE_H__1414+1515+/* PAGE_SHIFT determines the page size */1616+#define PAGE_SHIFT 121717+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)1818+#define PAGE_MASK (~(PAGE_SIZE-1))1919+2020+#ifndef __ASSEMBLY__2121+2222+struct page;2323+struct vm_area_struct;2424+2525+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)2626+extern void copy_page(void *to, const void *from);2727+2828+#define clear_user_page(page, vaddr, pg) clear_page(page)2929+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)3030+3131+#undef STRICT_MM_TYPECHECKS3232+3333+#ifdef STRICT_MM_TYPECHECKS3434+/*3535+ * These are used to make use of C type-checking..3636+ */3737+typedef struct { unsigned long pte; } pte_t;3838+typedef struct { unsigned long pgd; } pgd_t;3939+typedef struct { unsigned long pgprot; } pgprot_t;4040+4141+#define pte_val(x) ((x).pte)4242+#define pgd_val(x) ((x).pgd)4343+#define pgprot_val(x) ((x).pgprot)4444+4545+#define __pte(x) ((pte_t) { (x) })4646+#define __pgd(x) ((pgd_t) { (x) })4747+#define __pgprot(x) ((pgprot_t) { (x) })4848+4949+#else5050+/*5151+ * .. 
while these make it easier on the compiler5252+ */5353+typedef unsigned long pte_t;5454+typedef unsigned long pgd_t;5555+typedef unsigned long pgprot_t;5656+5757+#define pte_val(x) (x)5858+#define pgd_val(x) (x)5959+#define pgprot_val(x) (x)6060+6161+#define __pte(x) (x)6262+#define __pgd(x) (x)6363+#define __pgprot(x) (x)6464+6565+#endif /* STRICT_MM_TYPECHECKS */6666+6767+typedef struct page *pgtable_t;6868+6969+extern int pfn_valid(unsigned long);7070+7171+#include <asm/memory.h>7272+7373+#endif /* !__ASSEMBLY__ */7474+7575+#define VM_DATA_DEFAULT_FLAGS \7676+ (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)7777+7878+#include <asm-generic/getorder.h>7979+8080+#endif
+98
arch/unicore32/include/asm/tlb.h
···11+/*22+ * linux/arch/unicore32/include/asm/tlb.h33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#ifndef __UNICORE_TLB_H__1313+#define __UNICORE_TLB_H__1414+1515+#include <asm/cacheflush.h>1616+#include <asm/tlbflush.h>1717+#include <asm/pgalloc.h>1818+1919+/*2020+ * TLB handling. This allows us to remove pages from the page2121+ * tables, and efficiently handle the TLB issues.2222+ */2323+struct mmu_gather {2424+ struct mm_struct *mm;2525+ unsigned int fullmm;2626+ unsigned long range_start;2727+ unsigned long range_end;2828+};2929+3030+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);3131+3232+static inline struct mmu_gather *3333+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)3434+{3535+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);3636+3737+ tlb->mm = mm;3838+ tlb->fullmm = full_mm_flush;3939+4040+ return tlb;4141+}4242+4343+static inline void4444+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)4545+{4646+ if (tlb->fullmm)4747+ flush_tlb_mm(tlb->mm);4848+4949+ /* keep the page table cache within bounds */5050+ check_pgt_cache();5151+5252+ put_cpu_var(mmu_gathers);5353+}5454+5555+/*5656+ * Memorize the range for the TLB flush.5757+ */5858+static inline void5959+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)6060+{6161+ if (!tlb->fullmm) {6262+ if (addr < tlb->range_start)6363+ tlb->range_start = addr;6464+ if (addr + PAGE_SIZE > tlb->range_end)6565+ tlb->range_end = addr + PAGE_SIZE;6666+ }6767+}6868+6969+/*7070+ * In the case of tlb vma handling, we can optimise these away in the7171+ * case where we're doing a full MM flush. 
When we're doing a munmap,7272+ * the vmas are adjusted to only cover the region to be torn down.7373+ */7474+static inline void7575+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)7676+{7777+ if (!tlb->fullmm) {7878+ flush_cache_range(vma, vma->vm_start, vma->vm_end);7979+ tlb->range_start = TASK_SIZE;8080+ tlb->range_end = 0;8181+ }8282+}8383+8484+static inline void8585+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)8686+{8787+ if (!tlb->fullmm && tlb->range_end > 0)8888+ flush_tlb_range(vma, tlb->range_start, tlb->range_end);8989+}9090+9191+#define tlb_remove_page(tlb, page) free_page_and_swap_cache(page)9292+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)9393+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)9494+#define pud_free_tlb(tlb, x, addr) do { } while (0)9595+9696+#define tlb_migrate_finish(mm) do { } while (0)9797+9898+#endif
+20
arch/unicore32/include/mach/map.h
/*
 * linux/arch/unicore32/include/mach/map.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Page table mapping constructs and function prototypes
 */
/* Guard added: the header had none, so double inclusion would trigger
 * macro redefinition warnings. */
#ifndef __MACH_PUV3_MAP_H__
#define __MACH_PUV3_MAP_H__

/* Memory-type indices used by the mapping code. */
#define MT_DEVICE		0
#define MT_DEVICE_CACHED	2
#define MT_KUSER		7
#define MT_HIGH_VECTORS		8
#define MT_MEMORY		9
#define MT_ROM			10

#endif /* __MACH_PUV3_MAP_H__ */
+58
arch/unicore32/include/mach/memory.h
···11+/*22+ * linux/arch/unicore32/include/mach/memory.h33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ */1212+#ifndef __MACH_PUV3_MEMORY_H__1313+#define __MACH_PUV3_MEMORY_H__1414+1515+#include <mach/hardware.h>1616+1717+/* Physical DRAM offset. */1818+#define PHYS_OFFSET UL(0x00000000)1919+/* The base address of exception vectors. */2020+#define VECTORS_BASE UL(0xffff0000)2121+/* The base address of kuser area. */2222+#define KUSER_BASE UL(0x80000000)2323+2424+#ifdef __ASSEMBLY__2525+/* The byte offset of the kernel image in RAM from the start of RAM. */2626+#define KERNEL_IMAGE_START 0x004080002727+#endif2828+2929+#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)3030+3131+void puv3_pci_adjust_zones(unsigned long *size, unsigned long *holes);3232+3333+#define arch_adjust_zones(size, holes) \3434+ puv3_pci_adjust_zones(size, holes)3535+3636+#endif3737+3838+/*3939+ * PCI controller in PKUnity-3 masks highest 5-bit for upstream channel,4040+ * so we must limit the DMA allocation within 128M physical memory for4141+ * supporting PCI devices.4242+ */4343+#define PCI_DMA_THRESHOLD (PHYS_OFFSET + SZ_128M - 1)4444+4545+#define is_pcibus_device(dev) (dev && \4646+ (strncmp(dev->bus->name, "pci", 3) == 0))4747+4848+#define __virt_to_pcibus(x) (__virt_to_phys(x) + PKUNITY_PCIAHB_BASE)4949+#define __pcibus_to_virt(x) __phys_to_virt((x) - PKUNITY_PCIAHB_BASE)5050+5151+/* kuser area */5252+#define KUSER_VECPAGE_BASE (KUSER_BASE + UL(0x3fff0000))5353+#define KUSER_UNIGFX_BASE (KUSER_BASE + PKUNITY_UNIGFX_MMAP_BASE)5454+/* kuser_vecpage (0xbfff0000) is ro, and vectors page (0xffff0000) is rw */5555+#define kuser_vecpage_to_vectors(x) ((x) - (KUSER_VECPAGE_BASE) \5656+ + (VECTORS_BASE))5757+5858+#endif
+50
arch/unicore32/mm/Kconfig
comment "Processor Type"

# Select CPU types depending on the architecture selected.  This selects
# which CPUs we support in the kernel image, and the compiler instruction
# optimiser behaviour.

config CPU_UCV2
	def_bool y

comment "Processor Features"

config CPU_ICACHE_DISABLE
	bool "Disable I-Cache (I-bit)"
	help
	  Say Y here to disable the processor instruction cache. Unless
	  you have a reason not to or are unsure, say N.

config CPU_DCACHE_DISABLE
	bool "Disable D-Cache (D-bit)"
	help
	  Say Y here to disable the processor data cache. Unless
	  you have a reason not to or are unsure, say N.

config CPU_DCACHE_WRITETHROUGH
	bool "Force write through D-cache"
	help
	  Say Y here to use the data cache in writethrough mode. Unless you
	  specifically require this or are unsure, say N.

config CPU_DCACHE_LINE_DISABLE
	bool "Disable D-cache line ops"
	default y
	help
	  Say Y here to disable the data cache line operations.

config CPU_TLB_SINGLE_ENTRY_DISABLE
	bool "Disable TLB single entry ops"
	default y
	help
	  Say Y here to disable the TLB single entry operations.

config SWIOTLB
	def_bool y

config IOMMU_HELPER
	def_bool SWIOTLB

config NEED_SG_DMA_LENGTH
	def_bool SWIOTLB
+15
arch/unicore32/mm/Makefile
#
# Makefile for the linux unicore-specific parts of the memory manager.
#

obj-y				:= extable.o fault.o init.o pgd.o mmu.o
obj-y				+= iomap.o flush.o ioremap.o

obj-$(CONFIG_SWIOTLB)		+= dma-swiotlb.o

obj-$(CONFIG_MODULES)		+= proc-syms.o

obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o

obj-$(CONFIG_CPU_UCV2)		+= cache-ucv2.o tlb-ucv2.o proc-ucv2.o
+517
arch/unicore32/mm/init.c
···11+/*22+ * linux/arch/unicore32/mm/init.c33+ *44+ * Copyright (C) 2010 GUAN Xue-tao55+ *66+ * This program is free software; you can redistribute it and/or modify77+ * it under the terms of the GNU General Public License version 2 as88+ * published by the Free Software Foundation.99+ */1010+#include <linux/kernel.h>1111+#include <linux/errno.h>1212+#include <linux/swap.h>1313+#include <linux/init.h>1414+#include <linux/bootmem.h>1515+#include <linux/mman.h>1616+#include <linux/nodemask.h>1717+#include <linux/initrd.h>1818+#include <linux/highmem.h>1919+#include <linux/gfp.h>2020+#include <linux/memblock.h>2121+#include <linux/sort.h>2222+#include <linux/dma-mapping.h>2323+2424+#include <asm/sections.h>2525+#include <asm/setup.h>2626+#include <asm/sizes.h>2727+#include <asm/tlb.h>2828+#include <mach/map.h>2929+3030+#include "mm.h"3131+3232+static unsigned long phys_initrd_start __initdata = 0x01000000;3333+static unsigned long phys_initrd_size __initdata = SZ_8M;3434+3535+static int __init early_initrd(char *p)3636+{3737+ unsigned long start, size;3838+ char *endp;3939+4040+ start = memparse(p, &endp);4141+ if (*endp == ',') {4242+ size = memparse(endp + 1, NULL);4343+4444+ phys_initrd_start = start;4545+ phys_initrd_size = size;4646+ }4747+ return 0;4848+}4949+early_param("initrd", early_initrd);5050+5151+/*5252+ * This keeps memory configuration data used by a couple memory5353+ * initialization functions, as well as show_mem() for the skipping5454+ * of holes in the memory map. 
It is populated by uc32_add_memory().5555+ */5656+struct meminfo meminfo;5757+5858+void show_mem(void)5959+{6060+ int free = 0, total = 0, reserved = 0;6161+ int shared = 0, cached = 0, slab = 0, i;6262+ struct meminfo *mi = &meminfo;6363+6464+ printk(KERN_DEFAULT "Mem-info:\n");6565+ show_free_areas();6666+6767+ for_each_bank(i, mi) {6868+ struct membank *bank = &mi->bank[i];6969+ unsigned int pfn1, pfn2;7070+ struct page *page, *end;7171+7272+ pfn1 = bank_pfn_start(bank);7373+ pfn2 = bank_pfn_end(bank);7474+7575+ page = pfn_to_page(pfn1);7676+ end = pfn_to_page(pfn2 - 1) + 1;7777+7878+ do {7979+ total++;8080+ if (PageReserved(page))8181+ reserved++;8282+ else if (PageSwapCache(page))8383+ cached++;8484+ else if (PageSlab(page))8585+ slab++;8686+ else if (!page_count(page))8787+ free++;8888+ else8989+ shared += page_count(page) - 1;9090+ page++;9191+ } while (page < end);9292+ }9393+9494+ printk(KERN_DEFAULT "%d pages of RAM\n", total);9595+ printk(KERN_DEFAULT "%d free pages\n", free);9696+ printk(KERN_DEFAULT "%d reserved pages\n", reserved);9797+ printk(KERN_DEFAULT "%d slab pages\n", slab);9898+ printk(KERN_DEFAULT "%d pages shared\n", shared);9999+ printk(KERN_DEFAULT "%d pages swap cached\n", cached);100100+}101101+102102+static void __init find_limits(unsigned long *min, unsigned long *max_low,103103+ unsigned long *max_high)104104+{105105+ struct meminfo *mi = &meminfo;106106+ int i;107107+108108+ *min = -1UL;109109+ *max_low = *max_high = 0;110110+111111+ for_each_bank(i, mi) {112112+ struct membank *bank = &mi->bank[i];113113+ unsigned long start, end;114114+115115+ start = bank_pfn_start(bank);116116+ end = bank_pfn_end(bank);117117+118118+ if (*min > start)119119+ *min = start;120120+ if (*max_high < end)121121+ *max_high = end;122122+ if (bank->highmem)123123+ continue;124124+ if (*max_low < end)125125+ *max_low = end;126126+ }127127+}128128+129129+static void __init uc32_bootmem_init(unsigned long start_pfn,130130+ unsigned long 
end_pfn)131131+{132132+ struct memblock_region *reg;133133+ unsigned int boot_pages;134134+ phys_addr_t bitmap;135135+ pg_data_t *pgdat;136136+137137+ /*138138+ * Allocate the bootmem bitmap page. This must be in a region139139+ * of memory which has already been mapped.140140+ */141141+ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);142142+ bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,143143+ __pfn_to_phys(end_pfn));144144+145145+ /*146146+ * Initialise the bootmem allocator, handing the147147+ * memory banks over to bootmem.148148+ */149149+ node_set_online(0);150150+ pgdat = NODE_DATA(0);151151+ init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);152152+153153+ /* Free the lowmem regions from memblock into bootmem. */154154+ for_each_memblock(memory, reg) {155155+ unsigned long start = memblock_region_memory_base_pfn(reg);156156+ unsigned long end = memblock_region_memory_end_pfn(reg);157157+158158+ if (end >= end_pfn)159159+ end = end_pfn;160160+ if (start >= end)161161+ break;162162+163163+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);164164+ }165165+166166+ /* Reserve the lowmem memblock reserved regions in bootmem. 
*/167167+ for_each_memblock(reserved, reg) {168168+ unsigned long start = memblock_region_reserved_base_pfn(reg);169169+ unsigned long end = memblock_region_reserved_end_pfn(reg);170170+171171+ if (end >= end_pfn)172172+ end = end_pfn;173173+ if (start >= end)174174+ break;175175+176176+ reserve_bootmem(__pfn_to_phys(start),177177+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);178178+ }179179+}180180+181181+static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,182182+ unsigned long max_high)183183+{184184+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];185185+ struct memblock_region *reg;186186+187187+ /*188188+ * initialise the zones.189189+ */190190+ memset(zone_size, 0, sizeof(zone_size));191191+192192+ /*193193+ * The memory size has already been determined. If we need194194+ * to do anything fancy with the allocation of this memory195195+ * to the zones, now is the time to do it.196196+ */197197+ zone_size[0] = max_low - min;198198+199199+ /*200200+ * Calculate the size of the holes.201201+ * holes = node_size - sum(bank_sizes)202202+ */203203+ memcpy(zhole_size, zone_size, sizeof(zhole_size));204204+ for_each_memblock(memory, reg) {205205+ unsigned long start = memblock_region_memory_base_pfn(reg);206206+ unsigned long end = memblock_region_memory_end_pfn(reg);207207+208208+ if (start < max_low) {209209+ unsigned long low_end = min(end, max_low);210210+ zhole_size[0] -= low_end - start;211211+ }212212+ }213213+214214+ /*215215+ * Adjust the sizes according to any special requirements for216216+ * this machine type.217217+ */218218+ arch_adjust_zones(zone_size, zhole_size);219219+220220+ free_area_init_node(0, zone_size, min, zhole_size);221221+}222222+223223+int pfn_valid(unsigned long pfn)224224+{225225+ return memblock_is_memory(pfn << PAGE_SHIFT);226226+}227227+EXPORT_SYMBOL(pfn_valid);228228+229229+static void uc32_memory_present(void)230230+{231231+}232232+233233+static int __init meminfo_cmp(const void *_a, const 
void *_b)234234+{235235+ const struct membank *a = _a, *b = _b;236236+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);237237+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;238238+}239239+240240+void __init uc32_memblock_init(struct meminfo *mi)241241+{242242+ int i;243243+244244+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),245245+ meminfo_cmp, NULL);246246+247247+ memblock_init();248248+ for (i = 0; i < mi->nr_banks; i++)249249+ memblock_add(mi->bank[i].start, mi->bank[i].size);250250+251251+ /* Register the kernel text, kernel data and initrd with memblock. */252252+ memblock_reserve(__pa(_text), _end - _text);253253+254254+#ifdef CONFIG_BLK_DEV_INITRD255255+ if (phys_initrd_size) {256256+ memblock_reserve(phys_initrd_start, phys_initrd_size);257257+258258+ /* Now convert initrd to virtual addresses */259259+ initrd_start = __phys_to_virt(phys_initrd_start);260260+ initrd_end = initrd_start + phys_initrd_size;261261+ }262262+#endif263263+264264+ uc32_mm_memblock_reserve();265265+266266+ memblock_analyze();267267+ memblock_dump_all();268268+}269269+270270+void __init bootmem_init(void)271271+{272272+ unsigned long min, max_low, max_high;273273+274274+ max_low = max_high = 0;275275+276276+ find_limits(&min, &max_low, &max_high);277277+278278+ uc32_bootmem_init(min, max_low);279279+280280+#ifdef CONFIG_SWIOTLB281281+ swiotlb_init(1);282282+#endif283283+ /*284284+ * Sparsemem tries to allocate bootmem in memory_present(),285285+ * so must be done after the fixed reservations286286+ */287287+ uc32_memory_present();288288+289289+ /*290290+ * sparse_init() needs the bootmem allocator up and running.291291+ */292292+ sparse_init();293293+294294+ /*295295+ * Now free the memory - free_area_init_node needs296296+ * the sparse mem_map arrays initialized by sparse_init()297297+ * for memmap_init_zone(), otherwise all PFNs are invalid.298298+ */299299+ uc32_bootmem_free(min, max_low, max_high);300300+301301+ high_memory = __va((max_low << PAGE_SHIFT) - 1) + 
1;302302+303303+ /*304304+ * This doesn't seem to be used by the Linux memory manager any305305+ * more, but is used by ll_rw_block. If we can get rid of it, we306306+ * also get rid of some of the stuff above as well.307307+ *308308+ * Note: max_low_pfn and max_pfn reflect the number of _pages_ in309309+ * the system, not the maximum PFN.310310+ */311311+ max_low_pfn = max_low - PHYS_PFN_OFFSET;312312+ max_pfn = max_high - PHYS_PFN_OFFSET;313313+}314314+315315+static inline int free_area(unsigned long pfn, unsigned long end, char *s)316316+{317317+ unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);318318+319319+ for (; pfn < end; pfn++) {320320+ struct page *page = pfn_to_page(pfn);321321+ ClearPageReserved(page);322322+ init_page_count(page);323323+ __free_page(page);324324+ pages++;325325+ }326326+327327+ if (size && s)328328+ printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);329329+330330+ return pages;331331+}332332+333333+static inline void334334+free_memmap(unsigned long start_pfn, unsigned long end_pfn)335335+{336336+ struct page *start_pg, *end_pg;337337+ unsigned long pg, pgend;338338+339339+ /*340340+ * Convert start_pfn/end_pfn to a struct page pointer.341341+ */342342+ start_pg = pfn_to_page(start_pfn - 1) + 1;343343+ end_pg = pfn_to_page(end_pfn);344344+345345+ /*346346+ * Convert to physical addresses, and347347+ * round start upwards and end downwards.348348+ */349349+ pg = PAGE_ALIGN(__pa(start_pg));350350+ pgend = __pa(end_pg) & PAGE_MASK;351351+352352+ /*353353+ * If there are free pages between these,354354+ * free the section of the memmap array.355355+ */356356+ if (pg < pgend)357357+ free_bootmem(pg, pgend - pg);358358+}359359+360360+/*361361+ * The mem_map array can get very big. 
Free the unused area of the memory map.362362+ */363363+static void __init free_unused_memmap(struct meminfo *mi)364364+{365365+ unsigned long bank_start, prev_bank_end = 0;366366+ unsigned int i;367367+368368+ /*369369+ * This relies on each bank being in address order.370370+ * The banks are sorted previously in bootmem_init().371371+ */372372+ for_each_bank(i, mi) {373373+ struct membank *bank = &mi->bank[i];374374+375375+ bank_start = bank_pfn_start(bank);376376+377377+ /*378378+ * If we had a previous bank, and there is a space379379+ * between the current bank and the previous, free it.380380+ */381381+ if (prev_bank_end && prev_bank_end < bank_start)382382+ free_memmap(prev_bank_end, bank_start);383383+384384+ /*385385+ * Align up here since the VM subsystem insists that the386386+ * memmap entries are valid from the bank end aligned to387387+ * MAX_ORDER_NR_PAGES.388388+ */389389+ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);390390+ }391391+}392392+393393+/*394394+ * mem_init() marks the free areas in the mem_map and tells us how much395395+ * memory is free. 
This is done after various parts of the system have396396+ * claimed their memory after the kernel image.397397+ */398398+void __init mem_init(void)399399+{400400+ unsigned long reserved_pages, free_pages;401401+ struct memblock_region *reg;402402+ int i;403403+404404+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;405405+406406+ /* this will put all unused low memory onto the freelists */407407+ free_unused_memmap(&meminfo);408408+409409+ totalram_pages += free_all_bootmem();410410+411411+ reserved_pages = free_pages = 0;412412+413413+ for_each_bank(i, &meminfo) {414414+ struct membank *bank = &meminfo.bank[i];415415+ unsigned int pfn1, pfn2;416416+ struct page *page, *end;417417+418418+ pfn1 = bank_pfn_start(bank);419419+ pfn2 = bank_pfn_end(bank);420420+421421+ page = pfn_to_page(pfn1);422422+ end = pfn_to_page(pfn2 - 1) + 1;423423+424424+ do {425425+ if (PageReserved(page))426426+ reserved_pages++;427427+ else if (!page_count(page))428428+ free_pages++;429429+ page++;430430+ } while (page < end);431431+ }432432+433433+ /*434434+ * Since our memory may not be contiguous, calculate the435435+ * real number of pages we have in this system436436+ */437437+ printk(KERN_INFO "Memory:");438438+ num_physpages = 0;439439+ for_each_memblock(memory, reg) {440440+ unsigned long pages = memblock_region_memory_end_pfn(reg) -441441+ memblock_region_memory_base_pfn(reg);442442+ num_physpages += pages;443443+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));444444+ }445445+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));446446+447447+ printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",448448+ nr_free_pages() << (PAGE_SHIFT-10),449449+ free_pages << (PAGE_SHIFT-10),450450+ reserved_pages << (PAGE_SHIFT-10),451451+ totalhigh_pages << (PAGE_SHIFT-10));452452+453453+ printk(KERN_NOTICE "Virtual kernel memory layout:\n"454454+ " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"455455+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"456456+ " 
lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"457457+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"458458+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"459459+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"460460+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n",461461+462462+ VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,463463+ DIV_ROUND_UP(PAGE_SIZE, SZ_1K),464464+ VMALLOC_START, VMALLOC_END,465465+ DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),466466+ PAGE_OFFSET, (unsigned long)high_memory,467467+ DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),468468+ MODULES_VADDR, MODULES_END,469469+ DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),470470+471471+ __init_begin, __init_end,472472+ DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),473473+ _stext, _etext,474474+ DIV_ROUND_UP((_etext - _stext), SZ_1K),475475+ _sdata, _edata,476476+ DIV_ROUND_UP((_edata - _sdata), SZ_1K));477477+478478+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);479479+ BUG_ON(TASK_SIZE > MODULES_VADDR);480480+481481+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {482482+ /*483483+ * On a machine this small we won't get484484+ * anywhere without overcommit, so turn485485+ * it on by default.486486+ */487487+ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;488488+ }489489+}490490+491491+void free_initmem(void)492492+{493493+ totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),494494+ __phys_to_pfn(__pa(__init_end)),495495+ "init");496496+}497497+498498+#ifdef CONFIG_BLK_DEV_INITRD499499+500500+static int keep_initrd;501501+502502+void free_initrd_mem(unsigned long start, unsigned long end)503503+{504504+ if (!keep_initrd)505505+ totalram_pages += free_area(__phys_to_pfn(__pa(start)),506506+ __phys_to_pfn(__pa(end)),507507+ "initrd");508508+}509509+510510+static int __init keepinitrd_setup(char *__unused)511511+{512512+ keep_initrd = 1;513513+ return 1;514514+}515515+516516+__setup("keepinitrd", keepinitrd_setup);517517+#endif
+56
arch/unicore32/mm/iomap.c
···11+/*22+ * linux/arch/unicore32/mm/iomap.c33+ *44+ * Code specific to PKUnity SoC and UniCore ISA55+ *66+ * Copyright (C) 2001-2010 GUAN Xue-tao77+ *88+ * This program is free software; you can redistribute it and/or modify99+ * it under the terms of the GNU General Public License version 2 as1010+ * published by the Free Software Foundation.1111+ *1212+ * Map IO port and PCI memory spaces so that {read,write}[bwl] can1313+ * be used to access this memory.1414+ */1515+#include <linux/module.h>1616+#include <linux/pci.h>1717+#include <linux/ioport.h>1818+#include <linux/io.h>1919+2020+#ifdef __io2121+void __iomem *ioport_map(unsigned long port, unsigned int nr)2222+{2323+ /* we map PC lagcy 64K IO port to PCI IO space 0x80030000 */2424+ return (void __iomem *) (unsigned long)2525+ io_p2v((port & 0xffff) + PKUNITY_PCILIO_BASE);2626+}2727+EXPORT_SYMBOL(ioport_map);2828+2929+void ioport_unmap(void __iomem *addr)3030+{3131+}3232+EXPORT_SYMBOL(ioport_unmap);3333+#endif3434+3535+#ifdef CONFIG_PCI3636+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)3737+{3838+ resource_size_t start = pci_resource_start(dev, bar);3939+ resource_size_t len = pci_resource_len(dev, bar);4040+ unsigned long flags = pci_resource_flags(dev, bar);4141+4242+ if (!len || !start)4343+ return NULL;4444+ if (maxlen && len > maxlen)4545+ len = maxlen;4646+ if (flags & IORESOURCE_IO)4747+ return ioport_map(start, len);4848+ if (flags & IORESOURCE_MEM) {4949+ if (flags & IORESOURCE_CACHEABLE)5050+ return ioremap(start, len);5151+ return ioremap_nocache(start, len);5252+ }5353+ return NULL;5454+}5555+EXPORT_SYMBOL(pci_iomap);5656+#endif
+261
arch/unicore32/mm/ioremap.c
/*
 * linux/arch/unicore32/mm/ioremap.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because UniCore only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_UNICORE_SECTION_MAPPING	0x80000000

/*
 * Map exactly one page at virtual address @virt to physical address
 * @phys with the PTE protection bits taken from @mtype.
 */
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	/* round size down to a whole number of 4MB sections (see above) */
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		/* snapshot the entry before clearing so we can free below */
		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/* one PMD per pgd entry here, so step a whole PGDIR at a time */
		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}

/*
 * Create a section (4MB block) mapping for [virt, virt+size) starting
 * at page frame @pfn, with section protection bits from @type.  The
 * caller must guarantee 4MB alignment of virt, size and the physical
 * address (checked in __uc32_ioremap_pfn_caller).  Always returns 0.
 */
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

/*
 * Core ioremap implementation: map @size bytes starting at page frame
 * @pfn (+@offset bytes into the first page) with memory type @mtype.
 *
 * @caller is the return address recorded in the vm area for /proc
 * style diagnostics.  Uses section mappings when the physical address,
 * size and allocated virtual address are all section-aligned, falling
 * back to PTE mappings otherwise.  Returns NULL on failure.
 */
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	/* all of phys, size and addr section-aligned? -> section mapping */
	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	/* re-apply the sub-page offset so callers get the exact address */
	return (void __iomem *) (offset + addr);
}

/*
 * Byte-address wrapper around __uc32_ioremap_pfn_caller(): splits
 * @phys_addr into a page frame number and in-page offset, rejecting
 * zero-size requests and ranges that wrap the 32-bit address space.
 */
void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		   unsigned int mtype)
{
	return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);

/* Map @size bytes at @phys_addr as uncached device memory. */
void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);

/* Map @size bytes at @phys_addr as cached device memory. */
void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);

/*
 * Undo an __uc32_ioremap*() mapping.  @io_addr may carry a sub-page
 * offset, which is masked off before the lookup.
 */
void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);

	/* vunmap() tears down the vm area and any remaining PTE mappings */
	vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);
+39
arch/unicore32/mm/mm.h
/*
 * linux/arch/unicore32/mm/mm.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
extern int sysctl_overcommit_memory;

/* PTE slot for address x within the page table addressed by top_pmd */
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

/*
 * Return the pmd entry covering @virt within @pgd.  The cast folds the
 * pud level, which this architecture does not use separately.
 */
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset((pud_t *)pgd, virt);
}

/* As pmd_off(), but for a kernel virtual address (init_mm page tables). */
static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * Hardware protection/cacheability bits for one memory type, used by
 * the mapping code (e.g. ioremap) at each page-table level.
 */
struct mem_type {
	unsigned int prot_pte;	/* bits for PTE (page) entries */
	unsigned int prot_l1;	/* bits for first-level table entries */
	unsigned int prot_sect;	/* bits for section (block) entries */
};

/* Look up the mem_type for an MT_* index; NULL if unknown. */
const struct mem_type *get_mem_type(unsigned int type);

extern void __flush_dcache_page(struct address_space *, struct page *);

void __init bootmem_init(void);
void uc32_mm_memblock_reserve(void);