Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[S390] Virtual memmap for s390.

Virtual memmap support for s390. Inspired by the ia64 implementation.

Unlike ia64, we need a mechanism that allows us to dynamically attach
shared memory regions.
These memory regions are accessed via the dcss device driver. dcss
implements the 'direct_access' operation, which requires struct pages
for every single shared page.
Therefore this implementation provides an interface to attach/detach
shared memory:

int add_shared_memory(unsigned long start, unsigned long size);
int remove_shared_memory(unsigned long start, unsigned long size);

The purpose of the add_shared_memory function is to add the given
memory range to the 1:1 mapping and to make sure that the
corresponding range in the vmemmap is backed with physical pages.
It also initialises the new struct pages.

remove_shared_memory in turn only invalidates the page table
entries in the 1:1 mapping. The page tables and the memory used for
struct pages in the vmemmap are currently not freed; they will be
reused when the next segment is attached.
Given that the maximum size of a shared memory region is 2GB and,
in addition, all regions must reside below 2GB, this is not too much
of a restriction, but there is room for improvement.
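
As an illustration, a minimal caller-side sketch (hypothetical code, not
part of this patch) of how a driver such as dcss uses the interface; it
mirrors the segment_load()/segment_unload() changes in
arch/s390/mm/extmem.c below:

/* hypothetical example: attach a segment, use it, detach it again */
static int example_attach_segment(unsigned long start, unsigned long size)
{
	int rc;

	rc = add_shared_memory(start, size);
	if (rc)		/* e.g. -ENOSPC on overlap, -ERANGE beyond max_pfn */
		return rc;

	/*
	 * The range is now part of the 1:1 mapping and has initialised
	 * struct pages, so pfn_to_page() works for every shared page.
	 */

	remove_shared_memory(start, size);
	return 0;
}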

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens, committed by Martin Schwidefsky
f4eb07c1 7f090145

+490 -212
+3
arch/s390/Kconfig
···
 
 source "mm/Kconfig"
 
+config HOLES_IN_ZONE
+	def_bool y
+
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
+1 -1
arch/s390/kernel/setup.c
···
 unsigned int console_irq = -1;
 unsigned long machine_flags = 0;
 
-struct mem_chunk memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
 unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
+1 -1
arch/s390/mm/Makefile
···
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y	:= init.o fault.o ioremap.o extmem.o mmap.o
+obj-y	:= init.o fault.o ioremap.o extmem.o mmap.o vmem.o
 obj-$(CONFIG_CMM) += cmm.o
 
+28 -82
arch/s390/mm/extmem.c
···
 #include <linux/bootmem.h>
 #include <linux/ctype.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
···
 }
 
 /*
- * check if the given segment collides with guest storage.
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_storage(struct dcss_segment *seg)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
-			continue;
-		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
-			continue;
-		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
-		    < (seg->start_addr >> 20))
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
-	struct list_head *l;
-	struct dcss_segment *tmp;
-
-	BUG_ON(!mutex_is_locked(&dcss_lock));
-	list_for_each(l, &dcss_list) {
-		tmp = list_entry(l, struct dcss_segment, list);
-		if ((tmp->start_addr >> 20) > (seg->end >> 20))
-			continue;
-		if ((tmp->end >> 20) < (seg->start_addr >> 20))
-			continue;
-		if (seg == tmp)
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment exceeds the kernel mapping range (detected or set via mem=)
- * returns 1 if this is the case, 0 if segment fits into the range
- */
-static inline int
-segment_exceeds_range (struct dcss_segment *seg)
-{
-	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
-	if (seg_last_pfn > max_pfn)
-		return 1;
-	return 0;
-}
-
-/*
  * get info about a segment
  * possible return values:
  * -ENOSYS  : we are not running on VM
···
 	rc = query_segment_type (seg);
 	if (rc < 0)
 		goto out_free;
-	if (segment_exceeds_range(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
-			" kernel mapping range\n",name);
-		rc = -ERANGE;
+
+	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -ENOSPC:
+		PRINT_WARN("segment_load: not loading segment %s - overlaps "
+			   "storage/segment\n", name);
+		goto out_free;
+	case -ERANGE:
+		PRINT_WARN("segment_load: not loading segment %s - exceeds "
+			   "kernel mapping range\n", name);
+		goto out_free;
+	default:
+		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
+			   name, rc);
 		goto out_free;
 	}
-	if (segment_overlaps_storage(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" storage\n",name);
-		rc = -ENOSPC;
-		goto out_free;
-	}
-	if (segment_overlaps_others(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" other segments\n",name);
-		rc = -EBUSY;
-		goto out_free;
-	}
+
 	if (do_nonshared)
 		dcss_command = DCSS_LOADNSR;
 	else
···
 		rc = dcss_diag_translate_rc (seg->end);
 		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
 				&seg->start_addr, &seg->end);
-		goto out_free;
+		goto out_shared;
 	}
 	seg->do_nonshared = do_nonshared;
 	atomic_set(&seg->ref_count, 1);
···
 			(void*)seg->start_addr, (void*)seg->end,
 			segtype_string[seg->vm_segtype]);
 	goto out;
+ out_shared:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
 	kfree(seg);
 out:
···
 			 "please report to linux390@de.ibm.com\n",name);
 		goto out_unlock;
 	}
-	if (atomic_dec_return(&seg->ref_count) == 0) {
-		list_del(&seg->list);
-		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-			  &dummy, &dummy);
-		kfree(seg);
-	}
+	if (atomic_dec_return(&seg->ref_count) != 0)
+		goto out_unlock;
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	list_del(&seg->list);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+	kfree(seg);
 out_unlock:
 	mutex_unlock(&dcss_lock);
 }
+41 -122
arch/s390/mm/init.c
···
 	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
+		if (!pfn_valid(i))
+			continue;
 		page = pfn_to_page(i);
 		total++;
 		if (PageReserved(page))
···
 	printk("%d pages swap cached\n",cached);
 }
 
+static void __init setup_ro_region(void)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t new_pte;
+	unsigned long address, end;
+
+	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&__end_rodata);
+
+	for (; address < end; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		pmd = pmd_offset(pgd, address);
+		pte = pte_offset_kernel(pmd, address);
+		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
+		set_pte(pte, new_pte);
+	}
+}
+
 extern unsigned long __initdata zholes_size[];
+extern void vmem_map_init(void);
 /*
  * paging_init() sets up the page tables
  */
-
-#ifndef CONFIG_64BIT
 void __init paging_init(void)
 {
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	pte_t   pte;
-	int     i;
-	unsigned long tmp;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long ro_start_pfn, ro_end_pfn;
+	pgd_t *pg_dir;
+	int i;
+	unsigned long pgdir_k;
+	static const int ssm_mask = 0x04000000L;
 	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long dma_pfn, high_pfn;
 
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
-
-	/* unmap whole virtual address space */
+	pg_dir = swapper_pg_dir;
 
-	pg_dir = swapper_pg_dir;
-
+#ifdef CONFIG_64BIT
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *) pg_dir++);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	while (pfn < max_low_pfn) {
-		/*
-		 * pg_table is physical at this point
-		 */
-		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-
-		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
-		pg_dir++;
-
-		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-			else
-				pte = pfn_pte(pfn, PAGE_KERNEL);
-			if (pfn >= max_low_pfn)
-				pte_val(pte) = _PAGE_TYPE_EMPTY;
-			set_pte(pg_table, pte);
-			pfn++;
-		}
-	}
+		pgd_clear(pg_dir + i);
+#else
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *)(pg_dir + i));
+#endif
+	vmem_map_init();
+	setup_ro_region();
 
 	S390_lowcore.kernel_asce = pgdir_k;
 
···
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
-}
-
-#else /* CONFIG_64BIT */
-
-void __init paging_init(void)
-{
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t   pte;
-	int     i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-		_KERN_REGION_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
-
 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
···
 	/* Initialize mem_map[]. */
 	free_area_init_node(0, &contig_page_data, zones_size,
 			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
-
-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
···
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
 	       (unsigned long)&__start_rodata,
 	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	printk("Virtual memmap size: %ldk\n",
+	       (max_pfn * sizeof(struct page)) >> 10);
 }
 
 void free_initmem(void)
+381
arch/s390/mm/vmem.c
···
+/*
+ *  arch/s390/mm/vmem.c
+ *
+ *  Copyright IBM Corp. 2006
+ *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+
+unsigned long vmalloc_end;
+EXPORT_SYMBOL(vmalloc_end);
+
+static struct page *vmem_map;
+static DEFINE_MUTEX(vmem_mutex);
+
+struct memory_segment {
+	struct list_head list;
+	unsigned long start;
+	unsigned long size;
+};
+
+static LIST_HEAD(mem_segs);
+
+void memmap_init(unsigned long size, int nid, unsigned long zone,
+		 unsigned long start_pfn)
+{
+	struct page *start, *end;
+	struct page *map_start, *map_end;
+	int i;
+
+	start = pfn_to_page(start_pfn);
+	end = start + size;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		unsigned long cstart, cend;
+
+		cstart = PFN_DOWN(memory_chunk[i].addr);
+		cend = cstart + PFN_DOWN(memory_chunk[i].size);
+
+		map_start = mem_map + cstart;
+		map_end = mem_map + cend;
+
+		if (map_start < start)
+			map_start = start;
+		if (map_end > end)
+			map_end = end;
+
+		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
+			/ sizeof(struct page);
+		map_end += ((PFN_ALIGN((unsigned long) map_end)
+			     - (unsigned long) map_end)
+			    / sizeof(struct page));
+
+		if (map_start < map_end)
+			memmap_init_zone((unsigned long)(map_end - map_start),
+					 nid, zone, page_to_pfn(map_start));
+	}
+}
+
+static inline void *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pmd_t *vmem_pmd_alloc(void)
+{
+	pmd_t *pmd;
+	int i;
+
+	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+	if (!pmd)
+		return NULL;
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		pmd_clear(pmd + i);
+	return pmd;
+}
+
+static inline pte_t *vmem_pte_alloc(void)
+{
+	pte_t *pte;
+	pte_t empty_pte;
+	int i;
+
+	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
+	if (!pte)
+		return NULL;
+	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, empty_pte);
+	return pte;
+}
+
+/*
+ * Add a physical memory range to the 1:1 mapping.
+ */
+static int vmem_add_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte(pt_dir, pte);
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+/*
+ * Remove a physical memory range from the 1:1 mapping.
+ * Currently only invalidates page table entries.
+ */
+static void vmem_remove_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+
+	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir))
+			continue;
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir))
+			continue;
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		set_pte(pt_dir, pte);
+	}
+	flush_tlb_kernel_range(start, start + size);
+}
+
+/*
+ * Add a backed mem_map array to the virtual mem_map array.
+ */
+static int vmem_add_mem_map(unsigned long start, unsigned long size)
+{
+	unsigned long address, start_addr, end_addr;
+	struct page *map_start, *map_end;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	map_start = vmem_map + PFN_DOWN(start);
+	map_end = vmem_map + PFN_DOWN(start + size);
+
+	start_addr = (unsigned long) map_start & PAGE_MASK;
+	end_addr = PFN_ALIGN((unsigned long) map_end);
+
+	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			unsigned long new_page;
+
+			new_page =__pa(vmem_alloc_pages(0));
+			if (!new_page)
+				goto out;
+			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
+			set_pte(pt_dir, pte);
+		}
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start_addr, end_addr);
+	return ret;
+}
+
+static int vmem_add_mem(unsigned long start, unsigned long size)
+{
+	int ret;
+
+	ret = vmem_add_range(start, size);
+	if (ret)
+		return ret;
+	return vmem_add_mem_map(start, size);
+}
+
+/*
+ * Add memory segment to the segment list if it doesn't overlap with
+ * an already present segment.
+ */
+static int insert_memory_segment(struct memory_segment *seg)
+{
+	struct memory_segment *tmp;
+
+	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	    seg->start + seg->size < seg->start)
+		return -ERANGE;
+
+	list_for_each_entry(tmp, &mem_segs, list) {
+		if (seg->start >= tmp->start + tmp->size)
+			continue;
+		if (seg->start + seg->size <= tmp->start)
+			continue;
+		return -ENOSPC;
+	}
+	list_add(&seg->list, &mem_segs);
+	return 0;
+}
+
+/*
+ * Remove memory segment from the segment list.
+ */
+static void remove_memory_segment(struct memory_segment *seg)
+{
+	list_del(&seg->list);
+}
+
+static void __remove_shared_memory(struct memory_segment *seg)
+{
+	remove_memory_segment(seg);
+	vmem_remove_range(seg->start, seg->size);
+}
+
+int remove_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+
+	ret = -ENOENT;
+	list_for_each_entry(seg, &mem_segs, list) {
+		if (seg->start == start && seg->size == size)
+			break;
+	}
+
+	if (seg->start != start || seg->size != size)
+		goto out;
+
+	ret = 0;
+	__remove_shared_memory(seg);
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+int add_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	struct page *page;
+	unsigned long pfn, num_pfn, end_pfn;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+	ret = -ENOMEM;
+	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		goto out;
+	seg->start = start;
+	seg->size = size;
+
+	ret = insert_memory_segment(seg);
+	if (ret)
+		goto out_free;
+
+	ret = vmem_add_mem(start, size);
+	if (ret)
+		goto out_remove;
+
+	pfn = PFN_DOWN(start);
+	num_pfn = PFN_DOWN(size);
+	end_pfn = pfn + num_pfn;
+
+	page = pfn_to_page(pfn);
+	memset(page, 0, num_pfn * sizeof(struct page));
+
+	for (; pfn < end_pfn; pfn++) {
+		page = pfn_to_page(pfn);
+		init_page_count(page);
+		reset_page_mapcount(page);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->lru);
+	}
+	goto out;
+
+out_remove:
+	__remove_shared_memory(seg);
+out_free:
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+/*
+ * map whole physical memory to virtual memory (identity mapping)
+ */
+void __init vmem_map_init(void)
+{
+	unsigned long map_size;
+	int i;
+
+	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+	vmem_map = (struct page *) vmalloc_end;
+	NODE_DATA(0)->node_mem_map = vmem_map;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+}
+
+/*
+ * Convert memory chunk array to a memory segment list so there is a single
+ * list that contains both r/w memory and shared memory segments.
+ */
+static int __init vmem_convert_memory_chunk(void)
+{
+	struct memory_segment *seg;
+	int i;
+
+	mutex_lock(&vmem_mutex);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (!memory_chunk[i].size)
+			continue;
+		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+		if (!seg)
+			panic("Out of memory...\n");
+		seg->start = memory_chunk[i].addr;
+		seg->size = memory_chunk[i].size;
+		insert_memory_segment(seg);
+	}
+	mutex_unlock(&vmem_mutex);
+	return 0;
+}
+
+core_initcall(vmem_convert_memory_chunk);
+20 -2
include/asm-s390/page.h
···
 	return skey;
 }
 
+extern unsigned long max_pfn;
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	unsigned long dummy;
+	int ccode;
+
+	if (pfn >= max_pfn)
+		return 0;
+
+	asm volatile(
+		"	lra	%0,0(%2)\n"
+		"	ipm	%1\n"
+		"	srl	%1,28\n"
+		: "=d" (dummy), "=d" (ccode)
+		: "a" (pfn << PAGE_SHIFT)
+		: "cc");
+	return !ccode;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
···
 #define __va(x)			(void *)(unsigned long)(x)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+3
include/asm-s390/pgalloc.h
···
  * Page allocation orders.
  */
 #ifndef __s390x__
+# define PTE_ALLOC_ORDER	0
+# define PMD_ALLOC_ORDER	0
 # define PGD_ALLOC_ORDER	1
 #else /* __s390x__ */
+# define PTE_ALLOC_ORDER	0
 # define PMD_ALLOC_ORDER	2
 # define PGD_ALLOC_ORDER	2
 #endif /* __s390x__ */
+12 -4
include/asm-s390/pgtable.h
···
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
+extern unsigned long vmalloc_end;
 #define VMALLOC_OFFSET	(8*1024*1024)
 #define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) \
			 & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END	vmalloc_end
 
 /*
  * We need some free virtual space to be able to do vmalloc.
  * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
  * area. On a machine with 2GB memory we make sure that we
  * have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 1GB.
+ * with 4TB we make sure we have at least 128GB.
  */
 #ifndef __s390x__
 #define VMALLOC_MIN_SIZE	0x8000000UL
-#define VMALLOC_END	0x80000000UL
+#define VMALLOC_END_INIT	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_MIN_SIZE	0x40000000UL
-#define VMALLOC_END	0x40000000000UL
+#define VMALLOC_MIN_SIZE	0x2000000000UL
+#define VMALLOC_END_INIT	0x40000000000UL
 #endif /* __s390x__ */
 
 /*
···
 
 #define kern_addr_valid(addr)	(1)
 
+extern int add_shared_memory(unsigned long start, unsigned long size);
+extern int remove_shared_memory(unsigned long start, unsigned long size);
+
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()	do { } while (0)
+
+#define __HAVE_ARCH_MEMMAP_INIT
+extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
 #define __HAVE_ARCH_PTEP_ESTABLISH
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS