Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/mm: use new mm defines instead of magic values

Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky.
f1c1174f c67da7c7

+103 -99
+1 -1
arch/s390/include/asm/elf.h
··· 191 191 } while (0) 192 192 193 193 #define CORE_DUMP_USE_REGSET 194 - #define ELF_EXEC_PAGESIZE 4096 194 + #define ELF_EXEC_PAGESIZE PAGE_SIZE 195 195 196 196 /* 197 197 * This is the base location for PIE (ET_DYN with INTERP) loads. On
+1 -1
arch/s390/include/asm/ipl.h
··· 81 81 struct ipl_block_fcp fcp; 82 82 struct ipl_block_ccw ccw; 83 83 } ipl_info; 84 - } __attribute__((packed,aligned(4096))); 84 + } __packed __aligned(PAGE_SIZE); 85 85 86 86 /* 87 87 * IPL validity flags
+3 -3
arch/s390/include/asm/mmu_context.h
··· 33 33 mm->context.use_cmma = 0; 34 34 #endif 35 35 switch (mm->context.asce_limit) { 36 - case 1UL << 42: 36 + case _REGION2_SIZE: 37 37 /* 38 38 * forked 3-level task, fall through to set new asce with new 39 39 * mm->pgd ··· 44 44 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 45 45 _ASCE_USER_BITS | _ASCE_TYPE_REGION3; 46 46 break; 47 - case 1UL << 53: 47 + case _REGION1_SIZE: 48 48 /* forked 4-level task, set new asce with new mm->pgd */ 49 49 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 50 50 _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 51 51 break; 52 - case 1UL << 31: 52 + case _REGION3_SIZE: 53 53 /* forked 2-level compat task, set new asce with new mm->pgd */ 54 54 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 55 55 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+6 -6
arch/s390/include/asm/pgalloc.h
··· 44 44 45 45 static inline void crst_table_init(unsigned long *crst, unsigned long entry) 46 46 { 47 - clear_table(crst, entry, sizeof(unsigned long)*2048); 47 + clear_table(crst, entry, _CRST_TABLE_SIZE); 48 48 } 49 49 50 50 static inline unsigned long pgd_entry_type(struct mm_struct *mm) 51 51 { 52 - if (mm->context.asce_limit <= (1UL << 31)) 52 + if (mm->context.asce_limit <= _REGION3_SIZE) 53 53 return _SEGMENT_ENTRY_EMPTY; 54 - if (mm->context.asce_limit <= (1UL << 42)) 54 + if (mm->context.asce_limit <= _REGION2_SIZE) 55 55 return _REGION3_ENTRY_EMPTY; 56 - if (mm->context.asce_limit <= (1UL << 53)) 56 + if (mm->context.asce_limit <= _REGION1_SIZE) 57 57 return _REGION2_ENTRY_EMPTY; 58 58 return _REGION1_ENTRY_EMPTY; 59 59 } ··· 121 121 122 122 if (!table) 123 123 return NULL; 124 - if (mm->context.asce_limit == (1UL << 31)) { 124 + if (mm->context.asce_limit == _REGION3_SIZE) { 125 125 /* Forking a compat process with 2 page table levels */ 126 126 if (!pgtable_pmd_page_ctor(virt_to_page(table))) { 127 127 crst_table_free(mm, table); ··· 133 133 134 134 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 135 135 { 136 - if (mm->context.asce_limit == (1UL << 31)) 136 + if (mm->context.asce_limit == _REGION3_SIZE) 137 137 pgtable_pmd_page_dtor(virt_to_page(pgd)); 138 138 crst_table_free(mm, (unsigned long *) pgd); 139 139 }
+1 -1
arch/s390/include/asm/qdio.h
··· 80 80 u32 qkey : 4; 81 81 u32 : 28; 82 82 struct qdesfmt0 qdf0[126]; 83 - } __attribute__ ((packed, aligned(4096))); 83 + } __packed __aligned(PAGE_SIZE); 84 84 85 85 #define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 86 86 #define QIB_RFLAGS_ENABLE_QEBSM 0x80
+3 -3
arch/s390/include/asm/tlb.h
··· 130 130 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 131 131 unsigned long address) 132 132 { 133 - if (tlb->mm->context.asce_limit <= (1UL << 31)) 133 + if (tlb->mm->context.asce_limit <= _REGION3_SIZE) 134 134 return; 135 135 pgtable_pmd_page_dtor(virt_to_page(pmd)); 136 136 tlb_remove_table(tlb, pmd); ··· 146 146 static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, 147 147 unsigned long address) 148 148 { 149 - if (tlb->mm->context.asce_limit <= (1UL << 53)) 149 + if (tlb->mm->context.asce_limit <= _REGION1_SIZE) 150 150 return; 151 151 tlb_remove_table(tlb, p4d); 152 152 } ··· 161 161 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 162 162 unsigned long address) 163 163 { 164 - if (tlb->mm->context.asce_limit <= (1UL << 42)) 164 + if (tlb->mm->context.asce_limit <= _REGION2_SIZE) 165 165 return; 166 166 tlb_remove_table(tlb, pud); 167 167 }
+1 -1
arch/s390/kernel/dumpstack.c
··· 76 76 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); 77 77 #ifdef CONFIG_CHECK_STACK 78 78 sp = __dump_trace(func, data, sp, 79 - S390_lowcore.panic_stack + frame_size - 4096, 79 + S390_lowcore.panic_stack + frame_size - PAGE_SIZE, 80 80 S390_lowcore.panic_stack + frame_size); 81 81 #endif 82 82 sp = __dump_trace(func, data, sp,
+3 -2
arch/s390/kernel/relocate_kernel.S
··· 7 7 */ 8 8 9 9 #include <linux/linkage.h> 10 + #include <asm/page.h> 10 11 #include <asm/sigp.h> 11 12 12 13 /* ··· 56 55 .back_pgm: 57 56 lmg %r0,%r15,gprregs-.base(%r13) 58 57 .top: 59 - lghi %r7,4096 # load PAGE_SIZE in r7 60 - lghi %r9,4096 # load PAGE_SIZE in r9 58 + lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7 59 + lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9 61 60 lg %r5,0(%r2) # read another word for indirection page 62 61 aghi %r2,8 # increment pointer 63 62 tml %r5,0x1 # is it a destination page?
+4 -4
arch/s390/kernel/setup.c
··· 305 305 /* 306 306 * Setup lowcore for boot cpu 307 307 */ 308 - BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096); 308 + BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE); 309 309 lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc)); 310 310 lc->restart_psw.mask = PSW_KERNEL_BITS; 311 311 lc->restart_psw.addr = (unsigned long) restart_int_handler; ··· 469 469 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 470 470 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 471 471 tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 472 - if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42)) 473 - vmax = 1UL << 42; /* 3-level kernel page table */ 472 + if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) 473 + vmax = _REGION2_SIZE; /* 3-level kernel page table */ 474 474 else 475 - vmax = 1UL << 53; /* 4-level kernel page table */ 475 + vmax = _REGION1_SIZE; /* 4-level kernel page table */ 476 476 /* module area is at the end of the kernel address space. */ 477 477 MODULES_END = vmax; 478 478 MODULES_VADDR = MODULES_END - MODULES_LEN;
+3 -1
arch/s390/kernel/vdso32/vdso32.lds.S
··· 2 2 * This is the infamous ld script for the 32 bits vdso 3 3 * library 4 4 */ 5 + 6 + #include <asm/page.h> 5 7 #include <asm/vdso.h> 6 8 7 9 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") ··· 93 91 .debug_ranges 0 : { *(.debug_ranges) } 94 92 .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } 95 93 96 - . = ALIGN(4096); 94 + . = ALIGN(PAGE_SIZE); 97 95 PROVIDE(_vdso_data = .); 98 96 99 97 /DISCARD/ : {
+3 -1
arch/s390/kernel/vdso64/vdso64.lds.S
··· 2 2 * This is the infamous ld script for the 64 bits vdso 3 3 * library 4 4 */ 5 + 6 + #include <asm/page.h> 5 7 #include <asm/vdso.h> 6 8 7 9 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") ··· 93 91 .debug_ranges 0 : { *(.debug_ranges) } 94 92 .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } 95 93 96 - . = ALIGN(4096); 94 + . = ALIGN(PAGE_SIZE); 97 95 PROVIDE(_vdso_data = .); 98 96 99 97 /DISCARD/ : {
+5 -5
arch/s390/mm/fault.c
··· 135 135 pr_alert("AS:%016lx ", asce); 136 136 switch (asce & _ASCE_TYPE_MASK) { 137 137 case _ASCE_TYPE_REGION1: 138 - table = table + ((address >> 53) & 0x7ff); 138 + table += (address & _REGION1_INDEX) >> _REGION1_SHIFT; 139 139 if (bad_address(table)) 140 140 goto bad; 141 141 pr_cont("R1:%016lx ", *table); ··· 144 144 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 145 145 /* fallthrough */ 146 146 case _ASCE_TYPE_REGION2: 147 - table = table + ((address >> 42) & 0x7ff); 147 + table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; 148 148 if (bad_address(table)) 149 149 goto bad; 150 150 pr_cont("R2:%016lx ", *table); ··· 153 153 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 154 154 /* fallthrough */ 155 155 case _ASCE_TYPE_REGION3: 156 - table = table + ((address >> 31) & 0x7ff); 156 + table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; 157 157 if (bad_address(table)) 158 158 goto bad; 159 159 pr_cont("R3:%016lx ", *table); ··· 162 162 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 163 163 /* fallthrough */ 164 164 case _ASCE_TYPE_SEGMENT: 165 - table = table + ((address >> 20) & 0x7ff); 165 + table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 166 166 if (bad_address(table)) 167 167 goto bad; 168 168 pr_cont("S:%016lx ", *table); ··· 170 170 goto out; 171 171 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 172 172 } 173 - table = table + ((address >> 12) & 0xff); 173 + table += (address & _PAGE_INDEX) >> _PAGE_SHIFT; 174 174 if (bad_address(table)) 175 175 goto bad; 176 176 pr_cont("P:%016lx ", *table);
+62 -62
arch/s390/mm/gmap.c
··· 36 36 unsigned long *table; 37 37 unsigned long etype, atype; 38 38 39 - if (limit < (1UL << 31)) { 40 - limit = (1UL << 31) - 1; 39 + if (limit < _REGION3_SIZE) { 40 + limit = _REGION3_SIZE - 1; 41 41 atype = _ASCE_TYPE_SEGMENT; 42 42 etype = _SEGMENT_ENTRY_EMPTY; 43 - } else if (limit < (1UL << 42)) { 44 - limit = (1UL << 42) - 1; 43 + } else if (limit < _REGION2_SIZE) { 44 + limit = _REGION2_SIZE - 1; 45 45 atype = _ASCE_TYPE_REGION3; 46 46 etype = _REGION3_ENTRY_EMPTY; 47 - } else if (limit < (1UL << 53)) { 48 - limit = (1UL << 53) - 1; 47 + } else if (limit < _REGION1_SIZE) { 48 + limit = _REGION1_SIZE - 1; 49 49 atype = _ASCE_TYPE_REGION2; 50 50 etype = _REGION2_ENTRY_EMPTY; 51 51 } else { ··· 65 65 spin_lock_init(&gmap->guest_table_lock); 66 66 spin_lock_init(&gmap->shadow_lock); 67 67 atomic_set(&gmap->ref_count, 1); 68 - page = alloc_pages(GFP_KERNEL, 2); 68 + page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 69 69 if (!page) 70 70 goto out_free; 71 71 page->index = 0; ··· 186 186 gmap_flush_tlb(gmap); 187 187 /* Free all segment & region tables. 
*/ 188 188 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) 189 - __free_pages(page, 2); 189 + __free_pages(page, CRST_ALLOC_ORDER); 190 190 gmap_radix_tree_free(&gmap->guest_to_host); 191 191 gmap_radix_tree_free(&gmap->host_to_guest); 192 192 ··· 306 306 unsigned long *new; 307 307 308 308 /* since we dont free the gmap table until gmap_free we can unlock */ 309 - page = alloc_pages(GFP_KERNEL, 2); 309 + page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 310 310 if (!page) 311 311 return -ENOMEM; 312 312 new = (unsigned long *) page_to_phys(page); ··· 321 321 } 322 322 spin_unlock(&gmap->guest_table_lock); 323 323 if (page) 324 - __free_pages(page, 2); 324 + __free_pages(page, CRST_ALLOC_ORDER); 325 325 return 0; 326 326 } 327 327 ··· 546 546 /* Create higher level tables in the gmap page table */ 547 547 table = gmap->table; 548 548 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) { 549 - table += (gaddr >> 53) & 0x7ff; 549 + table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT; 550 550 if ((*table & _REGION_ENTRY_INVALID) && 551 551 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, 552 - gaddr & 0xffe0000000000000UL)) 552 + gaddr & _REGION1_MASK)) 553 553 return -ENOMEM; 554 554 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 555 555 } 556 556 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) { 557 - table += (gaddr >> 42) & 0x7ff; 557 + table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT; 558 558 if ((*table & _REGION_ENTRY_INVALID) && 559 559 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY, 560 - gaddr & 0xfffffc0000000000UL)) 560 + gaddr & _REGION2_MASK)) 561 561 return -ENOMEM; 562 562 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 563 563 } 564 564 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) { 565 - table += (gaddr >> 31) & 0x7ff; 565 + table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT; 566 566 if ((*table & _REGION_ENTRY_INVALID) && 567 567 gmap_alloc_table(gmap, table, 
_SEGMENT_ENTRY_EMPTY, 568 - gaddr & 0xffffffff80000000UL)) 568 + gaddr & _REGION3_MASK)) 569 569 return -ENOMEM; 570 570 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 571 571 } 572 - table += (gaddr >> 20) & 0x7ff; 572 + table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 573 573 /* Walk the parent mm page table */ 574 574 mm = gmap->mm; 575 575 pgd = pgd_offset(mm, vmaddr); ··· 771 771 table = gmap->table; 772 772 switch (gmap->asce & _ASCE_TYPE_MASK) { 773 773 case _ASCE_TYPE_REGION1: 774 - table += (gaddr >> 53) & 0x7ff; 774 + table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT; 775 775 if (level == 4) 776 776 break; 777 777 if (*table & _REGION_ENTRY_INVALID) ··· 779 779 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 780 780 /* Fallthrough */ 781 781 case _ASCE_TYPE_REGION2: 782 - table += (gaddr >> 42) & 0x7ff; 782 + table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT; 783 783 if (level == 3) 784 784 break; 785 785 if (*table & _REGION_ENTRY_INVALID) ··· 787 787 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 788 788 /* Fallthrough */ 789 789 case _ASCE_TYPE_REGION3: 790 - table += (gaddr >> 31) & 0x7ff; 790 + table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT; 791 791 if (level == 2) 792 792 break; 793 793 if (*table & _REGION_ENTRY_INVALID) ··· 795 795 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 796 796 /* Fallthrough */ 797 797 case _ASCE_TYPE_SEGMENT: 798 - table += (gaddr >> 20) & 0x7ff; 798 + table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 799 799 if (level == 1) 800 800 break; 801 801 if (*table & _REGION_ENTRY_INVALID) 802 802 return NULL; 803 803 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 804 - table += (gaddr >> 12) & 0xff; 804 + table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT; 805 805 } 806 806 return table; 807 807 } ··· 1126 1126 table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */ 1127 1127 if (!table || *table & _PAGE_INVALID) 1128 1128 return; 1129 - 
gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1); 1129 + gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1); 1130 1130 ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table); 1131 1131 } 1132 1132 ··· 1144 1144 int i; 1145 1145 1146 1146 BUG_ON(!gmap_is_shadow(sg)); 1147 - for (i = 0; i < 256; i++, raddr += 1UL << 12) 1147 + for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE) 1148 1148 pgt[i] = _PAGE_INVALID; 1149 1149 } 1150 1150 ··· 1164 1164 ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */ 1165 1165 if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN)) 1166 1166 return; 1167 - gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1); 1168 - sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff)); 1167 + gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1); 1168 + sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT)); 1169 1169 gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr); 1170 1170 pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN); 1171 1171 *ste = _SEGMENT_ENTRY_EMPTY; ··· 1193 1193 1194 1194 BUG_ON(!gmap_is_shadow(sg)); 1195 1195 asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT; 1196 - for (i = 0; i < 2048; i++, raddr += 1UL << 20) { 1196 + for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) { 1197 1197 if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN)) 1198 1198 continue; 1199 1199 pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN); ··· 1222 1222 r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */ 1223 1223 if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN)) 1224 1224 return; 1225 - gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1); 1226 - r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff)); 1225 + gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1); 1226 + r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT)); 1227 1227 gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr); 1228 1228 sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN); 1229 1229 *r3e = _REGION3_ENTRY_EMPTY; ··· 1231 1231 
/* Free segment table */ 1232 1232 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT); 1233 1233 list_del(&page->lru); 1234 - __free_pages(page, 2); 1234 + __free_pages(page, CRST_ALLOC_ORDER); 1235 1235 } 1236 1236 1237 1237 /** ··· 1251 1251 1252 1252 BUG_ON(!gmap_is_shadow(sg)); 1253 1253 asce = (unsigned long) r3t | _ASCE_TYPE_REGION3; 1254 - for (i = 0; i < 2048; i++, raddr += 1UL << 31) { 1254 + for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) { 1255 1255 if (!(r3t[i] & _REGION_ENTRY_ORIGIN)) 1256 1256 continue; 1257 1257 sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN); ··· 1260 1260 /* Free segment table */ 1261 1261 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT); 1262 1262 list_del(&page->lru); 1263 - __free_pages(page, 2); 1263 + __free_pages(page, CRST_ALLOC_ORDER); 1264 1264 } 1265 1265 } 1266 1266 ··· 1280 1280 r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */ 1281 1281 if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN)) 1282 1282 return; 1283 - gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1); 1284 - r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff)); 1283 + gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1); 1284 + r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT)); 1285 1285 gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr); 1286 1286 r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN); 1287 1287 *r2e = _REGION2_ENTRY_EMPTY; ··· 1289 1289 /* Free region 3 table */ 1290 1290 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT); 1291 1291 list_del(&page->lru); 1292 - __free_pages(page, 2); 1292 + __free_pages(page, CRST_ALLOC_ORDER); 1293 1293 } 1294 1294 1295 1295 /** ··· 1309 1309 1310 1310 BUG_ON(!gmap_is_shadow(sg)); 1311 1311 asce = (unsigned long) r2t | _ASCE_TYPE_REGION2; 1312 - for (i = 0; i < 2048; i++, raddr += 1UL << 42) { 1312 + for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) { 1313 1313 if (!(r2t[i] & _REGION_ENTRY_ORIGIN)) 1314 1314 continue; 1315 1315 r3t = (unsigned long *)(r2t[i] & 
_REGION_ENTRY_ORIGIN); ··· 1318 1318 /* Free region 3 table */ 1319 1319 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT); 1320 1320 list_del(&page->lru); 1321 - __free_pages(page, 2); 1321 + __free_pages(page, CRST_ALLOC_ORDER); 1322 1322 } 1323 1323 } 1324 1324 ··· 1338 1338 r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */ 1339 1339 if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN)) 1340 1340 return; 1341 - gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1); 1342 - r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff)); 1341 + gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1); 1342 + r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT)); 1343 1343 gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr); 1344 1344 r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN); 1345 1345 *r1e = _REGION1_ENTRY_EMPTY; ··· 1347 1347 /* Free region 2 table */ 1348 1348 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT); 1349 1349 list_del(&page->lru); 1350 - __free_pages(page, 2); 1350 + __free_pages(page, CRST_ALLOC_ORDER); 1351 1351 } 1352 1352 1353 1353 /** ··· 1367 1367 1368 1368 BUG_ON(!gmap_is_shadow(sg)); 1369 1369 asce = (unsigned long) r1t | _ASCE_TYPE_REGION1; 1370 - for (i = 0; i < 2048; i++, raddr += 1UL << 53) { 1370 + for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) { 1371 1371 if (!(r1t[i] & _REGION_ENTRY_ORIGIN)) 1372 1372 continue; 1373 1373 r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN); ··· 1378 1378 /* Free region 2 table */ 1379 1379 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT); 1380 1380 list_del(&page->lru); 1381 - __free_pages(page, 2); 1381 + __free_pages(page, CRST_ALLOC_ORDER); 1382 1382 } 1383 1383 } 1384 1384 ··· 1535 1535 /* protect after insertion, so it will get properly invalidated */ 1536 1536 down_read(&parent->mm->mmap_sem); 1537 1537 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN, 1538 - ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096, 1538 + ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE, 1539 1539 PROT_READ, 
PGSTE_VSIE_BIT); 1540 1540 up_read(&parent->mm->mmap_sem); 1541 1541 spin_lock(&parent->shadow_lock); ··· 1578 1578 1579 1579 BUG_ON(!gmap_is_shadow(sg)); 1580 1580 /* Allocate a shadow region second table */ 1581 - page = alloc_pages(GFP_KERNEL, 2); 1581 + page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1582 1582 if (!page) 1583 1583 return -ENOMEM; 1584 1584 page->index = r2t & _REGION_ENTRY_ORIGIN; ··· 1614 1614 } 1615 1615 spin_unlock(&sg->guest_table_lock); 1616 1616 /* Make r2t read-only in parent gmap page table */ 1617 - raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1; 1617 + raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1; 1618 1618 origin = r2t & _REGION_ENTRY_ORIGIN; 1619 - offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096; 1620 - len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset; 1619 + offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 1620 + len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 1621 1621 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); 1622 1622 spin_lock(&sg->guest_table_lock); 1623 1623 if (!rc) { ··· 1634 1634 return rc; 1635 1635 out_free: 1636 1636 spin_unlock(&sg->guest_table_lock); 1637 - __free_pages(page, 2); 1637 + __free_pages(page, CRST_ALLOC_ORDER); 1638 1638 return rc; 1639 1639 } 1640 1640 EXPORT_SYMBOL_GPL(gmap_shadow_r2t); ··· 1662 1662 1663 1663 BUG_ON(!gmap_is_shadow(sg)); 1664 1664 /* Allocate a shadow region second table */ 1665 - page = alloc_pages(GFP_KERNEL, 2); 1665 + page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1666 1666 if (!page) 1667 1667 return -ENOMEM; 1668 1668 page->index = r3t & _REGION_ENTRY_ORIGIN; ··· 1697 1697 } 1698 1698 spin_unlock(&sg->guest_table_lock); 1699 1699 /* Make r3t read-only in parent gmap page table */ 1700 - raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2; 1700 + raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2; 1701 1701 origin = r3t & _REGION_ENTRY_ORIGIN; 1702 - offset = ((r3t & 
_REGION_ENTRY_OFFSET) >> 6) * 4096; 1703 - len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset; 1702 + offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 1703 + len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 1704 1704 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); 1705 1705 spin_lock(&sg->guest_table_lock); 1706 1706 if (!rc) { ··· 1717 1717 return rc; 1718 1718 out_free: 1719 1719 spin_unlock(&sg->guest_table_lock); 1720 - __free_pages(page, 2); 1720 + __free_pages(page, CRST_ALLOC_ORDER); 1721 1721 return rc; 1722 1722 } 1723 1723 EXPORT_SYMBOL_GPL(gmap_shadow_r3t); ··· 1745 1745 1746 1746 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE)); 1747 1747 /* Allocate a shadow segment table */ 1748 - page = alloc_pages(GFP_KERNEL, 2); 1748 + page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 1749 1749 if (!page) 1750 1750 return -ENOMEM; 1751 1751 page->index = sgt & _REGION_ENTRY_ORIGIN; ··· 1781 1781 } 1782 1782 spin_unlock(&sg->guest_table_lock); 1783 1783 /* Make sgt read-only in parent gmap page table */ 1784 - raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3; 1784 + raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3; 1785 1785 origin = sgt & _REGION_ENTRY_ORIGIN; 1786 - offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096; 1787 - len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset; 1786 + offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 1787 + len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 1788 1788 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); 1789 1789 spin_lock(&sg->guest_table_lock); 1790 1790 if (!rc) { ··· 1801 1801 return rc; 1802 1802 out_free: 1803 1803 spin_unlock(&sg->guest_table_lock); 1804 - __free_pages(page, 2); 1804 + __free_pages(page, CRST_ALLOC_ORDER); 1805 1805 return rc; 1806 1806 } 1807 1807 EXPORT_SYMBOL_GPL(gmap_shadow_sgt); ··· 1902 1902 } 1903 1903 spin_unlock(&sg->guest_table_lock); 1904 1904 /* Make pgt read-only 
in parent gmap page table (not the pgste) */ 1905 - raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT; 1905 + raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT; 1906 1906 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK; 1907 1907 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ); 1908 1908 spin_lock(&sg->guest_table_lock); ··· 2021 2021 } 2022 2022 /* Check for top level table */ 2023 2023 start = sg->orig_asce & _ASCE_ORIGIN; 2024 - end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096; 2024 + end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE; 2025 2025 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start && 2026 2026 gaddr < end) { 2027 2027 /* The complete shadow table has to go */ ··· 2032 2032 return; 2033 2033 } 2034 2034 /* Remove the page table tree from on specific entry */ 2035 - head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12); 2035 + head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT); 2036 2036 gmap_for_each_rmap_safe(rmap, rnext, head) { 2037 2037 bits = rmap->raddr & _SHADOW_RMAP_MASK; 2038 2038 raddr = rmap->raddr ^ bits; ··· 2076 2076 struct gmap *gmap, *sg, *next; 2077 2077 2078 2078 offset = ((unsigned long) pte) & (255 * sizeof(pte_t)); 2079 - offset = offset * (4096 / sizeof(pte_t)); 2079 + offset = offset * (PAGE_SIZE / sizeof(pte_t)); 2080 2080 rcu_read_lock(); 2081 2081 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { 2082 2082 spin_lock(&gmap->guest_table_lock);
+2 -3
arch/s390/mm/init.c
··· 84 84 psw_t psw; 85 85 86 86 init_mm.pgd = swapper_pg_dir; 87 - if (VMALLOC_END > (1UL << 42)) { 87 + if (VMALLOC_END > _REGION2_SIZE) { 88 88 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; 89 89 pgd_type = _REGION2_ENTRY_EMPTY; 90 90 } else { ··· 93 93 } 94 94 init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 95 95 S390_lowcore.kernel_asce = init_mm.context.asce; 96 - clear_table((unsigned long *) init_mm.pgd, pgd_type, 97 - sizeof(unsigned long)*2048); 96 + crst_table_init((unsigned long *) init_mm.pgd, pgd_type); 98 97 vmem_map_init(); 99 98 100 99 /* enable virtual mapping in kernel mode */
+5 -5
arch/s390/mm/pgalloc.c
··· 83 83 int rc, notify; 84 84 85 85 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ 86 - BUG_ON(mm->context.asce_limit < (1UL << 42)); 86 + BUG_ON(mm->context.asce_limit < _REGION2_SIZE); 87 87 if (end >= TASK_SIZE_MAX) 88 88 return -ENOMEM; 89 89 rc = 0; ··· 96 96 } 97 97 spin_lock_bh(&mm->page_table_lock); 98 98 pgd = (unsigned long *) mm->pgd; 99 - if (mm->context.asce_limit == (1UL << 42)) { 99 + if (mm->context.asce_limit == _REGION2_SIZE) { 100 100 crst_table_init(table, _REGION2_ENTRY_EMPTY); 101 101 p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd); 102 102 mm->pgd = (pgd_t *) table; 103 - mm->context.asce_limit = 1UL << 53; 103 + mm->context.asce_limit = _REGION1_SIZE; 104 104 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 105 105 _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 106 106 } else { ··· 124 124 pgd_t *pgd; 125 125 126 126 /* downgrade should only happen from 3 to 2 levels (compat only) */ 127 - BUG_ON(mm->context.asce_limit != (1UL << 42)); 127 + BUG_ON(mm->context.asce_limit != _REGION2_SIZE); 128 128 129 129 if (current->active_mm == mm) { 130 130 clear_user_asce(); ··· 133 133 134 134 pgd = mm->pgd; 135 135 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 136 - mm->context.asce_limit = 1UL << 31; 136 + mm->context.asce_limit = _REGION3_SIZE; 137 137 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 138 138 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 139 139 crst_table_free(mm, (unsigned long *) pgd);