Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/boot: allow setup of different virtual address types

Currently the decompressor sets up only identity mapping.
Allow adding more address range types as a prerequisite
for allocation of kernel fixed mappings.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

Authored by Alexander Gordeev and committed by Heiko Carstens.
Commit hashes: e0e0a87b, 07493a9c

+33 -15 (33 additions, 15 deletions)
arch/s390/boot/vmem.c
···
 unsigned long __bootdata(pgalloc_end);
 unsigned long __bootdata(pgalloc_low);

+enum populate_mode {
+	POPULATE_ONE2ONE,
+};
+
 static void boot_check_oom(void)
 {
 	if (pgalloc_pos < pgalloc_low)
···
 	return pte;
 }

+static unsigned long _pa(unsigned long addr, enum populate_mode mode)
+{
+	switch (mode) {
+	case POPULATE_ONE2ONE:
+		return addr;
+	default:
+		return -1;
+	}
+}
+
 static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
 {
 	return machine.has_edat2 &&
···
 	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
 }

-static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end)
+static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
+				 enum populate_mode mode)
 {
 	unsigned long next;
 	pte_t *pte, entry;
···
 	pte = pte_offset_kernel(pmd, addr);
 	for (; addr < end; addr += PAGE_SIZE, pte++) {
 		if (pte_none(*pte)) {
-			entry = __pte(__pa(addr));
+			entry = __pte(_pa(addr, mode));
 			entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
 			set_pte(pte, entry);
 		}
 	}
 }

-static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end)
+static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
+				 enum populate_mode mode)
 {
 	unsigned long next;
 	pmd_t *pmd, entry;
···
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd)) {
 			if (can_large_pmd(pmd, addr, next)) {
-				entry = __pmd(__pa(addr));
+				entry = __pmd(_pa(addr, mode));
 				entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
 				set_pmd(pmd, entry);
 				continue;
···
 		} else if (pmd_large(*pmd)) {
 			continue;
 		}
-		pgtable_pte_populate(pmd, addr, next);
+		pgtable_pte_populate(pmd, addr, next, mode);
 	}
 }

-static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end)
+static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
+				 enum populate_mode mode)
 {
 	unsigned long next;
 	pud_t *pud, entry;
···
 		next = pud_addr_end(addr, end);
 		if (pud_none(*pud)) {
 			if (can_large_pud(pud, addr, next)) {
-				entry = __pud(__pa(addr));
+				entry = __pud(_pa(addr, mode));
 				entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
 				set_pud(pud, entry);
 				continue;
···
 		} else if (pud_large(*pud)) {
 			continue;
 		}
-		pgtable_pmd_populate(pud, addr, next);
+		pgtable_pmd_populate(pud, addr, next, mode);
 	}
 }

-static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
+				 enum populate_mode mode)
 {
 	unsigned long next;
 	p4d_t *p4d;
···
 			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
 			p4d_populate(&init_mm, p4d, pud);
 		}
-		pgtable_pud_populate(p4d, addr, next);
+		pgtable_pud_populate(p4d, addr, next, mode);
 	}
 }

-static void pgtable_populate(unsigned long addr, unsigned long end)
+static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
 {
 	unsigned long next;
 	pgd_t *pgd;
···
 			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
 			pgd_populate(&init_mm, pgd, p4d);
 		}
-		pgtable_p4d_populate(pgd, addr, next);
+		pgtable_p4d_populate(pgd, addr, next, mode);
 	}
 }
···
 	do {
 		pgalloc_pos_prev = pgalloc_pos;
-		pgtable_populate(pgalloc_pos, pgalloc_end_curr);
+		pgtable_populate(pgalloc_pos, pgalloc_end_curr, POPULATE_ONE2ONE);
 		pgalloc_end_curr = pgalloc_pos_prev;
 	} while (pgalloc_pos < pgalloc_pos_prev);
 }
···
 	 * of pgalloc_pos finalized with a call to pgtable_populate_end().
 	 */
 	pgtable_populate_begin(online_end);
-	pgtable_populate(0, sizeof(struct lowcore));
-	pgtable_populate(0, online_end);
+	pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
+	pgtable_populate(0, online_end, POPULATE_ONE2ONE);
 	pgtable_populate_end();

 	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;