Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: setup kernel memory layout early

Currently there are two separate places where kernel memory layout has
to be known and adjusted:
1. early kasan setup.
2. paging setup later.

Those two places had to be kept in sync and adjusted to reflect peculiar
technical details of one another. With additional factors which influence
the kernel memory layout, such as the ultravisor secure storage limit, the
complexity of keeping the two in sync grew even more.

Besides that, if we look forward towards creating an identity mapping and
enabling DAT before jumping into the uncompressed kernel, that would also
require full knowledge of and control over the kernel memory layout.

So, de-duplicate and move kernel memory layout setup logic into
the decompressor.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

+136 -96
+1
arch/s390/boot/boot.h
··· 24 24 25 25 extern const char kernel_version[]; 26 26 extern unsigned long memory_limit; 27 + extern unsigned long vmalloc_size; 27 28 extern int vmalloc_size_set; 28 29 extern int kaslr_enabled; 29 30
+5 -5
arch/s390/boot/ipl_parm.c
··· 12 12 #include "boot.h" 13 13 14 14 char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; 15 - struct ipl_parameter_block __bootdata_preserved(ipl_block); 16 - int __bootdata_preserved(ipl_block_valid); 17 - unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL; 18 - 19 - unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE; 20 15 int __bootdata(noexec_disabled); 21 16 17 + unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL; 18 + struct ipl_parameter_block __bootdata_preserved(ipl_block); 19 + int __bootdata_preserved(ipl_block_valid); 20 + 21 + unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE; 22 22 unsigned long memory_limit; 23 23 int vmalloc_size_set; 24 24 int kaslr_enabled;
+88
arch/s390/boot/startup.c
··· 5 5 #include <asm/sections.h> 6 6 #include <asm/cpu_mf.h> 7 7 #include <asm/setup.h> 8 + #include <asm/kasan.h> 8 9 #include <asm/kexec.h> 9 10 #include <asm/sclp.h> 10 11 #include <asm/diag.h> ··· 16 15 extern char __boot_data_start[], __boot_data_end[]; 17 16 extern char __boot_data_preserved_start[], __boot_data_preserved_end[]; 18 17 unsigned long __bootdata_preserved(__kaslr_offset); 18 + unsigned long __bootdata_preserved(VMALLOC_START); 19 + unsigned long __bootdata_preserved(VMALLOC_END); 20 + struct page *__bootdata_preserved(vmemmap); 21 + unsigned long __bootdata_preserved(vmemmap_size); 22 + unsigned long __bootdata_preserved(MODULES_VADDR); 23 + unsigned long __bootdata_preserved(MODULES_END); 19 24 unsigned long __bootdata(ident_map_size); 20 25 21 26 u64 __bootdata_preserved(stfle_fac_list[16]); ··· 179 172 #endif 180 173 } 181 174 175 + static void setup_kernel_memory_layout(void) 176 + { 177 + bool vmalloc_size_verified = false; 178 + unsigned long vmemmap_off; 179 + unsigned long vspace_left; 180 + unsigned long rte_size; 181 + unsigned long pages; 182 + unsigned long vmax; 183 + 184 + pages = ident_map_size / PAGE_SIZE; 185 + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ 186 + vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page); 187 + 188 + /* choose kernel address space layout: 4 or 3 levels. */ 189 + vmemmap_off = round_up(ident_map_size, _REGION3_SIZE); 190 + if (IS_ENABLED(CONFIG_KASAN) || 191 + vmalloc_size > _REGION2_SIZE || 192 + vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE) 193 + vmax = _REGION1_SIZE; 194 + else 195 + vmax = _REGION2_SIZE; 196 + 197 + /* keep vmemmap_off aligned to a top level region table entry */ 198 + rte_size = vmax == _REGION1_SIZE ? 
_REGION2_SIZE : _REGION3_SIZE; 199 + MODULES_END = vmax; 200 + if (is_prot_virt_host()) { 201 + /* 202 + * forcing modules and vmalloc area under the ultravisor 203 + * secure storage limit, so that any vmalloc allocation 204 + * we do could be used to back secure guest storage. 205 + */ 206 + adjust_to_uv_max(&MODULES_END); 207 + } 208 + 209 + #ifdef CONFIG_KASAN 210 + if (MODULES_END < vmax) { 211 + /* force vmalloc and modules below kasan shadow */ 212 + MODULES_END = min(MODULES_END, KASAN_SHADOW_START); 213 + } else { 214 + /* 215 + * leave vmalloc and modules above kasan shadow but make 216 + * sure they don't overlap with it 217 + */ 218 + vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN); 219 + vmalloc_size_verified = true; 220 + vspace_left = KASAN_SHADOW_START; 221 + } 222 + #endif 223 + MODULES_VADDR = MODULES_END - MODULES_LEN; 224 + VMALLOC_END = MODULES_VADDR; 225 + 226 + if (vmalloc_size_verified) { 227 + VMALLOC_START = VMALLOC_END - vmalloc_size; 228 + } else { 229 + vmemmap_off = round_up(ident_map_size, rte_size); 230 + 231 + if (vmemmap_off + vmemmap_size > VMALLOC_END || 232 + vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) { 233 + /* 234 + * allow vmalloc area to occupy up to 1/2 of 235 + * the rest virtual space left. 
236 + */ 237 + vmalloc_size = min(vmalloc_size, VMALLOC_END / 2); 238 + } 239 + VMALLOC_START = VMALLOC_END - vmalloc_size; 240 + vspace_left = VMALLOC_START; 241 + } 242 + 243 + pages = vspace_left / (PAGE_SIZE + sizeof(struct page)); 244 + pages = SECTION_ALIGN_UP(pages); 245 + vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size); 246 + /* keep vmemmap left most starting from a fresh region table entry */ 247 + vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size)); 248 + /* take care that identity map is lower then vmemmap */ 249 + ident_map_size = min(ident_map_size, vmemmap_off); 250 + vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page); 251 + VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START); 252 + vmemmap = (struct page *)vmemmap_off; 253 + } 254 + 182 255 /* 183 256 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's. 184 257 */ ··· 298 211 parse_boot_command_line(); 299 212 setup_ident_map_size(detect_memory()); 300 213 setup_vmalloc_size(); 214 + setup_kernel_memory_layout(); 301 215 302 216 random_lma = __kaslr_offset = 0; 303 217 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
+25
arch/s390/boot/uv.c
··· 44 44 prot_virt_guest = 1; 45 45 #endif 46 46 } 47 + 48 + #if IS_ENABLED(CONFIG_KVM) 49 + static bool has_uv_sec_stor_limit(void) 50 + { 51 + /* 52 + * keep these conditions in line with setup_uv() 53 + */ 54 + if (!is_prot_virt_host()) 55 + return false; 56 + 57 + if (is_prot_virt_guest()) 58 + return false; 59 + 60 + if (!test_facility(158)) 61 + return false; 62 + 63 + return !!uv_info.max_sec_stor_addr; 64 + } 65 + 66 + void adjust_to_uv_max(unsigned long *vmax) 67 + { 68 + if (has_uv_sec_stor_limit()) 69 + *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr); 70 + } 71 + #endif
-1
arch/s390/include/asm/kasan.h
··· 16 16 extern void kasan_early_init(void); 17 17 extern void kasan_copy_shadow_mapping(void); 18 18 extern void kasan_free_early_identity(void); 19 - extern unsigned long kasan_vmax; 20 19 21 20 /* 22 21 * Estimate kasan memory requirements, which it will reserve
+7 -6
arch/s390/include/asm/pgtable.h
··· 17 17 #include <linux/page-flags.h> 18 18 #include <linux/radix-tree.h> 19 19 #include <linux/atomic.h> 20 + #include <asm/sections.h> 20 21 #include <asm/bug.h> 21 22 #include <asm/page.h> 22 23 #include <asm/uv.h> ··· 87 86 * happen without trampolines and in addition the placement within a 88 87 * 2GB frame is branch prediction unit friendly. 89 88 */ 90 - extern unsigned long VMALLOC_START; 91 - extern unsigned long VMALLOC_END; 89 + extern unsigned long __bootdata_preserved(VMALLOC_START); 90 + extern unsigned long __bootdata_preserved(VMALLOC_END); 92 91 #define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN) 93 - extern struct page *vmemmap; 94 - extern unsigned long vmemmap_size; 92 + extern struct page *__bootdata_preserved(vmemmap); 93 + extern unsigned long __bootdata_preserved(vmemmap_size); 95 94 96 95 #define VMEM_MAX_PHYS ((unsigned long) vmemmap) 97 96 98 - extern unsigned long MODULES_VADDR; 99 - extern unsigned long MODULES_END; 97 + extern unsigned long __bootdata_preserved(MODULES_VADDR); 98 + extern unsigned long __bootdata_preserved(MODULES_END); 100 99 #define MODULES_VADDR MODULES_VADDR 101 100 #define MODULES_END MODULES_END 102 101 #define MODULES_LEN (1UL << 31)
-1
arch/s390/include/asm/setup.h
··· 89 89 90 90 extern int noexec_disabled; 91 91 extern unsigned long ident_map_size; 92 - extern unsigned long vmalloc_size; 93 92 94 93 /* The Write Back bit position in the physaddr is given by the SLPC PCI */ 95 94 extern unsigned long mio_wb_bit_mask;
+3 -47
arch/s390/kernel/setup.c
··· 96 96 97 97 int __bootdata(noexec_disabled); 98 98 unsigned long __bootdata(ident_map_size); 99 - unsigned long __bootdata(vmalloc_size); 100 99 struct mem_detect_info __bootdata(mem_detect); 101 100 102 101 struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table); ··· 544 545 #endif 545 546 } 546 547 547 - static void __init setup_ident_map_size(void) 548 + static void __init setup_memory_end(void) 548 549 { 549 - unsigned long vmax, tmp; 550 - 551 - /* Choose kernel address space layout: 3 or 4 levels. */ 552 - tmp = ident_map_size / PAGE_SIZE; 553 - tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 554 - if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) 555 - vmax = _REGION2_SIZE; /* 3-level kernel page table */ 556 - else 557 - vmax = _REGION1_SIZE; /* 4-level kernel page table */ 558 - /* module area is at the end of the kernel address space. */ 559 - MODULES_END = vmax; 560 - if (is_prot_virt_host()) 561 - adjust_to_uv_max(&MODULES_END); 562 - #ifdef CONFIG_KASAN 563 - vmax = _REGION1_SIZE; 564 - MODULES_END = kasan_vmax; 565 - #endif 566 - MODULES_VADDR = MODULES_END - MODULES_LEN; 567 - VMALLOC_END = MODULES_VADDR; 568 - VMALLOC_START = VMALLOC_END - vmalloc_size; 569 - 570 - /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 571 - tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); 572 - /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ 573 - tmp = SECTION_ALIGN_UP(tmp); 574 - tmp = VMALLOC_START - tmp * sizeof(struct page); 575 - tmp &= ~((vmax >> 11) - 1); /* align to page table level */ 576 - tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); 577 - vmemmap = (struct page *) tmp; 578 - 579 - /* Take care that ident_map_size <= vmemmap */ 580 - ident_map_size = min(ident_map_size, (unsigned long)vmemmap); 581 - #ifdef CONFIG_KASAN 582 - ident_map_size = min(ident_map_size, KASAN_SHADOW_START); 583 - #endif 584 - vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page); 585 - 
#ifdef CONFIG_KASAN 586 - /* move vmemmap above kasan shadow only if stands in a way */ 587 - if (KASAN_SHADOW_END > (unsigned long)vmemmap && 588 - (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START) 589 - vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END); 590 - #endif 591 - max_pfn = max_low_pfn = PFN_DOWN(ident_map_size); 592 550 memblock_remove(ident_map_size, ULONG_MAX); 593 - 551 + max_pfn = max_low_pfn = PFN_DOWN(ident_map_size); 594 552 pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20); 595 553 } 596 554 ··· 1088 1132 remove_oldmem(); 1089 1133 1090 1134 setup_uv(); 1091 - setup_ident_map_size(); 1135 + setup_memory_end(); 1092 1136 setup_memory(); 1093 1137 dma_contiguous_reserve(ident_map_size); 1094 1138 vmcp_cma_reserve();
+1 -7
arch/s390/kernel/uv.c
··· 52 52 unsigned long uv_stor_base; 53 53 54 54 /* 55 - * keep these conditions in line with kasan init code has_uv_sec_stor_limit() 55 + * keep these conditions in line with has_uv_sec_stor_limit() 56 56 */ 57 57 if (!is_prot_virt_host()) 58 58 return; ··· 89 89 fail: 90 90 pr_info("Disabling support for protected virtualization"); 91 91 prot_virt_host = 0; 92 - } 93 - 94 - void adjust_to_uv_max(unsigned long *vmax) 95 - { 96 - if (uv_info.max_sec_stor_addr) 97 - *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr); 98 92 } 99 93 100 94 /*
+6 -29
arch/s390/mm/kasan_init.c
··· 13 13 #include <asm/setup.h> 14 14 #include <asm/uv.h> 15 15 16 - unsigned long kasan_vmax; 17 16 static unsigned long segment_pos __initdata; 18 17 static unsigned long segment_low __initdata; 19 18 static unsigned long pgalloc_pos __initdata; ··· 250 251 } 251 252 } 252 253 253 - static bool __init has_uv_sec_stor_limit(void) 254 - { 255 - /* 256 - * keep these conditions in line with setup_uv() 257 - */ 258 - if (!is_prot_virt_host()) 259 - return false; 260 - 261 - if (is_prot_virt_guest()) 262 - return false; 263 - 264 - if (!test_facility(158)) 265 - return false; 266 - 267 - return !!uv_info.max_sec_stor_addr; 268 - } 269 - 270 254 void __init kasan_early_init(void) 271 255 { 272 - unsigned long untracked_mem_end; 273 256 unsigned long shadow_alloc_size; 274 - unsigned long vmax_unlimited; 275 257 unsigned long initrd_end; 276 258 unsigned long memsize; 277 259 unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); ··· 286 306 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE)); 287 307 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); 288 308 crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY); 289 - untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE; 290 - if (has_uv_sec_stor_limit()) 291 - kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr); 292 309 293 310 /* init kasan zero shadow */ 294 311 crst_table_init((unsigned long *)kasan_early_shadow_p4d, ··· 352 375 */ 353 376 /* populate kasan shadow (for identity mapping and zero page mapping) */ 354 377 kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP); 355 - if (IS_ENABLED(CONFIG_MODULES)) 356 - untracked_mem_end = kasan_vmax - MODULES_LEN; 357 378 if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { 358 - untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN; 359 379 /* shallowly populate kasan shadow for vmalloc and modules */ 360 - kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax), 380 + 
kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), 361 381 POPULATE_SHALLOW); 362 382 } 363 383 /* populate kasan shadow for untracked memory */ 364 - kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end), 384 + kasan_early_pgtable_populate(__sha(ident_map_size), 385 + IS_ENABLED(CONFIG_KASAN_VMALLOC) ? 386 + __sha(VMALLOC_START) : 387 + __sha(MODULES_VADDR), 365 388 POPULATE_ZERO_SHADOW); 366 - kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited), 389 + kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), 367 390 POPULATE_ZERO_SHADOW); 368 391 /* memory allocated for identity mapping structs will be freed later */ 369 392 pgalloc_freeable = pgalloc_pos;