Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: mm: Recreate kernel mappings in early_paging_init()

This patch adds a step in the init sequence, in order to recreate
the kernel code/data page table mappings prior to full paging
initialization. This is necessary on LPAE systems that run from
a physical address space located above the 4G limit. On these systems,
this implementation provides a machine descriptor hook that allows
the PHYS_OFFSET to be overridden in a machine-specific fashion.

Cc: Russell King <linux@arm.linux.org.uk>

Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: R Sricharan <r.sricharan@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

+87
+1
arch/arm/include/asm/mach/arch.h
··· 49 49 bool (*smp_init)(void); 50 50 void (*fixup)(struct tag *, char **, 51 51 struct meminfo *); 52 + void (*init_meminfo)(void); 52 53 void (*reserve)(void);/* reserve mem blocks */ 53 54 void (*map_io)(void);/* IO mapping function */ 54 55 void (*init_early)(void);
+4
arch/arm/kernel/setup.c
··· 73 73 #endif 74 74 75 75 extern void paging_init(const struct machine_desc *desc); 76 + extern void early_paging_init(const struct machine_desc *, 77 + struct proc_info_list *); 76 78 extern void sanity_check_meminfo(void); 77 79 extern enum reboot_mode reboot_mode; 78 80 extern void setup_dma_zone(const struct machine_desc *desc); ··· 880 878 parse_early_param(); 881 879 882 880 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); 881 + 882 + early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); 883 883 sanity_check_meminfo(); 884 884 arm_memblock_init(&meminfo, mdesc); 885 885
+82
arch/arm/mm/mmu.c
··· 28 28 #include <asm/highmem.h> 29 29 #include <asm/system_info.h> 30 30 #include <asm/traps.h> 31 + #include <asm/procinfo.h> 32 + #include <asm/memory.h> 31 33 32 34 #include <asm/mach/arch.h> 33 35 #include <asm/mach/map.h> ··· 1316 1314 create_mapping(&map); 1317 1315 } 1318 1316 } 1317 + 1318 + #ifdef CONFIG_ARM_LPAE 1319 + /* 1320 + * early_paging_init() recreates boot time page table setup, allowing machines 1321 + * to switch over to a high (>4G) address space on LPAE systems 1322 + */ 1323 + void __init early_paging_init(const struct machine_desc *mdesc, 1324 + struct proc_info_list *procinfo) 1325 + { 1326 + pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags; 1327 + unsigned long map_start, map_end; 1328 + pgd_t *pgd0, *pgdk; 1329 + pud_t *pud0, *pudk, *pud_start; 1330 + pmd_t *pmd0, *pmdk; 1331 + phys_addr_t phys; 1332 + int i; 1333 + 1334 + if (!(mdesc->init_meminfo)) 1335 + return; 1336 + 1337 + /* remap kernel code and data */ 1338 + map_start = init_mm.start_code; 1339 + map_end = init_mm.brk; 1340 + 1341 + /* get a handle on things... */ 1342 + pgd0 = pgd_offset_k(0); 1343 + pud_start = pud0 = pud_offset(pgd0, 0); 1344 + pmd0 = pmd_offset(pud0, 0); 1345 + 1346 + pgdk = pgd_offset_k(map_start); 1347 + pudk = pud_offset(pgdk, map_start); 1348 + pmdk = pmd_offset(pudk, map_start); 1349 + 1350 + mdesc->init_meminfo(); 1351 + 1352 + /* Run the patch stub to update the constants */ 1353 + fixup_pv_table(&__pv_table_begin, 1354 + (&__pv_table_end - &__pv_table_begin) << 2); 1355 + 1356 + /* 1357 + * Cache cleaning operations for self-modifying code 1358 + * We should clean the entries by MVA but running a 1359 + * for loop over every pv_table entry pointer would 1360 + * just complicate the code. 
1361 + */ 1362 + flush_cache_louis(); 1363 + dsb(); 1364 + isb(); 1365 + 1366 + /* remap level 1 table */ 1367 + for (i = 0; i < PTRS_PER_PGD; pud0++, i++) { 1368 + set_pud(pud0, 1369 + __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); 1370 + pmd0 += PTRS_PER_PMD; 1371 + } 1372 + 1373 + /* remap pmds for kernel mapping */ 1374 + phys = __pa(map_start) & PMD_MASK; 1375 + do { 1376 + *pmdk++ = __pmd(phys | pmdprot); 1377 + phys += PMD_SIZE; 1378 + } while (phys < map_end); 1379 + 1380 + flush_cache_all(); 1381 + cpu_switch_mm(pgd0, &init_mm); 1382 + cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET); 1383 + local_flush_bp_all(); 1384 + local_flush_tlb_all(); 1385 + } 1386 + 1387 + #else 1388 + 1389 + void __init early_paging_init(const struct machine_desc *mdesc, 1390 + struct proc_info_list *procinfo) 1391 + { 1392 + if (mdesc->init_meminfo) 1393 + mdesc->init_meminfo(); 1394 + } 1395 + 1396 + #endif 1319 1397 1320 1398 /* 1321 1399 * paging_init() sets up the page tables, initialises the zone memory