Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MIPS] Load modules to CKSEG0 if CONFIG_BUILD_ELF64=n

This is a patch to load 64-bit modules to CKSEG0 so that they can be
compiled with the -msym32 option. This makes each module ~10% smaller.

* introduce MODULE_START and MODULE_END
* custom module_alloc()
* PGD for modules
* change XTLB refill handler synthesizer
* enable -msym32 for modules again
(revert ca78b1a5c6a6e70e052d3ea253828e49b5d07c8a)

New XTLB refill handler looks like this:

80000080 dmfc0 k0,C0_BADVADDR
80000084 bltz k0,800000e4 # goto l_module_alloc
80000088 lui k1,0x8046 # %high(pgd_current)
8000008c ld k1,24600(k1) # %low(pgd_current)
80000090 dsrl k0,k0,0x1b # l_vmalloc_done:
80000094 andi k0,k0,0x1ff8
80000098 daddu k1,k1,k0
8000009c dmfc0 k0,C0_BADVADDR
800000a0 ld k1,0(k1)
800000a4 dsrl k0,k0,0x12
800000a8 andi k0,k0,0xff8
800000ac daddu k1,k1,k0
800000b0 dmfc0 k0,C0_XCONTEXT
800000b4 ld k1,0(k1)
800000b8 andi k0,k0,0xff0
800000bc daddu k1,k1,k0
800000c0 ld k0,0(k1)
800000c4 ld k1,8(k1)
800000c8 dsrl k0,k0,0x6
800000cc mtc0 k0,C0_ENTRYLO0
800000d0 dsrl k1,k1,0x6
800000d4 mtc0 k1,C0_ENTRYLO1
800000d8 nop
800000dc tlbwr
800000e0 eret
800000e4 dsll k1,k0,0x2 # l_module_alloc:
800000e8 bgez k1,80000008 # goto l_vmalloc
800000ec lui k1,0xc000
800000f0 dsubu k0,k0,k1
800000f4 lui k1,0x8046 # %high(module_pg_dir)
800000f8 beq zero,zero,80000000
800000fc nop
80000000 beq zero,zero,80000090 # goto l_vmalloc_done
80000004 daddiu k1,k1,0x4000
80000008 dsll32 k1,k1,0x0 # l_vmalloc:
8000000c dsubu k0,k0,k1
80000010 beq zero,zero,80000090 # goto l_vmalloc_done
80000014 lui k1,0x8046 # %high(swapper_pg_dir)

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Atsushi Nemoto and committed by
Ralf Baechle
656be92f 56ae5833

+94 -3
+1 -3
arch/mips/Makefile
··· 63 63 ifdef CONFIG_BUILD_ELF64 64 64 cflags-y += $(call cc-option,-mno-explicit-relocs) 65 65 else 66 - # -msym32 can not be used for modules since they are loaded into XKSEG 67 - CFLAGS_MODULE += $(call cc-option,-mno-explicit-relocs) 68 - CFLAGS_KERNEL += $(call cc-option,-msym32) 66 + cflags-y += $(call cc-option,-msym32) 69 67 endif 70 68 endif 71 69
+3
arch/mips/kernel/head.S
··· 250 250 */ 251 251 page swapper_pg_dir, _PGD_ORDER 252 252 #ifdef CONFIG_64BIT 253 + #if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) 254 + page module_pg_dir, _PGD_ORDER 255 + #endif 253 256 page invalid_pmd_table, _PMD_ORDER 254 257 #endif 255 258 page invalid_pte_table, _PTE_ORDER
+15
arch/mips/kernel/module.c
··· 29 29 #include <linux/kernel.h> 30 30 #include <linux/module.h> 31 31 #include <linux/spinlock.h> 32 + #include <asm/pgtable.h> /* MODULE_START */ 32 33 33 34 struct mips_hi16 { 34 35 struct mips_hi16 *next; ··· 44 43 45 44 void *module_alloc(unsigned long size) 46 45 { 46 + #ifdef MODULE_START 47 + struct vm_struct *area; 48 + 49 + size = PAGE_ALIGN(size); 50 + if (!size) 51 + return NULL; 52 + 53 + area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END); 54 + if (!area) 55 + return NULL; 56 + 57 + return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); 58 + #else 47 59 if (size == 0) 48 60 return NULL; 49 61 return vmalloc(size); 62 + #endif 50 63 } 51 64 52 65 /* Free memory returned from module_alloc */
+4
arch/mips/mm/fault.c
··· 60 60 */ 61 61 if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) 62 62 goto vmalloc_fault; 63 + #ifdef MODULE_START 64 + if (unlikely(address >= MODULE_START && address < MODULE_END)) 65 + goto vmalloc_fault; 66 + #endif 63 67 64 68 /* 65 69 * If we're in an interrupt or have no user
+3
arch/mips/mm/pgtable-64.c
··· 58 58 59 59 /* Initialize the entire pgd. */ 60 60 pgd_init((unsigned long)swapper_pg_dir); 61 + #ifdef MODULE_START 62 + pgd_init((unsigned long)module_pg_dir); 63 + #endif 61 64 pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); 62 65 63 66 pgd_base = swapper_pg_dir;
+55
arch/mips/mm/tlbex.c
··· 423 423 label_invalid, 424 424 label_second_part, 425 425 label_leave, 426 + #ifdef MODULE_START 427 + label_module_alloc, 428 + #endif 426 429 label_vmalloc, 427 430 label_vmalloc_done, 428 431 label_tlbw_hazard, ··· 458 455 459 456 L_LA(_second_part) 460 457 L_LA(_leave) 458 + #ifdef MODULE_START 459 + L_LA(_module_alloc) 460 + #endif 461 461 L_LA(_vmalloc) 462 462 L_LA(_vmalloc_done) 463 463 L_LA(_tlbw_hazard) ··· 690 684 { 691 685 r_mips_pc16(r, *p, l); 692 686 i_bgezl(p, reg, 0); 687 + } 688 + 689 + static void __init __attribute__((unused)) 690 + il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) 691 + { 692 + r_mips_pc16(r, *p, l); 693 + i_bgez(p, reg, 0); 693 694 } 694 695 695 696 /* The only general purpose registers allowed in TLB handlers. */ ··· 983 970 * The vmalloc handling is not in the hotpath. 984 971 */ 985 972 i_dmfc0(p, tmp, C0_BADVADDR); 973 + #ifdef MODULE_START 974 + il_bltz(p, r, tmp, label_module_alloc); 975 + #else 986 976 il_bltz(p, r, tmp, label_vmalloc); 977 + #endif 987 978 /* No i_nop needed here, since the next insn doesn't touch TMP. 
*/ 988 979 989 980 #ifdef CONFIG_SMP ··· 1040 1023 { 1041 1024 long swpd = (long)swapper_pg_dir; 1042 1025 1026 + #ifdef MODULE_START 1027 + long modd = (long)module_pg_dir; 1028 + 1029 + l_module_alloc(l, *p); 1030 + /* 1031 + * Assumption: 1032 + * VMALLOC_START >= 0xc000000000000000UL 1033 + * MODULE_START >= 0xe000000000000000UL 1034 + */ 1035 + i_SLL(p, ptr, bvaddr, 2); 1036 + il_bgez(p, r, ptr, label_vmalloc); 1037 + 1038 + if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) { 1039 + i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */ 1040 + } else { 1041 + /* unlikely configuration */ 1042 + i_nop(p); /* delay slot */ 1043 + i_LA(p, ptr, MODULE_START); 1044 + } 1045 + i_dsubu(p, bvaddr, bvaddr, ptr); 1046 + 1047 + if (in_compat_space_p(modd) && !rel_lo(modd)) { 1048 + il_b(p, r, label_vmalloc_done); 1049 + i_lui(p, ptr, rel_hi(modd)); 1050 + } else { 1051 + i_LA_mostly(p, ptr, modd); 1052 + il_b(p, r, label_vmalloc_done); 1053 + i_daddiu(p, ptr, ptr, rel_lo(modd)); 1054 + } 1055 + 1056 + l_vmalloc(l, *p); 1057 + if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) && 1058 + MODULE_START << 32 == VMALLOC_START) 1059 + i_dsll32(p, ptr, ptr, 0); /* typical case */ 1060 + else 1061 + i_LA(p, ptr, VMALLOC_START); 1062 + #else 1043 1063 l_vmalloc(l, *p); 1044 1064 i_LA(p, ptr, VMALLOC_START); 1065 + #endif 1045 1066 i_dsubu(p, bvaddr, bvaddr, ptr); 1046 1067 1047 1068 if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
+13
include/asm-mips/pgtable-64.h
··· 14 14 #include <asm/addrspace.h> 15 15 #include <asm/page.h> 16 16 #include <asm/cachectl.h> 17 + #include <asm/fixmap.h> 17 18 18 19 #include <asm-generic/pgtable-nopud.h> 19 20 ··· 104 103 #define VMALLOC_START MAP_BASE 105 104 #define VMALLOC_END \ 106 105 (VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE) 106 + #if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \ 107 + VMALLOC_START != CKSSEG 108 + /* Load modules into 32bit-compatible segment. */ 109 + #define MODULE_START CKSSEG 110 + #define MODULE_END (FIXADDR_START-2*PAGE_SIZE) 111 + extern pgd_t module_pg_dir[PTRS_PER_PGD]; 112 + #endif 107 113 108 114 #define pte_ERROR(e) \ 109 115 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) ··· 182 174 #define __pmd_offset(address) pmd_index(address) 183 175 184 176 /* to find an entry in a kernel page-table-directory */ 177 + #ifdef MODULE_START 178 + #define pgd_offset_k(address) \ 179 + ((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL)) 180 + #else 185 181 #define pgd_offset_k(address) pgd_offset(&init_mm, 0UL) 182 + #endif 186 183 187 184 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 188 185 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))