Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
"Various fixes across the tree. The modpost error due to
virt_addr_valid() not being usable from modules required a number of
preparatory cleanups so a clean fix was possible."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
MIPS: 64-bit: Fix build if !CONFIG_MODULES
MIPS: Wire up finit_module syscall.
MIPS: Fix modpost error in modules attempting to use virt_addr_valid().
MIPS: page.h: Remove now unnecessary #ifndef __ASSEMBLY__ wrapper.
MIPS: Switch remaining assembler PAGE_SIZE users to <asm/asm-offsets.h>.
MIPS: Include PAGE_S{IZE,HIFT} in <asm/offset.h>.
MIPS: Don't include <asm/page.h> unnecessarily.
MIPS: Fix comment.
Revert "MIPS: Optimise TLB handlers for MIPS32/64 R2 cores."
MIPS: perf: Fix build failure in XLP perf support.
MIPS: Alchemy: Make 32kHz and r4k timer coexist peacefully

+44 -104
+2 -2
arch/mips/Kconfig
··· 39 39 select GENERIC_CLOCKEVENTS 40 40 select GENERIC_CMOS_UPDATE 41 41 select HAVE_MOD_ARCH_SPECIFIC 42 - select MODULES_USE_ELF_REL 43 - select MODULES_USE_ELF_RELA if 64BIT 42 + select MODULES_USE_ELF_REL if MODULES 43 + select MODULES_USE_ELF_RELA if MODULES && 64BIT 44 44 45 45 menu "Machine selection" 46 46
+5 -20
arch/mips/alchemy/common/time.c
··· 53 53 .read = au1x_counter1_read, 54 54 .mask = CLOCKSOURCE_MASK(32), 55 55 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 56 - .rating = 100, 56 + .rating = 1500, 57 57 }; 58 58 59 59 static int au1x_rtcmatch2_set_next_event(unsigned long delta, ··· 84 84 static struct clock_event_device au1x_rtcmatch2_clockdev = { 85 85 .name = "rtcmatch2", 86 86 .features = CLOCK_EVT_FEAT_ONESHOT, 87 - .rating = 100, 87 + .rating = 1500, 88 88 .set_next_event = au1x_rtcmatch2_set_next_event, 89 89 .set_mode = au1x_rtcmatch2_set_mode, 90 90 .cpumask = cpu_all_mask, ··· 158 158 return -1; 159 159 } 160 160 161 - static void __init alchemy_setup_c0timer(void) 162 - { 163 - /* 164 - * MIPS kernel assigns 'au1k_wait' to 'cpu_wait' before this 165 - * function is called. Because the Alchemy counters are unusable 166 - * the C0 timekeeping code is installed and use of the 'wait' 167 - * instruction must be prohibited, which is done most easily by 168 - * assigning NULL to cpu_wait. 169 - */ 170 - cpu_wait = NULL; 171 - r4k_clockevent_init(); 172 - init_r4k_clocksource(); 173 - } 174 - 175 161 static int alchemy_m2inttab[] __initdata = { 176 162 AU1000_RTC_MATCH2_INT, 177 163 AU1500_RTC_MATCH2_INT, ··· 172 186 int t; 173 187 174 188 t = alchemy_get_cputype(); 175 - if (t == ALCHEMY_CPU_UNKNOWN) 176 - alchemy_setup_c0timer(); 177 - else if (alchemy_time_init(alchemy_m2inttab[t])) 178 - alchemy_setup_c0timer(); 189 + if (t == ALCHEMY_CPU_UNKNOWN || 190 + alchemy_time_init(alchemy_m2inttab[t])) 191 + cpu_wait = NULL; /* wait doesn't work with r4k timer */ 179 192 }
+4 -5
arch/mips/include/asm/page.h
··· 45 45 #define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; }) 46 46 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 47 47 48 - #ifndef __ASSEMBLY__ 49 - 50 48 #include <linux/pfn.h> 51 49 #include <asm/io.h> 52 50 ··· 137 139 */ 138 140 #define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) 139 141 140 - #endif /* !__ASSEMBLY__ */ 141 - 142 142 /* 143 143 * __pa()/__va() should be used only during mem init. 144 144 */ ··· 198 202 #endif 199 203 200 204 #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) 201 - #define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr))) 205 + 206 + extern int __virt_addr_valid(const volatile void *kaddr); 207 + #define virt_addr_valid(kaddr) \ 208 + __virt_addr_valid((const volatile void *) (kaddr)) 202 209 203 210 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 204 211 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+5 -4
arch/mips/include/asm/thread_info.h
··· 29 29 __u32 cpu; /* current CPU */ 30 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 31 32 - mm_segment_t addr_limit; /* thread address space: 33 - 0-0xBFFFFFFF for user-thead 34 - 0-0xFFFFFFFF for kernel-thread 35 - */ 32 + mm_segment_t addr_limit; /* 33 + * thread address space limit: 34 + * 0x7fffffff for user-thead 35 + * 0xffffffff for kernel-thread 36 + */ 36 37 struct restart_block restart_block; 37 38 struct pt_regs *regs; 38 39 };
+9 -6
arch/mips/include/uapi/asm/unistd.h
··· 368 368 #define __NR_process_vm_readv (__NR_Linux + 345) 369 369 #define __NR_process_vm_writev (__NR_Linux + 346) 370 370 #define __NR_kcmp (__NR_Linux + 347) 371 + #define __NR_finit_module (__NR_Linux + 348) 371 372 372 373 /* 373 374 * Offset of the last Linux o32 flavoured syscall 374 375 */ 375 - #define __NR_Linux_syscalls 347 376 + #define __NR_Linux_syscalls 348 376 377 377 378 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 378 379 379 380 #define __NR_O32_Linux 4000 380 - #define __NR_O32_Linux_syscalls 347 381 + #define __NR_O32_Linux_syscalls 348 381 382 382 383 #if _MIPS_SIM == _MIPS_SIM_ABI64 383 384 ··· 693 692 #define __NR_process_vm_readv (__NR_Linux + 304) 694 693 #define __NR_process_vm_writev (__NR_Linux + 305) 695 694 #define __NR_kcmp (__NR_Linux + 306) 695 + #define __NR_finit_module (__NR_Linux + 307) 696 696 697 697 /* 698 698 * Offset of the last Linux 64-bit flavoured syscall 699 699 */ 700 - #define __NR_Linux_syscalls 306 700 + #define __NR_Linux_syscalls 307 701 701 702 702 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 703 703 704 704 #define __NR_64_Linux 5000 705 - #define __NR_64_Linux_syscalls 306 705 + #define __NR_64_Linux_syscalls 307 706 706 707 707 #if _MIPS_SIM == _MIPS_SIM_NABI32 708 708 ··· 1023 1021 #define __NR_process_vm_readv (__NR_Linux + 309) 1024 1022 #define __NR_process_vm_writev (__NR_Linux + 310) 1025 1023 #define __NR_kcmp (__NR_Linux + 311) 1024 + #define __NR_finit_module (__NR_Linux + 312) 1026 1025 1027 1026 /* 1028 1027 * Offset of the last N32 flavoured syscall 1029 1028 */ 1030 - #define __NR_Linux_syscalls 311 1029 + #define __NR_Linux_syscalls 312 1031 1030 1032 1031 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1033 1032 1034 1033 #define __NR_N32_Linux 6000 1035 - #define __NR_N32_Linux_syscalls 311 1034 + #define __NR_N32_Linux_syscalls 312 1036 1035 1037 1036 #endif /* _UAPI_ASM_UNISTD_H */
+3
arch/mips/kernel/asm-offsets.c
··· 200 200 DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); 201 201 DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); 202 202 BLANK(); 203 + DEFINE(_PAGE_SHIFT, PAGE_SHIFT); 204 + DEFINE(_PAGE_SIZE, PAGE_SIZE); 205 + BLANK(); 203 206 } 204 207 205 208 #ifdef CONFIG_32BIT
+2 -3
arch/mips/kernel/genex.S
··· 19 19 #include <asm/mipsregs.h> 20 20 #include <asm/stackframe.h> 21 21 #include <asm/war.h> 22 - #include <asm/page.h> 23 22 #include <asm/thread_info.h> 24 23 25 24 #define PANIC_PIC(msg) \ ··· 482 483 MFC0 k1, CP0_ENTRYHI 483 484 andi k1, 0xff /* ASID_MASK */ 484 485 MFC0 k0, CP0_EPC 485 - PTR_SRL k0, PAGE_SHIFT + 1 486 - PTR_SLL k0, PAGE_SHIFT + 1 486 + PTR_SRL k0, _PAGE_SHIFT + 1 487 + PTR_SLL k0, _PAGE_SHIFT + 1 487 488 or k1, k0 488 489 MTC0 k1, CP0_ENTRYHI 489 490 mtc0_tlbw_hazard
-1
arch/mips/kernel/head.S
··· 21 21 #include <asm/asmmacro.h> 22 22 #include <asm/irqflags.h> 23 23 #include <asm/regdef.h> 24 - #include <asm/page.h> 25 24 #include <asm/pgtable-bits.h> 26 25 #include <asm/mipsregs.h> 27 26 #include <asm/stackframe.h>
-1
arch/mips/kernel/octeon_switch.S
··· 15 15 #include <asm/fpregdef.h> 16 16 #include <asm/mipsregs.h> 17 17 #include <asm/asm-offsets.h> 18 - #include <asm/page.h> 19 18 #include <asm/pgtable-bits.h> 20 19 #include <asm/regdef.h> 21 20 #include <asm/stackframe.h>
-38
arch/mips/kernel/perf_event_mipsxx.c
··· 847 847 [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ 848 848 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */ 849 849 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ 850 - [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, 851 850 }; 852 851 853 852 /* 24K/34K/1004K cores can share the same cache event map. */ ··· 1114 1115 [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */ 1115 1116 [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */ 1116 1117 }, 1117 - [C(OP_PREFETCH)] = { 1118 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1119 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1120 - }, 1121 1118 }, 1122 1119 [C(L1I)] = { 1123 1120 [C(OP_READ)] = { 1124 1121 [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ 1125 1122 [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ 1126 - }, 1127 - [C(OP_WRITE)] = { 1128 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1129 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1130 - }, 1131 - [C(OP_PREFETCH)] = { 1132 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1133 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1134 1123 }, 1135 1124 }, 1136 1125 [C(LL)] = { ··· 1130 1143 [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */ 1131 1144 [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */ 1132 1145 }, 1133 - [C(OP_PREFETCH)] = { 1134 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1135 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1136 - }, 1137 1146 }, 1138 1147 [C(DTLB)] = { 1139 1148 /* ··· 1137 1154 * read and write. 
1138 1155 */ 1139 1156 [C(OP_READ)] = { 1140 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1141 1157 [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ 1142 1158 }, 1143 1159 [C(OP_WRITE)] = { 1144 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1145 1160 [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ 1146 - }, 1147 - [C(OP_PREFETCH)] = { 1148 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1149 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1150 1161 }, 1151 1162 }, 1152 1163 [C(ITLB)] = { 1153 1164 [C(OP_READ)] = { 1154 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1155 1165 [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ 1156 1166 }, 1157 1167 [C(OP_WRITE)] = { 1158 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1159 1168 [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ 1160 - }, 1161 - [C(OP_PREFETCH)] = { 1162 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1163 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1164 1169 }, 1165 1170 }, 1166 1171 [C(BPU)] = { 1167 1172 [C(OP_READ)] = { 1168 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1169 1173 [C(RESULT_MISS)] = { 0x25, CNTR_ALL }, 1170 - }, 1171 - [C(OP_WRITE)] = { 1172 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1173 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1174 - }, 1175 - [C(OP_PREFETCH)] = { 1176 - [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1177 - [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1178 1174 }, 1179 1175 }, 1180 1176 };
-1
arch/mips/kernel/r2300_switch.S
··· 15 15 #include <asm/fpregdef.h> 16 16 #include <asm/mipsregs.h> 17 17 #include <asm/asm-offsets.h> 18 - #include <asm/page.h> 19 18 #include <asm/regdef.h> 20 19 #include <asm/stackframe.h> 21 20 #include <asm/thread_info.h>
-1
arch/mips/kernel/r4k_switch.S
··· 15 15 #include <asm/fpregdef.h> 16 16 #include <asm/mipsregs.h> 17 17 #include <asm/asm-offsets.h> 18 - #include <asm/page.h> 19 18 #include <asm/pgtable-bits.h> 20 19 #include <asm/regdef.h> 21 20 #include <asm/stackframe.h>
+1 -2
arch/mips/kernel/relocate_kernel.S
··· 9 9 #include <asm/asm.h> 10 10 #include <asm/asmmacro.h> 11 11 #include <asm/regdef.h> 12 - #include <asm/page.h> 13 12 #include <asm/mipsregs.h> 14 13 #include <asm/stackframe.h> 15 14 #include <asm/addrspace.h> ··· 49 50 and s3, s2, 0x8 50 51 beq s3, zero, process_entry 51 52 and s2, s2, ~0x8 52 - li s6, (1 << PAGE_SHIFT) / SZREG 53 + li s6, (1 << _PAGE_SHIFT) / SZREG 53 54 54 55 copy_word: 55 56 /* copy page word by word */
+1
arch/mips/kernel/scall32-o32.S
··· 583 583 sys sys_process_vm_readv 6 /* 4345 */ 584 584 sys sys_process_vm_writev 6 585 585 sys sys_kcmp 5 586 + sys sys_finit_module 3 586 587 .endm 587 588 588 589 /* We pre-compute the number of _instruction_ bytes needed to
+1
arch/mips/kernel/scall64-64.S
··· 422 422 PTR sys_process_vm_readv 423 423 PTR sys_process_vm_writev /* 5305 */ 424 424 PTR sys_kcmp 425 + PTR sys_finit_module 425 426 .size sys_call_table,.-sys_call_table
+1
arch/mips/kernel/scall64-n32.S
··· 416 416 PTR compat_sys_process_vm_readv 417 417 PTR compat_sys_process_vm_writev /* 6310 */ 418 418 PTR sys_kcmp 419 + PTR sys_finit_module 419 420 .size sysn32_call_table,.-sysn32_call_table
+1
arch/mips/kernel/scall64-o32.S
··· 540 540 PTR compat_sys_process_vm_readv /* 4345 */ 541 541 PTR compat_sys_process_vm_writev 542 542 PTR sys_kcmp 543 + PTR sys_finit_module 543 544 .size sys_call_table,.-sys_call_table
+2 -1
arch/mips/kernel/vmlinux.lds.S
··· 1 1 #include <asm/asm-offsets.h> 2 - #include <asm/page.h> 3 2 #include <asm/thread_info.h> 3 + 4 + #define PAGE_SIZE _PAGE_SIZE 4 5 5 6 /* 6 7 * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+6
arch/mips/mm/ioremap.c
··· 190 190 191 191 EXPORT_SYMBOL(__ioremap); 192 192 EXPORT_SYMBOL(__iounmap); 193 + 194 + int __virt_addr_valid(const volatile void *kaddr) 195 + { 196 + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); 197 + } 198 + EXPORT_SYMBOL_GPL(__virt_addr_valid);
-1
arch/mips/mm/tlbex-fault.S
··· 7 7 * Copyright (C) 1999 Silicon Graphics, Inc. 8 8 */ 9 9 #include <asm/mipsregs.h> 10 - #include <asm/page.h> 11 10 #include <asm/regdef.h> 12 11 #include <asm/stackframe.h> 13 12
-16
arch/mips/mm/tlbex.c
··· 976 976 #endif 977 977 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 978 978 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 979 - 980 - if (cpu_has_mips_r2) { 981 - uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT)); 982 - uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT)); 983 - return; 984 - } 985 - 986 979 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 987 980 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 988 981 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ ··· 1011 1018 1012 1019 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1013 1020 { 1014 - if (cpu_has_mips_r2) { 1015 - /* PTE ptr offset is obtained from BadVAddr */ 1016 - UASM_i_MFC0(p, tmp, C0_BADVADDR); 1017 - UASM_i_LW(p, ptr, 0, ptr); 1018 - uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1); 1019 - uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1); 1020 - return; 1021 - } 1022 - 1023 1021 /* 1024 1022 * Bug workaround for the Nevada. It seems as if under certain 1025 1023 * circumstances the move from cp0_context might produce a
+1 -2
arch/mips/power/hibernate.S
··· 8 8 * Wu Zhangjin <wuzhangjin@gmail.com> 9 9 */ 10 10 #include <asm/asm-offsets.h> 11 - #include <asm/page.h> 12 11 #include <asm/regdef.h> 13 12 #include <asm/asm.h> 14 13 ··· 34 35 0: 35 36 PTR_L t1, PBE_ADDRESS(t0) /* source */ 36 37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ 37 - PTR_ADDU t3, t1, PAGE_SIZE 38 + PTR_ADDU t3, t1, _PAGE_SIZE 38 39 1: 39 40 REG_L t8, (t1) 40 41 REG_S t8, (t2)