Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

- add SSP support

- add KASAN support

- improvements to xtensa-specific assembly:
- use ENTRY and ENDPROC consistently
- clean up and unify word alignment macros
- clean up and unify fixup marking
- use 'call' instead of 'callx' where possible

- various cleanups:
- consolidate kernel stack size related definitions
- replace #ifdef'fed/commented out debug printk statements with
pr_debug
- use struct exc_table instead of flat array for exception handling
data

- build kernel with -mtext-section-literals; simplify xtensa linker
script

- fix futex_atomic_cmpxchg_inatomic()

* tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa: (21 commits)
xtensa: fix futex_atomic_cmpxchg_inatomic
xtensa: shut up gcc-8 warnings
xtensa: print kernel sections info in mem_init
xtensa: use generic strncpy_from_user with KASAN
xtensa: use __memset in __xtensa_clear_user
xtensa: add support for KASAN
xtensa: move fixmap and kmap just above the KSEG
xtensa: don't clear swapper_pg_dir in paging_init
xtensa: extract init_kio
xtensa: implement early_trap_init
xtensa: clean up exception handling structure
xtensa: clean up custom-controlled debug output
xtensa: enable stack protector
xtensa: print hardware config ID on startup
xtensa: consolidate kernel stack size related definitions
xtensa: clean up functions in assembly code
xtensa: clean up word alignment macros in assembly code
xtensa: clean up fixups in assembly code
xtensa: use call instead of callx in assembly code
xtensa: build kernel with text-section-literals
...

+830 -664
+1 -1
Documentation/features/debug/KASAN/arch-support.txt
··· 35 35 | um: | TODO | 36 36 | unicore32: | TODO | 37 37 | x86: | ok | 64-bit only 38 - | xtensa: | TODO | 38 + | xtensa: | ok | 39 39 -----------------------
+1 -1
Documentation/features/debug/stackprotector/arch-support.txt
··· 35 35 | um: | TODO | 36 36 | unicore32: | TODO | 37 37 | x86: | ok | 38 - | xtensa: | TODO | 38 + | xtensa: | ok | 39 39 -----------------------
+42 -36
Documentation/xtensa/mmu.txt
··· 69 69 | Userspace | 0x00000000 TASK_SIZE 70 70 +------------------+ 0x40000000 71 71 +------------------+ 72 - | Page table | 0x80000000 73 - +------------------+ 0x80400000 72 + | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE 73 + +------------------+ 74 + | KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE 75 + +------------------+ 0x8e400000 76 + +------------------+ 77 + | VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB 78 + +------------------+ VMALLOC_END 79 + | Cache aliasing | TLBTEMP_BASE_1 0xc7ff0000 DCACHE_WAY_SIZE 80 + | remap area 1 | 81 + +------------------+ 82 + | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 83 + | remap area 2 | 84 + +------------------+ 74 85 +------------------+ 75 86 | KMAP area | PKMAP_BASE PTRS_PER_PTE * 76 87 | | DCACHE_N_COLORS * ··· 92 81 | | NR_CPUS * 93 82 | | DCACHE_N_COLORS * 94 83 | | PAGE_SIZE 95 - +------------------+ FIXADDR_TOP 0xbffff000 96 - +------------------+ 97 - | VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB 98 - +------------------+ VMALLOC_END 99 - | Cache aliasing | TLBTEMP_BASE_1 0xc7ff0000 DCACHE_WAY_SIZE 100 - | remap area 1 | 101 - +------------------+ 102 - | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 103 - | remap area 2 | 104 - +------------------+ 84 + +------------------+ FIXADDR_TOP 0xcffff000 105 85 +------------------+ 106 86 | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB 107 87 +------------------+ ··· 111 109 | Userspace | 0x00000000 TASK_SIZE 112 110 +------------------+ 0x40000000 113 111 +------------------+ 114 - | Page table | 0x80000000 115 - +------------------+ 0x80400000 112 + | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE 113 + +------------------+ 114 + | KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE 115 + +------------------+ 0x8e400000 116 + +------------------+ 117 + | VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB 118 + +------------------+ VMALLOC_END 
119 + | Cache aliasing | TLBTEMP_BASE_1 0xa7ff0000 DCACHE_WAY_SIZE 120 + | remap area 1 | 121 + +------------------+ 122 + | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 123 + | remap area 2 | 124 + +------------------+ 116 125 +------------------+ 117 126 | KMAP area | PKMAP_BASE PTRS_PER_PTE * 118 127 | | DCACHE_N_COLORS * ··· 134 121 | | NR_CPUS * 135 122 | | DCACHE_N_COLORS * 136 123 | | PAGE_SIZE 137 - +------------------+ FIXADDR_TOP 0x9ffff000 138 - +------------------+ 139 - | VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB 140 - +------------------+ VMALLOC_END 141 - | Cache aliasing | TLBTEMP_BASE_1 0xa7ff0000 DCACHE_WAY_SIZE 142 - | remap area 1 | 143 - +------------------+ 144 - | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 145 - | remap area 2 | 146 - +------------------+ 124 + +------------------+ FIXADDR_TOP 0xaffff000 147 125 +------------------+ 148 126 | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB 149 127 +------------------+ ··· 154 150 | Userspace | 0x00000000 TASK_SIZE 155 151 +------------------+ 0x40000000 156 152 +------------------+ 157 - | Page table | 0x80000000 158 - +------------------+ 0x80400000 153 + | Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE 154 + +------------------+ 155 + | KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE 156 + +------------------+ 0x8e400000 157 + +------------------+ 158 + | VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB 159 + +------------------+ VMALLOC_END 160 + | Cache aliasing | TLBTEMP_BASE_1 0x97ff0000 DCACHE_WAY_SIZE 161 + | remap area 1 | 162 + +------------------+ 163 + | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 164 + | remap area 2 | 165 + +------------------+ 159 166 +------------------+ 160 167 | KMAP area | PKMAP_BASE PTRS_PER_PTE * 161 168 | | DCACHE_N_COLORS * ··· 177 162 | | NR_CPUS * 178 163 | | DCACHE_N_COLORS * 179 164 | | PAGE_SIZE 180 - +------------------+ FIXADDR_TOP 0x8ffff000 181 - +------------------+ 182 
- | VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB 183 - +------------------+ VMALLOC_END 184 - | Cache aliasing | TLBTEMP_BASE_1 0x97ff0000 DCACHE_WAY_SIZE 185 - | remap area 1 | 186 - +------------------+ 187 - | Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE 188 - | remap area 2 | 189 - +------------------+ 165 + +------------------+ FIXADDR_TOP 0x9ffff000 190 166 +------------------+ 191 167 | Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB 192 168 +------------------+
+7
arch/xtensa/Kconfig
··· 15 15 select GENERIC_IRQ_SHOW 16 16 select GENERIC_PCI_IOMAP 17 17 select GENERIC_SCHED_CLOCK 18 + select GENERIC_STRNCPY_FROM_USER if KASAN 19 + select HAVE_ARCH_KASAN if MMU 20 + select HAVE_CC_STACKPROTECTOR 18 21 select HAVE_DEBUG_KMEMLEAK 19 22 select HAVE_DMA_API_DEBUG 20 23 select HAVE_DMA_CONTIGUOUS ··· 81 78 82 79 config HAVE_XTENSA_GPIO32 83 80 def_bool n 81 + 82 + config KASAN_SHADOW_OFFSET 83 + hex 84 + default 0x6e400000 84 85 85 86 menu "Processor type and features" 86 87
+4 -3
arch/xtensa/Makefile
··· 42 42 43 43 # temporarily until string.h is fixed 44 44 KBUILD_CFLAGS += -ffreestanding -D__linux__ 45 - 46 - KBUILD_CFLAGS += -pipe -mlongcalls 47 - 45 + KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals 48 46 KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) 47 + KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,) 48 + 49 + KBUILD_AFLAGS += -mlongcalls -mtext-section-literals 49 50 50 51 ifneq ($(CONFIG_LD_NO_RELAX),) 51 52 LDFLAGS := --no-relax
+1
arch/xtensa/boot/boot-redboot/bootstrap.S
··· 42 42 .align 4 43 43 44 44 .section .text, "ax" 45 + .literal_position 45 46 .begin literal_prefix .text 46 47 47 48 /* put literals in here! */
+6
arch/xtensa/boot/lib/Makefile
··· 15 15 CFLAGS_REMOVE_inffast.o = -pg 16 16 endif 17 17 18 + KASAN_SANITIZE := n 19 + 20 + CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong 21 + CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong 22 + CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong 23 + CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong 18 24 19 25 quiet_cmd_copy_zlib = COPY $@ 20 26 cmd_copy_zlib = cat $< > $@
+40
arch/xtensa/include/asm/asmmacro.h
··· 150 150 __endl \ar \as 151 151 .endm 152 152 153 + /* Load or store instructions that may cause exceptions use the EX macro. */ 154 + 155 + #define EX(handler) \ 156 + .section __ex_table, "a"; \ 157 + .word 97f, handler; \ 158 + .previous \ 159 + 97: 160 + 161 + 162 + /* 163 + * Extract unaligned word that is split between two registers w0 and w1 164 + * into r regardless of machine endianness. SAR must be loaded with the 165 + * starting bit of the word (see __ssa8). 166 + */ 167 + 168 + .macro __src_b r, w0, w1 169 + #ifdef __XTENSA_EB__ 170 + src \r, \w0, \w1 171 + #else 172 + src \r, \w1, \w0 173 + #endif 174 + .endm 175 + 176 + /* 177 + * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned 178 + * word starting at r from two registers loaded from consecutive aligned 179 + * addresses covering r regardless of machine endianness. 180 + * 181 + * r 0 1 2 3 182 + * LE SAR 0 8 16 24 183 + * BE SAR 32 24 16 8 184 + */ 185 + 186 + .macro __ssa8 r 187 + #ifdef __XTENSA_EB__ 188 + ssa8b \r 189 + #else 190 + ssa8l \r 191 + #endif 192 + .endm 153 193 154 194 #endif /* _XTENSA_ASMMACRO_H */
+2 -2
arch/xtensa/include/asm/current.h
··· 11 11 #ifndef _XTENSA_CURRENT_H 12 12 #define _XTENSA_CURRENT_H 13 13 14 + #include <asm/thread_info.h> 15 + 14 16 #ifndef __ASSEMBLY__ 15 17 16 18 #include <linux/thread_info.h> ··· 27 25 #define current get_current() 28 26 29 27 #else 30 - 31 - #define CURRENT_SHIFT 13 32 28 33 29 #define GET_CURRENT(reg,sp) \ 34 30 GET_THREAD_INFO(reg,sp); \
+2 -2
arch/xtensa/include/asm/fixmap.h
··· 44 44 __end_of_fixed_addresses 45 45 }; 46 46 47 - #define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) 47 + #define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE) 48 48 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 49 49 #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 50 50 ··· 63 63 * table. 64 64 */ 65 65 BUILD_BUG_ON(FIXADDR_START < 66 - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); 66 + TLBTEMP_BASE_1 + TLBTEMP_SIZE); 67 67 BUILD_BUG_ON(idx >= __end_of_fixed_addresses); 68 68 return __fix_to_virt(idx); 69 69 }
+10 -13
arch/xtensa/include/asm/futex.h
··· 92 92 u32 oldval, u32 newval) 93 93 { 94 94 int ret = 0; 95 - u32 prev; 96 95 97 96 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 98 97 return -EFAULT; ··· 102 103 103 104 __asm__ __volatile__ ( 104 105 " # futex_atomic_cmpxchg_inatomic\n" 105 - "1: l32i %1, %3, 0\n" 106 - " mov %0, %5\n" 107 - " wsr %1, scompare1\n" 108 - "2: s32c1i %0, %3, 0\n" 109 - "3:\n" 106 + " wsr %5, scompare1\n" 107 + "1: s32c1i %1, %4, 0\n" 108 + " s32i %1, %6, 0\n" 109 + "2:\n" 110 110 " .section .fixup,\"ax\"\n" 111 111 " .align 4\n" 112 - "4: .long 3b\n" 113 - "5: l32r %1, 4b\n" 114 - " movi %0, %6\n" 112 + "3: .long 2b\n" 113 + "4: l32r %1, 3b\n" 114 + " movi %0, %7\n" 115 115 " jx %1\n" 116 116 " .previous\n" 117 117 " .section __ex_table,\"a\"\n" 118 - " .long 1b,5b,2b,5b\n" 118 + " .long 1b,4b\n" 119 119 " .previous\n" 120 - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) 121 - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) 120 + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) 121 + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) 122 122 : "memory"); 123 123 124 - *uval = prev; 125 124 return ret; 126 125 } 127 126
+1 -1
arch/xtensa/include/asm/highmem.h
··· 72 72 * page table. 73 73 */ 74 74 BUILD_BUG_ON(PKMAP_BASE < 75 - XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); 75 + TLBTEMP_BASE_1 + TLBTEMP_SIZE); 76 76 BUG_ON(in_interrupt()); 77 77 if (!PageHighMem(page)) 78 78 return page_address(page);
+37
arch/xtensa/include/asm/kasan.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_KASAN_H 3 + #define __ASM_KASAN_H 4 + 5 + #ifndef __ASSEMBLY__ 6 + 7 + #ifdef CONFIG_KASAN 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/sizes.h> 11 + #include <asm/kmem_layout.h> 12 + 13 + /* Start of area covered by KASAN */ 14 + #define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000) 15 + /* Start of the shadow map */ 16 + #define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE) 17 + /* Size of the shadow map */ 18 + #define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT) 19 + /* Offset for mem to shadow address transformation */ 20 + #define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET) 21 + 22 + void __init kasan_early_init(void); 23 + void __init kasan_init(void); 24 + 25 + #else 26 + 27 + static inline void kasan_early_init(void) 28 + { 29 + } 30 + 31 + static inline void kasan_init(void) 32 + { 33 + } 34 + 35 + #endif 36 + #endif 37 + #endif
+7
arch/xtensa/include/asm/kmem_layout.h
··· 71 71 72 72 #endif 73 73 74 + #ifndef CONFIG_KASAN 75 + #define KERNEL_STACK_SHIFT 13 76 + #else 77 + #define KERNEL_STACK_SHIFT 15 78 + #endif 79 + #define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT) 80 + 74 81 #endif
+9
arch/xtensa/include/asm/linkage.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_LINKAGE_H 4 + #define __ASM_LINKAGE_H 5 + 6 + #define __ALIGN .align 4 7 + #define __ALIGN_STR ".align 4" 8 + 9 + #endif
+1
arch/xtensa/include/asm/mmu_context.h
··· 52 52 #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) 53 53 54 54 void init_mmu(void); 55 + void init_kio(void); 55 56 56 57 static inline void set_rasid_register (unsigned long val) 57 58 {
+4
arch/xtensa/include/asm/nommu_context.h
··· 3 3 { 4 4 } 5 5 6 + static inline void init_kio(void) 7 + { 8 + } 9 + 6 10 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 7 11 { 8 12 }
-2
arch/xtensa/include/asm/page.h
··· 36 36 #define MAX_LOW_PFN PHYS_PFN(0xfffffffful) 37 37 #endif 38 38 39 - #define PGTABLE_START 0x80000000 40 - 41 39 /* 42 40 * Cache aliasing: 43 41 *
+2 -1
arch/xtensa/include/asm/pgtable.h
··· 12 12 #define _XTENSA_PGTABLE_H 13 13 14 14 #define __ARCH_USE_5LEVEL_HACK 15 - #include <asm-generic/pgtable-nopmd.h> 16 15 #include <asm/page.h> 17 16 #include <asm/kmem_layout.h> 17 + #include <asm-generic/pgtable-nopmd.h> 18 18 19 19 /* 20 20 * We only use two ring levels, user and kernel space. ··· 170 170 #define PAGE_SHARED_EXEC \ 171 171 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) 172 172 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) 173 + #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT) 173 174 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) 174 175 175 176 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
+1 -14
arch/xtensa/include/asm/ptrace.h
··· 10 10 #ifndef _XTENSA_PTRACE_H 11 11 #define _XTENSA_PTRACE_H 12 12 13 + #include <asm/kmem_layout.h> 13 14 #include <uapi/asm/ptrace.h> 14 15 15 16 /* ··· 38 37 * | struct thread_info | | | | 39 38 * +-----------------------+ -------- 40 39 */ 41 - 42 - #define KERNEL_STACK_SIZE (2 * PAGE_SIZE) 43 - 44 - /* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */ 45 - 46 - #define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ 47 - #define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */ 48 - #define EXC_TABLE_FIXUP 0x00c /* Fixup handler */ 49 - #define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */ 50 - #define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */ 51 - #define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */ 52 - #define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */ 53 - #define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */ 54 - #define EXC_TABLE_SIZE 0x400 55 40 56 41 #ifndef __ASSEMBLY__ 57 42
+1
arch/xtensa/include/asm/regs.h
··· 76 76 #define EXCCAUSE_COPROCESSOR5_DISABLED 37 77 77 #define EXCCAUSE_COPROCESSOR6_DISABLED 38 78 78 #define EXCCAUSE_COPROCESSOR7_DISABLED 39 79 + #define EXCCAUSE_N 64 79 80 80 81 /* PS register fields. */ 81 82
+40
arch/xtensa/include/asm/stackprotector.h
··· 1 + /* 2 + * GCC stack protector support. 3 + * 4 + * (This is directly adopted from the ARM implementation) 5 + * 6 + * Stack protector works by putting predefined pattern at the start of 7 + * the stack frame and verifying that it hasn't been overwritten when 8 + * returning from the function. The pattern is called stack canary 9 + * and gcc expects it to be defined by a global variable called 10 + * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP 11 + * we cannot have a different canary value per task. 12 + */ 13 + 14 + #ifndef _ASM_STACKPROTECTOR_H 15 + #define _ASM_STACKPROTECTOR_H 1 16 + 17 + #include <linux/random.h> 18 + #include <linux/version.h> 19 + 20 + extern unsigned long __stack_chk_guard; 21 + 22 + /* 23 + * Initialize the stackprotector canary value. 24 + * 25 + * NOTE: this must only be called from functions that never return, 26 + * and it must always be inlined. 27 + */ 28 + static __always_inline void boot_init_stack_canary(void) 29 + { 30 + unsigned long canary; 31 + 32 + /* Try to get a semi random initial value. */ 33 + get_random_bytes(&canary, sizeof(canary)); 34 + canary ^= LINUX_VERSION_CODE; 35 + 36 + current->stack_canary = canary; 37 + __stack_chk_guard = current->stack_canary; 38 + } 39 + 40 + #endif /* _ASM_STACKPROTECTOR_H */
+21 -2
arch/xtensa/include/asm/string.h
··· 53 53 "bne %1, %5, 1b\n" 54 54 "2:" 55 55 : "=r" (__dest), "=r" (__src), "=&r" (__dummy) 56 - : "0" (__dest), "1" (__src), "r" (__src+__n) 56 + : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n) 57 57 : "memory"); 58 58 59 59 return __xdest; ··· 101 101 "2:\n\t" 102 102 "sub %2, %2, %3" 103 103 : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) 104 - : "0" (__cs), "1" (__ct), "r" (__cs+__n)); 104 + : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n)); 105 105 106 106 return __res; 107 107 } 108 108 109 109 #define __HAVE_ARCH_MEMSET 110 110 extern void *memset(void *__s, int __c, size_t __count); 111 + extern void *__memset(void *__s, int __c, size_t __count); 111 112 112 113 #define __HAVE_ARCH_MEMCPY 113 114 extern void *memcpy(void *__to, __const__ void *__from, size_t __n); 115 + extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); 114 116 115 117 #define __HAVE_ARCH_MEMMOVE 116 118 extern void *memmove(void *__dest, __const__ void *__src, size_t __n); 119 + extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); 117 120 118 121 /* Don't build bcopy at all ... */ 119 122 #define __HAVE_ARCH_BCOPY 123 + 124 + #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) 125 + 126 + /* 127 + * For files that are not instrumented (e.g. mm/slub.c) we 128 + * should use not instrumented version of mem* functions. 129 + */ 130 + 131 + #define memcpy(dst, src, len) __memcpy(dst, src, len) 132 + #define memmove(dst, src, len) __memmove(dst, src, len) 133 + #define memset(s, c, n) __memset(s, c, n) 134 + 135 + #ifndef __NO_FORTIFY 136 + #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ 137 + #endif 138 + #endif 120 139 121 140 #endif /* _XTENSA_STRING_H */
+7 -6
arch/xtensa/include/asm/thread_info.h
··· 11 11 #ifndef _XTENSA_THREAD_INFO_H 12 12 #define _XTENSA_THREAD_INFO_H 13 13 14 - #ifdef __KERNEL__ 14 + #include <asm/kmem_layout.h> 15 + 16 + #define CURRENT_SHIFT KERNEL_STACK_SHIFT 15 17 16 18 #ifndef __ASSEMBLY__ 17 19 # include <asm/processor.h> ··· 83 81 static inline struct thread_info *current_thread_info(void) 84 82 { 85 83 struct thread_info *ti; 86 - __asm__("extui %0,a1,0,13\n\t" 84 + __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t" 87 85 "xor %0, a1, %0" : "=&r" (ti) : ); 88 86 return ti; 89 87 } ··· 92 90 93 91 /* how to get the thread information struct from ASM */ 94 92 #define GET_THREAD_INFO(reg,sp) \ 95 - extui reg, sp, 0, 13; \ 93 + extui reg, sp, 0, CURRENT_SHIFT; \ 96 94 xor reg, sp, reg 97 95 #endif 98 96 ··· 129 127 */ 130 128 #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 131 129 132 - #define THREAD_SIZE 8192 //(2*PAGE_SIZE) 133 - #define THREAD_SIZE_ORDER 1 130 + #define THREAD_SIZE KERNEL_STACK_SIZE 131 + #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT) 134 132 135 - #endif /* __KERNEL__ */ 136 133 #endif /* _XTENSA_THREAD_INFO */
+35
arch/xtensa/include/asm/traps.h
··· 13 13 #include <asm/ptrace.h> 14 14 15 15 /* 16 + * Per-CPU exception handling data structure. 17 + * EXCSAVE1 points to it. 18 + */ 19 + struct exc_table { 20 + /* Kernel Stack */ 21 + void *kstk; 22 + /* Double exception save area for a0 */ 23 + unsigned long double_save; 24 + /* Fixup handler */ 25 + void *fixup; 26 + /* For passing a parameter to fixup */ 27 + void *fixup_param; 28 + /* For fast syscall handler */ 29 + unsigned long syscall_save; 30 + /* Fast user exception handlers */ 31 + void *fast_user_handler[EXCCAUSE_N]; 32 + /* Fast kernel exception handlers */ 33 + void *fast_kernel_handler[EXCCAUSE_N]; 34 + /* Default C-Handlers */ 35 + void *default_handler[EXCCAUSE_N]; 36 + }; 37 + 38 + /* 16 39 * handler must be either of the following: 17 40 * void (*)(struct pt_regs *regs); 18 41 * void (*)(struct pt_regs *regs, unsigned long exccause); 19 42 */ 20 43 extern void * __init trap_set_handler(int cause, void *handler); 21 44 extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); 45 + void fast_second_level_miss(void); 46 + 47 + /* Initialize minimal exc_table structure sufficient for basic paging */ 48 + static inline void __init early_trap_init(void) 49 + { 50 + static struct exc_table exc_table __initdata = { 51 + .fast_kernel_handler[EXCCAUSE_DTLB_MISS] = 52 + fast_second_level_miss, 53 + }; 54 + __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table)); 55 + } 56 + 22 57 void secondary_trap_init(void); 23 58 24 59 static inline void spill_registers(void)
+8 -1
arch/xtensa/include/asm/uaccess.h
··· 44 44 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) 45 45 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) 46 46 47 + #define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) 48 + 47 49 /* 48 50 * These are the main single-value transfer routines. They 49 51 * automatically use the right size if we just have the right pointer ··· 263 261 static inline unsigned long 264 262 __xtensa_clear_user(void *addr, unsigned long size) 265 263 { 266 - if ( ! memset(addr, 0, size) ) 264 + if (!__memset(addr, 0, size)) 267 265 return size; 268 266 return 0; 269 267 } ··· 279 277 #define __clear_user __xtensa_clear_user 280 278 281 279 280 + #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER 281 + 282 282 extern long __strncpy_user(char *, const char *, long); 283 283 284 284 static inline long ··· 290 286 return __strncpy_user(dst, src, count); 291 287 return -EFAULT; 292 288 } 289 + #else 290 + long strncpy_from_user(char *dst, const char *src, long count); 291 + #endif 293 292 294 293 /* 295 294 * Return the size of a string (including the ending 0!)
-3
arch/xtensa/kernel/Makefile
··· 17 17 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 18 18 obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o 19 19 20 - AFLAGS_head.o += -mtext-section-literals 21 - AFLAGS_mxhead.o += -mtext-section-literals 22 - 23 20 # In the Xtensa architecture, assembly generates literals which must always 24 21 # precede the L32R instruction with a relative offset less than 256 kB. 25 22 # Therefore, the .text and .literal section must be combined in parenthesis
+2 -5
arch/xtensa/kernel/align.S
··· 19 19 #include <linux/linkage.h> 20 20 #include <asm/current.h> 21 21 #include <asm/asm-offsets.h> 22 + #include <asm/asmmacro.h> 22 23 #include <asm/processor.h> 23 24 24 25 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION ··· 67 66 #define INSN_T 24 68 67 #define INSN_OP1 16 69 68 70 - .macro __src_b r, w0, w1; src \r, \w0, \w1; .endm 71 - .macro __ssa8 r; ssa8b \r; .endm 72 69 .macro __ssa8r r; ssa8l \r; .endm 73 70 .macro __sh r, s; srl \r, \s; .endm 74 71 .macro __sl r, s; sll \r, \s; .endm ··· 80 81 #define INSN_T 4 81 82 #define INSN_OP1 12 82 83 83 - .macro __src_b r, w0, w1; src \r, \w1, \w0; .endm 84 - .macro __ssa8 r; ssa8l \r; .endm 85 84 .macro __ssa8r r; ssa8b \r; .endm 86 85 .macro __sh r, s; sll \r, \s; .endm 87 86 .macro __sl r, s; srl \r, \s; .endm ··· 152 155 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 153 156 */ 154 157 155 - 158 + .literal_position 156 159 ENTRY(fast_unaligned) 157 160 158 161 /* Note: We don't expect the address to be aligned on a word
+16
arch/xtensa/kernel/asm-offsets.c
··· 76 76 DEFINE(TASK_PID, offsetof (struct task_struct, pid)); 77 77 DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); 78 78 DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); 79 + #ifdef CONFIG_CC_STACKPROTECTOR 80 + DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); 81 + #endif 79 82 DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); 80 83 81 84 /* offsets in thread_info struct */ ··· 131 128 DEFINE(DT_ICOUNT_LEVEL_SAVE, 132 129 offsetof(struct debug_table, icount_level_save)); 133 130 #endif 131 + 132 + /* struct exc_table */ 133 + DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk)); 134 + DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); 135 + DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); 136 + DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); 137 + DEFINE(EXC_TABLE_SYSCALL_SAVE, 138 + offsetof(struct exc_table, syscall_save)); 139 + DEFINE(EXC_TABLE_FAST_USER, 140 + offsetof(struct exc_table, fast_user_handler)); 141 + DEFINE(EXC_TABLE_FAST_KERNEL, 142 + offsetof(struct exc_table, fast_kernel_handler)); 143 + DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); 134 144 135 145 return 0; 136 146 }
+1 -2
arch/xtensa/kernel/coprocessor.S
··· 212 212 ENTRY(fast_coprocessor_double) 213 213 214 214 wsr a0, excsave1 215 - movi a0, unrecoverable_exception 216 - callx0 a0 215 + call0 unrecoverable_exception 217 216 218 217 ENDPROC(fast_coprocessor_double) 219 218
+38 -65
arch/xtensa/kernel/entry.S
··· 14 14 15 15 #include <linux/linkage.h> 16 16 #include <asm/asm-offsets.h> 17 + #include <asm/asmmacro.h> 17 18 #include <asm/processor.h> 18 19 #include <asm/coprocessor.h> 19 20 #include <asm/thread_info.h> ··· 126 125 * 127 126 * Note: _user_exception might be at an odd address. Don't use call0..call12 128 127 */ 128 + .literal_position 129 129 130 130 ENTRY(user_exception) 131 131 ··· 477 475 1: 478 476 irq_save a2, a3 479 477 #ifdef CONFIG_TRACE_IRQFLAGS 480 - movi a4, trace_hardirqs_off 481 - callx4 a4 478 + call4 trace_hardirqs_off 482 479 #endif 483 480 484 481 /* Jump if we are returning from kernel exceptions. */ ··· 504 503 /* Call do_signal() */ 505 504 506 505 #ifdef CONFIG_TRACE_IRQFLAGS 507 - movi a4, trace_hardirqs_on 508 - callx4 a4 506 + call4 trace_hardirqs_on 509 507 #endif 510 508 rsil a2, 0 511 - movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) 512 509 mov a6, a1 513 - callx4 a4 510 + call4 do_notify_resume # int do_notify_resume(struct pt_regs*) 514 511 j 1b 515 512 516 513 3: /* Reschedule */ 517 514 518 515 #ifdef CONFIG_TRACE_IRQFLAGS 519 - movi a4, trace_hardirqs_on 520 - callx4 a4 516 + call4 trace_hardirqs_on 521 517 #endif 522 518 rsil a2, 0 523 - movi a4, schedule # void schedule (void) 524 - callx4 a4 519 + call4 schedule # void schedule (void) 525 520 j 1b 526 521 527 522 #ifdef CONFIG_PREEMPT ··· 528 531 529 532 l32i a4, a2, TI_PRE_COUNT 530 533 bnez a4, 4f 531 - movi a4, preempt_schedule_irq 532 - callx4 a4 534 + call4 preempt_schedule_irq 533 535 j 1b 534 536 #endif 535 537 ··· 541 545 5: 542 546 #ifdef CONFIG_HAVE_HW_BREAKPOINT 543 547 _bbci.l a4, TIF_DB_DISABLED, 7f 544 - movi a4, restore_dbreak 545 - callx4 a4 548 + call4 restore_dbreak 546 549 7: 547 550 #endif 548 551 #ifdef CONFIG_DEBUG_TLB_SANITY 549 552 l32i a4, a1, PT_DEPC 550 553 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 551 - movi a4, check_tlb_sanity 552 - callx4 a4 554 + call4 check_tlb_sanity 553 555 #endif 554 556 6: 555 557 4: 556 558 #ifdef 
CONFIG_TRACE_IRQFLAGS 557 559 extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 558 560 bgei a4, LOCKLEVEL, 1f 559 - movi a4, trace_hardirqs_on 560 - callx4 a4 561 + call4 trace_hardirqs_on 561 562 1: 562 563 #endif 563 564 /* Restore optional registers. */ ··· 770 777 * When we get here, a0 is trashed and saved to excsave[debuglevel] 771 778 */ 772 779 780 + .literal_position 781 + 773 782 ENTRY(debug_exception) 774 783 775 784 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL ··· 911 916 unrecoverable_text: 912 917 .ascii "Unrecoverable error in exception handler\0" 913 918 919 + .literal_position 920 + 914 921 ENTRY(unrecoverable_exception) 915 922 916 923 movi a0, 1 ··· 930 933 movi a0, 0 931 934 addi a1, a1, PT_REGS_OFFSET 932 935 933 - movi a4, panic 934 936 movi a6, unrecoverable_text 935 - 936 - callx4 a4 937 + call4 panic 937 938 938 939 1: j 1b 939 940 ··· 1068 1073 xsr a2, depc # restore a2, depc 1069 1074 1070 1075 wsr a0, excsave1 1071 - movi a0, unrecoverable_exception 1072 - callx0 a0 1076 + call0 unrecoverable_exception 1073 1077 1074 1078 ENDPROC(fast_syscall_unrecoverable) 1075 1079 ··· 1095 1101 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1096 1102 * 1097 1103 * Note: we don't have to save a2; a2 holds the return value 1098 - * 1099 - * We use the two macros TRY and CATCH: 1100 - * 1101 - * TRY adds an entry to the __ex_table fixup table for the immediately 1102 - * following instruction. 
1103 - * 1104 - * CATCH catches any exception that occurred at one of the preceding TRY 1105 - * statements and continues from there 1106 - * 1107 - * Usage TRY l32i a0, a1, 0 1108 - * <other code> 1109 - * done: rfe 1110 - * CATCH <set return code> 1111 - * j done 1112 1104 */ 1113 1105 1106 + .literal_position 1107 + 1114 1108 #ifdef CONFIG_FAST_SYSCALL_XTENSA 1115 - 1116 - #define TRY \ 1117 - .section __ex_table, "a"; \ 1118 - .word 66f, 67f; \ 1119 - .text; \ 1120 - 66: 1121 - 1122 - #define CATCH \ 1123 - 67: 1124 1109 1125 1110 ENTRY(fast_syscall_xtensa) 1126 1111 ··· 1114 1141 1115 1142 .Lswp: /* Atomic compare and swap */ 1116 1143 1117 - TRY l32i a0, a3, 0 # read old value 1144 + EX(.Leac) l32i a0, a3, 0 # read old value 1118 1145 bne a0, a4, 1f # same as old value? jump 1119 - TRY s32i a5, a3, 0 # different, modify value 1146 + EX(.Leac) s32i a5, a3, 0 # different, modify value 1120 1147 l32i a7, a2, PT_AREG7 # restore a7 1121 1148 l32i a0, a2, PT_AREG0 # restore a0 1122 1149 movi a2, 1 # and return 1 ··· 1129 1156 1130 1157 .Lnswp: /* Atomic set, add, and exg_add. 
*/ 1131 1158 1132 - TRY l32i a7, a3, 0 # orig 1159 + EX(.Leac) l32i a7, a3, 0 # orig 1133 1160 addi a6, a6, -SYS_XTENSA_ATOMIC_SET 1134 1161 add a0, a4, a7 # + arg 1135 1162 moveqz a0, a4, a6 # set 1136 1163 addi a6, a6, SYS_XTENSA_ATOMIC_SET 1137 - TRY s32i a0, a3, 0 # write new value 1164 + EX(.Leac) s32i a0, a3, 0 # write new value 1138 1165 1139 1166 mov a0, a2 1140 1167 mov a2, a7 ··· 1142 1169 l32i a0, a0, PT_AREG0 # restore a0 1143 1170 rfe 1144 1171 1145 - CATCH 1146 1172 .Leac: l32i a7, a2, PT_AREG7 # restore a7 1147 1173 l32i a0, a2, PT_AREG0 # restore a0 1148 1174 movi a2, -EFAULT ··· 1383 1411 rsync 1384 1412 1385 1413 movi a6, SIGSEGV 1386 - movi a4, do_exit 1387 - callx4 a4 1414 + call4 do_exit 1388 1415 1389 1416 /* shouldn't return, so panic */ 1390 1417 1391 1418 wsr a0, excsave1 1392 - movi a0, unrecoverable_exception 1393 - callx0 a0 # should not return 1419 + call0 unrecoverable_exception # should not return 1394 1420 1: j 1b 1395 1421 1396 1422 ··· 1534 1564 1535 1565 ENTRY(fast_second_level_miss_double_kernel) 1536 1566 1537 - 1: movi a0, unrecoverable_exception 1538 - callx0 a0 # should not return 1567 + 1: 1568 + call0 unrecoverable_exception # should not return 1539 1569 1: j 1b 1540 1570 1541 1571 ENDPROC(fast_second_level_miss_double_kernel) ··· 1857 1887 * void system_call (struct pt_regs* regs, int exccause) 1858 1888 * a2 a3 1859 1889 */ 1890 + .literal_position 1860 1891 1861 1892 ENTRY(system_call) 1862 1893 ··· 1867 1896 1868 1897 l32i a3, a2, PT_AREG2 1869 1898 mov a6, a2 1870 - movi a4, do_syscall_trace_enter 1871 1899 s32i a3, a2, PT_SYSCALL 1872 - callx4 a4 1900 + call4 do_syscall_trace_enter 1873 1901 mov a3, a6 1874 1902 1875 1903 /* syscall = sys_call_table[syscall_nr] */ ··· 1900 1930 1: /* regs->areg[2] = return_value */ 1901 1931 1902 1932 s32i a6, a2, PT_AREG2 1903 - movi a4, do_syscall_trace_leave 1904 1933 mov a6, a2 1905 - callx4 a4 1934 + call4 do_syscall_trace_leave 1906 1935 retw 1907 1936 1908 1937 
ENDPROC(system_call) ··· 1971 2002 s32i a1, a2, THREAD_SP # save stack pointer 1972 2003 #endif 1973 2004 2005 + #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 2006 + movi a6, __stack_chk_guard 2007 + l32i a8, a3, TASK_STACK_CANARY 2008 + s32i a8, a6, 0 2009 + #endif 2010 + 1974 2011 /* Disable ints while we manipulate the stack pointer. */ 1975 2012 1976 2013 irq_save a14, a3 ··· 2023 2048 /* void schedule_tail (struct task_struct *prev) 2024 2049 * Note: prev is still in a6 (return value from fake call4 frame) 2025 2050 */ 2026 - movi a4, schedule_tail 2027 - callx4 a4 2051 + call4 schedule_tail 2028 2052 2029 - movi a4, do_syscall_trace_leave 2030 2053 mov a6, a1 2031 - callx4 a4 2054 + call4 do_syscall_trace_leave 2032 2055 2033 2056 j common_exception_return 2034 2057
+3 -7
arch/xtensa/kernel/head.S
··· 264 264 265 265 /* init_arch kick-starts the linux kernel */ 266 266 267 - movi a4, init_arch 268 - callx4 a4 269 - 270 - movi a4, start_kernel 271 - callx4 a4 267 + call4 init_arch 268 + call4 start_kernel 272 269 273 270 should_never_return: 274 271 j should_never_return ··· 291 294 movi a6, 0 292 295 wsr a6, excsave1 293 296 294 - movi a4, secondary_start_kernel 295 - callx4 a4 297 + call4 secondary_start_kernel 296 298 j should_never_return 297 299 298 300 #endif /* CONFIG_SMP */
+8 -11
arch/xtensa/kernel/module.c
··· 22 22 #include <linux/kernel.h> 23 23 #include <linux/cache.h> 24 24 25 - #undef DEBUG_RELOCATE 26 - 27 25 static int 28 26 decode_calln_opcode (unsigned char *location) 29 27 { ··· 56 58 unsigned char *location; 57 59 uint32_t value; 58 60 59 - #ifdef DEBUG_RELOCATE 60 - printk("Applying relocate section %u to %u\n", relsec, 61 - sechdrs[relsec].sh_info); 62 - #endif 61 + pr_debug("Applying relocate section %u to %u\n", relsec, 62 + sechdrs[relsec].sh_info); 63 + 63 64 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { 64 65 location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr 65 66 + rela[i].r_offset; ··· 84 87 value -= ((unsigned long)location & -4) + 4; 85 88 if ((value & 3) != 0 || 86 89 ((value + (1 << 19)) >> 20) != 0) { 87 - printk("%s: relocation out of range, " 90 + pr_err("%s: relocation out of range, " 88 91 "section %d reloc %d " 89 92 "sym '%s'\n", 90 93 mod->name, relsec, i, ··· 108 111 value -= (((unsigned long)location + 3) & -4); 109 112 if ((value & 3) != 0 || 110 113 (signed int)value >> 18 != -1) { 111 - printk("%s: relocation out of range, " 114 + pr_err("%s: relocation out of range, " 112 115 "section %d reloc %d " 113 116 "sym '%s'\n", 114 117 mod->name, relsec, i, ··· 153 156 case R_XTENSA_SLOT12_OP: 154 157 case R_XTENSA_SLOT13_OP: 155 158 case R_XTENSA_SLOT14_OP: 156 - printk("%s: unexpected FLIX relocation: %u\n", 159 + pr_err("%s: unexpected FLIX relocation: %u\n", 157 160 mod->name, 158 161 ELF32_R_TYPE(rela[i].r_info)); 159 162 return -ENOEXEC; ··· 173 176 case R_XTENSA_SLOT12_ALT: 174 177 case R_XTENSA_SLOT13_ALT: 175 178 case R_XTENSA_SLOT14_ALT: 176 - printk("%s: unexpected ALT relocation: %u\n", 179 + pr_err("%s: unexpected ALT relocation: %u\n", 177 180 mod->name, 178 181 ELF32_R_TYPE(rela[i].r_info)); 179 182 return -ENOEXEC; 180 183 181 184 default: 182 - printk("%s: unexpected relocation: %u\n", 185 + pr_err("%s: unexpected relocation: %u\n", 183 186 mod->name, 184 187 ELF32_R_TYPE(rela[i].r_info)); 185 
188 return -ENOEXEC;
+11 -19
arch/xtensa/kernel/pci.c
··· 29 29 #include <asm/pci-bridge.h> 30 30 #include <asm/platform.h> 31 31 32 - #undef DEBUG 33 - 34 - #ifdef DEBUG 35 - #define DBG(x...) printk(x) 36 - #else 37 - #define DBG(x...) 38 - #endif 39 - 40 32 /* PCI Controller */ 41 33 42 34 ··· 93 101 for(idx=0; idx<6; idx++) { 94 102 r = &dev->resource[idx]; 95 103 if (!r->start && r->end) { 96 - printk (KERN_ERR "PCI: Device %s not available because " 97 - "of resource collisions\n", pci_name(dev)); 104 + pr_err("PCI: Device %s not available because " 105 + "of resource collisions\n", pci_name(dev)); 98 106 return -EINVAL; 99 107 } 100 108 if (r->flags & IORESOURCE_IO) ··· 105 113 if (dev->resource[PCI_ROM_RESOURCE].start) 106 114 cmd |= PCI_COMMAND_MEMORY; 107 115 if (cmd != old_cmd) { 108 - printk("PCI: Enabling device %s (%04x -> %04x)\n", 116 + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", 109 117 pci_name(dev), old_cmd, cmd); 110 118 pci_write_config_word(dev, PCI_COMMAND, cmd); 111 119 } ··· 136 144 res = &pci_ctrl->io_resource; 137 145 if (!res->flags) { 138 146 if (io_offset) 139 - printk (KERN_ERR "I/O resource not set for host" 140 - " bridge %d\n", pci_ctrl->index); 147 + pr_err("I/O resource not set for host bridge %d\n", 148 + pci_ctrl->index); 141 149 res->start = 0; 142 150 res->end = IO_SPACE_LIMIT; 143 151 res->flags = IORESOURCE_IO; ··· 151 159 if (!res->flags) { 152 160 if (i > 0) 153 161 continue; 154 - printk(KERN_ERR "Memory resource not set for " 155 - "host bridge %d\n", pci_ctrl->index); 162 + pr_err("Memory resource not set for host bridge %d\n", 163 + pci_ctrl->index); 156 164 res->start = 0; 157 165 res->end = ~0U; 158 166 res->flags = IORESOURCE_MEM; ··· 168 176 struct pci_bus *bus; 169 177 int next_busno = 0, ret; 170 178 171 - printk("PCI: Probing PCI hardware\n"); 179 + pr_info("PCI: Probing PCI hardware\n"); 172 180 173 181 /* Scan all of the recorded PCI controllers. 
*/ 174 182 for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { ··· 224 232 for (idx=0; idx<6; idx++) { 225 233 r = &dev->resource[idx]; 226 234 if (!r->start && r->end) { 227 - printk(KERN_ERR "PCI: Device %s not available because " 235 + pr_err("PCI: Device %s not available because " 228 236 "of resource collisions\n", pci_name(dev)); 229 237 return -EINVAL; 230 238 } ··· 234 242 cmd |= PCI_COMMAND_MEMORY; 235 243 } 236 244 if (cmd != old_cmd) { 237 - printk("PCI: Enabling device %s (%04x -> %04x)\n", 238 - pci_name(dev), old_cmd, cmd); 245 + pr_info("PCI: Enabling device %s (%04x -> %04x)\n", 246 + pci_name(dev), old_cmd, cmd); 239 247 pci_write_config_word(dev, PCI_COMMAND, cmd); 240 248 } 241 249
+6
arch/xtensa/kernel/process.c
··· 58 58 EXPORT_SYMBOL(pm_power_off); 59 59 60 60 61 + #ifdef CONFIG_CC_STACKPROTECTOR 62 + #include <linux/stackprotector.h> 63 + unsigned long __stack_chk_guard __read_mostly; 64 + EXPORT_SYMBOL(__stack_chk_guard); 65 + #endif 66 + 61 67 #if XTENSA_HAVE_COPROCESSORS 62 68 63 69 void coprocessor_release_all(struct thread_info *ti)
+32 -17
arch/xtensa/kernel/setup.c
··· 36 36 #endif 37 37 38 38 #include <asm/bootparam.h> 39 + #include <asm/kasan.h> 39 40 #include <asm/mmu_context.h> 40 41 #include <asm/pgtable.h> 41 42 #include <asm/processor.h> ··· 157 156 /* Boot parameters must start with a BP_TAG_FIRST tag. */ 158 157 159 158 if (tag->id != BP_TAG_FIRST) { 160 - printk(KERN_WARNING "Invalid boot parameters!\n"); 159 + pr_warn("Invalid boot parameters!\n"); 161 160 return 0; 162 161 } 163 162 ··· 166 165 /* Parse all tags. */ 167 166 168 167 while (tag != NULL && tag->id != BP_TAG_LAST) { 169 - for (t = &__tagtable_begin; t < &__tagtable_end; t++) { 168 + for (t = &__tagtable_begin; t < &__tagtable_end; t++) { 170 169 if (tag->id == t->tag) { 171 170 t->parse(tag); 172 171 break; 173 172 } 174 173 } 175 174 if (t == &__tagtable_end) 176 - printk(KERN_WARNING "Ignoring tag " 177 - "0x%08x\n", tag->id); 175 + pr_warn("Ignoring tag 0x%08x\n", tag->id); 178 176 tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); 179 177 } 180 178 ··· 207 207 xtensa_kio_paddr = of_read_ulong(ranges+1, 1); 208 208 /* round down to nearest 256MB boundary */ 209 209 xtensa_kio_paddr &= 0xf0000000; 210 + 211 + init_kio(); 210 212 211 213 return 1; 212 214 } ··· 248 246 249 247 void __init init_arch(bp_tag_t *bp_start) 250 248 { 249 + /* Initialize MMU. */ 250 + 251 + init_mmu(); 252 + 253 + /* Initialize initial KASAN shadow map */ 254 + 255 + kasan_early_init(); 256 + 251 257 /* Parse boot parameters */ 252 258 253 259 if (bp_start) ··· 273 263 /* Early hook for platforms */ 274 264 275 265 platform_init(bp_start); 276 - 277 - /* Initialize MMU. 
*/ 278 - 279 - init_mmu(); 280 266 } 281 267 282 268 /* ··· 283 277 extern char _stext[]; 284 278 extern char _WindowVectors_text_start; 285 279 extern char _WindowVectors_text_end; 286 - extern char _DebugInterruptVector_literal_start; 280 + extern char _DebugInterruptVector_text_start; 287 281 extern char _DebugInterruptVector_text_end; 288 - extern char _KernelExceptionVector_literal_start; 282 + extern char _KernelExceptionVector_text_start; 289 283 extern char _KernelExceptionVector_text_end; 290 - extern char _UserExceptionVector_literal_start; 284 + extern char _UserExceptionVector_text_start; 291 285 extern char _UserExceptionVector_text_end; 292 - extern char _DoubleExceptionVector_literal_start; 286 + extern char _DoubleExceptionVector_text_start; 293 287 extern char _DoubleExceptionVector_text_end; 294 288 #if XCHAL_EXCM_LEVEL >= 2 295 289 extern char _Level2InterruptVector_text_start; ··· 323 317 324 318 void __init setup_arch(char **cmdline_p) 325 319 { 320 + pr_info("config ID: %08x:%08x\n", 321 + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE)); 322 + if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || 323 + get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) 324 + pr_info("built for config ID: %08x:%08x\n", 325 + XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); 326 + 326 327 *cmdline_p = command_line; 327 328 platform_setup(cmdline_p); 328 329 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); ··· 352 339 mem_reserve(__pa(&_WindowVectors_text_start), 353 340 __pa(&_WindowVectors_text_end)); 354 341 355 - mem_reserve(__pa(&_DebugInterruptVector_literal_start), 342 + mem_reserve(__pa(&_DebugInterruptVector_text_start), 356 343 __pa(&_DebugInterruptVector_text_end)); 357 344 358 - mem_reserve(__pa(&_KernelExceptionVector_literal_start), 345 + mem_reserve(__pa(&_KernelExceptionVector_text_start), 359 346 __pa(&_KernelExceptionVector_text_end)); 360 347 361 - mem_reserve(__pa(&_UserExceptionVector_literal_start), 348 + mem_reserve(__pa(&_UserExceptionVector_text_start), 362 
349 __pa(&_UserExceptionVector_text_end)); 363 350 364 - mem_reserve(__pa(&_DoubleExceptionVector_literal_start), 351 + mem_reserve(__pa(&_DoubleExceptionVector_text_start), 365 352 __pa(&_DoubleExceptionVector_text_end)); 366 353 367 354 #if XCHAL_EXCM_LEVEL >= 2 ··· 393 380 #endif 394 381 parse_early_param(); 395 382 bootmem_init(); 396 - 383 + kasan_init(); 397 384 unflatten_and_copy_device_tree(); 398 385 399 386 #ifdef CONFIG_SMP ··· 595 582 "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" 596 583 "core ID\t\t: " XCHAL_CORE_ID "\n" 597 584 "build ID\t: 0x%x\n" 585 + "config ID\t: %08x:%08x\n" 598 586 "byte order\t: %s\n" 599 587 "cpu MHz\t\t: %lu.%02lu\n" 600 588 "bogomips\t: %lu.%02lu\n", 601 589 num_online_cpus(), 602 590 cpumask_pr_args(cpu_online_mask), 603 591 XCHAL_BUILD_UNIQUE_ID, 592 + get_sr(SREG_EPC), get_sr(SREG_EXCSAVE), 604 593 XCHAL_HAVE_BE ? "big" : "little", 605 594 ccount_freq/1000000, 606 595 (ccount_freq/10000) % 100,
+2 -6
arch/xtensa/kernel/signal.c
··· 28 28 #include <asm/coprocessor.h> 29 29 #include <asm/unistd.h> 30 30 31 - #define DEBUG_SIG 0 32 - 33 31 extern struct task_struct *coproc_owners[]; 34 32 35 33 struct rt_sigframe ··· 397 399 regs->areg[8] = (unsigned long) &frame->uc; 398 400 regs->threadptr = tp; 399 401 400 - #if DEBUG_SIG 401 - printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n", 402 - current->comm, current->pid, sig, frame, regs->pc); 403 - #endif 402 + pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", 403 + current->comm, current->pid, sig, frame, regs->pc); 404 404 405 405 return 0; 406 406 }
+33 -31
arch/xtensa/kernel/traps.c
··· 33 33 #include <linux/kallsyms.h> 34 34 #include <linux/delay.h> 35 35 #include <linux/hardirq.h> 36 + #include <linux/ratelimit.h> 36 37 37 38 #include <asm/stacktrace.h> 38 39 #include <asm/ptrace.h> ··· 159 158 * 2. it is a temporary memory buffer for the exception handlers. 160 159 */ 161 160 162 - DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); 163 - 161 + DEFINE_PER_CPU(struct exc_table, exc_table); 164 162 DEFINE_PER_CPU(struct debug_table, debug_table); 165 163 166 164 void die(const char*, struct pt_regs*, long); ··· 178 178 void do_unhandled(struct pt_regs *regs, unsigned long exccause) 179 179 { 180 180 __die_if_kernel("Caught unhandled exception - should not happen", 181 - regs, SIGKILL); 181 + regs, SIGKILL); 182 182 183 183 /* If in user mode, send SIGILL signal to current process */ 184 - printk("Caught unhandled exception in '%s' " 185 - "(pid = %d, pc = %#010lx) - should not happen\n" 186 - "\tEXCCAUSE is %ld\n", 187 - current->comm, task_pid_nr(current), regs->pc, exccause); 184 + pr_info_ratelimited("Caught unhandled exception in '%s' " 185 + "(pid = %d, pc = %#010lx) - should not happen\n" 186 + "\tEXCCAUSE is %ld\n", 187 + current->comm, task_pid_nr(current), regs->pc, 188 + exccause); 188 189 force_sig(SIGILL, current); 189 190 } 190 191 ··· 306 305 307 306 /* If in user mode, send SIGILL signal to current process. 
*/ 308 307 309 - printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", 310 - current->comm, task_pid_nr(current), regs->pc); 308 + pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", 309 + current->comm, task_pid_nr(current), regs->pc); 311 310 force_sig(SIGILL, current); 312 311 } 313 312 ··· 326 325 siginfo_t info; 327 326 328 327 __die_if_kernel("Unhandled unaligned exception in kernel", 329 - regs, SIGKILL); 328 + regs, SIGKILL); 330 329 331 330 current->thread.bad_vaddr = regs->excvaddr; 332 331 current->thread.error_code = -3; 333 - printk("Unaligned memory access to %08lx in '%s' " 334 - "(pid = %d, pc = %#010lx)\n", 335 - regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); 332 + pr_info_ratelimited("Unaligned memory access to %08lx in '%s' " 333 + "(pid = %d, pc = %#010lx)\n", 334 + regs->excvaddr, current->comm, 335 + task_pid_nr(current), regs->pc); 336 336 info.si_signo = SIGBUS; 337 337 info.si_errno = 0; 338 338 info.si_code = BUS_ADRALN; ··· 367 365 } 368 366 369 367 370 - static void set_handler(int idx, void *handler) 371 - { 372 - unsigned int cpu; 373 - 374 - for_each_possible_cpu(cpu) 375 - per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; 376 - } 368 + #define set_handler(type, cause, handler) \ 369 + do { \ 370 + unsigned int cpu; \ 371 + \ 372 + for_each_possible_cpu(cpu) \ 373 + per_cpu(exc_table, cpu).type[cause] = (handler);\ 374 + } while (0) 377 375 378 376 /* Set exception C handler - for temporary use when probing exceptions */ 379 377 380 378 void * __init trap_set_handler(int cause, void *handler) 381 379 { 382 - void *previous = (void *)per_cpu(exc_table, 0)[ 383 - EXC_TABLE_DEFAULT / 4 + cause]; 384 - set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); 380 + void *previous = per_cpu(exc_table, 0).default_handler[cause]; 381 + 382 + set_handler(default_handler, cause, handler); 385 383 return previous; 386 384 } 387 385 388 386 389 387 static void trap_init_excsave(void) 390 388 
{ 391 - unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); 389 + unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table); 392 390 __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); 393 391 } 394 392 ··· 420 418 421 419 /* Setup default vectors. */ 422 420 423 - for(i = 0; i < 64; i++) { 424 - set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception); 425 - set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception); 426 - set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled); 421 + for (i = 0; i < EXCCAUSE_N; i++) { 422 + set_handler(fast_user_handler, i, user_exception); 423 + set_handler(fast_kernel_handler, i, kernel_exception); 424 + set_handler(default_handler, i, do_unhandled); 427 425 } 428 426 429 427 /* Setup specific handlers. */ ··· 435 433 void *handler = dispatch_init_table[i].handler; 436 434 437 435 if (fast == 0) 438 - set_handler (EXC_TABLE_DEFAULT/4 + cause, handler); 436 + set_handler(default_handler, cause, handler); 439 437 if (fast && fast & USER) 440 - set_handler (EXC_TABLE_FAST_USER/4 + cause, handler); 438 + set_handler(fast_user_handler, cause, handler); 441 439 if (fast && fast & KRNL) 442 - set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler); 440 + set_handler(fast_kernel_handler, cause, handler); 443 441 } 444 442 445 443 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
+8 -9
arch/xtensa/kernel/vectors.S
··· 205 205 */ 206 206 207 207 .section .DoubleExceptionVector.text, "ax" 208 - .begin literal_prefix .DoubleExceptionVector 209 - .globl _DoubleExceptionVector_WindowUnderflow 210 - .globl _DoubleExceptionVector_WindowOverflow 211 208 212 209 ENTRY(_DoubleExceptionVector) 213 210 ··· 214 217 /* Check for kernel double exception (usually fatal). */ 215 218 216 219 rsr a2, ps 217 - _bbci.l a2, PS_UM_BIT, .Lksp 220 + _bbsi.l a2, PS_UM_BIT, 1f 221 + j .Lksp 218 222 223 + .align 4 224 + .literal_position 225 + 1: 219 226 /* Check if we are currently handling a window exception. */ 220 227 /* Note: We don't need to indicate that we enter a critical section. */ 221 228 ··· 305 304 .Lunrecoverable: 306 305 rsr a3, excsave1 307 306 wsr a0, excsave1 308 - movi a0, unrecoverable_exception 309 - callx0 a0 307 + call0 unrecoverable_exception 310 308 311 309 .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ 312 310 ··· 475 475 rotw -3 476 476 j 1b 477 477 478 - 479 478 ENDPROC(_DoubleExceptionVector) 480 - 481 - .end literal_prefix 482 479 483 480 .text 484 481 /* ··· 504 507 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE 505 508 * a3: exctable, original value in excsave1 506 509 */ 510 + 511 + .literal_position 507 512 508 513 ENTRY(window_overflow_restore_a0_fixup) 509 514
+24 -66
arch/xtensa/kernel/vmlinux.lds.S
··· 45 45 LONG(sym ## _end); \ 46 46 LONG(LOADADDR(section)) 47 47 48 - /* Macro to define a section for a vector. 49 - * 50 - * Use of the MIN function catches the types of errors illustrated in 51 - * the following example: 52 - * 53 - * Assume the section .DoubleExceptionVector.literal is completely 54 - * full. Then a programmer adds code to .DoubleExceptionVector.text 55 - * that produces another literal. The final literal position will 56 - * overlay onto the first word of the adjacent code section 57 - * .DoubleExceptionVector.text. (In practice, the literals will 58 - * overwrite the code, and the first few instructions will be 59 - * garbage.) 48 + /* 49 + * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is 50 + * defined code for every vector is located with other init data. At startup 51 + * time head.S copies code for every vector to its final position according 52 + * to description recorded in the corresponding RELOCATE_ENTRY. 60 53 */ 61 54 62 55 #ifdef CONFIG_VECTORS_OFFSET 63 - #define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ 64 - section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ 65 - LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ 56 + #define SECTION_VECTOR(sym, section, addr, prevsec) \ 57 + section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ 66 58 { \ 67 59 . 
= ALIGN(4); \ 68 60 sym ## _start = ABSOLUTE(.); \ ··· 104 112 #if XCHAL_EXCM_LEVEL >= 6 105 113 SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) 106 114 #endif 107 - SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) 108 115 SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) 109 - SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) 110 116 SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) 111 - SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) 112 117 SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) 113 - SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) 114 118 SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) 115 119 #endif 116 120 121 + IRQENTRY_TEXT 122 + SOFTIRQENTRY_TEXT 123 + ENTRY_TEXT 117 124 TEXT_TEXT 118 - VMLINUX_SYMBOL(__sched_text_start) = .; 119 - *(.sched.literal .sched.text) 120 - VMLINUX_SYMBOL(__sched_text_end) = .; 121 - VMLINUX_SYMBOL(__cpuidle_text_start) = .; 122 - *(.cpuidle.literal .cpuidle.text) 123 - VMLINUX_SYMBOL(__cpuidle_text_end) = .; 124 - VMLINUX_SYMBOL(__lock_text_start) = .; 125 - *(.spinlock.literal .spinlock.text) 126 - VMLINUX_SYMBOL(__lock_text_end) = .; 125 + SCHED_TEXT 126 + CPUIDLE_TEXT 127 + LOCK_TEXT 127 128 128 129 } 129 130 _etext = .; ··· 181 196 .KernelExceptionVector.text); 182 197 RELOCATE_ENTRY(_UserExceptionVector_text, 183 198 .UserExceptionVector.text); 184 - RELOCATE_ENTRY(_DoubleExceptionVector_literal, 185 - .DoubleExceptionVector.literal); 186 199 RELOCATE_ENTRY(_DoubleExceptionVector_text, 187 200 .DoubleExceptionVector.text); 188 201 RELOCATE_ENTRY(_DebugInterruptVector_text, ··· 213 230 214 231 SECTION_VECTOR (_WindowVectors_text, 215 232 .WindowVectors.text, 216 - WINDOW_VECTORS_VADDR, 4, 233 + WINDOW_VECTORS_VADDR, 217 234 .dummy) 218 - SECTION_VECTOR (_DebugInterruptVector_literal, 219 - .DebugInterruptVector.literal, 220 - 
DEBUG_VECTOR_VADDR - 4, 221 - SIZEOF(.WindowVectors.text), 222 - .WindowVectors.text) 223 235 SECTION_VECTOR (_DebugInterruptVector_text, 224 236 .DebugInterruptVector.text, 225 237 DEBUG_VECTOR_VADDR, 226 - 4, 227 - .DebugInterruptVector.literal) 238 + .WindowVectors.text) 228 239 #undef LAST 229 240 #define LAST .DebugInterruptVector.text 230 241 #if XCHAL_EXCM_LEVEL >= 2 231 242 SECTION_VECTOR (_Level2InterruptVector_text, 232 243 .Level2InterruptVector.text, 233 244 INTLEVEL2_VECTOR_VADDR, 234 - SIZEOF(LAST), LAST) 245 + LAST) 235 246 # undef LAST 236 247 # define LAST .Level2InterruptVector.text 237 248 #endif ··· 233 256 SECTION_VECTOR (_Level3InterruptVector_text, 234 257 .Level3InterruptVector.text, 235 258 INTLEVEL3_VECTOR_VADDR, 236 - SIZEOF(LAST), LAST) 259 + LAST) 237 260 # undef LAST 238 261 # define LAST .Level3InterruptVector.text 239 262 #endif ··· 241 264 SECTION_VECTOR (_Level4InterruptVector_text, 242 265 .Level4InterruptVector.text, 243 266 INTLEVEL4_VECTOR_VADDR, 244 - SIZEOF(LAST), LAST) 267 + LAST) 245 268 # undef LAST 246 269 # define LAST .Level4InterruptVector.text 247 270 #endif ··· 249 272 SECTION_VECTOR (_Level5InterruptVector_text, 250 273 .Level5InterruptVector.text, 251 274 INTLEVEL5_VECTOR_VADDR, 252 - SIZEOF(LAST), LAST) 275 + LAST) 253 276 # undef LAST 254 277 # define LAST .Level5InterruptVector.text 255 278 #endif ··· 257 280 SECTION_VECTOR (_Level6InterruptVector_text, 258 281 .Level6InterruptVector.text, 259 282 INTLEVEL6_VECTOR_VADDR, 260 - SIZEOF(LAST), LAST) 283 + LAST) 261 284 # undef LAST 262 285 # define LAST .Level6InterruptVector.text 263 286 #endif 264 - SECTION_VECTOR (_KernelExceptionVector_literal, 265 - .KernelExceptionVector.literal, 266 - KERNEL_VECTOR_VADDR - 4, 267 - SIZEOF(LAST), LAST) 268 - #undef LAST 269 287 SECTION_VECTOR (_KernelExceptionVector_text, 270 288 .KernelExceptionVector.text, 271 289 KERNEL_VECTOR_VADDR, 272 - 4, 273 - .KernelExceptionVector.literal) 274 - SECTION_VECTOR 
(_UserExceptionVector_literal, 275 - .UserExceptionVector.literal, 276 - USER_VECTOR_VADDR - 4, 277 - SIZEOF(.KernelExceptionVector.text), 278 - .KernelExceptionVector.text) 290 + LAST) 291 + #undef LAST 279 292 SECTION_VECTOR (_UserExceptionVector_text, 280 293 .UserExceptionVector.text, 281 294 USER_VECTOR_VADDR, 282 - 4, 283 - .UserExceptionVector.literal) 284 - SECTION_VECTOR (_DoubleExceptionVector_literal, 285 - .DoubleExceptionVector.literal, 286 - DOUBLEEXC_VECTOR_VADDR - 20, 287 - SIZEOF(.UserExceptionVector.text), 288 - .UserExceptionVector.text) 295 + .KernelExceptionVector.text) 289 296 SECTION_VECTOR (_DoubleExceptionVector_text, 290 297 .DoubleExceptionVector.text, 291 298 DOUBLEEXC_VECTOR_VADDR, 292 - 20, 293 - .DoubleExceptionVector.literal) 299 + .UserExceptionVector.text) 294 300 295 301 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 296 302 ··· 283 323 SECTION_VECTOR (_SecondaryResetVector_text, 284 324 .SecondaryResetVector.text, 285 325 RESET_VECTOR1_VADDR, 286 - SIZEOF(.DoubleExceptionVector.text), 287 326 .DoubleExceptionVector.text) 288 327 289 328 . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); ··· 332 373 333 374 /* Sections to be discarded */ 334 375 DISCARDS 335 - /DISCARD/ : { *(.exit.literal) } 336 376 }
+5
arch/xtensa/kernel/xtensa_ksyms.c
··· 41 41 EXPORT_SYMBOL(memset); 42 42 EXPORT_SYMBOL(memcpy); 43 43 EXPORT_SYMBOL(memmove); 44 + EXPORT_SYMBOL(__memset); 45 + EXPORT_SYMBOL(__memcpy); 46 + EXPORT_SYMBOL(__memmove); 47 + #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER 44 48 EXPORT_SYMBOL(__strncpy_user); 49 + #endif 45 50 EXPORT_SYMBOL(clear_page); 46 51 EXPORT_SYMBOL(copy_page); 47 52
+30 -44
arch/xtensa/lib/checksum.S
··· 14 14 * 2 of the License, or (at your option) any later version. 15 15 */ 16 16 17 - #include <asm/errno.h> 17 + #include <linux/errno.h> 18 18 #include <linux/linkage.h> 19 19 #include <variant/core.h> 20 + #include <asm/asmmacro.h> 20 21 21 22 /* 22 23 * computes a partial checksum, e.g. for TCP/UDP fragments ··· 176 175 177 176 /* 178 177 * Copy from ds while checksumming, otherwise like csum_partial 179 - * 180 - * The macros SRC and DST specify the type of access for the instruction. 181 - * thus we can call a custom exception handler for each access type. 182 178 */ 183 - 184 - #define SRC(y...) \ 185 - 9999: y; \ 186 - .section __ex_table, "a"; \ 187 - .long 9999b, 6001f ; \ 188 - .previous 189 - 190 - #define DST(y...) \ 191 - 9999: y; \ 192 - .section __ex_table, "a"; \ 193 - .long 9999b, 6002f ; \ 194 - .previous 195 179 196 180 /* 197 181 unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, ··· 230 244 add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ 231 245 .Loop5: 232 246 #endif 233 - SRC( l32i a9, a2, 0 ) 234 - SRC( l32i a8, a2, 4 ) 235 - DST( s32i a9, a3, 0 ) 236 - DST( s32i a8, a3, 4 ) 247 + EX(10f) l32i a9, a2, 0 248 + EX(10f) l32i a8, a2, 4 249 + EX(11f) s32i a9, a3, 0 250 + EX(11f) s32i a8, a3, 4 237 251 ONES_ADD(a5, a9) 238 252 ONES_ADD(a5, a8) 239 - SRC( l32i a9, a2, 8 ) 240 - SRC( l32i a8, a2, 12 ) 241 - DST( s32i a9, a3, 8 ) 242 - DST( s32i a8, a3, 12 ) 253 + EX(10f) l32i a9, a2, 8 254 + EX(10f) l32i a8, a2, 12 255 + EX(11f) s32i a9, a3, 8 256 + EX(11f) s32i a8, a3, 12 243 257 ONES_ADD(a5, a9) 244 258 ONES_ADD(a5, a8) 245 - SRC( l32i a9, a2, 16 ) 246 - SRC( l32i a8, a2, 20 ) 247 - DST( s32i a9, a3, 16 ) 248 - DST( s32i a8, a3, 20 ) 259 + EX(10f) l32i a9, a2, 16 260 + EX(10f) l32i a8, a2, 20 261 + EX(11f) s32i a9, a3, 16 262 + EX(11f) s32i a8, a3, 20 249 263 ONES_ADD(a5, a9) 250 264 ONES_ADD(a5, a8) 251 - SRC( l32i a9, a2, 24 ) 252 - SRC( l32i a8, a2, 28 ) 253 - DST( s32i a9, a3, 24 ) 254 - DST( s32i a8, 
a3, 28 ) 265 + EX(10f) l32i a9, a2, 24 266 + EX(10f) l32i a8, a2, 28 267 + EX(11f) s32i a9, a3, 24 268 + EX(11f) s32i a8, a3, 28 255 269 ONES_ADD(a5, a9) 256 270 ONES_ADD(a5, a8) 257 271 addi a2, a2, 32 ··· 270 284 add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ 271 285 .Loop6: 272 286 #endif 273 - SRC( l32i a9, a2, 0 ) 274 - DST( s32i a9, a3, 0 ) 287 + EX(10f) l32i a9, a2, 0 288 + EX(11f) s32i a9, a3, 0 275 289 ONES_ADD(a5, a9) 276 290 addi a2, a2, 4 277 291 addi a3, a3, 4 ··· 301 315 add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ 302 316 .Loop7: 303 317 #endif 304 - SRC( l16ui a9, a2, 0 ) 305 - DST( s16i a9, a3, 0 ) 318 + EX(10f) l16ui a9, a2, 0 319 + EX(11f) s16i a9, a3, 0 306 320 ONES_ADD(a5, a9) 307 321 addi a2, a2, 2 308 322 addi a3, a3, 2 ··· 312 326 4: 313 327 /* This section processes a possible trailing odd byte. */ 314 328 _bbci.l a4, 0, 8f /* 1-byte chunk */ 315 - SRC( l8ui a9, a2, 0 ) 316 - DST( s8i a9, a3, 0 ) 329 + EX(10f) l8ui a9, a2, 0 330 + EX(11f) s8i a9, a3, 0 317 331 #ifdef __XTENSA_EB__ 318 332 slli a9, a9, 8 /* shift byte to bits 8..15 */ 319 333 #endif ··· 336 350 add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ 337 351 .Loop8: 338 352 #endif 339 - SRC( l8ui a9, a2, 0 ) 340 - SRC( l8ui a8, a2, 1 ) 341 - DST( s8i a9, a3, 0 ) 342 - DST( s8i a8, a3, 1 ) 353 + EX(10f) l8ui a9, a2, 0 354 + EX(10f) l8ui a8, a2, 1 355 + EX(11f) s8i a9, a3, 0 356 + EX(11f) s8i a8, a3, 1 343 357 #ifdef __XTENSA_EB__ 344 358 slli a9, a9, 8 /* combine into a single 16-bit value */ 345 359 #else /* for checksum computation */ ··· 367 381 a12 = original dst for exception handling 368 382 */ 369 383 370 - 6001: 384 + 10: 371 385 _movi a2, -EFAULT 372 386 s32i a2, a6, 0 /* src_err_ptr */ 373 387 ··· 389 403 2: 390 404 retw 391 405 392 - 6002: 406 + 11: 393 407 movi a2, -EFAULT 394 408 s32i a2, a7, 0 /* dst_err_ptr */ 395 409 movi a2, 0
+29 -52
arch/xtensa/lib/memcopy.S
··· 9 9 * Copyright (C) 2002 - 2012 Tensilica Inc. 10 10 */ 11 11 12 + #include <linux/linkage.h> 12 13 #include <variant/core.h> 13 - 14 - .macro src_b r, w0, w1 15 - #ifdef __XTENSA_EB__ 16 - src \r, \w0, \w1 17 - #else 18 - src \r, \w1, \w0 19 - #endif 20 - .endm 21 - 22 - .macro ssa8 r 23 - #ifdef __XTENSA_EB__ 24 - ssa8b \r 25 - #else 26 - ssa8l \r 27 - #endif 28 - .endm 14 + #include <asm/asmmacro.h> 29 15 30 16 /* 31 17 * void *memcpy(void *dst, const void *src, size_t len); ··· 109 123 addi a5, a5, 2 110 124 j .Ldstaligned # dst is now aligned, return to main algorithm 111 125 112 - .align 4 113 - .global memcpy 114 - .type memcpy,@function 115 - memcpy: 126 + ENTRY(__memcpy) 127 + WEAK(memcpy) 116 128 117 129 entry sp, 16 # minimal stack frame 118 130 # a2/ dst, a3/ src, a4/ len ··· 193 209 .Lsrcunaligned: 194 210 _beqz a4, .Ldone # avoid loading anything for zero-length copies 195 211 # copy 16 bytes per iteration for word-aligned dst and unaligned src 196 - ssa8 a3 # set shift amount from byte offset 212 + __ssa8 a3 # set shift amount from byte offset 197 213 198 214 /* set to 1 when running on ISS (simulator) with the 199 215 lint or ferret client, or 0 to save a few cycles */ ··· 213 229 .Loop2: 214 230 l32i a7, a3, 4 215 231 l32i a8, a3, 8 216 - src_b a6, a6, a7 232 + __src_b a6, a6, a7 217 233 s32i a6, a5, 0 218 234 l32i a9, a3, 12 219 - src_b a7, a7, a8 235 + __src_b a7, a7, a8 220 236 s32i a7, a5, 4 221 237 l32i a6, a3, 16 222 - src_b a8, a8, a9 238 + __src_b a8, a8, a9 223 239 s32i a8, a5, 8 224 240 addi a3, a3, 16 225 - src_b a9, a9, a6 241 + __src_b a9, a9, a6 226 242 s32i a9, a5, 12 227 243 addi a5, a5, 16 228 244 #if !XCHAL_HAVE_LOOPS ··· 233 249 # copy 8 bytes 234 250 l32i a7, a3, 4 235 251 l32i a8, a3, 8 236 - src_b a6, a6, a7 252 + __src_b a6, a6, a7 237 253 s32i a6, a5, 0 238 254 addi a3, a3, 8 239 - src_b a7, a7, a8 255 + __src_b a7, a7, a8 240 256 s32i a7, a5, 4 241 257 addi a5, a5, 8 242 258 mov a6, a8 ··· 245 261 # copy 4 bytes 246 262 
l32i a7, a3, 4 247 263 addi a3, a3, 4 248 - src_b a6, a6, a7 264 + __src_b a6, a6, a7 249 265 s32i a6, a5, 0 250 266 addi a5, a5, 4 251 267 mov a6, a7 ··· 272 288 s8i a6, a5, 0 273 289 retw 274 290 291 + ENDPROC(__memcpy) 275 292 276 293 /* 277 294 * void bcopy(const void *src, void *dest, size_t n); 278 295 */ 279 - .align 4 280 - .global bcopy 281 - .type bcopy,@function 282 - bcopy: 296 + 297 + ENTRY(bcopy) 298 + 283 299 entry sp, 16 # minimal stack frame 284 300 # a2=src, a3=dst, a4=len 285 301 mov a5, a3 286 302 mov a3, a2 287 303 mov a2, a5 288 304 j .Lmovecommon # go to common code for memmove+bcopy 305 + 306 + ENDPROC(bcopy) 289 307 290 308 /* 291 309 * void *memmove(void *dst, const void *src, size_t len); ··· 377 391 j .Lbackdstaligned # dst is now aligned, 378 392 # return to main algorithm 379 393 380 - .align 4 381 - .global memmove 382 - .type memmove,@function 383 - memmove: 394 + ENTRY(__memmove) 395 + WEAK(memmove) 384 396 385 397 entry sp, 16 # minimal stack frame 386 398 # a2/ dst, a3/ src, a4/ len ··· 469 485 .Lbacksrcunaligned: 470 486 _beqz a4, .Lbackdone # avoid loading anything for zero-length copies 471 487 # copy 16 bytes per iteration for word-aligned dst and unaligned src 472 - ssa8 a3 # set shift amount from byte offset 488 + __ssa8 a3 # set shift amount from byte offset 473 489 #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with 474 490 * the lint or ferret client, or 0 475 491 * to save a few cycles */ ··· 490 506 l32i a7, a3, 12 491 507 l32i a8, a3, 8 492 508 addi a5, a5, -16 493 - src_b a6, a7, a6 509 + __src_b a6, a7, a6 494 510 s32i a6, a5, 12 495 511 l32i a9, a3, 4 496 - src_b a7, a8, a7 512 + __src_b a7, a8, a7 497 513 s32i a7, a5, 8 498 514 l32i a6, a3, 0 499 - src_b a8, a9, a8 515 + __src_b a8, a9, a8 500 516 s32i a8, a5, 4 501 - src_b a9, a6, a9 517 + __src_b a9, a6, a9 502 518 s32i a9, a5, 0 503 519 #if !XCHAL_HAVE_LOOPS 504 520 bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start ··· 510 526 
l32i a7, a3, 4 511 527 l32i a8, a3, 0 512 528 addi a5, a5, -8 513 - src_b a6, a7, a6 529 + __src_b a6, a7, a6 514 530 s32i a6, a5, 4 515 - src_b a7, a8, a7 531 + __src_b a7, a8, a7 516 532 s32i a7, a5, 0 517 533 mov a6, a8 518 534 .Lback12: ··· 521 537 addi a3, a3, -4 522 538 l32i a7, a3, 0 523 539 addi a5, a5, -4 524 - src_b a6, a7, a6 540 + __src_b a6, a7, a6 525 541 s32i a6, a5, 0 526 542 mov a6, a7 527 543 .Lback13: ··· 550 566 s8i a6, a5, 0 551 567 retw 552 568 553 - 554 - /* 555 - * Local Variables: 556 - * mode:fundamental 557 - * comment-start: "# " 558 - * comment-start-skip: "# *" 559 - * End: 560 - */ 569 + ENDPROC(__memmove)
+19 -26
arch/xtensa/lib/memset.S
··· 11 11 * Copyright (C) 2002 Tensilica Inc. 12 12 */ 13 13 14 + #include <linux/linkage.h> 14 15 #include <variant/core.h> 16 + #include <asm/asmmacro.h> 15 17 16 18 /* 17 19 * void *memset(void *dst, int c, size_t length) ··· 30 28 * the alignment labels). 31 29 */ 32 30 33 - /* Load or store instructions that may cause exceptions use the EX macro. */ 34 - 35 - #define EX(insn,reg1,reg2,offset,handler) \ 36 - 9: insn reg1, reg2, offset; \ 37 - .section __ex_table, "a"; \ 38 - .word 9b, handler; \ 39 - .previous 40 - 41 - 42 31 .text 43 - .align 4 44 - .global memset 45 - .type memset,@function 46 - memset: 32 + ENTRY(__memset) 33 + WEAK(memset) 34 + 47 35 entry sp, 16 # minimal stack frame 48 36 # a2/ dst, a3/ c, a4/ length 49 37 extui a3, a3, 0, 8 # mask to just 8 bits ··· 65 73 add a6, a6, a5 # a6 = end of last 16B chunk 66 74 #endif /* !XCHAL_HAVE_LOOPS */ 67 75 .Loop1: 68 - EX(s32i, a3, a5, 0, memset_fixup) 69 - EX(s32i, a3, a5, 4, memset_fixup) 70 - EX(s32i, a3, a5, 8, memset_fixup) 71 - EX(s32i, a3, a5, 12, memset_fixup) 76 + EX(10f) s32i a3, a5, 0 77 + EX(10f) s32i a3, a5, 4 78 + EX(10f) s32i a3, a5, 8 79 + EX(10f) s32i a3, a5, 12 72 80 addi a5, a5, 16 73 81 #if !XCHAL_HAVE_LOOPS 74 82 blt a5, a6, .Loop1 ··· 76 84 .Loop1done: 77 85 bbci.l a4, 3, .L2 78 86 # set 8 bytes 79 - EX(s32i, a3, a5, 0, memset_fixup) 80 - EX(s32i, a3, a5, 4, memset_fixup) 87 + EX(10f) s32i a3, a5, 0 88 + EX(10f) s32i a3, a5, 4 81 89 addi a5, a5, 8 82 90 .L2: 83 91 bbci.l a4, 2, .L3 84 92 # set 4 bytes 85 - EX(s32i, a3, a5, 0, memset_fixup) 93 + EX(10f) s32i a3, a5, 0 86 94 addi a5, a5, 4 87 95 .L3: 88 96 bbci.l a4, 1, .L4 89 97 # set 2 bytes 90 - EX(s16i, a3, a5, 0, memset_fixup) 98 + EX(10f) s16i a3, a5, 0 91 99 addi a5, a5, 2 92 100 .L4: 93 101 bbci.l a4, 0, .L5 94 102 # set 1 byte 95 - EX(s8i, a3, a5, 0, memset_fixup) 103 + EX(10f) s8i a3, a5, 0 96 104 .L5: 97 105 .Lret1: 98 106 retw ··· 106 114 bbci.l a5, 0, .L20 # branch if dst alignment half-aligned 107 115 # dst is only byte 
aligned 108 116 # set 1 byte 109 - EX(s8i, a3, a5, 0, memset_fixup) 117 + EX(10f) s8i a3, a5, 0 110 118 addi a5, a5, 1 111 119 addi a4, a4, -1 112 120 # now retest if dst aligned ··· 114 122 .L20: 115 123 # dst half-aligned 116 124 # set 2 bytes 117 - EX(s16i, a3, a5, 0, memset_fixup) 125 + EX(10f) s16i a3, a5, 0 118 126 addi a5, a5, 2 119 127 addi a4, a4, -2 120 128 j .L0 # dst is now aligned, return to main algorithm ··· 133 141 add a6, a5, a4 # a6 = ending address 134 142 #endif /* !XCHAL_HAVE_LOOPS */ 135 143 .Lbyteloop: 136 - EX(s8i, a3, a5, 0, memset_fixup) 144 + EX(10f) s8i a3, a5, 0 137 145 addi a5, a5, 1 138 146 #if !XCHAL_HAVE_LOOPS 139 147 blt a5, a6, .Lbyteloop ··· 141 149 .Lbytesetdone: 142 150 retw 143 151 152 + ENDPROC(__memset) 144 153 145 154 .section .fixup, "ax" 146 155 .align 4 147 156 148 157 /* We return zero if a failure occurred. */ 149 158 150 - memset_fixup: 159 + 10: 151 160 movi a2, 0 152 161 retw
+9 -36
arch/xtensa/lib/pci-auto.c
··· 49 49 * 50 50 */ 51 51 52 - 53 - /* define DEBUG to print some debugging messages. */ 54 - 55 - #undef DEBUG 56 - 57 - #ifdef DEBUG 58 - # define DBG(x...) printk(x) 59 - #else 60 - # define DBG(x...) 61 - #endif 62 - 63 52 static int pciauto_upper_iospc; 64 53 static int pciauto_upper_memspc; 65 54 ··· 86 97 { 87 98 bar_size &= PCI_BASE_ADDRESS_IO_MASK; 88 99 upper_limit = &pciauto_upper_iospc; 89 - DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr); 100 + pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr); 90 101 } 91 102 else 92 103 { ··· 96 107 97 108 bar_size &= PCI_BASE_ADDRESS_MEM_MASK; 98 109 upper_limit = &pciauto_upper_memspc; 99 - DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr); 110 + pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr); 100 111 } 101 112 102 113 /* Allocate a base address (bar_size is negative!) */ ··· 114 125 if (found_mem64) 115 126 pci_write_config_dword(dev, (bar+=4), 0x00000000); 116 127 117 - DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit); 128 + pr_debug("size=0x%x, address=0x%x\n", 129 + ~bar_size + 1, *upper_limit); 118 130 } 119 131 } 120 132 ··· 140 150 if (irq == -1) 141 151 irq = 0; 142 152 143 - DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); 153 + pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); 144 154 145 155 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); 146 156 } ··· 279 289 280 290 int iosave, memsave; 281 291 282 - DBG("PCI Autoconfig: Found P2P bridge, device %d\n", 283 - PCI_SLOT(pci_devfn)); 292 + pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n", 293 + PCI_SLOT(pci_devfn)); 284 294 285 295 /* Allocate PCI I/O and/or memory space */ 286 296 pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); ··· 296 306 297 307 } 298 308 299 - 300 - #if 0 301 - /* Skip legacy mode IDE controller */ 302 - 303 - if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) { 304 - 305 - unsigned char prg_iface; 306 - pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface); 307 - 308 - if (!(prg_iface & 
PCIAUTO_IDE_MODE_MASK)) { 309 - DBG("PCI Autoconfig: Skipping legacy mode " 310 - "IDE controller\n"); 311 - continue; 312 - } 313 - } 314 - #endif 315 - 316 309 /* 317 310 * Found a peripheral, enable some standard 318 311 * settings ··· 310 337 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); 311 338 312 339 /* Allocate PCI I/O and/or memory space */ 313 - DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", 314 - current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) ); 340 + pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", 341 + current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn)); 315 342 316 343 pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); 317 344 pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
+26 -34
arch/xtensa/lib/strncpy_user.S
··· 11 11 * Copyright (C) 2002 Tensilica Inc. 12 12 */ 13 13 14 - #include <variant/core.h> 15 14 #include <linux/errno.h> 16 - 17 - /* Load or store instructions that may cause exceptions use the EX macro. */ 18 - 19 - #define EX(insn,reg1,reg2,offset,handler) \ 20 - 9: insn reg1, reg2, offset; \ 21 - .section __ex_table, "a"; \ 22 - .word 9b, handler; \ 23 - .previous 15 + #include <linux/linkage.h> 16 + #include <variant/core.h> 17 + #include <asm/asmmacro.h> 24 18 25 19 /* 26 20 * char *__strncpy_user(char *dst, const char *src, size_t len) ··· 48 54 # a12/ tmp 49 55 50 56 .text 51 - .align 4 52 - .global __strncpy_user 53 - .type __strncpy_user,@function 54 - __strncpy_user: 57 + ENTRY(__strncpy_user) 58 + 55 59 entry sp, 16 # minimal stack frame 56 60 # a2/ dst, a3/ src, a4/ len 57 61 mov a11, a2 # leave dst in return value register ··· 67 75 j .Ldstunaligned 68 76 69 77 .Lsrc1mod2: # src address is odd 70 - EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 78 + EX(11f) l8ui a9, a3, 0 # get byte 0 71 79 addi a3, a3, 1 # advance src pointer 72 - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 80 + EX(10f) s8i a9, a11, 0 # store byte 0 73 81 beqz a9, .Lret # if byte 0 is zero 74 82 addi a11, a11, 1 # advance dst pointer 75 83 addi a4, a4, -1 # decrement len ··· 77 85 bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned 78 86 79 87 .Lsrc2mod4: # src address is 2 mod 4 80 - EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 88 + EX(11f) l8ui a9, a3, 0 # get byte 0 81 89 /* 1-cycle interlock */ 82 - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 90 + EX(10f) s8i a9, a11, 0 # store byte 0 83 91 beqz a9, .Lret # if byte 0 is zero 84 92 addi a11, a11, 1 # advance dst pointer 85 93 addi a4, a4, -1 # decrement len 86 94 beqz a4, .Lret # if len is zero 87 - EX(l8ui, a9, a3, 1, fixup_l) # get byte 0 95 + EX(11f) l8ui a9, a3, 1 # get byte 0 88 96 addi a3, a3, 2 # advance src pointer 89 - EX(s8i, a9, a11, 0, fixup_s) # store byte 0 97 + EX(10f) s8i a9, a11, 0 # store byte 0 90 98 beqz a9, 
.Lret # if byte 0 is zero 91 99 addi a11, a11, 1 # advance dst pointer 92 100 addi a4, a4, -1 # decrement len ··· 109 117 add a12, a12, a11 # a12 = end of last 4B chunck 110 118 #endif 111 119 .Loop1: 112 - EX(l32i, a9, a3, 0, fixup_l) # get word from src 120 + EX(11f) l32i a9, a3, 0 # get word from src 113 121 addi a3, a3, 4 # advance src pointer 114 122 bnone a9, a5, .Lz0 # if byte 0 is zero 115 123 bnone a9, a6, .Lz1 # if byte 1 is zero 116 124 bnone a9, a7, .Lz2 # if byte 2 is zero 117 - EX(s32i, a9, a11, 0, fixup_s) # store word to dst 125 + EX(10f) s32i a9, a11, 0 # store word to dst 118 126 bnone a9, a8, .Lz3 # if byte 3 is zero 119 127 addi a11, a11, 4 # advance dst pointer 120 128 #if !XCHAL_HAVE_LOOPS ··· 124 132 .Loop1done: 125 133 bbci.l a4, 1, .L100 126 134 # copy 2 bytes 127 - EX(l16ui, a9, a3, 0, fixup_l) 135 + EX(11f) l16ui a9, a3, 0 128 136 addi a3, a3, 2 # advance src pointer 129 137 #ifdef __XTENSA_EB__ 130 138 bnone a9, a7, .Lz0 # if byte 2 is zero ··· 133 141 bnone a9, a5, .Lz0 # if byte 0 is zero 134 142 bnone a9, a6, .Lz1 # if byte 1 is zero 135 143 #endif 136 - EX(s16i, a9, a11, 0, fixup_s) 144 + EX(10f) s16i a9, a11, 0 137 145 addi a11, a11, 2 # advance dst pointer 138 146 .L100: 139 147 bbci.l a4, 0, .Lret 140 - EX(l8ui, a9, a3, 0, fixup_l) 148 + EX(11f) l8ui a9, a3, 0 141 149 /* slot */ 142 - EX(s8i, a9, a11, 0, fixup_s) 150 + EX(10f) s8i a9, a11, 0 143 151 beqz a9, .Lret # if byte is zero 144 152 addi a11, a11, 1-3 # advance dst ptr 1, but also cancel 145 153 # the effect of adding 3 in .Lz3 code ··· 153 161 #ifdef __XTENSA_EB__ 154 162 movi a9, 0 155 163 #endif /* __XTENSA_EB__ */ 156 - EX(s8i, a9, a11, 0, fixup_s) 164 + EX(10f) s8i a9, a11, 0 157 165 sub a2, a11, a2 # compute strlen 158 166 retw 159 167 .Lz1: # byte 1 is zero 160 168 #ifdef __XTENSA_EB__ 161 169 extui a9, a9, 16, 16 162 170 #endif /* __XTENSA_EB__ */ 163 - EX(s16i, a9, a11, 0, fixup_s) 171 + EX(10f) s16i a9, a11, 0 164 172 addi a11, a11, 1 # advance dst pointer 165 173 
sub a2, a11, a2 # compute strlen 166 174 retw ··· 168 176 #ifdef __XTENSA_EB__ 169 177 extui a9, a9, 16, 16 170 178 #endif /* __XTENSA_EB__ */ 171 - EX(s16i, a9, a11, 0, fixup_s) 179 + EX(10f) s16i a9, a11, 0 172 180 movi a9, 0 173 - EX(s8i, a9, a11, 2, fixup_s) 181 + EX(10f) s8i a9, a11, 2 174 182 addi a11, a11, 2 # advance dst pointer 175 183 sub a2, a11, a2 # compute strlen 176 184 retw ··· 188 196 add a12, a11, a4 # a12 = ending address 189 197 #endif /* XCHAL_HAVE_LOOPS */ 190 198 .Lnextbyte: 191 - EX(l8ui, a9, a3, 0, fixup_l) 199 + EX(11f) l8ui a9, a3, 0 192 200 addi a3, a3, 1 193 - EX(s8i, a9, a11, 0, fixup_s) 201 + EX(10f) s8i a9, a11, 0 194 202 beqz a9, .Lunalignedend 195 203 addi a11, a11, 1 196 204 #if !XCHAL_HAVE_LOOPS ··· 201 209 sub a2, a11, a2 # compute strlen 202 210 retw 203 211 212 + ENDPROC(__strncpy_user) 204 213 205 214 .section .fixup, "ax" 206 215 .align 4 ··· 211 218 * implementation in memset(). Thus, we differentiate between 212 219 * load/store fixups. */ 213 220 214 - fixup_s: 215 - fixup_l: 221 + 10: 222 + 11: 216 223 movi a2, -EFAULT 217 224 retw 218 -
+11 -17
arch/xtensa/lib/strnlen_user.S
··· 11 11 * Copyright (C) 2002 Tensilica Inc. 12 12 */ 13 13 14 + #include <linux/linkage.h> 14 15 #include <variant/core.h> 15 - 16 - /* Load or store instructions that may cause exceptions use the EX macro. */ 17 - 18 - #define EX(insn,reg1,reg2,offset,handler) \ 19 - 9: insn reg1, reg2, offset; \ 20 - .section __ex_table, "a"; \ 21 - .word 9b, handler; \ 22 - .previous 16 + #include <asm/asmmacro.h> 23 17 24 18 /* 25 19 * size_t __strnlen_user(const char *s, size_t len) ··· 43 49 # a10/ tmp 44 50 45 51 .text 46 - .align 4 47 - .global __strnlen_user 48 - .type __strnlen_user,@function 49 - __strnlen_user: 52 + ENTRY(__strnlen_user) 53 + 50 54 entry sp, 16 # minimal stack frame 51 55 # a2/ s, a3/ len 52 56 addi a4, a2, -4 # because we overincrement at the end; ··· 69 77 add a10, a10, a4 # a10 = end of last 4B chunk 70 78 #endif /* XCHAL_HAVE_LOOPS */ 71 79 .Loop: 72 - EX(l32i, a9, a4, 4, lenfixup) # get next word of string 80 + EX(10f) l32i a9, a4, 4 # get next word of string 73 81 addi a4, a4, 4 # advance string pointer 74 82 bnone a9, a5, .Lz0 # if byte 0 is zero 75 83 bnone a9, a6, .Lz1 # if byte 1 is zero ··· 80 88 #endif 81 89 82 90 .Ldone: 83 - EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks 91 + EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks 84 92 85 93 bbci.l a3, 1, .L100 86 94 # check two more bytes (bytes 0, 1 of word) ··· 117 125 retw 118 126 119 127 .L1mod2: # address is odd 120 - EX(l8ui, a9, a4, 4, lenfixup) # get byte 0 128 + EX(10f) l8ui a9, a4, 4 # get byte 0 121 129 addi a4, a4, 1 # advance string pointer 122 130 beqz a9, .Lz3 # if byte 0 is zero 123 131 bbci.l a4, 1, .Laligned # if string pointer is now word-aligned 124 132 125 133 .L2mod4: # address is 2 mod 4 126 134 addi a4, a4, 2 # advance ptr for aligned access 127 - EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string 135 + EX(10f) l32i a9, a4, 0 # get word with first two bytes of string 128 136 bnone a9, a7, .Lz2 # if byte 2 (of word, not 
string) is zero 129 137 bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero 130 138 # byte 3 is zero ··· 132 140 sub a2, a4, a2 # subtract to get length 133 141 retw 134 142 143 + ENDPROC(__strnlen_user) 144 + 135 145 .section .fixup, "ax" 136 146 .align 4 137 - lenfixup: 147 + 10: 138 148 movi a2, 0 139 149 retw
+61 -77
arch/xtensa/lib/usercopy.S
··· 53 53 * a11/ original length 54 54 */ 55 55 56 + #include <linux/linkage.h> 56 57 #include <variant/core.h> 57 - 58 - #ifdef __XTENSA_EB__ 59 - #define ALIGN(R, W0, W1) src R, W0, W1 60 - #define SSA8(R) ssa8b R 61 - #else 62 - #define ALIGN(R, W0, W1) src R, W1, W0 63 - #define SSA8(R) ssa8l R 64 - #endif 65 - 66 - /* Load or store instructions that may cause exceptions use the EX macro. */ 67 - 68 - #define EX(insn,reg1,reg2,offset,handler) \ 69 - 9: insn reg1, reg2, offset; \ 70 - .section __ex_table, "a"; \ 71 - .word 9b, handler; \ 72 - .previous 73 - 58 + #include <asm/asmmacro.h> 74 59 75 60 .text 76 - .align 4 77 - .global __xtensa_copy_user 78 - .type __xtensa_copy_user,@function 79 - __xtensa_copy_user: 61 + ENTRY(__xtensa_copy_user) 62 + 80 63 entry sp, 16 # minimal stack frame 81 64 # a2/ dst, a3/ src, a4/ len 82 65 mov a5, a2 # copy dst so that a2 is return value ··· 72 89 # per iteration 73 90 movi a8, 3 # if source is also aligned, 74 91 bnone a3, a8, .Laligned # then use word copy 75 - SSA8( a3) # set shift amount from byte offset 92 + __ssa8 a3 # set shift amount from byte offset 76 93 bnez a4, .Lsrcunaligned 77 94 movi a2, 0 # return success for len==0 78 95 retw ··· 85 102 bltui a4, 7, .Lbytecopy # do short copies byte by byte 86 103 87 104 # copy 1 byte 88 - EX(l8ui, a6, a3, 0, fixup) 105 + EX(10f) l8ui a6, a3, 0 89 106 addi a3, a3, 1 90 - EX(s8i, a6, a5, 0, fixup) 107 + EX(10f) s8i a6, a5, 0 91 108 addi a5, a5, 1 92 109 addi a4, a4, -1 93 110 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then ··· 95 112 .Ldst2mod4: # dst 16-bit aligned 96 113 # copy 2 bytes 97 114 bltui a4, 6, .Lbytecopy # do short copies byte by byte 98 - EX(l8ui, a6, a3, 0, fixup) 99 - EX(l8ui, a7, a3, 1, fixup) 115 + EX(10f) l8ui a6, a3, 0 116 + EX(10f) l8ui a7, a3, 1 100 117 addi a3, a3, 2 101 - EX(s8i, a6, a5, 0, fixup) 102 - EX(s8i, a7, a5, 1, fixup) 118 + EX(10f) s8i a6, a5, 0 119 + EX(10f) s8i a7, a5, 1 103 120 addi a5, a5, 2 104 121 addi a4, a4, -2 105 122 j 
.Ldstaligned # dst is now aligned, return to main algorithm ··· 118 135 add a7, a3, a4 # a7 = end address for source 119 136 #endif /* !XCHAL_HAVE_LOOPS */ 120 137 .Lnextbyte: 121 - EX(l8ui, a6, a3, 0, fixup) 138 + EX(10f) l8ui a6, a3, 0 122 139 addi a3, a3, 1 123 - EX(s8i, a6, a5, 0, fixup) 140 + EX(10f) s8i a6, a5, 0 124 141 addi a5, a5, 1 125 142 #if !XCHAL_HAVE_LOOPS 126 143 blt a3, a7, .Lnextbyte ··· 144 161 add a8, a8, a3 # a8 = end of last 16B source chunk 145 162 #endif /* !XCHAL_HAVE_LOOPS */ 146 163 .Loop1: 147 - EX(l32i, a6, a3, 0, fixup) 148 - EX(l32i, a7, a3, 4, fixup) 149 - EX(s32i, a6, a5, 0, fixup) 150 - EX(l32i, a6, a3, 8, fixup) 151 - EX(s32i, a7, a5, 4, fixup) 152 - EX(l32i, a7, a3, 12, fixup) 153 - EX(s32i, a6, a5, 8, fixup) 164 + EX(10f) l32i a6, a3, 0 165 + EX(10f) l32i a7, a3, 4 166 + EX(10f) s32i a6, a5, 0 167 + EX(10f) l32i a6, a3, 8 168 + EX(10f) s32i a7, a5, 4 169 + EX(10f) l32i a7, a3, 12 170 + EX(10f) s32i a6, a5, 8 154 171 addi a3, a3, 16 155 - EX(s32i, a7, a5, 12, fixup) 172 + EX(10f) s32i a7, a5, 12 156 173 addi a5, a5, 16 157 174 #if !XCHAL_HAVE_LOOPS 158 175 blt a3, a8, .Loop1 ··· 160 177 .Loop1done: 161 178 bbci.l a4, 3, .L2 162 179 # copy 8 bytes 163 - EX(l32i, a6, a3, 0, fixup) 164 - EX(l32i, a7, a3, 4, fixup) 180 + EX(10f) l32i a6, a3, 0 181 + EX(10f) l32i a7, a3, 4 165 182 addi a3, a3, 8 166 - EX(s32i, a6, a5, 0, fixup) 167 - EX(s32i, a7, a5, 4, fixup) 183 + EX(10f) s32i a6, a5, 0 184 + EX(10f) s32i a7, a5, 4 168 185 addi a5, a5, 8 169 186 .L2: 170 187 bbci.l a4, 2, .L3 171 188 # copy 4 bytes 172 - EX(l32i, a6, a3, 0, fixup) 189 + EX(10f) l32i a6, a3, 0 173 190 addi a3, a3, 4 174 - EX(s32i, a6, a5, 0, fixup) 191 + EX(10f) s32i a6, a5, 0 175 192 addi a5, a5, 4 176 193 .L3: 177 194 bbci.l a4, 1, .L4 178 195 # copy 2 bytes 179 - EX(l16ui, a6, a3, 0, fixup) 196 + EX(10f) l16ui a6, a3, 0 180 197 addi a3, a3, 2 181 - EX(s16i, a6, a5, 0, fixup) 198 + EX(10f) s16i a6, a5, 0 182 199 addi a5, a5, 2 183 200 .L4: 184 201 bbci.l a4, 0, .L5 
185 202 # copy 1 byte 186 - EX(l8ui, a6, a3, 0, fixup) 187 - EX(s8i, a6, a5, 0, fixup) 203 + EX(10f) l8ui a6, a3, 0 204 + EX(10f) s8i a6, a5, 0 188 205 .L5: 189 206 movi a2, 0 # return success for len bytes copied 190 207 retw ··· 200 217 # copy 16 bytes per iteration for word-aligned dst and unaligned src 201 218 and a10, a3, a8 # save unalignment offset for below 202 219 sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) 203 - EX(l32i, a6, a3, 0, fixup) # load first word 220 + EX(10f) l32i a6, a3, 0 # load first word 204 221 #if XCHAL_HAVE_LOOPS 205 222 loopnez a7, .Loop2done 206 223 #else /* !XCHAL_HAVE_LOOPS */ ··· 209 226 add a12, a12, a3 # a12 = end of last 16B source chunk 210 227 #endif /* !XCHAL_HAVE_LOOPS */ 211 228 .Loop2: 212 - EX(l32i, a7, a3, 4, fixup) 213 - EX(l32i, a8, a3, 8, fixup) 214 - ALIGN( a6, a6, a7) 215 - EX(s32i, a6, a5, 0, fixup) 216 - EX(l32i, a9, a3, 12, fixup) 217 - ALIGN( a7, a7, a8) 218 - EX(s32i, a7, a5, 4, fixup) 219 - EX(l32i, a6, a3, 16, fixup) 220 - ALIGN( a8, a8, a9) 221 - EX(s32i, a8, a5, 8, fixup) 229 + EX(10f) l32i a7, a3, 4 230 + EX(10f) l32i a8, a3, 8 231 + __src_b a6, a6, a7 232 + EX(10f) s32i a6, a5, 0 233 + EX(10f) l32i a9, a3, 12 234 + __src_b a7, a7, a8 235 + EX(10f) s32i a7, a5, 4 236 + EX(10f) l32i a6, a3, 16 237 + __src_b a8, a8, a9 238 + EX(10f) s32i a8, a5, 8 222 239 addi a3, a3, 16 223 - ALIGN( a9, a9, a6) 224 - EX(s32i, a9, a5, 12, fixup) 240 + __src_b a9, a9, a6 241 + EX(10f) s32i a9, a5, 12 225 242 addi a5, a5, 16 226 243 #if !XCHAL_HAVE_LOOPS 227 244 blt a3, a12, .Loop2 ··· 229 246 .Loop2done: 230 247 bbci.l a4, 3, .L12 231 248 # copy 8 bytes 232 - EX(l32i, a7, a3, 4, fixup) 233 - EX(l32i, a8, a3, 8, fixup) 234 - ALIGN( a6, a6, a7) 235 - EX(s32i, a6, a5, 0, fixup) 249 + EX(10f) l32i a7, a3, 4 250 + EX(10f) l32i a8, a3, 8 251 + __src_b a6, a6, a7 252 + EX(10f) s32i a6, a5, 0 236 253 addi a3, a3, 8 237 - ALIGN( a7, a7, a8) 238 - EX(s32i, a7, a5, 4, fixup) 254 + __src_b a7, a7, a8 
255 + EX(10f) s32i a7, a5, 4 239 256 addi a5, a5, 8 240 257 mov a6, a8 241 258 .L12: 242 259 bbci.l a4, 2, .L13 243 260 # copy 4 bytes 244 - EX(l32i, a7, a3, 4, fixup) 261 + EX(10f) l32i a7, a3, 4 245 262 addi a3, a3, 4 246 - ALIGN( a6, a6, a7) 247 - EX(s32i, a6, a5, 0, fixup) 263 + __src_b a6, a6, a7 264 + EX(10f) s32i a6, a5, 0 248 265 addi a5, a5, 4 249 266 mov a6, a7 250 267 .L13: 251 268 add a3, a3, a10 # readjust a3 with correct misalignment 252 269 bbci.l a4, 1, .L14 253 270 # copy 2 bytes 254 - EX(l8ui, a6, a3, 0, fixup) 255 - EX(l8ui, a7, a3, 1, fixup) 271 + EX(10f) l8ui a6, a3, 0 272 + EX(10f) l8ui a7, a3, 1 256 273 addi a3, a3, 2 257 - EX(s8i, a6, a5, 0, fixup) 258 - EX(s8i, a7, a5, 1, fixup) 274 + EX(10f) s8i a6, a5, 0 275 + EX(10f) s8i a7, a5, 1 259 276 addi a5, a5, 2 260 277 .L14: 261 278 bbci.l a4, 0, .L15 262 279 # copy 1 byte 263 - EX(l8ui, a6, a3, 0, fixup) 264 - EX(s8i, a6, a5, 0, fixup) 280 + EX(10f) l8ui a6, a3, 0 281 + EX(10f) s8i a6, a5, 0 265 282 .L15: 266 283 movi a2, 0 # return success for len bytes copied 267 284 retw 268 285 286 + ENDPROC(__xtensa_copy_user) 269 287 270 288 .section .fixup, "ax" 271 289 .align 4 ··· 278 294 */ 279 295 280 296 281 - fixup: 297 + 10: 282 298 sub a2, a5, a2 /* a2 <-- bytes copied */ 283 299 sub a2, a11, a2 /* a2 <-- bytes not copied */ 284 300 retw
+5
arch/xtensa/mm/Makefile
··· 5 5 obj-y := init.o misc.o 6 6 obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o 7 7 obj-$(CONFIG_HIGHMEM) += highmem.o 8 + obj-$(CONFIG_KASAN) += kasan_init.o 9 + 10 + KASAN_SANITIZE_fault.o := n 11 + KASAN_SANITIZE_kasan_init.o := n 12 + KASAN_SANITIZE_mmu.o := n
-3
arch/xtensa/mm/cache.c
··· 33 33 #include <asm/pgalloc.h> 34 34 #include <asm/pgtable.h> 35 35 36 - //#define printd(x...) printk(x) 37 - #define printd(x...) do { } while(0) 38 - 39 36 /* 40 37 * Note: 41 38 * The kernel provides one architecture bit PG_arch_1 in the page flags that
+9 -13
arch/xtensa/mm/fault.c
··· 25 25 DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; 26 26 void bad_page_fault(struct pt_regs*, unsigned long, int); 27 27 28 - #undef DEBUG_PAGE_FAULT 29 - 30 28 /* 31 29 * This routine handles page faults. It determines the address, 32 30 * and the problem, and then passes it off to one of the appropriate ··· 66 68 exccause == EXCCAUSE_ITLB_MISS || 67 69 exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; 68 70 69 - #ifdef DEBUG_PAGE_FAULT 70 - printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, 71 - address, exccause, regs->pc, is_write? "w":"", is_exec? "x":""); 72 - #endif 71 + pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n", 72 + current->comm, current->pid, 73 + address, exccause, regs->pc, 74 + is_write ? "w" : "", is_exec ? "x" : ""); 73 75 74 76 if (user_mode(regs)) 75 77 flags |= FAULT_FLAG_USER; ··· 245 247 246 248 /* Are we prepared to handle this kernel fault? */ 247 249 if ((entry = search_exception_tables(regs->pc)) != NULL) { 248 - #ifdef DEBUG_PAGE_FAULT 249 - printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n", 250 - current->comm, regs->pc, entry->fixup); 251 - #endif 250 + pr_debug("%s: Exception at pc=%#010lx (%lx)\n", 251 + current->comm, regs->pc, entry->fixup); 252 252 current->thread.bad_uaddr = address; 253 253 regs->pc = entry->fixup; 254 254 return; ··· 255 259 /* Oops. The kernel tried to access some bad page. We'll have to 256 260 * terminate things with extreme prejudice. 257 261 */ 258 - printk(KERN_ALERT "Unable to handle kernel paging request at virtual " 259 - "address %08lx\n pc = %08lx, ra = %08lx\n", 260 - address, regs->pc, regs->areg[0]); 262 + pr_alert("Unable to handle kernel paging request at virtual " 263 + "address %08lx\n pc = %08lx, ra = %08lx\n", 264 + address, regs->pc, regs->areg[0]); 261 265 die("Oops", regs, sig); 262 266 do_exit(sig); 263 267 }
+30 -8
arch/xtensa/mm/init.c
··· 100 100 101 101 mem_init_print_info(NULL); 102 102 pr_info("virtual kernel memory layout:\n" 103 - #ifdef CONFIG_HIGHMEM 104 - " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 105 - " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 103 + #ifdef CONFIG_KASAN 104 + " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" 106 105 #endif 107 106 #ifdef CONFIG_MMU 108 107 " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" 109 108 #endif 110 - " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", 109 + #ifdef CONFIG_HIGHMEM 110 + " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 111 + " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 112 + #endif 113 + " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n" 114 + " .text : 0x%08lx - 0x%08lx (%5lu kB)\n" 115 + " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n" 116 + " .data : 0x%08lx - 0x%08lx (%5lu kB)\n" 117 + " .init : 0x%08lx - 0x%08lx (%5lu kB)\n" 118 + " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n", 119 + #ifdef CONFIG_KASAN 120 + KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE, 121 + KASAN_SHADOW_SIZE >> 20, 122 + #endif 123 + #ifdef CONFIG_MMU 124 + VMALLOC_START, VMALLOC_END, 125 + (VMALLOC_END - VMALLOC_START) >> 20, 111 126 #ifdef CONFIG_HIGHMEM 112 127 PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, 113 128 (LAST_PKMAP*PAGE_SIZE) >> 10, 114 129 FIXADDR_START, FIXADDR_TOP, 115 130 (FIXADDR_TOP - FIXADDR_START) >> 10, 116 131 #endif 117 - #ifdef CONFIG_MMU 118 - VMALLOC_START, VMALLOC_END, 119 - (VMALLOC_END - VMALLOC_START) >> 20, 120 132 PAGE_OFFSET, PAGE_OFFSET + 121 133 (max_low_pfn - min_low_pfn) * PAGE_SIZE, 122 134 #else 123 135 min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, 124 136 #endif 125 - ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); 137 + ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20, 138 + (unsigned long)_text, (unsigned long)_etext, 139 + (unsigned long)(_etext - _text) >> 10, 140 + (unsigned long)__start_rodata, (unsigned long)_sdata, 141 + (unsigned long)(_sdata - __start_rodata) >> 10, 142 + (unsigned long)_sdata, (unsigned long)_edata, 143 + (unsigned 
long)(_edata - _sdata) >> 10, 144 + (unsigned long)__init_begin, (unsigned long)__init_end, 145 + (unsigned long)(__init_end - __init_begin) >> 10, 146 + (unsigned long)__bss_start, (unsigned long)__bss_stop, 147 + (unsigned long)(__bss_stop - __bss_start) >> 10); 126 148 } 127 149 128 150 #ifdef CONFIG_BLK_DEV_INITRD
+95
arch/xtensa/mm/kasan_init.c
··· 1 + /* 2 + * Xtensa KASAN shadow map initialization 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 2017 Cadence Design Systems Inc. 9 + */ 10 + 11 + #include <linux/bootmem.h> 12 + #include <linux/init_task.h> 13 + #include <linux/kasan.h> 14 + #include <linux/kernel.h> 15 + #include <linux/memblock.h> 16 + #include <asm/initialize_mmu.h> 17 + #include <asm/tlbflush.h> 18 + #include <asm/traps.h> 19 + 20 + void __init kasan_early_init(void) 21 + { 22 + unsigned long vaddr = KASAN_SHADOW_START; 23 + pgd_t *pgd = pgd_offset_k(vaddr); 24 + pmd_t *pmd = pmd_offset(pgd, vaddr); 25 + int i; 26 + 27 + for (i = 0; i < PTRS_PER_PTE; ++i) 28 + set_pte(kasan_zero_pte + i, 29 + mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL)); 30 + 31 + for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { 32 + BUG_ON(!pmd_none(*pmd)); 33 + set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte)); 34 + } 35 + early_trap_init(); 36 + } 37 + 38 + static void __init populate(void *start, void *end) 39 + { 40 + unsigned long n_pages = (end - start) / PAGE_SIZE; 41 + unsigned long n_pmds = n_pages / PTRS_PER_PTE; 42 + unsigned long i, j; 43 + unsigned long vaddr = (unsigned long)start; 44 + pgd_t *pgd = pgd_offset_k(vaddr); 45 + pmd_t *pmd = pmd_offset(pgd, vaddr); 46 + pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); 47 + 48 + pr_debug("%s: %p - %p\n", __func__, start, end); 49 + 50 + for (i = j = 0; i < n_pmds; ++i) { 51 + int k; 52 + 53 + for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { 54 + phys_addr_t phys = 55 + memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 56 + MEMBLOCK_ALLOC_ANYWHERE); 57 + 58 + set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); 59 + } 60 + } 61 + 62 + for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE) 63 + set_pmd(pmd + i, __pmd((unsigned long)pte)); 64 + 65 + 
local_flush_tlb_all(); 66 + memset(start, 0, end - start); 67 + } 68 + 69 + void __init kasan_init(void) 70 + { 71 + int i; 72 + 73 + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START - 74 + (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)); 75 + BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR); 76 + 77 + /* 78 + * Replace shadow map pages that cover addresses from VMALLOC area 79 + * start to the end of KSEG with clean writable pages. 80 + */ 81 + populate(kasan_mem_to_shadow((void *)VMALLOC_START), 82 + kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); 83 + 84 + /* Write protect kasan_zero_page and zero-initialize it again. */ 85 + for (i = 0; i < PTRS_PER_PTE; ++i) 86 + set_pte(kasan_zero_pte + i, 87 + mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO)); 88 + 89 + local_flush_tlb_all(); 90 + memset(kasan_zero_page, 0, PAGE_SIZE); 91 + 92 + /* At this point kasan is fully initialized. Enable error messages. */ 93 + current->kasan_depth = 0; 94 + pr_info("KernelAddressSanitizer initialized\n"); 95 + }
+17 -14
arch/xtensa/mm/mmu.c
··· 56 56 57 57 void __init paging_init(void) 58 58 { 59 - memset(swapper_pg_dir, 0, PAGE_SIZE); 60 59 #ifdef CONFIG_HIGHMEM 61 60 fixedrange_init(); 62 61 pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); ··· 81 82 set_itlbcfg_register(0); 82 83 set_dtlbcfg_register(0); 83 84 #endif 85 + init_kio(); 86 + local_flush_tlb_all(); 87 + 88 + /* Set rasid register to a known value. */ 89 + 90 + set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); 91 + 92 + /* Set PTEVADDR special register to the start of the page 93 + * table, which is in kernel mappable space (ie. not 94 + * statically mapped). This register's value is undefined on 95 + * reset. 96 + */ 97 + set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR); 98 + } 99 + 100 + void init_kio(void) 101 + { 84 102 #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) 85 103 /* 86 104 * Update the IO area mapping in case xtensa_kio_paddr has changed ··· 111 95 write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), 112 96 XCHAL_KIO_BYPASS_VADDR + 6); 113 97 #endif 114 - 115 - local_flush_tlb_all(); 116 - 117 - /* Set rasid register to a known value. */ 118 - 119 - set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); 120 - 121 - /* Set PTEVADDR special register to the start of the page 122 - * table, which is in kernel mappable space (ie. not 123 - * statically mapped). This register's value is undefined on 124 - * reset. 125 - */ 126 - set_ptevaddr_register(PGTABLE_START); 127 98 }
+2 -4
arch/xtensa/mm/tlb.c
··· 95 95 if (mm->context.asid[cpu] == NO_CONTEXT) 96 96 return; 97 97 98 - #if 0 99 - printk("[tlbrange<%02lx,%08lx,%08lx>]\n", 100 - (unsigned long)mm->context.asid[cpu], start, end); 101 - #endif 98 + pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n", 99 + (unsigned long)mm->context.asid[cpu], start, end); 102 100 local_irq_save(flags); 103 101 104 102 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
+2 -2
arch/xtensa/platforms/iss/console.c
··· 185 185 186 186 serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); 187 187 188 - printk ("%s %s\n", serial_name, serial_version); 188 + pr_info("%s %s\n", serial_name, serial_version); 189 189 190 190 /* Initialize the tty_driver structure */ 191 191 ··· 214 214 int error; 215 215 216 216 if ((error = tty_unregister_driver(serial_driver))) 217 - printk("ISS_SERIAL: failed to unregister serial driver (%d)\n", 217 + pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n", 218 218 error); 219 219 put_tty_driver(serial_driver); 220 220 tty_port_destroy(&serial_port);
+6 -8
arch/xtensa/platforms/iss/network.c
··· 16 16 * 17 17 */ 18 18 19 + #define pr_fmt(fmt) "%s: " fmt, __func__ 20 + 19 21 #include <linux/list.h> 20 22 #include <linux/irq.h> 21 23 #include <linux/spinlock.h> ··· 608 606 * those fields. They will be later initialized in iss_net_init. 609 607 */ 610 608 611 - #define ERR KERN_ERR "iss_net_setup: " 612 - 613 609 static int __init iss_net_setup(char *str) 614 610 { 615 611 struct iss_net_private *device = NULL; ··· 619 619 620 620 end = strchr(str, '='); 621 621 if (!end) { 622 - printk(ERR "Expected '=' after device number\n"); 622 + pr_err("Expected '=' after device number\n"); 623 623 return 1; 624 624 } 625 625 *end = 0; 626 626 rc = kstrtouint(str, 0, &n); 627 627 *end = '='; 628 628 if (rc < 0) { 629 - printk(ERR "Failed to parse '%s'\n", str); 629 + pr_err("Failed to parse '%s'\n", str); 630 630 return 1; 631 631 } 632 632 str = end; ··· 642 642 spin_unlock(&devices_lock); 643 643 644 644 if (device && device->index == n) { 645 - printk(ERR "Device %u already configured\n", n); 645 + pr_err("Device %u already configured\n", n); 646 646 return 1; 647 647 } 648 648 649 649 new = alloc_bootmem(sizeof(*new)); 650 650 if (new == NULL) { 651 - printk(ERR "Alloc_bootmem failed\n"); 651 + pr_err("Alloc_bootmem failed\n"); 652 652 return 1; 653 653 } 654 654 ··· 659 659 list_add_tail(&new->list, &eth_cmd_line); 660 660 return 1; 661 661 } 662 - 663 - #undef ERR 664 662 665 663 __setup("eth", iss_net_setup); 666 664