Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull first batch of ARC changes from Vineet Gupta:
"There's a second bunch to follow next week - which depends on commits
on other trees (irq/net). I'd have preferred the accompanying ARC
change via respective trees, but it didn't work out somehow.

Highlights of changes:

- Continuation of ARC MM changes from 3.10 including

zero page optimization
Setting pagecache pages dirty by default
Non executable stack by default
Reducing dcache flushes for aliasing VIPT config

- Long overdue rework of pt_regs machinery - removing the unused word
gutters and adding ECR register to baseline (helps cleanup lot of
low level code)

- Support for ARC gcc 4.8

- Few other preventive fixes, cosmetics, usage of Kconfig helper..

The diffstat is larger than normal primarily because of arcregs.h
header split as well as beautification of macros in entry.h"

* tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (32 commits)
ARC: warn on improper stack unwind FDE entries
arc: delete __cpuinit usage from all arc files
ARC: [tlb-miss] Fix bug with CONFIG_ARC_DBG_TLB_MISS_COUNT
ARC: [tlb-miss] Extraneous PTE bit testing/setting
ARC: Adjustments for gcc 4.8
ARC: Setup Vector Table Base in early boot
ARC: Remove explicit passing around of ECR
ARC: pt_regs update #5: Use real ECR for pt_regs->event vs. synth values
ARC: stop using pt_regs->orig_r8
ARC: pt_regs update #4: r25 saved/restored unconditionally
ARC: K/U SP saved from one location in stack switching macro
ARC: Entry Handler tweaks: Simplify branch for in-kernel preemption
ARC: Entry Handler tweaks: Avoid hardcoded LIMMS for ECR values
ARC: Increase readability of entry handlers
ARC: pt_regs update #3: Remove unused gutter at start of callee_regs
ARC: pt_regs update #2: Remove unused gutter at start of pt_regs
ARC: pt_regs update #1: Align pt_regs end with end of kernel stack page
ARC: pt_regs update #0: remove kernel stack canary
ARC: [mm] Remove @write argument to do_page_fault()
ARC: [mm] Make stack/heap Non-executable by default
...

+668 -874
+1 -7
arch/arc/Kconfig
··· 184 184 185 185 config ARC_CACHE_VIPT_ALIASING 186 186 bool "Support VIPT Aliasing D$" 187 + depends on ARC_HAS_DCACHE 187 188 default n 188 189 189 190 endif #ARC_CACHE ··· 361 360 This enables misaligned 16 & 32 bit memory access from user space. 362 361 Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide 363 362 potential bugs in code 364 - 365 - config ARC_STACK_NONEXEC 366 - bool "Make stack non-executable" 367 - default n 368 - help 369 - To disable the execute permissions of stack/heap of processes 370 - which are enabled by default. 371 363 372 364 config HZ 373 365 int "Timer Frequency"
+17 -11
arch/arc/Makefile
··· 9 9 UTS_MACHINE := arc 10 10 11 11 ifeq ($(CROSS_COMPILE),) 12 - CROSS_COMPILE := arc-elf32- 12 + CROSS_COMPILE := arc-linux-uclibc- 13 13 endif 14 14 15 15 KBUILD_DEFCONFIG := fpga_defconfig 16 16 17 17 cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ 18 18 19 - LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h 20 - 21 19 ifdef CONFIG_ARC_CURR_IN_REG 22 20 # For a global register defintion, make sure it gets passed to every file 23 21 # We had a customer reported bug where some code built in kernel was NOT using 24 22 # any kernel headers, and missing the r25 global register 25 - # Can't do unconditionally (like above) because of recursive include issues 23 + # Can't do unconditionally because of recursive include issues 26 24 # due to <linux/thread_info.h> 27 25 LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h 28 26 endif 29 27 30 - atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y) 28 + upto_gcc42 := $(call cc-ifversion, -le, 0402, y) 29 + upto_gcc44 := $(call cc-ifversion, -le, 0404, y) 30 + atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) 31 + atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y) 32 + 31 33 cflags-$(atleast_gcc44) += -fsection-anchors 32 34 33 35 cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 34 36 cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 35 37 cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc 36 38 cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables 39 + 40 + # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok 41 + ifeq ($(atleast_gcc48),y) 42 + cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2 43 + endif 37 44 38 45 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 39 46 # Generic build system uses -O2, we want -O3 ··· 55 48 cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian 56 49 ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB 57 50 58 - # STAR 9000518362: 51 + # STAR 9000518362: (fixed with binutils shipping with gcc 4.8) 59 52 # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept 60 - # 
--build-id w/o "-marclinux". 61 - # Default arc-elf32-ld is OK 62 - ldflags-y += -marclinux 53 + # --build-id w/o "-marclinux". Default arc-elf32-ld is OK 54 + ldflags-$(upto_gcc44) += -marclinux 63 55 64 56 ARC_LIBGCC := -mA7 65 57 cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16 ··· 72 66 # With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments, 73 67 # e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted 74 68 75 - ARC_LIBGCC := -marc600 76 - ifneq ($(atleast_gcc44),y) 69 + ifeq ($(upto_gcc42),y) 70 + ARC_LIBGCC := -marc600 77 71 cflags-y += -multcost=30 78 72 endif 79 73 endif
+1 -1
arch/arc/configs/fpga_defconfig
··· 1 - CONFIG_CROSS_COMPILE="arc-elf32-" 1 + CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 2 2 # CONFIG_LOCALVERSION_AUTO is not set 3 3 CONFIG_DEFAULT_HOSTNAME="ARCLinux" 4 4 # CONFIG_SWAP is not set
+1 -1
arch/arc/configs/nsimosci_defconfig
··· 1 - CONFIG_CROSS_COMPILE="arc-elf32-" 1 + CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 2 2 # CONFIG_LOCALVERSION_AUTO is not set 3 3 CONFIG_DEFAULT_HOSTNAME="ARCLinux" 4 4 # CONFIG_SWAP is not set
+1 -1
arch/arc/configs/tb10x_defconfig
··· 1 - CONFIG_CROSS_COMPILE="arc-elf32-" 1 + CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 2 2 # CONFIG_LOCALVERSION_AUTO is not set 3 3 CONFIG_DEFAULT_HOSTNAME="tb10x" 4 4 CONFIG_SYSVIPC=y
+10 -117
arch/arc/include/asm/arcregs.h
··· 20 20 #define ARC_REG_PERIBASE_BCR 0x69 21 21 #define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */ 22 22 #define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */ 23 - #define ARC_REG_MMU_BCR 0x6f 24 23 #define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ 25 24 #define ARC_REG_TIMERS_BCR 0x75 26 25 #define ARC_REG_ICCM_BCR 0x78 ··· 33 34 #define ARC_REG_D_UNCACH_BCR 0x6A 34 35 35 36 /* status32 Bits Positions */ 36 - #define STATUS_H_BIT 0 /* CPU Halted */ 37 - #define STATUS_E1_BIT 1 /* Int 1 enable */ 38 - #define STATUS_E2_BIT 2 /* Int 2 enable */ 39 - #define STATUS_A1_BIT 3 /* Int 1 active */ 40 - #define STATUS_A2_BIT 4 /* Int 2 active */ 41 37 #define STATUS_AE_BIT 5 /* Exception active */ 42 38 #define STATUS_DE_BIT 6 /* PC is in delay slot */ 43 39 #define STATUS_U_BIT 7 /* User/Kernel mode */ 44 40 #define STATUS_L_BIT 12 /* Loop inhibit */ 45 41 46 42 /* These masks correspond to the status word(STATUS_32) bits */ 47 - #define STATUS_H_MASK (1<<STATUS_H_BIT) 48 - #define STATUS_E1_MASK (1<<STATUS_E1_BIT) 49 - #define STATUS_E2_MASK (1<<STATUS_E2_BIT) 50 - #define STATUS_A1_MASK (1<<STATUS_A1_BIT) 51 - #define STATUS_A2_MASK (1<<STATUS_A2_BIT) 52 43 #define STATUS_AE_MASK (1<<STATUS_AE_BIT) 53 44 #define STATUS_DE_MASK (1<<STATUS_DE_BIT) 54 45 #define STATUS_U_MASK (1<<STATUS_U_BIT) ··· 60 71 #define ECR_V_ITLB_MISS 0x21 61 72 #define ECR_V_DTLB_MISS 0x22 62 73 #define ECR_V_PROTV 0x23 74 + #define ECR_V_TRAP 0x25 63 75 64 76 /* Protection Violation Exception Cause Code Values */ 65 77 #define ECR_C_PROTV_INST_FETCH 0x00 ··· 69 79 #define ECR_C_PROTV_XCHG 0x03 70 80 #define ECR_C_PROTV_MISALIG_DATA 0x04 71 81 82 + #define ECR_C_BIT_PROTV_MISALIG_DATA 10 83 + 84 + /* Machine Check Cause Code Values */ 85 + #define ECR_C_MCHK_DUP_TLB 0x01 86 + 72 87 /* DTLB Miss Exception Cause Code Values */ 73 88 #define ECR_C_BIT_DTLB_LD_MISS 8 74 89 #define ECR_C_BIT_DTLB_ST_MISS 9 75 90 91 + /* Dummy ECR values for Interrupts */ 92 + #define event_IRQ1 0x0031abcd 93 
+ #define event_IRQ2 0x0032abcd 76 94 77 95 /* Auxiliary registers */ 78 96 #define AUX_IDENTITY 4 79 97 #define AUX_INTR_VEC_BASE 0x25 80 - #define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */ 81 - #define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ 82 - #define AUX_IRQ_LV12 0x43 /* interrupt level register */ 83 98 84 - #define AUX_IENABLE 0x40c 85 - #define AUX_ITRIGGER 0x40d 86 - #define AUX_IPULSE 0x415 87 - 88 - /* Timer related Aux registers */ 89 - #define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ 90 - #define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ 91 - #define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */ 92 - #define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */ 93 - #define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ 94 - #define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ 95 - 96 - #define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ 97 - #define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ 98 - 99 - /* MMU Management regs */ 100 - #define ARC_REG_TLBPD0 0x405 101 - #define ARC_REG_TLBPD1 0x406 102 - #define ARC_REG_TLBINDEX 0x407 103 - #define ARC_REG_TLBCOMMAND 0x408 104 - #define ARC_REG_PID 0x409 105 - #define ARC_REG_SCRATCH_DATA0 0x418 106 - 107 - /* Bits in MMU PID register */ 108 - #define MMU_ENABLE (1 << 31) /* Enable MMU for process */ 109 - 110 - /* Error code if probe fails */ 111 - #define TLB_LKUP_ERR 0x80000000 112 - 113 - /* TLB Commands */ 114 - #define TLBWrite 0x1 115 - #define TLBRead 0x2 116 - #define TLBGetIndex 0x3 117 - #define TLBProbe 0x4 118 - 119 - #if (CONFIG_ARC_MMU_VER >= 2) 120 - #define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ 121 - #define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ 122 - #else 123 - #undef TLBWriteNI /* These cmds don't exist on older MMU */ 124 - #undef TLBIVUTLB 125 - #endif 126 - 127 - /* Instruction cache related Auxiliary registers */ 128 - #define ARC_REG_IC_BCR 0x77 /* Build Config reg */ 129 - #define ARC_REG_IC_IVIC 0x10 130 
- #define ARC_REG_IC_CTRL 0x11 131 - #define ARC_REG_IC_IVIL 0x19 132 - #if (CONFIG_ARC_MMU_VER > 2) 133 - #define ARC_REG_IC_PTAG 0x1E 134 - #endif 135 - 136 - /* Bit val in IC_CTRL */ 137 - #define IC_CTRL_CACHE_DISABLE 0x1 138 - 139 - /* Data cache related Auxiliary registers */ 140 - #define ARC_REG_DC_BCR 0x72 141 - #define ARC_REG_DC_IVDC 0x47 142 - #define ARC_REG_DC_CTRL 0x48 143 - #define ARC_REG_DC_IVDL 0x4A 144 - #define ARC_REG_DC_FLSH 0x4B 145 - #define ARC_REG_DC_FLDL 0x4C 146 - #if (CONFIG_ARC_MMU_VER > 2) 147 - #define ARC_REG_DC_PTAG 0x5C 148 - #endif 149 - 150 - /* Bit val in DC_CTRL */ 151 - #define DC_CTRL_INV_MODE_FLUSH 0x40 152 - #define DC_CTRL_FLUSH_STATUS 0x100 153 - 154 - /* MMU Management regs */ 155 - #define ARC_REG_PID 0x409 156 - #define ARC_REG_SCRATCH_DATA0 0x418 157 - 158 - /* Bits in MMU PID register */ 159 - #define MMU_ENABLE (1 << 31) /* Enable MMU for process */ 160 99 161 100 /* 162 101 * Floating Pt Registers ··· 212 293 #endif 213 294 }; 214 295 215 - struct bcr_mmu_1_2 { 216 - #ifdef CONFIG_CPU_BIG_ENDIAN 217 - unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8; 218 - #else 219 - unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8; 220 - #endif 221 - }; 222 - 223 - struct bcr_mmu_3 { 224 - #ifdef CONFIG_CPU_BIG_ENDIAN 225 - unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, 226 - u_itlb:4, u_dtlb:4; 227 - #else 228 - unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, 229 - ways:4, ver:8; 230 - #endif 231 - }; 232 - 233 296 #define EXTN_SWAP_VALID 0x1 234 297 #define EXTN_NORM_VALID 0x2 235 298 #define EXTN_MINMAX_VALID 0x2 ··· 241 340 unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8; 242 341 #else 243 342 unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2; 244 - #endif 245 - }; 246 - 247 - struct bcr_cache { 248 - #ifdef CONFIG_CPU_BIG_ENDIAN 249 - unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; 250 - #else 251 - unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; 252 
343 #endif 253 344 }; 254 345 ··· 296 403 }; 297 404 298 405 struct cpuinfo_arc_cache { 299 - unsigned int has_aliasing, sz, line_len, assoc, ver; 406 + unsigned int sz, line_len, assoc, ver; 300 407 }; 301 408 302 409 struct cpuinfo_arc_ccm {
+2 -3
arch/arc/include/asm/bug.h
··· 18 18 void show_regs(struct pt_regs *regs); 19 19 void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs); 20 20 void show_kernel_fault_diag(const char *str, struct pt_regs *regs, 21 - unsigned long address, unsigned long cause_reg); 22 - void die(const char *str, struct pt_regs *regs, unsigned long address, 23 - unsigned long cause_reg); 21 + unsigned long address); 22 + void die(const char *str, struct pt_regs *regs, unsigned long address); 24 23 25 24 #define BUG() do { \ 26 25 dump_stack(); \
+9 -17
arch/arc/include/asm/cache.h
··· 18 18 19 19 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 20 20 21 - #define ARC_ICACHE_WAYS 2 22 - #define ARC_DCACHE_WAYS 4 23 - 24 - /* Helpers */ 21 + /* For a rare case where customers have differently config I/D */ 25 22 #define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES 26 23 #define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES 27 24 28 25 #define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1)) 29 26 #define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1)) 30 27 31 - #if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN 32 - #error "Need to fix some code as I/D cache lines not same" 33 - #else 34 - #define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK)) 35 - #endif 28 + /* 29 + * ARC700 doesn't cache any access in top 256M. 30 + * Ideal for wiring memory mapped peripherals as we don't need to do 31 + * explicit uncached accesses (LD.di/ST.di) hence more portable drivers 32 + */ 33 + #define ARC_UNCACHED_ADDR_SPACE 0xc0000000 36 34 37 35 #ifndef __ASSEMBLY__ 38 36 ··· 55 57 56 58 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 57 59 58 - /* 59 - * ARC700 doesn't cache any access in top 256M. 60 - * Ideal for wiring memory mapped peripherals as we don't need to do 61 - * explicit uncached accesses (LD.di/ST.di) hence more portable drivers 62 - */ 63 - #define ARC_UNCACHED_ADDR_SPACE 0xc0000000 64 - 65 60 extern void arc_cache_init(void); 66 61 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); 67 62 extern void __init read_decode_cache_bcr(void); 68 - #endif 63 + 64 + #endif /* !__ASSEMBLY__ */ 69 65 70 66 #endif /* _ASM_CACHE_H */
+8 -5
arch/arc/include/asm/cacheflush.h
··· 81 81 #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ 82 82 83 83 /* 84 + * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default 85 + * This works around some PIO based drivers which don't call flush_dcache_page 86 + * to record that they dirtied the dcache 87 + */ 88 + #define PG_dc_clean PG_arch_1 89 + 90 + /* 84 91 * Simple wrapper over config option 85 92 * Bootup code ensures that hardware matches kernel configuration 86 93 */ 87 94 static inline int cache_is_vipt_aliasing(void) 88 95 { 89 - #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING 90 - return 1; 91 - #else 92 - return 0; 93 - #endif 96 + return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); 94 97 } 95 98 96 99 #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-56
arch/arc/include/asm/defines.h
··· 1 - /* 2 - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - */ 8 - 9 - #ifndef __ARC_ASM_DEFINES_H__ 10 - #define __ARC_ASM_DEFINES_H__ 11 - 12 - #if defined(CONFIG_ARC_MMU_V1) 13 - #define CONFIG_ARC_MMU_VER 1 14 - #elif defined(CONFIG_ARC_MMU_V2) 15 - #define CONFIG_ARC_MMU_VER 2 16 - #elif defined(CONFIG_ARC_MMU_V3) 17 - #define CONFIG_ARC_MMU_VER 3 18 - #endif 19 - 20 - #ifdef CONFIG_ARC_HAS_LLSC 21 - #define __CONFIG_ARC_HAS_LLSC_VAL 1 22 - #else 23 - #define __CONFIG_ARC_HAS_LLSC_VAL 0 24 - #endif 25 - 26 - #ifdef CONFIG_ARC_HAS_SWAPE 27 - #define __CONFIG_ARC_HAS_SWAPE_VAL 1 28 - #else 29 - #define __CONFIG_ARC_HAS_SWAPE_VAL 0 30 - #endif 31 - 32 - #ifdef CONFIG_ARC_HAS_RTSC 33 - #define __CONFIG_ARC_HAS_RTSC_VAL 1 34 - #else 35 - #define __CONFIG_ARC_HAS_RTSC_VAL 0 36 - #endif 37 - 38 - #ifdef CONFIG_ARC_MMU_SASID 39 - #define __CONFIG_ARC_MMU_SASID_VAL 1 40 - #else 41 - #define __CONFIG_ARC_MMU_SASID_VAL 0 42 - #endif 43 - 44 - #ifdef CONFIG_ARC_HAS_ICACHE 45 - #define __CONFIG_ARC_HAS_ICACHE 1 46 - #else 47 - #define __CONFIG_ARC_HAS_ICACHE 0 48 - #endif 49 - 50 - #ifdef CONFIG_ARC_HAS_DCACHE 51 - #define __CONFIG_ARC_HAS_DCACHE 1 52 - #else 53 - #define __CONFIG_ARC_HAS_DCACHE 0 54 - #endif 55 - 56 - #endif /* __ARC_ASM_DEFINES_H__ */
+209 -308
arch/arc/include/asm/entry.h
··· 50 50 * Eff Addr for load = [reg2] 51 51 */ 52 52 53 - /*-------------------------------------------------------------- 54 - * Save caller saved registers (scratch registers) ( r0 - r12 ) 55 - * Registers are pushed / popped in the order defined in struct ptregs 56 - * in asm/ptrace.h 57 - *-------------------------------------------------------------*/ 58 - .macro SAVE_CALLER_SAVED 59 - st.a r0, [sp, -4] 60 - st.a r1, [sp, -4] 61 - st.a r2, [sp, -4] 62 - st.a r3, [sp, -4] 63 - st.a r4, [sp, -4] 64 - st.a r5, [sp, -4] 65 - st.a r6, [sp, -4] 66 - st.a r7, [sp, -4] 67 - st.a r8, [sp, -4] 68 - st.a r9, [sp, -4] 69 - st.a r10, [sp, -4] 70 - st.a r11, [sp, -4] 71 - st.a r12, [sp, -4] 53 + .macro PUSH reg 54 + st.a \reg, [sp, -4] 55 + .endm 56 + 57 + .macro PUSHAX aux 58 + lr r9, [\aux] 59 + PUSH r9 60 + .endm 61 + 62 + .macro POP reg 63 + ld.ab \reg, [sp, 4] 64 + .endm 65 + 66 + .macro POPAX aux 67 + POP r9 68 + sr r9, [\aux] 72 69 .endm 73 70 74 71 /*-------------------------------------------------------------- 75 - * Restore caller saved registers (scratch registers) 72 + * Helpers to save/restore Scratch Regs: 73 + * used by Interrupt/Exception Prologue/Epilogue 76 74 *-------------------------------------------------------------*/ 77 - .macro RESTORE_CALLER_SAVED 78 - ld.ab r12, [sp, 4] 79 - ld.ab r11, [sp, 4] 80 - ld.ab r10, [sp, 4] 81 - ld.ab r9, [sp, 4] 82 - ld.ab r8, [sp, 4] 83 - ld.ab r7, [sp, 4] 84 - ld.ab r6, [sp, 4] 85 - ld.ab r5, [sp, 4] 86 - ld.ab r4, [sp, 4] 87 - ld.ab r3, [sp, 4] 88 - ld.ab r2, [sp, 4] 89 - ld.ab r1, [sp, 4] 90 - ld.ab r0, [sp, 4] 75 + .macro SAVE_R0_TO_R12 76 + PUSH r0 77 + PUSH r1 78 + PUSH r2 79 + PUSH r3 80 + PUSH r4 81 + PUSH r5 82 + PUSH r6 83 + PUSH r7 84 + PUSH r8 85 + PUSH r9 86 + PUSH r10 87 + PUSH r11 88 + PUSH r12 91 89 .endm 92 90 91 + .macro RESTORE_R12_TO_R0 92 + POP r12 93 + POP r11 94 + POP r10 95 + POP r9 96 + POP r8 97 + POP r7 98 + POP r6 99 + POP r5 100 + POP r4 101 + POP r3 102 + POP r2 103 + POP r1 104 + POP 
r0 105 + 106 + #ifdef CONFIG_ARC_CURR_IN_REG 107 + ld r25, [sp, 12] 108 + #endif 109 + .endm 93 110 94 111 /*-------------------------------------------------------------- 95 - * Save callee saved registers (non scratch registers) ( r13 - r25 ) 96 - * on kernel stack. 97 - * User mode callee regs need to be saved in case of 98 - * -fork and friends for replicating from parent to child 99 - * -before going into do_signal( ) for ptrace/core-dump 100 - * Special case handling is required for r25 in case it is used by kernel 101 - * for caching task ptr. Low level exception/ISR save user mode r25 102 - * into task->thread.user_r25. So it needs to be retrieved from there and 103 - * saved into kernel stack with rest of callee reg-file 112 + * Helpers to save/restore callee-saved regs: 113 + * used by several macros below 114 + *-------------------------------------------------------------*/ 115 + .macro SAVE_R13_TO_R24 116 + PUSH r13 117 + PUSH r14 118 + PUSH r15 119 + PUSH r16 120 + PUSH r17 121 + PUSH r18 122 + PUSH r19 123 + PUSH r20 124 + PUSH r21 125 + PUSH r22 126 + PUSH r23 127 + PUSH r24 128 + .endm 129 + 130 + .macro RESTORE_R24_TO_R13 131 + POP r24 132 + POP r23 133 + POP r22 134 + POP r21 135 + POP r20 136 + POP r19 137 + POP r18 138 + POP r17 139 + POP r16 140 + POP r15 141 + POP r14 142 + POP r13 143 + .endm 144 + 145 + #define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4 146 + 147 + /*-------------------------------------------------------------- 148 + * Collect User Mode callee regs as struct callee_regs - needed by 149 + * fork/do_signal/unaligned-access-emulation. 150 + * (By default only scratch regs are saved on entry to kernel) 151 + * 152 + * Special handling for r25 if used for caching Task Pointer. 153 + * It would have been saved in task->thread.user_r25 already, but to keep 154 + * the interface same it is copied into regular r25 placeholder in 155 + * struct callee_regs. 
104 156 *-------------------------------------------------------------*/ 105 157 .macro SAVE_CALLEE_SAVED_USER 106 - st.a r13, [sp, -4] 107 - st.a r14, [sp, -4] 108 - st.a r15, [sp, -4] 109 - st.a r16, [sp, -4] 110 - st.a r17, [sp, -4] 111 - st.a r18, [sp, -4] 112 - st.a r19, [sp, -4] 113 - st.a r20, [sp, -4] 114 - st.a r21, [sp, -4] 115 - st.a r22, [sp, -4] 116 - st.a r23, [sp, -4] 117 - st.a r24, [sp, -4] 158 + 159 + SAVE_R13_TO_R24 118 160 119 161 #ifdef CONFIG_ARC_CURR_IN_REG 120 162 ; Retrieve orig r25 and save it on stack 121 - ld r12, [r25, TASK_THREAD + THREAD_USER_R25] 163 + ld.as r12, [sp, OFF_USER_R25_FROM_R24] 122 164 st.a r12, [sp, -4] 123 165 #else 124 - st.a r25, [sp, -4] 166 + PUSH r25 125 167 #endif 126 168 127 - /* move up by 1 word to "create" callee_regs->"stack_place_holder" */ 128 - sub sp, sp, 4 129 169 .endm 130 170 131 171 /*-------------------------------------------------------------- 132 - * Save callee saved registers (non scratch registers) ( r13 - r25 ) 133 - * kernel mode callee regs needed to be saved in case of context switch 134 - * If r25 is used for caching task pointer then that need not be saved 135 - * as it can be re-created from current task global 172 + * Save kernel Mode callee regs at the time of Contect Switch. 173 + * 174 + * Special handling for r25 if used for caching Task Pointer. 
175 + * Kernel simply skips saving it since it will be loaded with 176 + * incoming task pointer anyways 136 177 *-------------------------------------------------------------*/ 137 178 .macro SAVE_CALLEE_SAVED_KERNEL 138 - st.a r13, [sp, -4] 139 - st.a r14, [sp, -4] 140 - st.a r15, [sp, -4] 141 - st.a r16, [sp, -4] 142 - st.a r17, [sp, -4] 143 - st.a r18, [sp, -4] 144 - st.a r19, [sp, -4] 145 - st.a r20, [sp, -4] 146 - st.a r21, [sp, -4] 147 - st.a r22, [sp, -4] 148 - st.a r23, [sp, -4] 149 - st.a r24, [sp, -4] 179 + 180 + SAVE_R13_TO_R24 181 + 150 182 #ifdef CONFIG_ARC_CURR_IN_REG 151 - sub sp, sp, 8 152 - #else 153 - st.a r25, [sp, -4] 154 183 sub sp, sp, 4 184 + #else 185 + PUSH r25 155 186 #endif 156 187 .endm 157 188 158 189 /*-------------------------------------------------------------- 159 - * RESTORE_CALLEE_SAVED_KERNEL: 160 - * Loads callee (non scratch) Reg File by popping from Kernel mode stack. 161 - * This is reverse of SAVE_CALLEE_SAVED, 162 - * 163 - * NOTE: 164 - * Ideally this shd only be called in switch_to for loading 165 - * switched-IN task's CALLEE Reg File. 166 - * For all other cases RESTORE_CALLEE_SAVED_FAST must be used 167 - * which simply pops the stack w/o touching regs. 
190 + * Opposite of SAVE_CALLEE_SAVED_KERNEL 168 191 *-------------------------------------------------------------*/ 169 192 .macro RESTORE_CALLEE_SAVED_KERNEL 170 193 171 - 172 194 #ifdef CONFIG_ARC_CURR_IN_REG 173 - add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */ 195 + add sp, sp, 4 /* skip usual r25 placeholder */ 174 196 #else 175 - add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */ 176 - ld.ab r25, [sp, 4] 197 + POP r25 177 198 #endif 178 - 179 - ld.ab r24, [sp, 4] 180 - ld.ab r23, [sp, 4] 181 - ld.ab r22, [sp, 4] 182 - ld.ab r21, [sp, 4] 183 - ld.ab r20, [sp, 4] 184 - ld.ab r19, [sp, 4] 185 - ld.ab r18, [sp, 4] 186 - ld.ab r17, [sp, 4] 187 - ld.ab r16, [sp, 4] 188 - ld.ab r15, [sp, 4] 189 - ld.ab r14, [sp, 4] 190 - ld.ab r13, [sp, 4] 191 - 199 + RESTORE_R24_TO_R13 192 200 .endm 193 201 194 202 /*-------------------------------------------------------------- 195 - * RESTORE_CALLEE_SAVED_USER: 196 - * This is called after do_signal where tracer might have changed callee regs 197 - * thus we need to restore the reg file. 198 - * Special case handling is required for r25 in case it is used by kernel 199 - * for caching task ptr. Ptrace would have modified on-kernel-stack value of 200 - * r25, which needs to be shoved back into task->thread.user_r25 where from 201 - * Low level exception/ISR return code will retrieve to populate with rest of 202 - * callee reg-file. 
203 + * Opposite of SAVE_CALLEE_SAVED_USER 204 + * 205 + * ptrace tracer or unaligned-access fixup might have changed a user mode 206 + * callee reg which is saved back to usual r25 storage location 203 207 *-------------------------------------------------------------*/ 204 208 .macro RESTORE_CALLEE_SAVED_USER 205 209 206 - add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */ 207 - 208 210 #ifdef CONFIG_ARC_CURR_IN_REG 209 211 ld.ab r12, [sp, 4] 210 - st r12, [r25, TASK_THREAD + THREAD_USER_R25] 212 + st.as r12, [sp, OFF_USER_R25_FROM_R24] 211 213 #else 212 - ld.ab r25, [sp, 4] 214 + POP r25 213 215 #endif 214 - 215 - ld.ab r24, [sp, 4] 216 - ld.ab r23, [sp, 4] 217 - ld.ab r22, [sp, 4] 218 - ld.ab r21, [sp, 4] 219 - ld.ab r20, [sp, 4] 220 - ld.ab r19, [sp, 4] 221 - ld.ab r18, [sp, 4] 222 - ld.ab r17, [sp, 4] 223 - ld.ab r16, [sp, 4] 224 - ld.ab r15, [sp, 4] 225 - ld.ab r14, [sp, 4] 226 - ld.ab r13, [sp, 4] 216 + RESTORE_R24_TO_R13 227 217 .endm 228 218 229 219 /*-------------------------------------------------------------- 230 220 * Super FAST Restore callee saved regs by simply re-adjusting SP 231 221 *-------------------------------------------------------------*/ 232 222 .macro DISCARD_CALLEE_SAVED_USER 233 - add sp, sp, 14 * 4 234 - .endm 235 - 236 - /*-------------------------------------------------------------- 237 - * Restore User mode r25 saved in task_struct->thread.user_r25 238 - *-------------------------------------------------------------*/ 239 - .macro RESTORE_USER_R25 240 - ld r25, [r25, TASK_THREAD + THREAD_USER_R25] 223 + add sp, sp, SZ_CALLEE_REGS 241 224 .endm 242 225 243 226 /*------------------------------------------------------------- ··· 235 252 ld \out, [\tsk, TASK_THREAD_INFO] 236 253 237 254 /* Go to end of page where stack begins (grows upwards) */ 238 - add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */ 255 + add2 \out, \out, (THREAD_SIZE)/4 239 256 240 257 .endm 241 258 ··· 288 305 * safe-keeping not really needed, but 
it keeps the epilogue code 289 306 * (SP restore) simpler/uniform. 290 307 */ 291 - b.d 77f 292 - 293 - st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8 308 + b.d 66f 309 + mov r9, sp 294 310 295 311 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */ 296 312 297 313 GET_CURR_TASK_ON_CPU r9 298 314 299 - #ifdef CONFIG_ARC_CURR_IN_REG 300 - 301 - /* If current task pointer cached in r25, time to 302 - * -safekeep USER r25 in task->thread_struct->user_r25 303 - * -load r25 with current task ptr 304 - */ 305 - st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4] 306 - mov r25, r9 307 - #endif 308 - 309 315 /* With current tsk in r9, get it's kernel mode stack base */ 310 316 GET_TSK_STACK_BASE r9, r9 311 317 312 - #ifdef PT_REGS_CANARY 313 - st 0xabcdabcd, [r9, 0] 318 + 66: 319 + #ifdef CONFIG_ARC_CURR_IN_REG 320 + /* 321 + * Treat r25 as scratch reg, save it on stack first 322 + * Load it with current task pointer 323 + */ 324 + st r25, [r9, -4] 325 + GET_CURR_TASK_ON_CPU r25 314 326 #endif 315 327 316 328 /* Save Pre Intr/Exception User SP on kernel stack */ 317 - st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8 329 + st.a sp, [r9, -16] ; Make room for orig_r0, ECR, user_r25 318 330 319 331 /* CAUTION: 320 332 * SP should be set at the very end when we are done with everything ··· 320 342 /* set SP to point to kernel mode stack */ 321 343 mov sp, r9 322 344 323 - 77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */ 345 + /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */ 324 346 325 347 .endm 326 348 ··· 347 369 * @reg [OUT] &thread_info of "current" 348 370 */ 349 371 .macro GET_CURR_THR_INFO_FROM_SP reg 350 - and \reg, sp, ~(THREAD_SIZE - 1) 372 + bic \reg, sp, (THREAD_SIZE - 1) 351 373 .endm 352 374 353 375 /* ··· 391 413 * Note that syscalls are implemented via TRAP which is also a exception 392 414 * from CPU's point of view 393 415 *-------------------------------------------------------------*/ 394 
- .macro SAVE_ALL_EXCEPTION marker 416 + .macro SAVE_ALL_SYS 395 417 396 - st \marker, [sp, 8] /* orig_r8 */ 418 + lr r9, [ecr] 419 + st r9, [sp, 8] /* ECR */ 397 420 st r0, [sp, 4] /* orig_r0, needed only for sys calls */ 398 421 399 422 /* Restore r9 used to code the early prologue */ 400 423 EXCPN_PROLOG_RESTORE_REG r9 401 424 402 - SAVE_CALLER_SAVED 403 - st.a r26, [sp, -4] /* gp */ 404 - st.a fp, [sp, -4] 405 - st.a blink, [sp, -4] 406 - lr r9, [eret] 407 - st.a r9, [sp, -4] 408 - lr r9, [erstatus] 409 - st.a r9, [sp, -4] 410 - st.a lp_count, [sp, -4] 411 - lr r9, [lp_end] 412 - st.a r9, [sp, -4] 413 - lr r9, [lp_start] 414 - st.a r9, [sp, -4] 415 - lr r9, [erbta] 416 - st.a r9, [sp, -4] 417 - 418 - #ifdef PT_REGS_CANARY 419 - mov r9, 0xdeadbeef 420 - st r9, [sp, -4] 421 - #endif 422 - 423 - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ 424 - sub sp, sp, 4 425 - .endm 426 - 427 - /*-------------------------------------------------------------- 428 - * Save scratch regs for exceptions 429 - *-------------------------------------------------------------*/ 430 - .macro SAVE_ALL_SYS 431 - SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN 432 - .endm 433 - 434 - /*-------------------------------------------------------------- 435 - * Save scratch regs for sys calls 436 - *-------------------------------------------------------------*/ 437 - .macro SAVE_ALL_TRAP 438 - /* 439 - * Setup pt_regs->orig_r8. 440 - * Encode syscall number (r8) in upper short word of event type (r9) 441 - * N.B. 
#1: This is already endian safe (see ptrace.h) 442 - * #2: Only r9 can be used as scratch as it is already clobbered 443 - * and it's contents are no longer needed by the latter part 444 - * of exception prologue 445 - */ 446 - lsl r9, r8, 16 447 - or r9, r9, orig_r8_IS_SCALL 448 - 449 - SAVE_ALL_EXCEPTION r9 425 + SAVE_R0_TO_R12 426 + PUSH gp 427 + PUSH fp 428 + PUSH blink 429 + PUSHAX eret 430 + PUSHAX erstatus 431 + PUSH lp_count 432 + PUSHAX lp_end 433 + PUSHAX lp_start 434 + PUSHAX erbta 450 435 .endm 451 436 452 437 /*-------------------------------------------------------------- ··· 424 483 * by hardware and that is not good. 425 484 *-------------------------------------------------------------*/ 426 485 .macro RESTORE_ALL_SYS 486 + POPAX erbta 487 + POPAX lp_start 488 + POPAX lp_end 427 489 428 - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ 490 + POP r9 491 + mov lp_count, r9 ;LD to lp_count is not allowed 429 492 430 - ld.ab r9, [sp, 4] 431 - sr r9, [erbta] 432 - ld.ab r9, [sp, 4] 433 - sr r9, [lp_start] 434 - ld.ab r9, [sp, 4] 435 - sr r9, [lp_end] 436 - ld.ab r9, [sp, 4] 437 - mov lp_count, r9 438 - ld.ab r9, [sp, 4] 439 - sr r9, [erstatus] 440 - ld.ab r9, [sp, 4] 441 - sr r9, [eret] 442 - ld.ab blink, [sp, 4] 443 - ld.ab fp, [sp, 4] 444 - ld.ab r26, [sp, 4] /* gp */ 445 - RESTORE_CALLER_SAVED 493 + POPAX erstatus 494 + POPAX eret 495 + POP blink 496 + POP fp 497 + POP gp 498 + RESTORE_R12_TO_R0 446 499 447 500 ld sp, [sp] /* restore original sp */ 448 - /* orig_r0 and orig_r8 skipped automatically */ 501 + /* orig_r0, ECR, user_r25 skipped automatically */ 449 502 .endm 450 503 451 504 ··· 448 513 *-------------------------------------------------------------*/ 449 514 .macro SAVE_ALL_INT1 450 515 451 - /* restore original r9 , saved in int1_saved_reg 452 - * It will be saved on stack in macro: SAVE_CALLER_SAVED 453 - */ 516 + /* restore original r9 to be saved as part of reg-file */ 454 517 #ifdef CONFIG_SMP 455 518 lr r9, 
[ARC_REG_SCRATCH_DATA0] 456 519 #else ··· 456 523 #endif 457 524 458 525 /* now we are ready to save the remaining context :) */ 459 - st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */ 526 + st event_IRQ1, [sp, 8] /* Dummy ECR */ 460 527 st 0, [sp, 4] /* orig_r0 , N/A for IRQ */ 461 - SAVE_CALLER_SAVED 462 - st.a r26, [sp, -4] /* gp */ 463 - st.a fp, [sp, -4] 464 - st.a blink, [sp, -4] 465 - st.a ilink1, [sp, -4] 466 - lr r9, [status32_l1] 467 - st.a r9, [sp, -4] 468 - st.a lp_count, [sp, -4] 469 - lr r9, [lp_end] 470 - st.a r9, [sp, -4] 471 - lr r9, [lp_start] 472 - st.a r9, [sp, -4] 473 - lr r9, [bta_l1] 474 - st.a r9, [sp, -4] 475 528 476 - #ifdef PT_REGS_CANARY 477 - mov r9, 0xdeadbee1 478 - st r9, [sp, -4] 479 - #endif 480 - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ 481 - sub sp, sp, 4 529 + SAVE_R0_TO_R12 530 + PUSH gp 531 + PUSH fp 532 + PUSH blink 533 + PUSH ilink1 534 + PUSHAX status32_l1 535 + PUSH lp_count 536 + PUSHAX lp_end 537 + PUSHAX lp_start 538 + PUSHAX bta_l1 482 539 .endm 483 540 484 541 .macro SAVE_ALL_INT2 ··· 481 558 ld r9, [@int2_saved_reg] 482 559 483 560 /* now we are ready to save the remaining context :) */ 484 - st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */ 561 + st event_IRQ2, [sp, 8] /* Dummy ECR */ 485 562 st 0, [sp, 4] /* orig_r0 , N/A for IRQ */ 486 - SAVE_CALLER_SAVED 487 - st.a r26, [sp, -4] /* gp */ 488 - st.a fp, [sp, -4] 489 - st.a blink, [sp, -4] 490 - st.a ilink2, [sp, -4] 491 - lr r9, [status32_l2] 492 - st.a r9, [sp, -4] 493 - st.a lp_count, [sp, -4] 494 - lr r9, [lp_end] 495 - st.a r9, [sp, -4] 496 - lr r9, [lp_start] 497 - st.a r9, [sp, -4] 498 - lr r9, [bta_l2] 499 - st.a r9, [sp, -4] 500 563 501 - #ifdef PT_REGS_CANARY 502 - mov r9, 0xdeadbee2 503 - st r9, [sp, -4] 504 - #endif 505 - 506 - /* move up by 1 word to "create" pt_regs->"stack_place_holder" */ 507 - sub sp, sp, 4 564 + SAVE_R0_TO_R12 565 + PUSH gp 566 + PUSH fp 567 + PUSH blink 568 + PUSH ilink2 569 + PUSHAX status32_l2 570 + PUSH lp_count 
571 + PUSHAX lp_end 572 + PUSHAX lp_start 573 + PUSHAX bta_l2 508 574 .endm 509 575 510 576 /*-------------------------------------------------------------- ··· 507 595 *-------------------------------------------------------------*/ 508 596 509 597 .macro RESTORE_ALL_INT1 510 - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ 598 + POPAX bta_l1 599 + POPAX lp_start 600 + POPAX lp_end 511 601 512 - ld.ab r9, [sp, 4] /* Actual reg file */ 513 - sr r9, [bta_l1] 514 - ld.ab r9, [sp, 4] 515 - sr r9, [lp_start] 516 - ld.ab r9, [sp, 4] 517 - sr r9, [lp_end] 518 - ld.ab r9, [sp, 4] 519 - mov lp_count, r9 520 - ld.ab r9, [sp, 4] 521 - sr r9, [status32_l1] 522 - ld.ab r9, [sp, 4] 523 - mov ilink1, r9 524 - ld.ab blink, [sp, 4] 525 - ld.ab fp, [sp, 4] 526 - ld.ab r26, [sp, 4] /* gp */ 527 - RESTORE_CALLER_SAVED 602 + POP r9 603 + mov lp_count, r9 ;LD to lp_count is not allowed 604 + 605 + POPAX status32_l1 606 + POP ilink1 607 + POP blink 608 + POP fp 609 + POP gp 610 + RESTORE_R12_TO_R0 528 611 529 612 ld sp, [sp] /* restore original sp */ 530 - /* orig_r0 and orig_r8 skipped automatically */ 613 + /* orig_r0, ECR, user_r25 skipped automatically */ 531 614 .endm 532 615 533 616 .macro RESTORE_ALL_INT2 534 - add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */ 617 + POPAX bta_l2 618 + POPAX lp_start 619 + POPAX lp_end 535 620 536 - ld.ab r9, [sp, 4] 537 - sr r9, [bta_l2] 538 - ld.ab r9, [sp, 4] 539 - sr r9, [lp_start] 540 - ld.ab r9, [sp, 4] 541 - sr r9, [lp_end] 542 - ld.ab r9, [sp, 4] 543 - mov lp_count, r9 544 - ld.ab r9, [sp, 4] 545 - sr r9, [status32_l2] 546 - ld.ab r9, [sp, 4] 547 - mov ilink2, r9 548 - ld.ab blink, [sp, 4] 549 - ld.ab fp, [sp, 4] 550 - ld.ab r26, [sp, 4] /* gp */ 551 - RESTORE_CALLER_SAVED 621 + POP r9 622 + mov lp_count, r9 ;LD to lp_count is not allowed 623 + 624 + POPAX status32_l2 625 + POP ilink2 626 + POP blink 627 + POP fp 628 + POP gp 629 + RESTORE_R12_TO_R0 552 630 553 631 ld sp, [sp] /* restore original sp */ 
554 - /* orig_r0 and orig_r8 skipped automatically */ 555 - 632 + /* orig_r0, ECR, user_r25 skipped automatically */ 556 633 .endm 557 634 558 635
+1 -1
arch/arc/include/asm/irq.h
··· 21 21 extern void __init arc_init_IRQ(void); 22 22 extern int __init get_hw_config_num_irq(void); 23 23 24 - void __cpuinit arc_local_timer_setup(unsigned int cpu); 24 + void arc_local_timer_setup(unsigned int cpu); 25 25 26 26 #endif
+20
arch/arc/include/asm/irqflags.h
··· 19 19 20 20 #include <asm/arcregs.h> 21 21 22 + /* status32 Reg bits related to Interrupt Handling */ 23 + #define STATUS_E1_BIT 1 /* Int 1 enable */ 24 + #define STATUS_E2_BIT 2 /* Int 2 enable */ 25 + #define STATUS_A1_BIT 3 /* Int 1 active */ 26 + #define STATUS_A2_BIT 4 /* Int 2 active */ 27 + 28 + #define STATUS_E1_MASK (1<<STATUS_E1_BIT) 29 + #define STATUS_E2_MASK (1<<STATUS_E2_BIT) 30 + #define STATUS_A1_MASK (1<<STATUS_A1_BIT) 31 + #define STATUS_A2_MASK (1<<STATUS_A2_BIT) 32 + 33 + /* Other Interrupt Handling related Aux regs */ 34 + #define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */ 35 + #define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ 36 + #define AUX_IRQ_LV12 0x43 /* interrupt level register */ 37 + 38 + #define AUX_IENABLE 0x40c 39 + #define AUX_ITRIGGER 0x40d 40 + #define AUX_IPULSE 0x415 41 + 22 42 #ifndef __ASSEMBLY__ 23 43 24 44 /******************************************************************
+2 -2
arch/arc/include/asm/kgdb.h
··· 31 31 __asm__ __volatile__ ("trap_s 0x4\n"); 32 32 } 33 33 34 - extern void kgdb_trap(struct pt_regs *regs, int param); 34 + extern void kgdb_trap(struct pt_regs *regs); 35 35 36 36 enum arc700_linux_regnums { 37 37 _R0 = 0, ··· 53 53 }; 54 54 55 55 #else 56 - #define kgdb_trap(regs, param) 56 + #define kgdb_trap(regs) 57 57 #endif 58 58 59 59 #endif /* __ARC_KGDB_H__ */
+2 -4
arch/arc/include/asm/kprobes.h
··· 50 50 51 51 int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); 52 52 void kretprobe_trampoline(void); 53 - void trap_is_kprobe(unsigned long cause, unsigned long address, 54 - struct pt_regs *regs); 53 + void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 55 54 #else 56 - static void trap_is_kprobe(unsigned long cause, unsigned long address, 57 - struct pt_regs *regs) 55 + static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 58 56 { 59 57 } 60 58 #endif
+44
arch/arc/include/asm/mmu.h
··· 9 9 #ifndef _ASM_ARC_MMU_H 10 10 #define _ASM_ARC_MMU_H 11 11 12 + #if defined(CONFIG_ARC_MMU_V1) 13 + #define CONFIG_ARC_MMU_VER 1 14 + #elif defined(CONFIG_ARC_MMU_V2) 15 + #define CONFIG_ARC_MMU_VER 2 16 + #elif defined(CONFIG_ARC_MMU_V3) 17 + #define CONFIG_ARC_MMU_VER 3 18 + #endif 19 + 20 + /* MMU Management regs */ 21 + #define ARC_REG_MMU_BCR 0x06f 22 + #define ARC_REG_TLBPD0 0x405 23 + #define ARC_REG_TLBPD1 0x406 24 + #define ARC_REG_TLBINDEX 0x407 25 + #define ARC_REG_TLBCOMMAND 0x408 26 + #define ARC_REG_PID 0x409 27 + #define ARC_REG_SCRATCH_DATA0 0x418 28 + 29 + /* Bits in MMU PID register */ 30 + #define MMU_ENABLE (1 << 31) /* Enable MMU for process */ 31 + 32 + /* Error code if probe fails */ 33 + #define TLB_LKUP_ERR 0x80000000 34 + 35 + /* TLB Commands */ 36 + #define TLBWrite 0x1 37 + #define TLBRead 0x2 38 + #define TLBGetIndex 0x3 39 + #define TLBProbe 0x4 40 + 41 + #if (CONFIG_ARC_MMU_VER >= 2) 42 + #define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ 43 + #define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ 44 + #endif 45 + 12 46 #ifndef __ASSEMBLY__ 13 47 14 48 typedef struct { ··· 52 18 #endif 53 19 } mm_context_t; 54 20 21 + #ifdef CONFIG_ARC_DBG_TLB_PARANOIA 22 + void tlb_paranoid_check(unsigned int pid_sw, unsigned long address); 23 + #else 24 + #define tlb_paranoid_check(a, b) 55 25 #endif 26 + 27 + void arc_mmu_init(void); 28 + extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); 29 + void __init read_decode_mmu_bcr(void); 30 + 31 + #endif /* !__ASSEMBLY__ */ 56 32 57 33 #endif
+1 -6
arch/arc/include/asm/page.h
··· 96 96 97 97 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 98 98 99 - /* Default Permissions for page, used in mmap.c */ 100 - #ifdef CONFIG_ARC_STACK_NONEXEC 99 + /* Default Permissions for stack/heaps pages (Non Executable) */ 101 100 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) 102 - #else 103 - #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 104 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 105 - #endif 106 101 107 102 #define WANT_PAGE_VIRTUAL 1 108 103
+6
arch/arc/include/asm/pgtable.h
··· 135 135 /* ioremap */ 136 136 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) 137 137 138 + /* Masks for actual TLB "PD"s */ 139 + #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) 140 + #define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ 141 + _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ 142 + _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) 143 + 138 144 /************************************************************************** 139 145 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 140 146 *
+9 -8
arch/arc/include/asm/processor.h
··· 19 19 #ifndef __ASSEMBLY__ 20 20 21 21 #include <asm/arcregs.h> /* for STATUS_E1_MASK et all */ 22 + #include <asm/ptrace.h> 22 23 23 24 /* Arch specific stuff which needs to be saved per task. 24 25 * However these items are not so important so as to earn a place in ··· 29 28 unsigned long ksp; /* kernel mode stack pointer */ 30 29 unsigned long callee_reg; /* pointer to callee regs */ 31 30 unsigned long fault_address; /* dbls as brkpt holder as well */ 32 - unsigned long cause_code; /* Exception Cause Code (ECR) */ 33 - #ifdef CONFIG_ARC_CURR_IN_REG 34 - unsigned long user_r25; 35 - #endif 36 31 #ifdef CONFIG_ARC_FPU_SAVE_RESTORE 37 32 struct arc_fpu fpu; 38 33 #endif ··· 47 50 unsigned long thread_saved_pc(struct task_struct *t); 48 51 49 52 #define task_pt_regs(p) \ 50 - ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1) 53 + ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) 51 54 52 55 /* Free all resources held by a thread. */ 53 56 #define release_thread(thread) do { } while (0) ··· 72 75 73 76 /* 74 77 * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. 75 - * These can't be derived from pt_regs as that would give correp user-mode val 78 + * Look in process.c for details of kernel stack layout 76 79 */ 77 80 #define KSTK_ESP(tsk) (tsk->thread.ksp) 78 - #define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4))) 79 - #define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4))) 81 + 82 + #define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ 83 + sizeof(struct callee_regs) + off))) 84 + 85 + #define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) 86 + #define KSTK_FP(tsk) KSTK_REG(tsk, 0) 80 87 81 88 /* 82 89 * Do necessary setup to start up a newly executed thread.
+23 -24
arch/arc/include/asm/ptrace.h
··· 17 17 /* THE pt_regs: Defines how regs are saved during entry into kernel */ 18 18 19 19 struct pt_regs { 20 - /* 21 - * 1 word gutter after reg-file has been saved 22 - * Technically not needed, Since SP always points to a "full" location 23 - * (vs. "empty"). But pt_regs is shared with tools.... 24 - */ 25 - long res; 26 20 27 21 /* Real registers */ 28 22 long bta; /* bta_l1, bta_l2, erbta */ ··· 44 50 long sp; /* user/kernel sp depending on where we came from */ 45 51 long orig_r0; 46 52 47 - /*to distinguish bet excp, syscall, irq */ 53 + /* 54 + * To distinguish bet excp, syscall, irq 55 + * For traps and exceptions, Exception Cause Register. 56 + * ECR: <00> <VV> <CC> <PP> 57 + * Last word used by Linux for extra state mgmt (syscall-restart) 58 + * For interrupts, use artificial ECR values to note current prio-level 59 + */ 48 60 union { 61 + struct { 49 62 #ifdef CONFIG_CPU_BIG_ENDIAN 50 - /* so that assembly code is same for LE/BE */ 51 - unsigned long orig_r8:16, event:16; 63 + unsigned long state:8, ecr_vec:8, 64 + ecr_cause:8, ecr_param:8; 52 65 #else 53 - unsigned long event:16, orig_r8:16; 66 + unsigned long ecr_param:8, ecr_cause:8, 67 + ecr_vec:8, state:8; 54 68 #endif 55 - long orig_r8_word; 69 + }; 70 + unsigned long event; 56 71 }; 72 + 73 + long user_r25; 57 74 }; 58 75 59 76 /* Callee saved registers - need to be saved only when you are scheduled out */ 60 77 61 78 struct callee_regs { 62 - long res; /* Again this is not needed */ 63 79 long r25; 64 80 long r24; 65 81 long r23; ··· 103 99 /* return 1 if PC in delay slot */ 104 100 #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) 105 101 106 - #define in_syscall(regs) (regs->event & orig_r8_IS_SCALL) 107 - #define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT) 102 + #define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param) 103 + #define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param) 108 104 109 - #define 
syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED) 110 - #define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED) 105 + #define STATE_SCALL_RESTARTED 0x01 106 + 107 + #define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED) 108 + #define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED) 111 109 112 110 #define current_pt_regs() \ 113 111 ({ \ 114 112 /* open-coded current_thread_info() */ \ 115 113 register unsigned long sp asm ("sp"); \ 116 114 unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ 117 - (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \ 115 + (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \ 118 116 }) 119 117 120 118 static inline long regs_return_value(struct pt_regs *regs) ··· 125 119 } 126 120 127 121 #endif /* !__ASSEMBLY__ */ 128 - 129 - #define orig_r8_IS_SCALL 0x0001 130 - #define orig_r8_IS_SCALL_RESTARTED 0x0002 131 - #define orig_r8_IS_BRKPT 0x0004 132 - #define orig_r8_IS_EXCPN 0x0008 133 - #define orig_r8_IS_IRQ1 0x0010 134 - #define orig_r8_IS_IRQ2 0x0020 135 122 136 123 #endif /* __ASM_PTRACE_H */
+2 -3
arch/arc/include/asm/syscall.h
··· 18 18 syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 19 19 { 20 20 if (user_mode(regs) && in_syscall(regs)) 21 - return regs->orig_r8; 21 + return regs->r8; 22 22 else 23 23 return -1; 24 24 } ··· 26 26 static inline void 27 27 syscall_rollback(struct task_struct *task, struct pt_regs *regs) 28 28 { 29 - /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */ 30 - regs->r8 = regs->orig_r8; 29 + regs->r0 = regs->orig_r0; 31 30 } 32 31 33 32 static inline long
+2 -2
arch/arc/include/asm/tlb-mmu1.h
··· 9 9 #ifndef __ASM_TLB_MMU_V1_H__ 10 10 #define __ASM_TLB_MMU_V1_H__ 11 11 12 - #if defined(__ASSEMBLY__) && defined(CONFIG_ARC_MMU_VER == 1) 12 + #include <asm/mmu.h> 13 13 14 - #include <asm/tlb.h> 14 + #if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1) 15 15 16 16 .macro TLB_WRITE_HEURISTICS 17 17
-26
arch/arc/include/asm/tlb.h
··· 9 9 #ifndef _ASM_ARC_TLB_H 10 10 #define _ASM_ARC_TLB_H 11 11 12 - #ifdef __KERNEL__ 13 - 14 - #include <asm/pgtable.h> 15 - 16 - /* Masks for actual TLB "PD"s */ 17 - #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) 18 - #define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ 19 - _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ 20 - _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) 21 - 22 - #ifndef __ASSEMBLY__ 23 - 24 12 #define tlb_flush(tlb) \ 25 13 do { \ 26 14 if (tlb->fullmm) \ ··· 43 55 44 56 #include <linux/pagemap.h> 45 57 #include <asm-generic/tlb.h> 46 - 47 - #ifdef CONFIG_ARC_DBG_TLB_PARANOIA 48 - void tlb_paranoid_check(unsigned int pid_sw, unsigned long address); 49 - #else 50 - #define tlb_paranoid_check(a, b) 51 - #endif 52 - 53 - void arc_mmu_init(void); 54 - extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); 55 - void __init read_decode_mmu_bcr(void); 56 - 57 - #endif /* __ASSEMBLY__ */ 58 - 59 - #endif /* __KERNEL__ */ 60 58 61 59 #endif /* _ASM_ARC_TLB_H */
+2 -2
arch/arc/include/asm/unaligned.h
··· 16 16 17 17 #ifdef CONFIG_ARC_MISALIGN_ACCESS 18 18 int misaligned_fixup(unsigned long address, struct pt_regs *regs, 19 - unsigned long cause, struct callee_regs *cregs); 19 + struct callee_regs *cregs); 20 20 #else 21 21 static inline int 22 22 misaligned_fixup(unsigned long address, struct pt_regs *regs, 23 - unsigned long cause, struct callee_regs *cregs) 23 + struct callee_regs *cregs) 24 24 { 25 25 return 0; 26 26 }
+9 -6
arch/arc/include/uapi/asm/ptrace.h
··· 20 20 * 21 21 * This is to decouple pt_regs from user-space ABI, to be able to change it 22 22 * w/o affecting the ABI. 23 - * Although the layout (initial padding) is similar to pt_regs to have some 24 - * optimizations when copying pt_regs to/from user_regs_struct. 23 + * 24 + * The intermediate pad,pad2 are relics of initial layout based on pt_regs 25 + * for optimizations when copying pt_regs to/from user_regs_struct. 26 + * We no longer need them, but can't be changed as they are part of ABI now. 25 27 * 26 28 * Also, sigcontext only care about the scratch regs as that is what we really 27 - * save/restore for signal handling. 29 + * save/restore for signal handling. However gdb also uses the same struct 30 + * hence callee regs need to be in there too. 28 31 */ 29 32 struct user_regs_struct { 30 33 34 + long pad; 31 35 struct { 32 - long pad; 33 36 long bta, lp_start, lp_end, lp_count; 34 37 long status32, ret, blink, fp, gp; 35 38 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 36 39 long sp; 37 40 } scratch; 41 + long pad2; 38 42 struct { 39 - long pad; 40 43 long r25, r24, r23, r22, r21, r20; 41 44 long r19, r18, r17, r16, r15, r14, r13; 42 45 } callee; 43 46 long efa; /* break pt addr, for break points in delay slots */ 44 - long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */ 47 + long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ 45 48 }; 46 49 #endif /* !__ASSEMBLY__ */ 47 50
+3 -4
arch/arc/kernel/asm-offsets.c
··· 24 24 25 25 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); 26 26 DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg)); 27 - #ifdef CONFIG_ARC_CURR_IN_REG 28 - DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25)); 29 - #endif 30 27 DEFINE(THREAD_FAULT_ADDR, 31 28 offsetof(struct thread_struct, fault_address)); 32 29 ··· 46 49 BLANK(); 47 50 48 51 DEFINE(PT_status32, offsetof(struct pt_regs, status32)); 49 - DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word)); 52 + DEFINE(PT_event, offsetof(struct pt_regs, event)); 50 53 DEFINE(PT_sp, offsetof(struct pt_regs, sp)); 51 54 DEFINE(PT_r0, offsetof(struct pt_regs, r0)); 52 55 DEFINE(PT_r1, offsetof(struct pt_regs, r1)); ··· 57 60 DEFINE(PT_r6, offsetof(struct pt_regs, r6)); 58 61 DEFINE(PT_r7, offsetof(struct pt_regs, r7)); 59 62 63 + DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); 64 + DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); 60 65 return 0; 61 66 }
+5 -9
arch/arc/kernel/ctx_sw.c
··· 23 23 unsigned int tmp; 24 24 unsigned int prev = (unsigned int)prev_task; 25 25 unsigned int next = (unsigned int)next_task; 26 - int num_words_to_skip = 1; 27 - #ifdef CONFIG_ARC_CURR_IN_REG 28 - num_words_to_skip++; 29 - #endif 30 26 31 27 __asm__ __volatile__( 32 28 /* FP/BLINK save generated by gcc (standard function prologue */ ··· 40 44 "st.a r24, [sp, -4] \n\t" 41 45 #ifndef CONFIG_ARC_CURR_IN_REG 42 46 "st.a r25, [sp, -4] \n\t" 47 + #else 48 + "sub sp, sp, 4 \n\t" /* usual r25 placeholder */ 43 49 #endif 44 - "sub sp, sp, %4 \n\t" /* create gutter at top */ 45 50 46 51 /* set ksp of outgoing task in tsk->thread.ksp */ 47 52 "st.as sp, [%3, %1] \n\t" ··· 73 76 74 77 /* start loading it's CALLEE reg file */ 75 78 76 - "add sp, sp, %4 \n\t" /* skip gutter at top */ 77 - 78 79 #ifndef CONFIG_ARC_CURR_IN_REG 79 80 "ld.ab r25, [sp, 4] \n\t" 81 + #else 82 + "add sp, sp, 4 \n\t" 80 83 #endif 81 84 "ld.ab r24, [sp, 4] \n\t" 82 85 "ld.ab r23, [sp, 4] \n\t" ··· 97 100 /* FP/BLINK restore generated by gcc (standard func epilogue */ 98 101 99 102 : "=r"(tmp) 100 - : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev), 101 - "n"(num_words_to_skip * 4) 103 + : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev) 102 104 : "blink" 103 105 ); 104 106
+41 -62
arch/arc/kernel/entry.S
··· 142 142 .endr 143 143 144 144 #include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ 145 - #include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ 145 + #include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */ 146 146 #include <asm/errno.h> 147 147 #include <asm/arcregs.h> 148 148 #include <asm/irqflags.h> ··· 274 274 SWITCH_TO_KERNEL_STK 275 275 SAVE_ALL_SYS 276 276 277 - lr r0, [ecr] 278 - lr r1, [efa] 279 - 280 - mov r2, sp 277 + lr r0, [efa] 278 + mov r1, sp 281 279 282 280 FAKE_RET_FROM_EXCPN r9 283 281 ··· 296 298 SWITCH_TO_KERNEL_STK 297 299 SAVE_ALL_SYS 298 300 299 - lr r0, [ecr] 300 - lr r1, [efa] 301 - mov r2, sp 301 + lr r0, [efa] 302 + mov r1, sp 302 303 bl do_memory_error 303 304 b ret_from_exception 304 305 ARC_EXIT mem_service ··· 314 317 SWITCH_TO_KERNEL_STK 315 318 SAVE_ALL_SYS 316 319 317 - lr r0, [ecr] 318 - lr r1, [efa] 319 - mov r2, sp 320 + lr r2, [ecr] 321 + lr r0, [efa] 322 + mov r1, sp 320 323 321 - brne r0, 0x200100, 1f 324 + lsr r3, r2, 8 325 + bmsk r3, r3, 7 326 + brne r3, ECR_C_MCHK_DUP_TLB, 1f 327 + 322 328 bl do_tlb_overlap_fault 323 329 b ret_from_exception 324 330 ··· 355 355 ; ecr and efa were not saved in case an Intr sneaks in 356 356 ; after fake rtie 357 357 ; 358 - lr r3, [ecr] 359 - lr r4, [efa] 358 + lr r2, [ecr] 359 + lr r1, [efa] ; Faulting Data address 360 360 361 361 ; --------(4) Return from CPU Exception Mode --------- 362 362 ; Fake a rtie, but rtie to next label ··· 368 368 ;------ (5) Type of Protection Violation? ---------- 369 369 ; 370 370 ; ProtV Hardware Exception is triggered for Access Faults of 2 types 371 - ; -Access Violaton (WRITE to READ ONLY Page) - for linux COW 372 - ; -Unaligned Access (READ/WRITE on odd boundary) 371 + ; -Access Violaton : 00_23_(00|01|02|03)_00 372 + ; x r w r+w 373 + ; -Unaligned Access : 00_23_04_00 373 374 ; 374 - cmp r3, 0x230400 ; Misaligned data access ? 
375 - beq 4f 375 + bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f 376 376 377 377 ;========= (6a) Access Violation Processing ======== 378 - cmp r3, 0x230100 379 - mov r1, 0x0 ; if LD exception ? write = 0 380 - mov.ne r1, 0x1 ; else write = 1 381 - 382 - mov r2, r4 ; faulting address 383 378 mov r0, sp ; pt_regs 384 379 bl do_page_fault 385 380 b ret_from_exception 386 381 387 382 ;========== (6b) Non aligned access ============ 388 383 4: 389 - mov r0, r3 ; cause code 390 - mov r1, r4 ; faulting address 391 - mov r2, sp ; pt_regs 384 + mov r0, r1 385 + mov r1, sp ; pt_regs 392 386 393 387 #ifdef CONFIG_ARC_MISALIGN_ACCESS 394 388 SAVE_CALLEE_SAVED_USER 395 - mov r3, sp ; callee_regs 389 + mov r2, sp ; callee_regs 396 390 397 391 bl do_misaligned_access 398 392 ··· 413 419 SWITCH_TO_KERNEL_STK 414 420 SAVE_ALL_SYS 415 421 416 - lr r0, [ecr] 417 - lr r1, [efa] 418 - mov r2, sp 422 + lr r0, [efa] 423 + mov r1, sp 419 424 420 425 FAKE_RET_FROM_EXCPN r9 421 426 ··· 433 440 SWITCH_TO_KERNEL_STK 434 441 SAVE_ALL_SYS 435 442 436 - lr r0, [ecr] 437 - lr r1, [efa] 438 - mov r2, sp 443 + lr r0, [efa] 444 + mov r1, sp 439 445 bl do_extension_fault 440 446 b ret_from_exception 441 447 ARC_EXIT EV_Extension ··· 490 498 trap_with_param: 491 499 492 500 ; stop_pc info by gdb needs this info 493 - stw orig_r8_IS_BRKPT, [sp, PT_orig_r8] 494 - 495 - mov r0, r12 496 - lr r1, [efa] 497 - mov r2, sp 501 + lr r0, [efa] 502 + mov r1, sp 498 503 499 504 ; Now that we have read EFA, its safe to do "fake" rtie 500 505 ; and get out of CPU exception mode ··· 533 544 lr r9, [erstatus] 534 545 535 546 SWITCH_TO_KERNEL_STK 536 - SAVE_ALL_TRAP 547 + SAVE_ALL_SYS 537 548 538 549 ;------- (4) What caused the Trap -------------- 539 550 lr r12, [ecr] 540 - and.f 0, r12, ECR_PARAM_MASK 551 + bmsk.f 0, r12, 7 541 552 bnz trap_with_param 542 553 543 554 ; ======= (5a) Trap is due to System Call ======== ··· 578 589 ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 579 590 ld r8, [sp, 
PT_status32] ; returning to User/Kernel Mode 580 591 581 - #ifdef CONFIG_PREEMPT 582 592 bbit0 r8, STATUS_U_BIT, resume_kernel_mode 583 - #else 584 - bbit0 r8, STATUS_U_BIT, restore_regs 585 - #endif 586 593 587 594 ; Before returning to User mode check-for-and-complete any pending work 588 595 ; such as rescheduling/signal-delivery etc. ··· 638 653 b resume_user_mode_begin ; unconditionally back to U mode ret chks 639 654 ; for single exit point from this block 640 655 641 - #ifdef CONFIG_PREEMPT 642 - 643 656 resume_kernel_mode: 657 + 658 + #ifdef CONFIG_PREEMPT 644 659 645 660 ; Can't preempt if preemption disabled 646 661 GET_CURR_THR_INFO_FROM_SP r10 ··· 672 687 ; XXX can this be optimised out 673 688 IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy 674 689 675 - #ifdef CONFIG_ARC_CURR_IN_REG 676 - ; Restore User R25 677 - ; Earlier this used to be only for returning to user mode 678 - ; However with 2 levels of IRQ this can also happen even if 679 - ; in kernel mode 680 - ld r9, [sp, PT_sp] 681 - brhs r9, VMALLOC_START, 8f 682 - RESTORE_USER_R25 683 - 8: 684 - #endif 685 - 686 690 ; Restore REG File. In case multiple Events outstanding, 687 691 ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None 688 692 ; Note that we use realtime STATUS32 (not pt_regs->status32) to ··· 688 714 689 715 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS 690 716 717 + ; Level 2 interrupt return Path - from hardware standpoint 691 718 bbit0 r10, STATUS_A2_BIT, not_level2_interrupt 719 + 720 + ;------------------------------------------------------------------ 721 + ; However the context returning might not have taken L2 intr itself 722 + ; e.g. 
Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret 723 + ; Special considerations needed for the context which took L2 intr 724 + 725 + ld r9, [sp, PT_event] ; Ensure this is L2 intr context 726 + brne r9, event_IRQ2, 149f 692 727 693 728 ;------------------------------------------------------------------ 694 729 ; if L2 IRQ interrupted a L1 ISR, we'd disbaled preemption earlier ··· 706 723 ; things to what they were, before returning from L2 context 707 724 ;---------------------------------------------------------------- 708 725 709 - ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is 710 - brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path 711 - 712 726 ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) 713 727 bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal 714 728 715 - ; A1 is set in status32_l2 716 729 ; decrement thread_info->preempt_count (re-enable preemption) 717 730 GET_CURR_THR_INFO_FROM_SP r10 718 731 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] 719 732 720 733 ; paranoid check, given A1 was active when A2 happened, preempt count 721 - ; must not be 0 beccause we would have incremented it. 734 + ; must not be 0 because we would have incremented it. 722 735 ; If this does happen we simply HALT as it means a BUG !!! 723 736 cmp r9, 0 724 737 bnz 2f
+2
arch/arc/kernel/head.S
··· 27 27 ; Don't clobber r0-r4 yet. It might have bootloader provided info 28 28 ;------------------------------------------------------------------- 29 29 30 + sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] 31 + 30 32 #ifdef CONFIG_SMP 31 33 ; Only Boot (Master) proceeds. Others wait in platform dependent way 32 34 ; IDENTITY Reg [ 3 2 1 0 ]
+4 -12
arch/arc/kernel/irq.c
··· 28 28 * -Disable all IRQs (on CPU side) 29 29 * -Optionally, setup the High priority Interrupts as Level 2 IRQs 30 30 */ 31 - void __cpuinit arc_init_IRQ(void) 31 + void arc_init_IRQ(void) 32 32 { 33 33 int level_mask = 0; 34 - 35 - write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds); 36 34 37 35 /* Disable all IRQs: enable them as devices request */ 38 36 write_aux_reg(AUX_IENABLE, 0); 39 37 40 38 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ 41 - #ifdef CONFIG_ARC_IRQ3_LV2 42 - level_mask |= (1 << 3); 43 - #endif 44 - #ifdef CONFIG_ARC_IRQ5_LV2 45 - level_mask |= (1 << 5); 46 - #endif 47 - #ifdef CONFIG_ARC_IRQ6_LV2 48 - level_mask |= (1 << 6); 49 - #endif 39 + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; 40 + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; 41 + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; 50 42 51 43 if (level_mask) { 52 44 pr_info("Level-2 interrupts bitset %x\n", level_mask);
+2 -2
arch/arc/kernel/kgdb.c
··· 169 169 return 0; 170 170 } 171 171 172 - void kgdb_trap(struct pt_regs *regs, int param) 172 + void kgdb_trap(struct pt_regs *regs) 173 173 { 174 174 /* trap_s 3 is used for breakpoints that overwrite existing 175 175 * instructions, while trap_s 4 is used for compiled breakpoints. ··· 181 181 * with trap_s 4 (compiled) breakpoints, continuation needs to 182 182 * start after the breakpoint. 183 183 */ 184 - if (param == 3) 184 + if (regs->ecr_param == 3) 185 185 instruction_pointer(regs) -= BREAK_INSTR_SIZE; 186 186 187 187 kgdb_handle_exception(1, SIGTRAP, 0, regs);
+2 -3
arch/arc/kernel/kprobes.c
··· 517 517 return 0; 518 518 } 519 519 520 - void trap_is_kprobe(unsigned long cause, unsigned long address, 521 - struct pt_regs *regs) 520 + void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 522 521 { 523 - notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP); 522 + notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP); 524 523 }
+5 -4
arch/arc/kernel/process.c
··· 55 55 * | ... | 56 56 * | unused | 57 57 * | | 58 - * ------------------ <==== top of Stack (thread.ksp) 59 - * | UNUSED 1 word| 60 58 * ------------------ 61 - * | r25 | 59 + * | r25 | <==== top of Stack (thread.ksp) 62 60 * ~ ~ 63 61 * | --to-- | (CALLEE Regs of user mode) 64 62 * | r13 | ··· 74 76 * | --to-- | (scratch Regs of user mode) 75 77 * | r0 | 76 78 * ------------------ 77 - * | UNUSED 1 word| 79 + * | SP | 80 + * | orig_r0 | 81 + * | event/ECR | 82 + * | user_r25 | 78 83 * ------------------ <===== END of PAGE 79 84 */ 80 85 int copy_thread(unsigned long clone_flags,
+12 -2
arch/arc/kernel/ptrace.c
··· 40 40 offsetof(struct user_regs_struct, LOC), \ 41 41 offsetof(struct user_regs_struct, LOC) + 4); 42 42 43 + #define REG_O_ZERO(LOC) \ 44 + if (!ret) \ 45 + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \ 46 + offsetof(struct user_regs_struct, LOC), \ 47 + offsetof(struct user_regs_struct, LOC) + 4); 48 + 49 + REG_O_ZERO(pad); 43 50 REG_O_CHUNK(scratch, callee, ptregs); 51 + REG_O_ZERO(pad2); 44 52 REG_O_CHUNK(callee, efa, cregs); 45 53 REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address); 46 54 ··· 96 88 offsetof(struct user_regs_struct, LOC), \ 97 89 offsetof(struct user_regs_struct, LOC) + 4); 98 90 99 - /* TBD: disallow updates to STATUS32, orig_r8 etc*/ 100 - REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */ 91 + REG_IGNORE_ONE(pad); 92 + /* TBD: disallow updates to STATUS32 etc*/ 93 + REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */ 94 + REG_IGNORE_ONE(pad2); 101 95 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ 102 96 REG_IGNORE_ONE(efa); /* efa update invalid */ 103 97 REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+9 -9
arch/arc/kernel/setup.c
··· 31 31 int running_on_hw = 1; /* vs. on ISS */ 32 32 33 33 char __initdata command_line[COMMAND_LINE_SIZE]; 34 - struct machine_desc *machine_desc __cpuinitdata; 34 + struct machine_desc *machine_desc; 35 35 36 36 struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ 37 37 38 38 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 39 39 40 40 41 - void __cpuinit read_arc_build_cfg_regs(void) 41 + void read_arc_build_cfg_regs(void) 42 42 { 43 43 struct bcr_perip uncached_space; 44 44 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; ··· 182 182 FIX_PTR(cpu); 183 183 #define IS_AVAIL1(var, str) ((var) ? str : "") 184 184 #define IS_AVAIL2(var, str) ((var == 0x2) ? str : "") 185 - #define IS_USED(var) ((var) ? "(in-use)" : "(not used)") 185 + #define IS_USED(cfg) (IS_ENABLED(cfg) ? "(in-use)" : "(not used)") 186 186 187 187 n += scnprintf(buf + n, len - n, 188 188 "Extn [700-Base]\t: %s %s %s %s %s %s\n", ··· 202 202 if (cpu->core.family == 0x34) { 203 203 n += scnprintf(buf + n, len - n, 204 204 "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", 205 - IS_USED(__CONFIG_ARC_HAS_LLSC_VAL), 206 - IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL), 207 - IS_USED(__CONFIG_ARC_HAS_RTSC_VAL)); 205 + IS_USED(CONFIG_ARC_HAS_LLSC), 206 + IS_USED(CONFIG_ARC_HAS_SWAPE), 207 + IS_USED(CONFIG_ARC_HAS_RTSC)); 208 208 } 209 209 210 210 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s", ··· 237 237 return buf; 238 238 } 239 239 240 - void __cpuinit arc_chk_ccms(void) 240 + void arc_chk_ccms(void) 241 241 { 242 242 #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) 243 243 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; ··· 272 272 * hardware has dedicated regs which need to be saved/restored on ctx-sw 273 273 * (Single Precision uses core regs), thus kernel is kind of oblivious to it 274 274 */ 275 - void __cpuinit arc_chk_fpu(void) 275 + void arc_chk_fpu(void) 276 276 { 277 277 struct cpuinfo_arc *cpu = 
&cpuinfo_arc700[smp_processor_id()]; 278 278 ··· 293 293 * such as only for boot CPU etc 294 294 */ 295 295 296 - void __cpuinit setup_processor(void) 296 + void setup_processor(void) 297 297 { 298 298 char str[512]; 299 299 int cpu_id = smp_processor_id();
+2 -2
arch/arc/kernel/smp.c
··· 117 117 * Called from asm stub in head.S 118 118 * "current"/R25 already setup by low level boot code 119 119 */ 120 - void __cpuinit start_kernel_secondary(void) 120 + void start_kernel_secondary(void) 121 121 { 122 122 struct mm_struct *mm = &init_mm; 123 123 unsigned int cpu = smp_processor_id(); ··· 154 154 * 155 155 * Essential requirements being where to run from (PC) and stack (SP) 156 156 */ 157 - int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 157 + int __cpu_up(unsigned int cpu, struct task_struct *idle) 158 158 { 159 159 unsigned long wait_till; 160 160
+1 -1
arch/arc/kernel/stacktrace.c
··· 79 79 * assembly code 80 80 */ 81 81 frame_info->regs.r27 = 0; 82 - frame_info->regs.r28 += 64; 82 + frame_info->regs.r28 += 60; 83 83 frame_info->call_frame = 0; 84 84 85 85 } else {
+14 -3
arch/arc/kernel/time.c
··· 44 44 #include <asm/clk.h> 45 45 #include <asm/mach_desc.h> 46 46 47 + /* Timer related Aux registers */ 48 + #define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ 49 + #define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ 50 + #define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */ 51 + #define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */ 52 + #define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ 53 + #define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ 54 + 55 + #define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ 56 + #define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ 57 + 47 58 #define ARC_TIMER_MAX 0xFFFFFFFF 48 59 49 60 /********** Clock Source Device *********/ 50 61 51 62 #ifdef CONFIG_ARC_HAS_RTSC 52 63 53 - int __cpuinit arc_counter_setup(void) 64 + int arc_counter_setup(void) 54 65 { 55 66 /* RTSC insn taps into cpu clk, needs no setup */ 56 67 ··· 116 105 /* 117 106 * set 32bit TIMER1 to keep counting monotonically and wraparound 118 107 */ 119 - int __cpuinit arc_counter_setup(void) 108 + int arc_counter_setup(void) 120 109 { 121 110 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); 122 111 write_aux_reg(ARC_REG_TIMER1_CNT, 0); ··· 223 212 * Setup the local event timer for @cpu 224 213 * N.B. weak so that some exotic ARC SoCs can completely override it 225 214 */ 226 - void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu) 215 + void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu) 227 216 { 228 217 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); 229 218
+24 -28
arch/arc/kernel/traps.c
··· 28 28 return; 29 29 } 30 30 31 - void die(const char *str, struct pt_regs *regs, unsigned long address, 32 - unsigned long cause_reg) 31 + void die(const char *str, struct pt_regs *regs, unsigned long address) 33 32 { 34 - show_kernel_fault_diag(str, regs, address, cause_reg); 33 + show_kernel_fault_diag(str, regs, address); 35 34 36 35 /* DEAD END */ 37 36 __asm__("flag 1"); ··· 41 42 * -for user faults enqueues requested signal 42 43 * -for kernel, chk if due to copy_(to|from)_user, otherwise die() 43 44 */ 44 - static noinline int handle_exception(unsigned long cause, char *str, 45 - struct pt_regs *regs, siginfo_t *info) 45 + static noinline int 46 + handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info) 46 47 { 47 48 if (user_mode(regs)) { 48 49 struct task_struct *tsk = current; 49 50 50 51 tsk->thread.fault_address = (__force unsigned int)info->si_addr; 51 - tsk->thread.cause_code = cause; 52 52 53 53 force_sig_info(info->si_signo, info, tsk); 54 54 ··· 56 58 if (fixup_exception(regs)) 57 59 return 0; 58 60 59 - die(str, regs, (unsigned long)info->si_addr, cause); 61 + die(str, regs, (unsigned long)info->si_addr); 60 62 } 61 63 62 64 return 1; 63 65 } 64 66 65 67 #define DO_ERROR_INFO(signr, str, name, sicode) \ 66 - int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ 68 + int name(unsigned long address, struct pt_regs *regs) \ 67 69 { \ 68 70 siginfo_t info = { \ 69 71 .si_signo = signr, \ ··· 71 73 .si_code = sicode, \ 72 74 .si_addr = (void __user *)address, \ 73 75 }; \ 74 - return handle_exception(cause, str, regs, &info);\ 76 + return handle_exception(str, regs, &info);\ 75 77 } 76 78 77 79 /* ··· 88 90 /* 89 91 * Entry Point for Misaligned Data access Exception, for emulating in software 90 92 */ 91 - int do_misaligned_access(unsigned long cause, unsigned long address, 92 - struct pt_regs *regs, struct callee_regs *cregs) 93 + int do_misaligned_access(unsigned long address, struct pt_regs *regs, 94 + 
struct callee_regs *cregs) 93 95 { 94 - if (misaligned_fixup(address, regs, cause, cregs) != 0) 95 - return do_misaligned_error(cause, address, regs); 96 + if (misaligned_fixup(address, regs, cregs) != 0) 97 + return do_misaligned_error(address, regs); 96 98 97 99 return 0; 98 100 } ··· 102 104 * Entry point for miscll errors such as Nested Exceptions 103 105 * -Duplicate TLB entry is handled seperately though 104 106 */ 105 - void do_machine_check_fault(unsigned long cause, unsigned long address, 106 - struct pt_regs *regs) 107 + void do_machine_check_fault(unsigned long address, struct pt_regs *regs) 107 108 { 108 - die("Machine Check Exception", regs, address, cause); 109 + die("Machine Check Exception", regs, address); 109 110 } 110 111 111 112 ··· 117 120 * -1 used for software breakpointing (gdb) 118 121 * -2 used by kprobes 119 122 */ 120 - void do_non_swi_trap(unsigned long cause, unsigned long address, 121 - struct pt_regs *regs) 123 + void do_non_swi_trap(unsigned long address, struct pt_regs *regs) 122 124 { 123 - unsigned int param = cause & 0xff; 125 + unsigned int param = regs->ecr_param; 124 126 125 127 switch (param) { 126 128 case 1: 127 - trap_is_brkpt(cause, address, regs); 129 + trap_is_brkpt(address, regs); 128 130 break; 129 131 130 132 case 2: 131 - trap_is_kprobe(param, address, regs); 133 + trap_is_kprobe(address, regs); 132 134 break; 133 135 134 136 case 3: 135 137 case 4: 136 - kgdb_trap(regs, param); 138 + kgdb_trap(regs); 137 139 break; 138 140 139 141 default: ··· 145 149 * -For a corner case, ARC kprobes implementation resorts to using 146 150 * this exception, hence the check 147 151 */ 148 - void do_insterror_or_kprobe(unsigned long cause, 149 - unsigned long address, 150 - struct pt_regs *regs) 152 + void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs) 151 153 { 154 + int rc; 155 + 152 156 /* Check if this exception is caused by kprobes */ 153 - if (notify_die(DIE_IERR, "kprobe_ierr", regs, address, 154 - 
cause, SIGILL) == NOTIFY_STOP) 157 + rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL); 158 + if (rc == NOTIFY_STOP) 155 159 return; 156 160 157 - insterror_is_error(cause, address, regs); 161 + insterror_is_error(address, regs); 158 162 }
+13 -18
arch/arc/kernel/troubleshoot.c
··· 117 117 118 118 static void show_ecr_verbose(struct pt_regs *regs) 119 119 { 120 - unsigned int vec, cause_code, cause_reg; 120 + unsigned int vec, cause_code; 121 121 unsigned long address; 122 122 123 - cause_reg = current->thread.cause_code; 124 - pr_info("\n[ECR ]: 0x%08x => ", cause_reg); 123 + pr_info("\n[ECR ]: 0x%08lx => ", regs->event); 125 124 126 125 /* For Data fault, this is data address not instruction addr */ 127 126 address = current->thread.fault_address; 128 127 129 - vec = cause_reg >> 16; 130 - cause_code = (cause_reg >> 8) & 0xFF; 128 + vec = regs->ecr_vec; 129 + cause_code = regs->ecr_cause; 131 130 132 131 /* For DTLB Miss or ProtV, display the memory involved too */ 133 132 if (vec == ECR_V_DTLB_MISS) { 134 - pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", 135 - (cause_code == 0x01) ? "Read From" : 136 - ((cause_code == 0x02) ? "Write to" : "EX"), 133 + pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n", 134 + (cause_code == 0x01) ? "Read" : 135 + ((cause_code == 0x02) ? "Write" : "EX"), 137 136 address, regs->ret); 138 137 } else if (vec == ECR_V_ITLB_MISS) { 139 138 pr_cont("Insn could not be fetched\n"); ··· 143 144 } else if (vec == ECR_V_PROTV) { 144 145 if (cause_code == ECR_C_PROTV_INST_FETCH) 145 146 pr_cont("Execute from Non-exec Page\n"); 146 - else if (cause_code == ECR_C_PROTV_LOAD) 147 - pr_cont("Read from Non-readable Page\n"); 148 - else if (cause_code == ECR_C_PROTV_STORE) 149 - pr_cont("Write to Non-writable Page\n"); 150 - else if (cause_code == ECR_C_PROTV_XCHG) 151 - pr_cont("Data exchange protection violation\n"); 152 147 else if (cause_code == ECR_C_PROTV_MISALIG_DATA) 153 148 pr_cont("Misaligned r/w from 0x%08lx\n", address); 149 + else 150 + pr_cont("%s access not allowed on page\n", 151 + (cause_code == 0x01) ? "Read" : 152 + ((cause_code == 0x02) ? 
"Write" : "EX")); 154 153 } else if (vec == ECR_V_INSN_ERR) { 155 154 pr_cont("Illegal Insn\n"); 156 155 } else { ··· 173 176 print_task_path_n_nm(tsk, buf); 174 177 show_regs_print_info(KERN_INFO); 175 178 176 - if (current->thread.cause_code) 177 - show_ecr_verbose(regs); 179 + show_ecr_verbose(regs); 178 180 179 181 pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", 180 182 current->thread.fault_address, ··· 209 213 } 210 214 211 215 void show_kernel_fault_diag(const char *str, struct pt_regs *regs, 212 - unsigned long address, unsigned long cause_reg) 216 + unsigned long address) 213 217 { 214 218 current->thread.fault_address = address; 215 - current->thread.cause_code = cause_reg; 216 219 217 220 /* Caller and Callee regs */ 218 221 show_regs(regs);
+1 -1
arch/arc/kernel/unaligned.c
··· 187 187 * Returns 0 if successfully handled, 1 if some error happened 188 188 */ 189 189 int misaligned_fixup(unsigned long address, struct pt_regs *regs, 190 - unsigned long cause, struct callee_regs *cregs) 190 + struct callee_regs *cregs) 191 191 { 192 192 struct disasm_state state; 193 193 char buf[TASK_COMM_LEN];
+2
arch/arc/kernel/unwind.c
··· 289 289 * instead of the initial loc addr 290 290 * return; 291 291 */ 292 + WARN(1, "unwinder: FDE->initial_location NULL %p\n", 293 + (const u8 *)(fde + 1) + *fde); 292 294 } 293 295 ++n; 294 296 }
+16 -8
arch/arc/kernel/vmlinux.lds.S
··· 125 125 *(.debug_frame) 126 126 __end_unwind = .; 127 127 } 128 + /* 129 + * gcc 4.8 generates this for -fasynchonous-unwind-tables, 130 + * while we still use the .debug_frame based unwinder 131 + */ 132 + /DISCARD/ : { *(.eh_frame) } 128 133 #else 129 134 /DISCARD/ : { *(.debug_frame) } 130 135 #endif ··· 147 142 *(.arcextmap.*) 148 143 } 149 144 145 + #ifndef CONFIG_DEBUG_INFO 150 146 /* open-coded because we need .debug_frame seperately for unwinding */ 151 - .debug_aranges 0 : { *(.debug_aranges) } 152 - .debug_pubnames 0 : { *(.debug_pubnames) } 153 - .debug_info 0 : { *(.debug_info) } 154 - .debug_abbrev 0 : { *(.debug_abbrev) } 155 - .debug_line 0 : { *(.debug_line) } 156 - .debug_str 0 : { *(.debug_str) } 157 - .debug_loc 0 : { *(.debug_loc) } 158 - .debug_macinfo 0 : { *(.debug_macinfo) } 147 + /DISCARD/ : { *(.debug_aranges) } 148 + /DISCARD/ : { *(.debug_pubnames) } 149 + /DISCARD/ : { *(.debug_info) } 150 + /DISCARD/ : { *(.debug_abbrev) } 151 + /DISCARD/ : { *(.debug_line) } 152 + /DISCARD/ : { *(.debug_str) } 153 + /DISCARD/ : { *(.debug_loc) } 154 + /DISCARD/ : { *(.debug_macinfo) } 155 + /DISCARD/ : { *(.debug_ranges) } 156 + #endif 159 157 160 158 #ifdef CONFIG_ARC_HAS_DCCM 161 159 . = CONFIG_ARC_DCCM_BASE;
+72 -54
arch/arc/mm/cache_arc700.c
··· 73 73 #include <asm/cachectl.h> 74 74 #include <asm/setup.h> 75 75 76 + /* Instruction cache related Auxiliary registers */ 77 + #define ARC_REG_IC_BCR 0x77 /* Build Config reg */ 78 + #define ARC_REG_IC_IVIC 0x10 79 + #define ARC_REG_IC_CTRL 0x11 80 + #define ARC_REG_IC_IVIL 0x19 81 + #if (CONFIG_ARC_MMU_VER > 2) 82 + #define ARC_REG_IC_PTAG 0x1E 83 + #endif 84 + 85 + /* Bit val in IC_CTRL */ 86 + #define IC_CTRL_CACHE_DISABLE 0x1 87 + 88 + /* Data cache related Auxiliary registers */ 89 + #define ARC_REG_DC_BCR 0x72 /* Build Config reg */ 90 + #define ARC_REG_DC_IVDC 0x47 91 + #define ARC_REG_DC_CTRL 0x48 92 + #define ARC_REG_DC_IVDL 0x4A 93 + #define ARC_REG_DC_FLSH 0x4B 94 + #define ARC_REG_DC_FLDL 0x4C 95 + #if (CONFIG_ARC_MMU_VER > 2) 96 + #define ARC_REG_DC_PTAG 0x5C 97 + #endif 98 + 99 + /* Bit val in DC_CTRL */ 100 + #define DC_CTRL_INV_MODE_FLUSH 0x40 101 + #define DC_CTRL_FLUSH_STATUS 0x100 102 + 76 103 char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) 77 104 { 78 105 int n = 0; ··· 116 89 enb ? "" : "DISABLED (kernel-build)"); \ 117 90 } 118 91 119 - PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache"); 120 - PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache"); 92 + PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE), 93 + "I-Cache"); 94 + PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE), 95 + "D-Cache"); 121 96 122 97 return buf; 123 98 } ··· 129 100 * the cpuinfo structure for later use. 
130 101 * No Validation done here, simply read/convert the BCRs 131 102 */ 132 - void __cpuinit read_decode_cache_bcr(void) 103 + void read_decode_cache_bcr(void) 133 104 { 134 - struct bcr_cache ibcr, dbcr; 135 105 struct cpuinfo_arc_cache *p_ic, *p_dc; 136 106 unsigned int cpu = smp_processor_id(); 107 + struct bcr_cache { 108 + #ifdef CONFIG_CPU_BIG_ENDIAN 109 + unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; 110 + #else 111 + unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; 112 + #endif 113 + } ibcr, dbcr; 137 114 138 115 p_ic = &cpuinfo_arc700[cpu].icache; 139 116 READ_BCR(ARC_REG_IC_BCR, ibcr); 140 117 141 - if (ibcr.config == 0x3) 142 - p_ic->assoc = 2; 118 + BUG_ON(ibcr.config != 3); 119 + p_ic->assoc = 2; /* Fixed to 2w set assoc */ 143 120 p_ic->line_len = 8 << ibcr.line_len; 144 121 p_ic->sz = 0x200 << ibcr.sz; 145 122 p_ic->ver = ibcr.ver; ··· 153 118 p_dc = &cpuinfo_arc700[cpu].dcache; 154 119 READ_BCR(ARC_REG_DC_BCR, dbcr); 155 120 156 - if (dbcr.config == 0x2) 157 - p_dc->assoc = 4; 121 + BUG_ON(dbcr.config != 2); 122 + p_dc->assoc = 4; /* Fixed to 4w set assoc */ 158 123 p_dc->line_len = 16 << dbcr.line_len; 159 124 p_dc->sz = 0x200 << dbcr.sz; 160 125 p_dc->ver = dbcr.ver; ··· 167 132 * 3. Enable the Caches, setup default flush mode for D-Cache 168 133 * 3. Calculate the SHMLBA used by user space 169 134 */ 170 - void __cpuinit arc_cache_init(void) 135 + void arc_cache_init(void) 171 136 { 172 - unsigned int temp; 173 137 unsigned int cpu = smp_processor_id(); 174 138 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 175 139 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; 176 - int way_pg_ratio = way_pg_ratio; 177 - int dcache_does_alias; 140 + unsigned int dcache_does_alias, temp; 178 141 char str[256]; 179 142 180 143 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); ··· 182 149 183 150 #ifdef CONFIG_ARC_HAS_ICACHE 184 151 /* 1. 
Confirm some of I-cache params which Linux assumes */ 185 - if ((ic->assoc != ARC_ICACHE_WAYS) || 186 - (ic->line_len != ARC_ICACHE_LINE_LEN)) { 152 + if (ic->line_len != ARC_ICACHE_LINE_LEN) 187 153 panic("Cache H/W doesn't match kernel Config"); 188 - } 189 - #if (CONFIG_ARC_MMU_VER > 2) 190 - if (ic->ver != 3) { 191 - if (running_on_hw) 192 - panic("Cache ver doesn't match MMU ver\n"); 193 154 194 - /* For ISS - suggest the toggles to use */ 195 - pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n"); 196 - 197 - } 198 - #endif 155 + if (ic->ver != CONFIG_ARC_MMU_VER) 156 + panic("Cache ver doesn't match MMU ver\n"); 199 157 #endif 200 158 201 159 /* Enable/disable I-Cache */ ··· 205 181 return; 206 182 207 183 #ifdef CONFIG_ARC_HAS_DCACHE 208 - if ((dc->assoc != ARC_DCACHE_WAYS) || 209 - (dc->line_len != ARC_DCACHE_LINE_LEN)) { 184 + if (dc->line_len != ARC_DCACHE_LINE_LEN) 210 185 panic("Cache H/W doesn't match kernel Config"); 211 - } 212 - 213 - dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; 214 186 215 187 /* check for D-Cache aliasing */ 188 + dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; 189 + 216 190 if (dcache_does_alias && !cache_is_vipt_aliasing()) 217 191 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 218 192 else if (!dcache_does_alias && cache_is_vipt_aliasing()) ··· 261 239 */ 262 240 static inline void __dc_entire_op(const int cacheop) 263 241 { 264 - unsigned long flags, tmp = tmp; 242 + unsigned int tmp = tmp; 265 243 int aux; 266 - 267 - local_irq_save(flags); 268 244 269 245 if (cacheop == OP_FLUSH_N_INV) { 270 246 /* Dcache provides 2 cmd: FLUSH or INV ··· 287 267 /* Switch back the DISCARD ONLY Invalidate mode */ 288 268 if (cacheop == OP_FLUSH_N_INV) 289 269 write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); 290 - 291 - local_irq_restore(flags); 292 270 } 293 271 294 272 /* ··· 477 459 local_irq_restore(flags); 478 460 } 479 461 462 + static inline void __ic_entire_inv(void) 463 + { 464 + 
write_aux_reg(ARC_REG_IC_IVIC, 1); 465 + read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ 466 + } 467 + 480 468 #else 481 469 470 + #define __ic_entire_inv() 482 471 #define __ic_line_inv_vaddr(pstart, vstart, sz) 483 472 484 473 #endif /* CONFIG_ARC_HAS_ICACHE */ ··· 512 487 struct address_space *mapping; 513 488 514 489 if (!cache_is_vipt_aliasing()) { 515 - set_bit(PG_arch_1, &page->flags); 490 + clear_bit(PG_dc_clean, &page->flags); 516 491 return; 517 492 } 518 493 ··· 526 501 * Make a note that K-mapping is dirty 527 502 */ 528 503 if (!mapping_mapped(mapping)) { 529 - set_bit(PG_arch_1, &page->flags); 504 + clear_bit(PG_dc_clean, &page->flags); 530 505 } else if (page_mapped(page)) { 531 506 532 507 /* kernel reading from page with U-mapping */ ··· 654 629 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); 655 630 } 656 631 657 - void flush_icache_all(void) 658 - { 659 - unsigned long flags; 660 - 661 - local_irq_save(flags); 662 - 663 - write_aux_reg(ARC_REG_IC_IVIC, 1); 664 - 665 - /* lr will not complete till the icache inv operation is not over */ 666 - read_aux_reg(ARC_REG_IC_CTRL); 667 - local_irq_restore(flags); 668 - } 669 - 670 632 noinline void flush_cache_all(void) 671 633 { 672 634 unsigned long flags; 673 635 674 636 local_irq_save(flags); 675 637 676 - flush_icache_all(); 638 + __ic_entire_inv(); 677 639 __dc_entire_op(OP_FLUSH_N_INV); 678 640 679 641 local_irq_restore(flags); ··· 679 667 { 680 668 unsigned int paddr = pfn << PAGE_SHIFT; 681 669 682 - __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); 670 + u_vaddr &= PAGE_MASK; 671 + 672 + ___flush_dcache_page(paddr, u_vaddr); 673 + 674 + if (vma->vm_flags & VM_EXEC) 675 + __inv_icache_page(paddr, u_vaddr); 683 676 } 684 677 685 678 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, ··· 734 717 * non copied user pages (e.g. read faults which wire in pagecache page 735 718 * directly). 
736 719 */ 737 - set_bit(PG_arch_1, &to->flags); 720 + clear_bit(PG_dc_clean, &to->flags); 738 721 739 722 /* 740 723 * if SRC was already usermapped and non-congruent to kernel mapping ··· 742 725 */ 743 726 if (clean_src_k_mappings) { 744 727 __flush_dcache_page(kfrom, kfrom); 728 + set_bit(PG_dc_clean, &from->flags); 745 729 } else { 746 - set_bit(PG_arch_1, &from->flags); 730 + clear_bit(PG_dc_clean, &from->flags); 747 731 } 748 732 } 749 733 750 734 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) 751 735 { 752 736 clear_page(to); 753 - set_bit(PG_arch_1, &page->flags); 737 + clear_bit(PG_dc_clean, &page->flags); 754 738 } 755 739 756 740
+6 -6
arch/arc/mm/fault.c
··· 15 15 #include <linux/uaccess.h> 16 16 #include <linux/kdebug.h> 17 17 #include <asm/pgalloc.h> 18 + #include <asm/mmu.h> 18 19 19 20 static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) 20 21 { ··· 52 51 return 1; 53 52 } 54 53 55 - void do_page_fault(struct pt_regs *regs, int write, unsigned long address, 56 - unsigned long cause_code) 54 + void do_page_fault(struct pt_regs *regs, unsigned long address) 57 55 { 58 56 struct vm_area_struct *vma = NULL; 59 57 struct task_struct *tsk = current; 60 58 struct mm_struct *mm = tsk->mm; 61 59 siginfo_t info; 62 60 int fault, ret; 61 + int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ 63 62 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 64 63 (write ? FAULT_FLAG_WRITE : 0); 65 64 ··· 110 109 111 110 /* Handle protection violation, execute on heap or stack */ 112 111 113 - if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH)) 112 + if ((regs->ecr_vec == ECR_V_PROTV) && 113 + (regs->ecr_cause == ECR_C_PROTV_INST_FETCH)) 114 114 goto bad_area; 115 115 116 116 if (write) { ··· 178 176 /* User mode accesses just cause a SIGSEGV */ 179 177 if (user_mode(regs)) { 180 178 tsk->thread.fault_address = address; 181 - tsk->thread.cause_code = cause_code; 182 179 info.si_signo = SIGSEGV; 183 180 info.si_errno = 0; 184 181 /* info.si_code has been set above */ ··· 198 197 if (fixup_exception(regs)) 199 198 return; 200 199 201 - die("Oops", regs, address, cause_code); 200 + die("Oops", regs, address); 202 201 203 202 out_of_memory: 204 203 if (is_global_init(tsk)) { ··· 219 218 goto no_context; 220 219 221 220 tsk->thread.fault_address = address; 222 - tsk->thread.cause_code = cause_code; 223 221 info.si_signo = SIGBUS; 224 222 info.si_errno = 0; 225 223 info.si_code = BUS_ADRERR;
+29 -9
arch/arc/mm/tlb.c
··· 55 55 #include <asm/arcregs.h> 56 56 #include <asm/setup.h> 57 57 #include <asm/mmu_context.h> 58 - #include <asm/tlb.h> 58 + #include <asm/mmu.h> 59 59 60 60 /* Need for ARC MMU v2 61 61 * ··· 96 96 * corner cases when TLBWrite was not executed at all because the corresp 97 97 * J-TLB entry got evicted/replaced. 98 98 */ 99 + 99 100 100 101 /* A copy of the ASID from the PID reg is kept in asid_cache */ 101 102 int asid_cache = FIRST_ASID; ··· 433 432 { 434 433 unsigned long vaddr = vaddr_unaligned & PAGE_MASK; 435 434 unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 435 + struct page *page = pfn_to_page(pte_pfn(*ptep)); 436 436 437 437 create_tlb(vma, vaddr, ptep); 438 + 439 + if (page == ZERO_PAGE(0)) { 440 + return; 441 + } 438 442 439 443 /* 440 444 * Exec page : Independent of aliasing/page-color considerations, ··· 452 446 */ 453 447 if ((vma->vm_flags & VM_EXEC) || 454 448 addr_not_cache_congruent(paddr, vaddr)) { 455 - struct page *page = pfn_to_page(pte_pfn(*ptep)); 456 449 457 - int dirty = test_and_clear_bit(PG_arch_1, &page->flags); 450 + int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); 458 451 if (dirty) { 459 452 /* wback + inv dcache lines */ 460 453 __flush_dcache_page(paddr, paddr); ··· 469 464 * the cpuinfo structure for later use. 
470 465 * No Validation is done here, simply read/convert the BCRs 471 466 */ 472 - void __cpuinit read_decode_mmu_bcr(void) 467 + void read_decode_mmu_bcr(void) 473 468 { 474 - unsigned int tmp; 475 - struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */ 476 - struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */ 477 469 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 470 + unsigned int tmp; 471 + struct bcr_mmu_1_2 { 472 + #ifdef CONFIG_CPU_BIG_ENDIAN 473 + unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8; 474 + #else 475 + unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8; 476 + #endif 477 + } *mmu2; 478 + 479 + struct bcr_mmu_3 { 480 + #ifdef CONFIG_CPU_BIG_ENDIAN 481 + unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, 482 + u_itlb:4, u_dtlb:4; 483 + #else 484 + unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, 485 + ways:4, ver:8; 486 + #endif 487 + } *mmu3; 478 488 479 489 tmp = read_aux_reg(ARC_REG_MMU_BCR); 480 490 mmu->ver = (tmp >> 24); ··· 525 505 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", 526 506 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, 527 507 p_mmu->u_dtlb, p_mmu->u_itlb, 528 - __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : ""); 508 + IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : ""); 529 509 530 510 return buf; 531 511 } 532 512 533 - void __cpuinit arc_mmu_init(void) 513 + void arc_mmu_init(void) 534 514 { 535 515 char str[256]; 536 516 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+12 -23
arch/arc/mm/tlbex.S
··· 39 39 40 40 #include <linux/linkage.h> 41 41 #include <asm/entry.h> 42 - #include <asm/tlb.h> 42 + #include <asm/mmu.h> 43 43 #include <asm/pgtable.h> 44 44 #include <asm/arcregs.h> 45 45 #include <asm/cache.h> ··· 147 147 #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 148 148 and.f 0, r0, _PAGE_PRESENT 149 149 bz 1f 150 - ld r2, [num_pte_not_present] 151 - add r2, r2, 1 152 - st r2, [num_pte_not_present] 150 + ld r3, [num_pte_not_present] 151 + add r3, r3, 1 152 + st r3, [num_pte_not_present] 153 153 1: 154 154 #endif 155 155 ··· 271 271 #endif 272 272 273 273 ;---------------------------------------------------------------- 274 - ; Get the PTE corresponding to V-addr accessed 274 + ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA 275 275 LOAD_FAULT_PTE 276 276 277 277 ;---------------------------------------------------------------- 278 278 ; VERIFY_PTE: Check if PTE permissions approp for executing code 279 279 cmp_s r2, VMALLOC_START 280 - mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) 281 - mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) 280 + mov.lo r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE) 281 + mov.hs r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE) 282 282 283 283 and r3, r0, r2 ; Mask out NON Flag bits from PTE 284 284 xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) 285 285 bnz do_slow_path_pf 286 286 287 287 ; Let Linux VM know that the page was accessed 288 - or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit 289 - st_s r0, [r1] ; Write back PTE 288 + or r0, r0, _PAGE_ACCESSED ; set Accessed Bit 289 + st_s r0, [r1] ; Write back PTE 290 290 291 291 CONV_PTE_TO_TLB 292 292 COMMIT_ENTRY_TO_MMU ··· 311 311 312 312 ;---------------------------------------------------------------- 313 313 ; Get the PTE corresponding to V-addr accessed 314 - ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE 314 + ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA 315 315 LOAD_FAULT_PTE 316 316 317 317 
;---------------------------------------------------------------- ··· 345 345 ;---------------------------------------------------------------- 346 346 ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty 347 347 lr r3, [ecr] 348 - or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always 348 + or r0, r0, _PAGE_ACCESSED ; Accessed bit always 349 349 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? 350 350 or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well 351 351 st_s r0, [r1] ; Write back PTE ··· 381 381 382 382 ; ------- setup args for Linux Page fault Hanlder --------- 383 383 mov_s r0, sp 384 - lr r2, [efa] 385 - lr r3, [ecr] 386 - 387 - ; Both st and ex imply WRITE access of some sort, hence do_page_fault( ) 388 - ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or 389 - ; DTLB-ld Miss 390 - ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03 391 - ; Following code uses that fact that st/ex have one bit in common 392 - 393 - btst_s r3, ECR_C_BIT_DTLB_ST_MISS 394 - mov.z r1, 0 395 - mov.nz r1, 1 384 + lr r1, [efa] 396 385 397 386 ; We don't want exceptions to be disabled while the fault is handled. 398 387 ; Now that we have saved the context we return from exception hence
+9 -3
arch/arc/plat-arcfpga/platform.c
··· 77 77 78 78 /*----------------------- Platform Devices -----------------------------*/ 79 79 80 + #if IS_ENABLED(CONFIG_SERIAL_ARC) 80 81 static unsigned long arc_uart_info[] = { 81 82 0, /* uart->is_emulated (runtime @running_on_hw) */ 82 83 0, /* uart->port.uartclk */ ··· 116 115 static struct platform_device *fpga_early_devs[] __initdata = { 117 116 &arc_uart0_dev, 118 117 }; 119 - #endif 118 + #endif /* CONFIG_SERIAL_ARC_CONSOLE */ 120 119 121 120 static void arc_fpga_serial_init(void) 122 121 { ··· 153 152 * otherwise the early console never gets a chance to run. 154 153 */ 155 154 add_preferred_console("ttyARC", 0, "115200"); 156 - #endif 155 + #endif /* CONFIG_SERIAL_ARC_CONSOLE */ 157 156 } 157 + #else /* !IS_ENABLED(CONFIG_SERIAL_ARC) */ 158 + static void arc_fpga_serial_init(void) 159 + { 160 + } 161 + #endif 158 162 159 163 static void __init plat_fpga_early_init(void) 160 164 { ··· 175 169 } 176 170 177 171 static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = { 178 - #if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE) 172 + #if IS_ENABLED(CONFIG_SERIAL_ARC) 179 173 OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info), 180 174 #endif 181 175 {}