Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: parameterize system PLs to support KVM port

While not a port to KVM (yet), this change modifies the kernel
to be able to build either at PL1 or at PL2 with a suitable
config switch. Pushing up this change avoids handling branch
merge issues going forward with the KVM work.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+337 -131
+12
arch/tile/Kconfig
··· 96 96 97 97 config TILE 98 98 def_bool y 99 + select HAVE_KVM if !TILEGX 99 100 select GENERIC_FIND_FIRST_BIT 100 101 select GENERIC_FIND_NEXT_BIT 101 102 select USE_GENERIC_SMP_HELPERS ··· 315 314 bool "Hardwall support to allow access to user dynamic network" 316 315 default y 317 316 317 + config KERNEL_PL 318 + int "Processor protection level for kernel" 319 + range 1 2 320 + default "1" 321 + ---help--- 322 + This setting determines the processor protection level the 323 + kernel will be built to run at. Generally you should use 324 + the default value here. 325 + 318 326 endmenu # Tilera-specific configuration 319 327 320 328 menu "Bus options" ··· 364 354 source "crypto/Kconfig" 365 355 366 356 source "lib/Kconfig" 357 + 358 + source "arch/tile/kvm/Kconfig"
+2
arch/tile/Makefile
··· 53 53 # See arch/tile/Kbuild for content of core part of the kernel 54 54 core-y += arch/tile/ 55 55 56 + core-$(CONFIG_KVM) += arch/tile/kvm/ 57 + 56 58 ifdef TILERA_ROOT 57 59 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot 58 60 endif
+85
arch/tile/include/arch/spr_def.h
··· 12 12 * more details. 13 13 */ 14 14 15 + /* 16 + * In addition to including the proper base SPR definition file, depending 17 + * on machine architecture, this file defines several macros which allow 18 + * kernel code to use protection-level dependent SPRs without worrying 19 + * about which PL it's running at. In these macros, the PL that the SPR 20 + * or interrupt number applies to is replaced by K. 21 + */ 22 + 23 + #if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2 24 + #error CONFIG_KERNEL_PL must be 1 or 2 25 + #endif 26 + 27 + /* Concatenate 4 strings. */ 28 + #define __concat4(a, b, c, d) a ## b ## c ## d 29 + #define _concat4(a, b, c, d) __concat4(a, b, c, d) 30 + 15 31 #ifdef __tilegx__ 16 32 #include <arch/spr_def_64.h> 33 + 34 + /* TILE-Gx dependent, protection-level dependent SPRs. */ 35 + 36 + #define SPR_INTERRUPT_MASK_K \ 37 + _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,) 38 + #define SPR_INTERRUPT_MASK_SET_K \ 39 + _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,) 40 + #define SPR_INTERRUPT_MASK_RESET_K \ 41 + _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,) 42 + #define SPR_INTERRUPT_VECTOR_BASE_K \ 43 + _concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,) 44 + 45 + #define SPR_IPI_MASK_K \ 46 + _concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,) 47 + #define SPR_IPI_MASK_RESET_K \ 48 + _concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,) 49 + #define SPR_IPI_MASK_SET_K \ 50 + _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,) 51 + #define SPR_IPI_EVENT_K \ 52 + _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,) 53 + #define SPR_IPI_EVENT_RESET_K \ 54 + _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,) 55 + #define SPR_IPI_MASK_SET_K \ 56 + _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,) 57 + #define INT_IPI_K \ 58 + _concat4(INT_IPI_, CONFIG_KERNEL_PL,,) 59 + 60 + #define SPR_SINGLE_STEP_CONTROL_K \ 61 + _concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,) 62 + #define SPR_SINGLE_STEP_EN_K_K \ 63 + _concat4(SPR_SINGLE_STEP_EN_, 
CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL) 64 + #define INT_SINGLE_STEP_K \ 65 + _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,) 66 + 17 67 #else 18 68 #include <arch/spr_def_32.h> 69 + 70 + /* TILEPro dependent, protection-level dependent SPRs. */ 71 + 72 + #define SPR_INTERRUPT_MASK_K_0 \ 73 + _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,) 74 + #define SPR_INTERRUPT_MASK_K_1 \ 75 + _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,) 76 + #define SPR_INTERRUPT_MASK_SET_K_0 \ 77 + _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,) 78 + #define SPR_INTERRUPT_MASK_SET_K_1 \ 79 + _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,) 80 + #define SPR_INTERRUPT_MASK_RESET_K_0 \ 81 + _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,) 82 + #define SPR_INTERRUPT_MASK_RESET_K_1 \ 83 + _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,) 84 + 19 85 #endif 86 + 87 + /* Generic protection-level dependent SPRs. */ 88 + 89 + #define SPR_SYSTEM_SAVE_K_0 \ 90 + _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,) 91 + #define SPR_SYSTEM_SAVE_K_1 \ 92 + _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,) 93 + #define SPR_SYSTEM_SAVE_K_2 \ 94 + _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,) 95 + #define SPR_SYSTEM_SAVE_K_3 \ 96 + _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,) 97 + #define SPR_EX_CONTEXT_K_0 \ 98 + _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,) 99 + #define SPR_EX_CONTEXT_K_1 \ 100 + _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,) 101 + #define SPR_INTCTRL_K_STATUS \ 102 + _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,) 103 + #define INT_INTCTRL_K \ 104 + _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
+39
arch/tile/include/arch/spr_def_32.h
··· 56 56 #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2 57 57 #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1 58 58 #define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4 59 + #define SPR_EX_CONTEXT_2_0 0x4605 60 + #define SPR_EX_CONTEXT_2_1 0x4606 61 + #define SPR_EX_CONTEXT_2_1__PL_SHIFT 0 62 + #define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3 63 + #define SPR_EX_CONTEXT_2_1__PL_MASK 0x3 64 + #define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2 65 + #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1 66 + #define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4 59 67 #define SPR_FAIL 0x4e09 60 68 #define SPR_INTCTRL_0_STATUS 0x4a07 61 69 #define SPR_INTCTRL_1_STATUS 0x4807 70 + #define SPR_INTCTRL_2_STATUS 0x4607 62 71 #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a 63 72 #define SPR_INTERRUPT_MASK_0_0 0x4a08 64 73 #define SPR_INTERRUPT_MASK_0_1 0x4a09 65 74 #define SPR_INTERRUPT_MASK_1_0 0x4809 66 75 #define SPR_INTERRUPT_MASK_1_1 0x480a 76 + #define SPR_INTERRUPT_MASK_2_0 0x4608 77 + #define SPR_INTERRUPT_MASK_2_1 0x4609 67 78 #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a 68 79 #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b 69 80 #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b 70 81 #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c 82 + #define SPR_INTERRUPT_MASK_RESET_2_0 0x460a 83 + #define SPR_INTERRUPT_MASK_RESET_2_1 0x460b 71 84 #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c 72 85 #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d 73 86 #define SPR_INTERRUPT_MASK_SET_1_0 0x480d 74 87 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e 88 + #define SPR_INTERRUPT_MASK_SET_2_0 0x460c 89 + #define SPR_INTERRUPT_MASK_SET_2_1 0x460d 75 90 #define SPR_MPL_DMA_CPL_SET_0 0x5800 76 91 #define SPR_MPL_DMA_CPL_SET_1 0x5801 92 + #define SPR_MPL_DMA_CPL_SET_2 0x5802 77 93 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800 78 94 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801 95 + #define SPR_MPL_DMA_NOTIFY_SET_2 0x3802 79 96 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00 80 97 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01 98 + #define SPR_MPL_INTCTRL_0_SET_2 0x4a02 81 99 #define SPR_MPL_INTCTRL_1_SET_0 0x4800 82 100 
#define SPR_MPL_INTCTRL_1_SET_1 0x4801 101 + #define SPR_MPL_INTCTRL_1_SET_2 0x4802 102 + #define SPR_MPL_INTCTRL_2_SET_0 0x4600 103 + #define SPR_MPL_INTCTRL_2_SET_1 0x4601 104 + #define SPR_MPL_INTCTRL_2_SET_2 0x4602 83 105 #define SPR_MPL_SN_ACCESS_SET_0 0x0800 84 106 #define SPR_MPL_SN_ACCESS_SET_1 0x0801 107 + #define SPR_MPL_SN_ACCESS_SET_2 0x0802 85 108 #define SPR_MPL_SN_CPL_SET_0 0x5a00 86 109 #define SPR_MPL_SN_CPL_SET_1 0x5a01 110 + #define SPR_MPL_SN_CPL_SET_2 0x5a02 87 111 #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00 88 112 #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01 113 + #define SPR_MPL_SN_FIREWALL_SET_2 0x2c02 89 114 #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00 90 115 #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01 116 + #define SPR_MPL_SN_NOTIFY_SET_2 0x2a02 91 117 #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00 92 118 #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01 119 + #define SPR_MPL_UDN_ACCESS_SET_2 0x0c02 93 120 #define SPR_MPL_UDN_AVAIL_SET_0 0x4000 94 121 #define SPR_MPL_UDN_AVAIL_SET_1 0x4001 122 + #define SPR_MPL_UDN_AVAIL_SET_2 0x4002 95 123 #define SPR_MPL_UDN_CA_SET_0 0x3c00 96 124 #define SPR_MPL_UDN_CA_SET_1 0x3c01 125 + #define SPR_MPL_UDN_CA_SET_2 0x3c02 97 126 #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400 98 127 #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401 128 + #define SPR_MPL_UDN_COMPLETE_SET_2 0x1402 99 129 #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000 100 130 #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001 131 + #define SPR_MPL_UDN_FIREWALL_SET_2 0x3002 101 132 #define SPR_MPL_UDN_REFILL_SET_0 0x1000 102 133 #define SPR_MPL_UDN_REFILL_SET_1 0x1001 134 + #define SPR_MPL_UDN_REFILL_SET_2 0x1002 103 135 #define SPR_MPL_UDN_TIMER_SET_0 0x3600 104 136 #define SPR_MPL_UDN_TIMER_SET_1 0x3601 137 + #define SPR_MPL_UDN_TIMER_SET_2 0x3602 105 138 #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00 106 139 #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01 140 + #define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02 107 141 #define SPR_PASS 0x4e0b 108 142 #define SPR_PERF_COUNT_0 0x4205 109 143 #define SPR_PERF_COUNT_1 
0x4206 110 144 #define SPR_PERF_COUNT_CTL 0x4207 145 + #define SPR_PERF_COUNT_DN_CTL 0x4210 111 146 #define SPR_PERF_COUNT_STS 0x4208 112 147 #define SPR_PROC_STATUS 0x4f00 113 148 #define SPR_SIM_CONTROL 0x4e0c ··· 159 124 #define SPR_SYSTEM_SAVE_1_1 0x4901 160 125 #define SPR_SYSTEM_SAVE_1_2 0x4902 161 126 #define SPR_SYSTEM_SAVE_1_3 0x4903 127 + #define SPR_SYSTEM_SAVE_2_0 0x4700 128 + #define SPR_SYSTEM_SAVE_2_1 0x4701 129 + #define SPR_SYSTEM_SAVE_2_2 0x4702 130 + #define SPR_SYSTEM_SAVE_2_3 0x4703 162 131 #define SPR_TILE_COORD 0x4c17 163 132 #define SPR_TILE_RTF_HWM 0x4e10 164 133 #define SPR_TILE_TIMER_CONTROL 0x3205
+32 -32
arch/tile/include/asm/irqflags.h
··· 47 47 int __n = (n); \ 48 48 int __mask = 1 << (__n & 0x1f); \ 49 49 if (__n < 32) \ 50 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \ 50 + __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \ 51 51 else \ 52 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \ 52 + __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \ 53 53 } while (0) 54 54 #define interrupt_mask_reset(n) do { \ 55 55 int __n = (n); \ 56 56 int __mask = 1 << (__n & 0x1f); \ 57 57 if (__n < 32) \ 58 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \ 58 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \ 59 59 else \ 60 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \ 60 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \ 61 61 } while (0) 62 62 #define interrupt_mask_check(n) ({ \ 63 63 int __n = (n); \ 64 64 (((__n < 32) ? \ 65 - __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \ 66 - __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \ 65 + __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \ 66 + __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \ 67 67 >> (__n & 0x1f)) & 1; \ 68 68 }) 69 69 #define interrupt_mask_set_mask(mask) do { \ 70 70 unsigned long long __m = (mask); \ 71 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \ 72 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \ 71 + __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \ 72 + __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \ 73 73 } while (0) 74 74 #define interrupt_mask_reset_mask(mask) do { \ 75 75 unsigned long long __m = (mask); \ 76 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \ 77 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \ 76 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \ 77 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \ 78 78 } while (0) 79 79 #else 80 80 #define interrupt_mask_set(n) \ 81 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n))) 81 + 
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n))) 82 82 #define interrupt_mask_reset(n) \ 83 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n))) 83 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n))) 84 84 #define interrupt_mask_check(n) \ 85 - ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1) 85 + ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1) 86 86 #define interrupt_mask_set_mask(mask) \ 87 - __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask)) 87 + __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask)) 88 88 #define interrupt_mask_reset_mask(mask) \ 89 - __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask)) 89 + __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask)) 90 90 #endif 91 91 92 92 /* 93 93 * The set of interrupts we want active if irqs are enabled. 94 94 * Note that in particular, the tile timer interrupt comes and goes 95 95 * from this set, since we have no other way to turn off the timer. 96 - * Likewise, INTCTRL_1 is removed and re-added during device 96 + * Likewise, INTCTRL_K is removed and re-added during device 97 97 * interrupts, as is the hardwall UDN_FIREWALL interrupt. 98 98 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it 99 99 * is always claimed as an "active interrupt" so we can query that bit ··· 168 168 169 169 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */ 170 170 #define IRQS_DISABLED(tmp) \ 171 - mfspr tmp, INTERRUPT_MASK_1; \ 171 + mfspr tmp, SPR_INTERRUPT_MASK_K; \ 172 172 andi tmp, tmp, 1 173 173 174 174 /* Load up a pointer to &interrupts_enabled_mask. 
*/ 175 175 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ 176 - moveli reg, hw2_last(interrupts_enabled_mask); \ 177 - shl16insli reg, reg, hw1(interrupts_enabled_mask); \ 178 - shl16insli reg, reg, hw0(interrupts_enabled_mask); \ 176 + moveli reg, hw2_last(interrupts_enabled_mask); \ 177 + shl16insli reg, reg, hw1(interrupts_enabled_mask); \ 178 + shl16insli reg, reg, hw0(interrupts_enabled_mask); \ 179 179 add reg, reg, tp 180 180 181 181 /* Disable interrupts. */ ··· 183 183 moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \ 184 184 shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \ 185 185 shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \ 186 - mtspr INTERRUPT_MASK_SET_1, tmp0 186 + mtspr SPR_INTERRUPT_MASK_SET_K, tmp0 187 187 188 188 /* Disable ALL synchronous interrupts (used by NMI entry). */ 189 189 #define IRQ_DISABLE_ALL(tmp) \ 190 190 movei tmp, -1; \ 191 - mtspr INTERRUPT_MASK_SET_1, tmp 191 + mtspr SPR_INTERRUPT_MASK_SET_K, tmp 192 192 193 193 /* Enable interrupts. */ 194 194 #define IRQ_ENABLE(tmp0, tmp1) \ 195 195 GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ 196 196 ld tmp0, tmp0; \ 197 - mtspr INTERRUPT_MASK_RESET_1, tmp0 197 + mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0 198 198 199 199 #else /* !__tilegx__ */ 200 200 ··· 208 208 * (making the original code's write of the "high" mask word idempotent). 209 209 */ 210 210 #define IRQS_DISABLED(tmp) \ 211 - mfspr tmp, INTERRUPT_MASK_1_0; \ 211 + mfspr tmp, SPR_INTERRUPT_MASK_K_0; \ 212 212 shri tmp, tmp, INT_MEM_ERROR; \ 213 213 andi tmp, tmp, 1 214 214 215 215 /* Load up a pointer to &interrupts_enabled_mask. */ 216 216 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ 217 - moveli reg, lo16(interrupts_enabled_mask); \ 218 - auli reg, reg, ha16(interrupts_enabled_mask);\ 217 + moveli reg, lo16(interrupts_enabled_mask); \ 218 + auli reg, reg, ha16(interrupts_enabled_mask); \ 219 219 add reg, reg, tp 220 220 221 221 /* Disable interrupts. 
*/ ··· 225 225 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ 226 226 }; \ 227 227 { \ 228 - mtspr INTERRUPT_MASK_SET_1_0, tmp0; \ 228 + mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \ 229 229 auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ 230 230 }; \ 231 - mtspr INTERRUPT_MASK_SET_1_1, tmp1 231 + mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1 232 232 233 233 /* Disable ALL synchronous interrupts (used by NMI entry). */ 234 234 #define IRQ_DISABLE_ALL(tmp) \ 235 235 movei tmp, -1; \ 236 - mtspr INTERRUPT_MASK_SET_1_0, tmp; \ 237 - mtspr INTERRUPT_MASK_SET_1_1, tmp 236 + mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \ 237 + mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp 238 238 239 239 /* Enable interrupts. */ 240 240 #define IRQ_ENABLE(tmp0, tmp1) \ ··· 244 244 addi tmp1, tmp0, 4 \ 245 245 }; \ 246 246 lw tmp1, tmp1; \ 247 - mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \ 248 - mtspr INTERRUPT_MASK_RESET_1_1, tmp1 247 + mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \ 248 + mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1 249 249 #endif 250 250 251 251 /*
+16 -11
arch/tile/include/asm/page.h
··· 199 199 * If you want more physical memory than this then see the CONFIG_HIGHMEM 200 200 * option in the kernel configuration. 201 201 * 202 - * The top two 16MB chunks in the table below (VIRT and HV) are 203 - * unavailable to Linux. Since the kernel interrupt vectors must live 204 - * at 0xfd000000, we map all of the bottom of RAM at this address with 205 - * a huge page table entry to minimize its ITLB footprint (as well as 206 - * at PAGE_OFFSET). The last architected requirement is that user 207 - * interrupt vectors live at 0xfc000000, so we make that range of 208 - * memory available to user processes. The remaining regions are sized 209 - * as shown; after the first four addresses, we show "typical" values, 210 - * since the actual addresses depend on kernel #defines. 202 + * The top 16MB chunk in the table below is unavailable to Linux. Since 203 + * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000 204 + * (depending on whether the kernel is at PL2 or PL1), we map all of the 205 + * bottom of RAM at this address with a huge page table entry to minimize 206 + * its ITLB footprint (as well as at PAGE_OFFSET). The last architected 207 + * requirement is that user interrupt vectors live at 0xfc000000, so we 208 + * make that range of memory available to user processes. The remaining 209 + * regions are sized as shown; the first four addresses use the PL 1 210 + * values, and after that, we show "typical" values, since the actual 211 + * addresses depend on kernel #defines. 
211 212 * 212 - * MEM_VIRT_INTRPT 0xff000000 213 213 * MEM_HV_INTRPT 0xfe000000 214 214 * MEM_SV_INTRPT (kernel code) 0xfd000000 215 215 * MEM_USER_INTRPT (user vector) 0xfc000000 ··· 221 221 */ 222 222 223 223 #define MEM_USER_INTRPT _AC(0xfc000000, UL) 224 + #if CONFIG_KERNEL_PL == 1 224 225 #define MEM_SV_INTRPT _AC(0xfd000000, UL) 225 226 #define MEM_HV_INTRPT _AC(0xfe000000, UL) 226 - #define MEM_VIRT_INTRPT _AC(0xff000000, UL) 227 + #else 228 + #define MEM_GUEST_INTRPT _AC(0xfd000000, UL) 229 + #define MEM_SV_INTRPT _AC(0xfe000000, UL) 230 + #define MEM_HV_INTRPT _AC(0xff000000, UL) 231 + #endif 227 232 228 233 #define INTRPT_SIZE 0x4000 229 234
+7 -4
arch/tile/include/asm/processor.h
··· 328 328 * Note that assembly code assumes that USER_PL is zero. 329 329 */ 330 330 #define USER_PL 0 331 - #define KERNEL_PL 1 331 + #if CONFIG_KERNEL_PL == 2 332 + #define GUEST_PL 1 333 + #endif 334 + #define KERNEL_PL CONFIG_KERNEL_PL 332 335 333 - /* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */ 336 + /* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */ 334 337 #define CPU_LOG_MASK_VALUE 12 335 338 #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) 336 339 #if CONFIG_NR_CPUS > CPU_MASK_VALUE 337 340 # error Too many cpus! 338 341 #endif 339 342 #define raw_smp_processor_id() \ 340 - ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE) 343 + ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE) 341 344 #define get_current_ksp0() \ 342 - (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE) 345 + (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE) 343 346 #define next_current_ksp0(task) ({ \ 344 347 unsigned long __ksp0 = task_ksp0(task); \ 345 348 int __cpu = raw_smp_processor_id(); \
+2 -2
arch/tile/include/asm/ptrace.h
··· 62 62 pt_reg_t lr; /* aliases regs[TREG_LR] */ 63 63 64 64 /* Saved special registers. */ 65 - pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */ 66 - pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */ 65 + pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */ 66 + pt_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */ 67 67 pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */ 68 68 pt_reg_t orig_r0; /* r0 at syscall entry, else zero */ 69 69 pt_reg_t flags; /* flags (see below) */
+1 -1
arch/tile/include/asm/system.h
··· 164 164 /* Helper function for _switch_to(). */ 165 165 extern struct task_struct *__switch_to(struct task_struct *prev, 166 166 struct task_struct *next, 167 - unsigned long new_system_save_1_0); 167 + unsigned long new_system_save_k_0); 168 168 169 169 /* Address that switched-away from tasks are at. */ 170 170 extern unsigned long get_switch_to_pc(void);
+14 -14
arch/tile/include/hv/hypervisor.h
··· 1003 1003 * when these occur in a client's interrupt critical section, they must 1004 1004 * be delivered through the downcall mechanism. 1005 1005 * 1006 - * A downcall is initially delivered to the client as an INTCTRL_1 1007 - * interrupt. Upon entry to the INTCTRL_1 vector, the client must 1008 - * immediately invoke the hv_downcall_dispatch service. This service 1009 - * will not return; instead it will cause one of the client's actual 1010 - * downcall-handling interrupt vectors to be entered. The EX_CONTEXT 1011 - * registers in the client will be set so that when the client irets, 1012 - * it will return to the code which was interrupted by the INTCTRL_1 1013 - * interrupt. 1006 + * A downcall is initially delivered to the client as an INTCTRL_CL 1007 + * interrupt, where CL is the client's PL. Upon entry to the INTCTRL_CL 1008 + * vector, the client must immediately invoke the hv_downcall_dispatch 1009 + * service. This service will not return; instead it will cause one of 1010 + * the client's actual downcall-handling interrupt vectors to be entered. 1011 + * The EX_CONTEXT registers in the client will be set so that when the 1012 + * client irets, it will return to the code which was interrupted by the 1013 + * INTCTRL_CL interrupt. 1014 1014 * 1015 - * Under some circumstances, the firing of INTCTRL_1 can race with 1015 + * Under some circumstances, the firing of INTCTRL_CL can race with 1016 1016 * the lowering of a device interrupt. In such a case, the 1017 1017 * hv_downcall_dispatch service may issue an iret instruction instead 1018 1018 * of entering one of the client's actual downcall-handling interrupt 1019 1019 * vectors. This will return execution to the location that was 1020 - * interrupted by INTCTRL_1. 1020 + * interrupted by INTCTRL_CL. 1021 1021 * 1022 1022 * Any saving of registers should be done by the actual handling 1023 - * vectors; no registers should be changed by the INTCTRL_1 handler. 
1023 + * vectors; no registers should be changed by the INTCTRL_CL handler. 1024 1024 * In particular, the client should not use a jal instruction to invoke 1025 1025 * the hv_downcall_dispatch service, as that would overwrite the client's 1026 1026 * lr register. Note that the hv_downcall_dispatch service may overwrite 1027 1027 * one or more of the client's system save registers. 1028 1028 * 1029 - * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor 1029 + * The client must not modify the INTCTRL_CL_STATUS SPR. The hypervisor 1030 1030 * will set this register to cause a downcall to happen, and will clear 1031 1031 * it when no further downcalls are pending. 1032 1032 * 1033 - * When a downcall vector is entered, the INTCTRL_1 interrupt will be 1033 + * When a downcall vector is entered, the INTCTRL_CL interrupt will be 1034 1034 * masked. When the client is done processing a downcall, and is ready 1035 1035 * to accept another, it must unmask this interrupt; if more downcalls 1036 - * are pending, this will cause the INTCTRL_1 vector to be reentered. 1036 + * are pending, this will cause the INTCTRL_CL vector to be reentered. 1037 1037 * Currently the following interrupt vectors can be entered through a 1038 1038 * downcall: 1039 1039 *
+7 -5
arch/tile/kernel/entry.S
··· 15 15 #include <linux/linkage.h> 16 16 #include <linux/unistd.h> 17 17 #include <asm/irqflags.h> 18 + #include <asm/processor.h> 18 19 #include <arch/abi.h> 20 + #include <arch/spr_def.h> 19 21 20 22 #ifdef __tilegx__ 21 23 #define bnzt bnezt ··· 82 80 STD_ENTRY(cpu_idle_on_new_stack) 83 81 { 84 82 move sp, r1 85 - mtspr SYSTEM_SAVE_1_0, r2 83 + mtspr SPR_SYSTEM_SAVE_K_0, r2 86 84 } 87 85 jal free_thread_info 88 86 j cpu_idle ··· 104 102 STD_ENTRY(_cpu_idle) 105 103 { 106 104 lnk r0 107 - movei r1, 1 105 + movei r1, KERNEL_PL 108 106 } 109 107 { 110 108 addli r0, r0, _cpu_idle_nap - . 111 109 mtspr INTERRUPT_CRITICAL_SECTION, r1 112 110 } 113 - IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */ 114 - mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */ 115 - mtspr EX_CONTEXT_1_0, r0 111 + IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */ 112 + mtspr SPR_EX_CONTEXT_K_1, r1 /* Kernel PL, ICS clear */ 113 + mtspr SPR_EX_CONTEXT_K_0, r0 116 114 iret 117 115 .global _cpu_idle_nap 118 116 _cpu_idle_nap:
+3 -2
arch/tile/kernel/head_32.S
··· 23 23 #include <asm/asm-offsets.h> 24 24 #include <hv/hypervisor.h> 25 25 #include <arch/chip.h> 26 + #include <arch/spr_def.h> 26 27 27 28 /* 28 29 * This module contains the entry code for kernel images. It performs the ··· 77 76 } 78 77 1: 79 78 80 - /* Get our processor number and save it away in SAVE_1_0. */ 79 + /* Get our processor number and save it away in SAVE_K_0. */ 81 80 jal hv_inquire_topology 82 81 mulll_uu r4, r1, r2 /* r1 == y, r2 == width */ 83 82 add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */ ··· 125 124 lw r0, r0 126 125 lw sp, r1 127 126 or r4, sp, r4 128 - mtspr SYSTEM_SAVE_1_0, r4 /* save ksp0 + cpu */ 127 + mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */ 129 128 addi sp, sp, -STACK_TOP_DELTA 130 129 { 131 130 move lr, zero /* stop backtraces in the called function */
+36 -31
arch/tile/kernel/intvec_32.S
··· 32 32 # error "No support for kernel preemption currently" 33 33 #endif 34 34 35 - #if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48 36 - # error INT_INTCTRL_1 coded to set high interrupt mask 35 + #if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48 36 + # error INT_INTCTRL_K coded to set high interrupt mask 37 37 #endif 38 38 39 39 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) ··· 132 132 133 133 /* Temporarily save a register so we have somewhere to work. */ 134 134 135 - mtspr SYSTEM_SAVE_1_1, r0 136 - mfspr r0, EX_CONTEXT_1_1 135 + mtspr SPR_SYSTEM_SAVE_K_1, r0 136 + mfspr r0, SPR_EX_CONTEXT_K_1 137 137 138 138 /* The cmpxchg code clears sp to force us to reset it here on fault. */ 139 139 { ··· 167 167 * The page_fault handler may be downcalled directly by the 168 168 * hypervisor even when Linux is running and has ICS set. 169 169 * 170 - * In this case the contents of EX_CONTEXT_1_1 reflect the 170 + * In this case the contents of EX_CONTEXT_K_1 reflect the 171 171 * previous fault and can't be relied on to choose whether or 172 172 * not to reinitialize the stack pointer. So we add a test 173 - * to see whether SYSTEM_SAVE_1_2 has the high bit set, 173 + * to see whether SYSTEM_SAVE_K_2 has the high bit set, 174 174 * and if so we don't reinitialize sp, since we must be coming 175 175 * from Linux. (In fact the precise case is !(val & ~1), 176 176 * but any Linux PC has to have the high bit set.) 177 177 * 178 - * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for 178 + * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for 179 179 * any path that turns into a downcall to one of our TLB handlers. 
180 180 */ 181 - mfspr r0, SYSTEM_SAVE_1_2 181 + mfspr r0, SPR_SYSTEM_SAVE_K_2 182 182 { 183 183 blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */ 184 184 move r0, sp ··· 187 187 188 188 2: 189 189 /* 190 - * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and 190 + * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and 191 191 * the current stack top in the higher bits. So we recover 192 192 * our stack top by just masking off the low bits, then 193 193 * point sp at the top aligned address on the actual stack page. 194 194 */ 195 - mfspr r0, SYSTEM_SAVE_1_0 195 + mfspr r0, SPR_SYSTEM_SAVE_K_0 196 196 mm r0, r0, zero, LOG2_THREAD_SIZE, 31 197 197 198 198 0: ··· 254 254 sw sp, r3 255 255 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3) 256 256 } 257 - mfspr r0, EX_CONTEXT_1_0 257 + mfspr r0, SPR_EX_CONTEXT_K_0 258 258 .ifc \processing,handle_syscall 259 259 /* 260 260 * Bump the saved PC by one bundle so that when we return, we won't ··· 267 267 sw sp, r0 268 268 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC 269 269 } 270 - mfspr r0, EX_CONTEXT_1_1 270 + mfspr r0, SPR_EX_CONTEXT_K_1 271 271 { 272 272 sw sp, r0 273 273 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 ··· 289 289 .endif 290 290 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM 291 291 } 292 - mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */ 292 + mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */ 293 293 { 294 294 sw sp, r0 295 295 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4 ··· 309 309 * See discussion below at "finish_interrupt_save". 
310 310 */ 311 311 .ifc \c_routine, do_page_fault 312 - mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */ 313 - mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */ 312 + mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */ 313 + mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */ 314 314 .else 315 315 .ifc \vecnum, INT_DOUBLE_FAULT 316 316 { 317 - mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */ 317 + mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */ 318 318 movei r3, 0 319 319 } 320 320 .else ··· 467 467 /* Load tp with our per-cpu offset. */ 468 468 #ifdef CONFIG_SMP 469 469 { 470 - mfspr r20, SYSTEM_SAVE_1_0 470 + mfspr r20, SPR_SYSTEM_SAVE_K_0 471 471 moveli r21, lo16(__per_cpu_offset) 472 472 } 473 473 { ··· 487 487 * We load flags in r32 here so we can jump to .Lrestore_regs 488 488 * directly after do_page_fault_ics() if necessary. 489 489 */ 490 - mfspr r32, EX_CONTEXT_1_1 490 + mfspr r32, SPR_EX_CONTEXT_K_1 491 491 { 492 492 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 493 493 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) ··· 957 957 pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC 958 958 pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1 959 959 { 960 - mtspr EX_CONTEXT_1_0, r21 960 + mtspr SPR_EX_CONTEXT_K_0, r21 961 961 move r5, zero 962 962 } 963 963 { 964 - mtspr EX_CONTEXT_1_1, lr 964 + mtspr SPR_EX_CONTEXT_K_1, lr 965 965 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 966 966 } 967 967 ··· 1199 1199 STD_ENDPROC(interrupt_return) 1200 1200 1201 1201 /* 1202 - * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit 1202 + * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit 1203 1203 * before returning, so we can properly get more downcalls. 
1204 1204 */ 1205 1205 .pushsection .text.handle_interrupt_downcall,"ax" ··· 1208 1208 check_single_stepping normal, .Ldispatch_downcall 1209 1209 .Ldispatch_downcall: 1210 1210 1211 - /* Clear INTCTRL_1 from the set of interrupts we ever enable. */ 1211 + /* Clear INTCTRL_K from the set of interrupts we ever enable. */ 1212 1212 GET_INTERRUPTS_ENABLED_MASK_PTR(r30) 1213 1213 { 1214 1214 addi r30, r30, 4 1215 - movei r31, INT_MASK(INT_INTCTRL_1) 1215 + movei r31, INT_MASK(INT_INTCTRL_K) 1216 1216 } 1217 1217 { 1218 1218 lw r20, r30 ··· 1227 1227 } 1228 1228 FEEDBACK_REENTER(handle_interrupt_downcall) 1229 1229 1230 - /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */ 1230 + /* Allow INTCTRL_K to be enabled next time we enable interrupts. */ 1231 1231 lw r20, r30 1232 1232 or r20, r20, r31 1233 1233 sw r30, r20 ··· 1509 1509 /* Various stub interrupt handlers and syscall handlers */ 1510 1510 1511 1511 STD_ENTRY_LOCAL(_kernel_double_fault) 1512 - mfspr r1, EX_CONTEXT_1_0 1512 + mfspr r1, SPR_EX_CONTEXT_K_0 1513 1513 move r2, lr 1514 1514 move r3, sp 1515 1515 move r4, r52 ··· 1518 1518 STD_ENDPROC(_kernel_double_fault) 1519 1519 1520 1520 STD_ENTRY_LOCAL(bad_intr) 1521 - mfspr r2, EX_CONTEXT_1_0 1521 + mfspr r2, SPR_EX_CONTEXT_K_0 1522 1522 panic "Unhandled interrupt %#x: PC %#lx" 1523 1523 STD_ENDPROC(bad_intr) 1524 1524 ··· 1560 1560 * a page fault which would assume the stack was valid, it does 1561 1561 * save/restore the stack pointer and zero it out to make sure it gets reset. 1562 1562 * Since we always keep interrupts disabled, the hypervisor won't 1563 - * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them 1563 + * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them 1564 1564 * (other than to advance the PC on return). 1565 1565 * 1566 1566 * We have to manually validate the user vs kernel address range ··· 1766 1766 /* Do slow mtspr here so the following "mf" waits less. 
*/ 1767 1767 { 1768 1768 move sp, r27 1769 - mtspr EX_CONTEXT_1_0, r28 1769 + mtspr SPR_EX_CONTEXT_K_0, r28 1770 1770 } 1771 1771 mf 1772 1772 ··· 1785 1785 } 1786 1786 { 1787 1787 move sp, r27 1788 - mtspr EX_CONTEXT_1_0, r28 1788 + mtspr SPR_EX_CONTEXT_K_0, r28 1789 1789 } 1790 1790 iret 1791 1791 ··· 1813 1813 #endif 1814 1814 1815 1815 /* Issue the slow SPR here while the tns result is in flight. */ 1816 - mfspr r28, EX_CONTEXT_1_0 1816 + mfspr r28, SPR_EX_CONTEXT_K_0 1817 1817 1818 1818 { 1819 1819 addi r28, r28, 8 /* return to the instruction after the swint1 */ ··· 1901 1901 .Lcmpxchg64_mismatch: 1902 1902 { 1903 1903 move sp, r27 1904 - mtspr EX_CONTEXT_1_0, r28 1904 + mtspr SPR_EX_CONTEXT_K_0, r28 1905 1905 } 1906 1906 mf 1907 1907 { ··· 1982 1982 int_hand INT_PERF_COUNT, PERF_COUNT, \ 1983 1983 op_handle_perf_interrupt, handle_nmi 1984 1984 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr 1985 + #if CONFIG_KERNEL_PL == 2 1986 + dc_dispatch INT_INTCTRL_2, INTCTRL_2 1987 + int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr 1988 + #else 1985 1989 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr 1986 1990 dc_dispatch INT_INTCTRL_1, INTCTRL_1 1991 + #endif 1987 1992 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr 1988 1993 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \ 1989 1994 hv_message_intr, handle_interrupt_downcall
+8 -8
arch/tile/kernel/irq.c
··· 61 61 62 62 #if CHIP_HAS_IPI() 63 63 /* Use SPRs to manipulate device interrupts. */ 64 - #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask) 65 - #define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask) 66 - #define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask) 64 + #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) 65 + #define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask) 66 + #define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask) 67 67 #else 68 68 /* Use HV to manipulate device interrupts. */ 69 69 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask) ··· 89 89 * masked by a previous interrupt. Then, mask out the ones 90 90 * we're going to handle. 91 91 */ 92 - unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1); 93 - original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked; 94 - __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs); 92 + unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K); 93 + original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked; 94 + __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs); 95 95 #else 96 96 /* 97 97 * Hypervisor performs the equivalent of the Gx code above and 98 98 * then puts the pending interrupt mask into a system save reg 99 99 * for us to find. 100 100 */ 101 - original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); 101 + original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3); 102 102 #endif 103 103 remaining_irqs = original_irqs; 104 104 ··· 225 225 /* Enable interrupt delivery. */ 226 226 unmask_irqs(~0UL); 227 227 #if CHIP_HAS_IPI() 228 - raw_local_irq_unmask(INT_IPI_1); 228 + raw_local_irq_unmask(INT_IPI_K); 229 229 #endif 230 230 } 231 231
+1 -1
arch/tile/kernel/messaging.c
··· 34 34 panic("hv_register_message_state: error %d", rc); 35 35 36 36 /* Make sure downcall interrupts will be enabled. */ 37 - raw_local_irq_unmask(INT_INTCTRL_1); 37 + raw_local_irq_unmask(INT_INTCTRL_K); 38 38 } 39 39 40 40 void hv_message_intr(struct pt_regs *regs, int intnum)
+11 -1
arch/tile/kernel/process.c
··· 305 305 /* Allow user processes to access the DMA SPRs */ 306 306 void grant_dma_mpls(void) 307 307 { 308 + #if CONFIG_KERNEL_PL == 2 309 + __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); 310 + __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); 311 + #else 308 312 __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1); 309 313 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1); 314 + #endif 310 315 } 311 316 312 317 /* Forbid user processes from accessing the DMA SPRs */ 313 318 void restrict_dma_mpls(void) 314 319 { 320 + #if CONFIG_KERNEL_PL == 2 321 + __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1); 322 + __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1); 323 + #else 315 324 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); 316 325 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); 326 + #endif 317 327 } 318 328 319 329 /* Pause the DMA engine, then save off its state registers. */ ··· 534 524 * Switch kernel SP, PC, and callee-saved registers. 535 525 * In the context of the new task, return the old task pointer 536 526 * (i.e. the task that actually called __switch_to). 537 - * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp. 527 + * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp. 538 528 */ 539 529 return __switch_to(prev, next, next_current_ksp0(next)); 540 530 }
+1 -1
arch/tile/kernel/regs_32.S
··· 85 85 { 86 86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */ 87 87 move sp, r13 88 - mtspr SYSTEM_SAVE_1_0, r2 88 + mtspr SPR_SYSTEM_SAVE_K_0, r2 89 89 } 90 90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG) 91 91 .L__switch_to_pc:
+16 -12
arch/tile/kernel/setup.c
··· 187 187 188 188 #ifdef CONFIG_HIGHMEM 189 189 /* 190 - * Determine for each controller where its lowmem is mapped and how 191 - * much of it is mapped there. On controller zero, the first few 192 - * megabytes are mapped at 0xfd000000 as code, so in principle we 193 - * could start our data mappings higher up, but for now we don't 194 - * bother, to avoid additional confusion. 190 + * Determine for each controller where its lowmem is mapped and how much of 191 + * it is mapped there. On controller zero, the first few megabytes are 192 + * already mapped in as code at MEM_SV_INTRPT, so in principle we could 193 + * start our data mappings higher up, but for now we don't bother, to avoid 194 + * additional confusion. 195 195 * 196 196 * One question is whether, on systems with more than 768 Mb and 197 197 * controllers of different sizes, to map in a proportionate amount of ··· 876 876 #if CHIP_HAS_SN_PROC() 877 877 raw_local_irq_unmask(INT_SNITLB_MISS); 878 878 #endif 879 + #ifdef __tilegx__ 880 + raw_local_irq_unmask(INT_SINGLE_STEP_K); 881 + #endif 879 882 880 883 /* 881 884 * Allow user access to many generic SPRs, like the cycle ··· 896 893 #endif 897 894 898 895 /* 899 - * Set the MPL for interrupt control 0 to user level. 900 - * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, 901 - * as well as the PL 0 interrupt mask. 896 + * Set the MPL for interrupt control 0 & 1 to the corresponding 897 + * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT 898 + * SPRs, as well as the interrupt mask. 902 899 */ 903 900 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); 901 + __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1); 904 902 905 903 /* Initialize IRQ support for this cpu. */ 906 904 setup_irq_regs(); ··· 1037 1033 * In addition, make sure we CAN'T use the end of memory, since 1038 1034 * we use the last chunk of each pgd for the pgd_list. 
1039 1035 */ 1040 - int i, fc_fd_ok = 0; 1036 + int i, user_kernel_ok = 0; 1041 1037 unsigned long max_va = 0; 1042 1038 unsigned long list_va = 1043 1039 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); ··· 1048 1044 break; 1049 1045 if (range.start <= MEM_USER_INTRPT && 1050 1046 range.start + range.size >= MEM_HV_INTRPT) 1051 - fc_fd_ok = 1; 1047 + user_kernel_ok = 1; 1052 1048 if (range.start == 0) 1053 1049 max_va = range.size; 1054 1050 BUG_ON(range.start + range.size > list_va); 1055 1051 } 1056 - if (!fc_fd_ok) 1057 - early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); 1052 + if (!user_kernel_ok) 1053 + early_panic("Hypervisor not configured for user/kernel VAs\n"); 1058 1054 if (max_va == 0) 1059 1055 early_panic("Hypervisor not configured for low VAs\n"); 1060 1056 if (max_va < KERNEL_HIGH_VADDR)
+1 -1
arch/tile/kernel/smp.c
··· 212 212 213 213 tile.x = cpu_x(cpu); 214 214 tile.y = cpu_y(cpu); 215 - if (hv_get_ipi_pte(tile, 1, &pte) != 0) 215 + if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0) 216 216 panic("Failed to initialize IPI for cpu %d\n", cpu); 217 217 218 218 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+1 -1
arch/tile/kernel/traps.c
··· 278 278 case INT_DOUBLE_FAULT: 279 279 /* 280 280 * For double fault, "reason" is actually passed as 281 - * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so 281 + * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so 282 282 * we can provide the original fault number rather than 283 283 * the uninteresting "INT_DOUBLE_FAULT" so the user can 284 284 * learn what actually struck while PL0 ICS was set.
+38
arch/tile/kvm/Kconfig
··· 1 + # 2 + # KVM configuration 3 + # 4 + 5 + source "virt/kvm/Kconfig" 6 + 7 + menuconfig VIRTUALIZATION 8 + bool "Virtualization" 9 + ---help--- 10 + Say Y here to get to see options for using your Linux host to run 11 + other operating systems inside virtual machines (guests). 12 + This option alone does not add any kernel code. 13 + 14 + If you say N, all options in this submenu will be skipped and 15 + disabled. 16 + 17 + if VIRTUALIZATION 18 + 19 + config KVM 20 + tristate "Kernel-based Virtual Machine (KVM) support" 21 + depends on HAVE_KVM && MODULES && EXPERIMENTAL 22 + select PREEMPT_NOTIFIERS 23 + select ANON_INODES 24 + ---help--- 25 + Support hosting paravirtualized guest machines. 26 + 27 + This module provides access to the hardware capabilities through 28 + a character device node named /dev/kvm. 29 + 30 + To compile this as a module, choose M here: the module 31 + will be called kvm. 32 + 33 + If unsure, say N. 34 + 35 + source drivers/vhost/Kconfig 36 + source drivers/virtio/Kconfig 37 + 38 + endif # VIRTUALIZATION
+3 -3
arch/tile/mm/fault.c
··· 563 563 /* 564 564 * When we take an ITLB or DTLB fault or access violation in the 565 565 * supervisor while the critical section bit is set, the hypervisor is 566 - * reluctant to write new values into the EX_CONTEXT_1_x registers, 566 + * reluctant to write new values into the EX_CONTEXT_K_x registers, 567 567 * since that might indicate we have not yet squirreled the SPR 568 568 * contents away and can thus safely take a recursive interrupt. 569 - * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2. 569 + * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2. 570 570 * 571 571 * Note that this routine is called before homecache_tlb_defer_enter(), 572 572 * which means that we can properly unlock any atomics that might ··· 610 610 * fault. We didn't set up a kernel stack on initial entry to 611 611 * sys_cmpxchg, but instead had one set up by the fault, which 612 612 * (because sys_cmpxchg never releases ICS) came to us via the 613 - * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are 613 + * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are 614 614 * still referencing the original user code. We release the 615 615 * atomic lock and rewrite pt_regs so that it appears that we 616 616 * came from user-space directly, and after we finish the
+1 -1
arch/tile/mm/init.c
··· 1060 1060 1061 1061 /* 1062 1062 * Free the pages mapped from 0xc0000000 that correspond to code 1063 - * pages from 0xfd000000 that we won't use again after init. 1063 + * pages from MEM_SV_INTRPT that we won't use again after init. 1064 1064 */ 1065 1065 free_init_pages("unused kernel text", 1066 1066 (unsigned long)_sinittext - text_delta,