Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Hexagon: Add hypervisor interface

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Richard Kuo and committed by
Linus Torvalds
e49ee290 c150290d

+1449
+281
arch/hexagon/include/asm/hexagon_vm.h
/*
 * Declarations for the Hexagon Virtual Machine.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef ASM_HEXAGON_VM_H
#define ASM_HEXAGON_VM_H

/*
 * In principle, a Linux kernel for the VM could
 * selectively define the virtual instructions
 * as inline assembler macros, but for a first pass,
 * we'll use subroutines for both the VM and the native
 * kernels.  It's costing a subroutine call/return,
 * but it makes for a single set of entry points
 * for tracing/debugging.
 */

/*
 * Let's make this stuff visible only if configured,
 * so we can unconditionally include the file.
 */

#ifndef __ASSEMBLY__

/* Operation selectors for the __vmcache() virtual instruction. */
enum VM_CACHE_OPS {
	ickill,
	dckill,
	l2kill,
	dccleaninva,
	icinva,
	idsync,
	fetch_cfg
};

/* Operation selectors for the __vmintop() virtual instruction. */
enum VM_INT_OPS {
	nop,
	globen,
	globdis,
	locen,
	locdis,
	affinity,
	get,
	peek,
	status,
	post,
	clear
};

extern void _K_VM_event_vector(void);

/*
 * Raw hypervisor call stubs; implemented in assembly elsewhere.
 * Each maps to one of the HVM_TRAP1_* trap numbers below.
 */
void __vmrte(void);
long __vmsetvec(void *);
long __vmsetie(long);
long __vmgetie(void);
long __vmintop(enum VM_INT_OPS, long, long, long, long);
long __vmclrmap(void *, unsigned long);
long __vmnewmap(void *);
long __vmcache(enum VM_CACHE_OPS op, unsigned long addr, unsigned long len);
unsigned long long __vmgettime(void);
long __vmsettime(unsigned long long);
long __vmstart(void *, void *);
void __vmstop(void);
long __vmwait(void);
void __vmyield(void);
long __vmvpid(void);

/* Cache operation convenience wrappers */

static inline long __vmcache_ickill(void)
{
	return __vmcache(ickill, 0, 0);
}

static inline long __vmcache_dckill(void)
{
	return __vmcache(dckill, 0, 0);
}

static inline long __vmcache_l2kill(void)
{
	return __vmcache(l2kill, 0, 0);
}

static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
{
	return __vmcache(dccleaninva, addr, len);
}

static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
{
	return __vmcache(icinva, addr, len);
}

static inline long __vmcache_idsync(unsigned long addr,
				    unsigned long len)
{
	return __vmcache(idsync, addr, len);
}

static inline long __vmcache_fetch_cfg(unsigned long val)
{
	return __vmcache(fetch_cfg, val, 0);
}

/* interrupt operations  */

static inline long __vmintop_nop(void)
{
	return __vmintop(nop, 0, 0, 0, 0);
}

static inline long __vmintop_globen(long i)
{
	return __vmintop(globen, i, 0, 0, 0);
}

static inline long __vmintop_globdis(long i)
{
	return __vmintop(globdis, i, 0, 0, 0);
}

static inline long __vmintop_locen(long i)
{
	return __vmintop(locen, i, 0, 0, 0);
}

static inline long __vmintop_locdis(long i)
{
	return __vmintop(locdis, i, 0, 0, 0);
}

static inline long __vmintop_affinity(long i, long cpu)
{
	/*
	 * Fixed: was passing "locdis", which would locally disable
	 * interrupt i rather than setting its CPU affinity.
	 */
	return __vmintop(affinity, i, cpu, 0, 0);
}

static inline long __vmintop_get(void)
{
	return __vmintop(get, 0, 0, 0, 0);
}

static inline long __vmintop_peek(void)
{
	return __vmintop(peek, 0, 0, 0, 0);
}

static inline long __vmintop_status(long i)
{
	return __vmintop(status, i, 0, 0, 0);
}

static inline long __vmintop_post(long i)
{
	return __vmintop(post, i, 0, 0, 0);
}

static inline long __vmintop_clear(long i)
{
	return __vmintop(clear, i, 0, 0, 0);
}

#else /* Only assembly code should reference these */

#define HVM_TRAP1_VMRTE			1
#define HVM_TRAP1_VMSETVEC		2
#define HVM_TRAP1_VMSETIE		3
#define HVM_TRAP1_VMGETIE		4
#define HVM_TRAP1_VMINTOP		5
#define HVM_TRAP1_VMCLRMAP		10
#define HVM_TRAP1_VMNEWMAP		11
#define HVM_TRAP1_FORMERLY_VMWIRE	12
#define HVM_TRAP1_VMCACHE		13
#define HVM_TRAP1_VMGETTIME		14
#define HVM_TRAP1_VMSETTIME		15
#define HVM_TRAP1_VMWAIT		16
#define HVM_TRAP1_VMYIELD		17
#define HVM_TRAP1_VMSTART		18
#define HVM_TRAP1_VMSTOP		19
#define HVM_TRAP1_VMVPID		20
#define HVM_TRAP1_VMSETREGS		21
#define HVM_TRAP1_VMGETREGS		22

#endif /* __ASSEMBLY__ */

/*
 * Constants for virtual instruction parameters and return values
 */

/* vmsetie arguments */

#define VM_INT_DISABLE	0
#define VM_INT_ENABLE	1

/* vmsetimask arguments */

#define VM_INT_UNMASK	0
#define VM_INT_MASK	1

#define VM_NEWMAP_TYPE_LINEAR	0
#define VM_NEWMAP_TYPE_PGTABLES	1


/*
 * Event Record definitions useful to both C and Assembler
 */

/* VMEST Layout */

#define HVM_VMEST_UM_SFT	31
#define HVM_VMEST_UM_MSK	1
#define HVM_VMEST_IE_SFT	30
#define HVM_VMEST_IE_MSK	1
#define HVM_VMEST_EVENTNUM_SFT	16
#define HVM_VMEST_EVENTNUM_MSK	0xff
#define HVM_VMEST_CAUSE_SFT	0
#define HVM_VMEST_CAUSE_MSK	0xffff

/*
 * The initial program gets to find a system environment descriptor
 * on its stack when it begins execution.  The first word is a version
 * code to indicate what is there.  Zero means nothing more.
 */

#define HEXAGON_VM_SED_NULL	0

/*
 * Event numbers for vector binding
 */

#define HVM_EV_RESET		0
#define HVM_EV_MACHCHECK	1
#define HVM_EV_GENEX		2
#define HVM_EV_TRAP		8
#define HVM_EV_INTR		15
/* These should be nuked as soon as we know the VM is up to spec v0.1.1 */
#define HVM_EV_INTR_0		16
#define HVM_MAX_INTR		240

/*
 * Cause values for General Exception
 */

#define HVM_GE_C_BUS	0x01
#define HVM_GE_C_XPROT	0x11
#define HVM_GE_C_XUSER	0x14
#define HVM_GE_C_INVI	0x15
#define HVM_GE_C_PRIVI	0x1B
#define HVM_GE_C_XMAL	0x1C
#define HVM_GE_C_RMAL	0x20
#define HVM_GE_C_WMAL	0x21
#define HVM_GE_C_RPROT	0x22
#define HVM_GE_C_WPROT	0x23
#define HVM_GE_C_RUSER	0x24
#define HVM_GE_C_WUSER	0x25
#define HVM_GE_C_CACHE	0x28

/*
 * Cause codes for Machine Check
 */

#define HVM_MCHK_C_DOWN		0x00
#define HVM_MCHK_C_BADSP	0x01
#define HVM_MCHK_C_BADEX	0x02
#define HVM_MCHK_C_BADPT	0x03
#define HVM_MCHK_C_REGWR	0x29

#endif
+111
arch/hexagon/include/asm/vm_mmu.h
/*
 * Hexagon VM page table entry definitions
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_VM_MMU_H
#define _ASM_VM_MMU_H

/*
 * Shift, mask, and other constants for the Hexagon Virtual Machine
 * page tables.
 *
 * Virtual machine MMU allows first-level entries to either be
 * single-level lookup PTEs for very large pages, or PDEs pointing
 * to second-level PTEs for smaller pages.  If PTE is single-level,
 * the least significant bits cannot be used as software bits to encode
 * virtual memory subsystem information about the page, and that state
 * must be maintained in some parallel data structure.
 */

/* S or Page Size field in PDE */
#define	__HVM_PDE_S		(0x7 << 0)
#define __HVM_PDE_S_4KB		0
#define __HVM_PDE_S_16KB	1
#define __HVM_PDE_S_64KB	2
#define __HVM_PDE_S_256KB	3
#define __HVM_PDE_S_1MB		4
#define __HVM_PDE_S_4MB		5
#define __HVM_PDE_S_16MB	6
#define __HVM_PDE_S_INVALID	7

/* Masks for L2 page table pointer, as function of page size */
#define __HVM_PDE_PTMASK_4KB	0xfffff000
#define __HVM_PDE_PTMASK_16KB	0xfffffc00
#define __HVM_PDE_PTMASK_64KB	0xffffff00
#define __HVM_PDE_PTMASK_256KB	0xffffffc0
#define __HVM_PDE_PTMASK_1MB	0xfffffff0

/*
 * Virtual Machine PTE Bits/Fields
 */
#define __HVM_PTE_T		(1<<4)
#define __HVM_PTE_U		(1<<5)
#define	__HVM_PTE_C		(0x7<<6)
#define __HVM_PTE_CVAL(pte)	(((pte) & __HVM_PTE_C) >> 6)
#define __HVM_PTE_R		(1<<9)
#define __HVM_PTE_W		(1<<10)
#define __HVM_PTE_X		(1<<11)

/*
 * Cache Attributes, to be shifted as necessary for virtual/physical PTEs
 */

#define __HEXAGON_C_WB		0x0	/* Write-back, no L2 */
#define	__HEXAGON_C_WT		0x1	/* Write-through, no L2 */
#define	__HEXAGON_C_DEV		0x4	/* Device register space */
#define	__HEXAGON_C_WT_L2	0x5	/* Write-through, with L2 */
/* this really should be #if CONFIG_HEXAGON_ARCH = 2 but that's not defined */
#if defined(CONFIG_HEXAGON_COMET) || defined(CONFIG_QDSP6_ST1)
#define __HEXAGON_C_UNC		__HEXAGON_C_DEV
#else
#define	__HEXAGON_C_UNC		0x6	/* Uncached memory */
#endif
#define	__HEXAGON_C_WB_L2	0x7	/* Write-back, with L2 */

/*
 * This can be overridden, but we're defaulting to the most aggressive
 * cache policy, the better to find bugs sooner.
 */

#define	CACHE_DEFAULT	__HEXAGON_C_WB_L2

/* Masks for physical page address, as a function of page size */

#define __HVM_PTE_PGMASK_4KB	0xfffff000
#define __HVM_PTE_PGMASK_16KB	0xffffc000
#define __HVM_PTE_PGMASK_64KB	0xffff0000
#define __HVM_PTE_PGMASK_256KB	0xfffc0000
#define __HVM_PTE_PGMASK_1MB	0xfff00000

/* Masks for single-level large page lookups */

#define __HVM_PTE_PGMASK_4MB	0xffc00000
#define __HVM_PTE_PGMASK_16MB	0xff000000

/*
 * "Big kernel page mappings" (see vm_init_segtable.S)
 * are currently 16MB
 */

#define BIG_KERNEL_PAGE_SHIFT 24
#define BIG_KERNEL_PAGE_SIZE (1 << BIG_KERNEL_PAGE_SHIFT)



#endif /* _ASM_VM_MMU_H */
+269
arch/hexagon/kernel/vm_entry.S
/*
 * Event entry/exit for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <asm/asm-offsets.h>  /*  assembly-safer versions of C defines */
#include <asm/mem-layout.h>  /*  sigh, except for page_offset  */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>

/*
 * Entry into guest-mode Linux under Hexagon Virtual Machine.
 * Stack pointer points to event record - build pt_regs on top of it,
 * set up a plausible C stack frame, and dispatch to the C handler.
 * On return, do vmrte virtual instruction with SP where we started.
 *
 * VM Spec 0.5 uses a trap to fetch HVM record now.
 */

/*
 * Save full register state, while setting up thread_info struct
 * pointer derived from kernel stack pointer in THREADINFO_REG
 * register, putting prior thread_info.regs pointer in a callee-save
 * register (R24, which had better not ever be assigned to THREADINFO_REG),
 * and updating thread_info.regs to point to current stack frame,
 * so as to support nested events in kernel mode.
 *
 * As this is common code, we set the pt_regs system call number
 * to -1 for all events.  It will be replaced with the system call
 * number in the case where we decode a system call (trap0(#1)).
 */

#define save_pt_regs()\
	memd(R0 + #_PT_R3130) = R31:30; \
	{ memw(R0 + #_PT_R2928) = R28; \
	  R31 = memw(R0 + #_PT_ER_VMPSP); }\
	{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
	  R31 = ugp; } \
	{ memd(R0 + #_PT_R2726) = R27:26; \
	  R30 = gp ; } \
	memd(R0 + #_PT_R2524) = R25:24; \
	memd(R0 + #_PT_R2322) = R23:22; \
	memd(R0 + #_PT_R2120) = R21:20; \
	memd(R0 + #_PT_R1918) = R19:18; \
	memd(R0 + #_PT_R1716) = R17:16; \
	memd(R0 + #_PT_R1514) = R15:14; \
	memd(R0 + #_PT_R1312) = R13:12; \
	{ memd(R0 + #_PT_R1110) = R11:10; \
	  R15 = lc0; } \
	{ memd(R0 + #_PT_R0908) = R9:8; \
	  R14 = sa0; } \
	{ memd(R0 + #_PT_R0706) = R7:6; \
	  R13 = lc1; } \
	{ memd(R0 + #_PT_R0504) = R5:4; \
	  R12 = sa1; } \
	{ memd(R0 + #_PT_UGPGP) = R31:30; \
	  R11 = m1; \
	  R2.H = #HI(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC0SA0) = R15:14; \
	  R10 = m0; \
	  R2.L = #LO(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC1SA1) = R13:12; \
	  R15 = p3:0; \
	  R2 = neg(R2); } \
	{ memd(R0 + #_PT_M1M0) = R11:10; \
	  R14  = usr; \
	  R2 = and(R0,R2); } \
	{ memd(R0 + #_PT_PREDSUSR) =  R15:14; \
	  THREADINFO_REG = R2; } \
	{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
	  memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
	  R2 = #-1; } \
	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
	  R30 = #0; }

/*
 * Restore registers and thread_info.regs state.  THREADINFO_REG
 * is assumed to still be sane, and R24 to have been correctly
 * preserved.  Don't restore R29 (SP) until later.
 */

#define restore_pt_regs() \
	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
	  R15:14 = memd(R0 + #_PT_PREDSUSR); } \
	{ R11:10 = memd(R0 + #_PT_M1M0); \
	  p3:0 = R15; } \
	{ R13:12 = memd(R0 + #_PT_LC1SA1); \
	  usr = R14; } \
	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
	  m1 = R11; } \
	{ R3:2 = memd(R0 + #_PT_R0302); \
	  m0 = R10; } \
	{ R5:4 = memd(R0 + #_PT_R0504); \
	  lc1 = R13; } \
	{ R7:6 = memd(R0 + #_PT_R0706); \
	  sa1 = R12; } \
	{ R9:8 = memd(R0 + #_PT_R0908); \
	  lc0 = R15; } \
	{ R11:10 = memd(R0 + #_PT_R1110); \
	  sa0 = R14; } \
	{ R13:12 = memd(R0 + #_PT_R1312); \
	  R15:14 = memd(R0 + #_PT_R1514); } \
	{ R17:16 = memd(R0 + #_PT_R1716); \
	  R19:18 = memd(R0 + #_PT_R1918); } \
	{ R21:20 = memd(R0 + #_PT_R2120); \
	  R23:22 = memd(R0 + #_PT_R2322); } \
	{ R25:24 = memd(R0 + #_PT_R2524); \
	  R27:26 = memd(R0 + #_PT_R2726); } \
	R31:30 = memd(R0 + #_PT_UGPGP); \
	{ R28 = memw(R0 + #_PT_R2928); \
	  ugp = R31; } \
	{ R31:30 = memd(R0 + #_PT_R3130); \
	  gp = R30; }

	/*
	 * Clears off enough space for the rest of pt_regs; evrec is a part
	 * of pt_regs in HVM mode.  Save R0/R1, set handler's address in R1.
	 * R0 is the address of pt_regs and is the parameter to save_pt_regs.
	 */

/*
 * Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
 * we'll subtract the entire size out and then fill it in ourselves.
 * Need to save off R0, R1, R2, R3 immediately.
 */

#define	vm_event_entry(CHandler) \
	{ \
		R29 = add(R29, #-(_PT_REGS_SIZE)); \
		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
	} \
	{ \
		memd(R29 +#_PT_R0302) = R3:2; \
	} \
	trap1(#HVM_TRAP1_VMGETREGS); \
	{ \
		memd(R29 + #_PT_ER_VMEL) = R1:0; \
		R0 = R29; \
		R1.L = #LO(CHandler); \
	} \
	{ \
		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
		R1.H = #HI(CHandler); \
		jump event_dispatch; \
	}

.text
	/*
	 * Do bulk save/restore in one place.
	 * Adds a jump to dispatch latency, but
	 * saves hundreds of bytes.
	 */

event_dispatch:
	save_pt_regs()
	callr	r1

	/*
	 * If we were in kernel mode, we don't need to check scheduler
	 * or signals if CONFIG_PREEMPT is not set.  If set, then it has
	 * to jump to a need_resched kind of block.
	 * BTW, CONFIG_PREEMPT is not supported yet.
	 */

#ifdef CONFIG_PREEMPT
	R0 = #VM_INT_DISABLE
	trap1(#HVM_TRAP1_VMSETIE)
#endif

	/*  "Nested control path" -- if the previous mode was kernel  */
	R0 = memw(R29 + #_PT_ER_VMEST);
	P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
	if !P0 jump restore_all;
	/*
	 * Returning from system call, normally coming back from user mode
	 */
return_from_syscall:
	/*  Disable interrupts while checking TIF  */
	R0 = #VM_INT_DISABLE
	trap1(#HVM_TRAP1_VMSETIE)

	/*
	 * Coming back from the C-world, our thread info pointer
	 * should be in the designated register (usually R19)
	 */
	R1.L = #LO(_TIF_ALLWORK_MASK)
	{
		R1.H = #HI(_TIF_ALLWORK_MASK);
		R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
	}

	/*
	 * Compare against the "return to userspace" _TIF_WORK_MASK
	 */
	R1 = and(R1,R0);
	{ P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
	jump restore_all;  /*  we're outta here!  */

work_pending:
	{
		P0 = tstbit(R1, #TIF_NEED_RESCHED);
		if (!P0.new) jump:nt work_notifysig;
	}
	call schedule
	jump return_from_syscall;  /*  check for more work  */

work_notifysig:
	/*  this is the part that's kind of fuzzy.  */
	R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
	P0 = cmp.eq(R1, #0);
	if P0 jump restore_all
	R1 = R0; 	/* unsigned long thread_info_flags */
	R0 = R29;	/* regs should still be at top of stack  */
	call do_notify_resume

restore_all:
	/*  Disable interrupts, if they weren't already, before reg restore.  */
	R0 = #VM_INT_DISABLE
	trap1(#HVM_TRAP1_VMSETIE)

	/*  do the setregs here for VM 0.5  */
	/*  R29 here should already be pointing at pt_regs  */
	R1:0 = memd(R29 + #_PT_ER_VMEL);
	R3:2 = memd(R29 + #_PT_ER_VMPSP);
	trap1(#HVM_TRAP1_VMSETREGS);

	R0 = R29
	restore_pt_regs()
	R1:0 = memd(R29 + #_PT_R0100);
	R29 = add(R29, #_PT_REGS_SIZE);
	trap1(#HVM_TRAP1_VMRTE)
	/*  Notreached  */

	.globl _K_enter_genex
_K_enter_genex:
	vm_event_entry(do_genex)

	.globl _K_enter_interrupt
_K_enter_interrupt:
	vm_event_entry(arch_do_IRQ)

	.globl _K_enter_trap0
_K_enter_trap0:
	vm_event_entry(do_trap0)

	.globl _K_enter_machcheck
_K_enter_machcheck:
	vm_event_entry(do_machcheck)


	.globl ret_from_fork
ret_from_fork:
	call schedule_tail
	jump return_from_syscall
+101
arch/hexagon/kernel/vm_events.c
/*
 * Mostly IRQ support for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <asm/registers.h>
#include <linux/irq.h>
#include <linux/hardirq.h>
#include <asm/system.h>

/*
 * show_regs - print pt_regs structure
 * @regs: pointer to pt_regs
 *
 * To-do:  add all the accessor definitions to registers.h
 *
 * Will make this routine a lot easier to write.
 */
void show_regs(struct pt_regs *regs)
{
	printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
	       regs->restart_r0, regs->syscall_nr);
	printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
	printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
	       regs->lc0, regs->sa0, regs->m0);
	printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
	       regs->lc1, regs->sa1, regs->m1);
	printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
	       regs->gp, regs->ugp, regs->usr);
	printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
	       regs->r01,
	       regs->r02,
	       regs->r03);
	printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
	       regs->r05,
	       regs->r06,
	       regs->r07);
	printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
	       regs->r09,
	       regs->r10,
	       regs->r11);
	printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
	       regs->r13,
	       regs->r14,
	       regs->r15);
	printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
	       regs->r17,
	       regs->r18,
	       regs->r19);
	printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
	       regs->r21,
	       regs->r22,
	       regs->r23);
	printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
	       regs->r25,
	       regs->r26,
	       regs->r27);
	printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
	       regs->r29,
	       regs->r30,
	       regs->r31);

	printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
	       pt_elr(regs), pt_cause(regs), user_mode(regs));
	printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
	       pt_psp(regs), pt_badva(regs), ints_enabled(regs));
}

/*
 * Placeholder for event vectors with no real handler yet; just logs
 * the faulting PC.  NOTE(review): elr is narrowed to unsigned int for
 * the %08x format -- fine for 32-bit Hexagon, but worth confirming if
 * pt_elr() ever returns a wider type.
 */
void dummy_handler(struct pt_regs *regs)
{
	unsigned int elr = pt_elr(regs);
	printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
}


/*
 * Interrupt entry point called from vm_entry.S.  The HVM event record
 * cause field carries the IRQ number; hand it to the generic IRQ layer
 * inside the usual irq_enter()/irq_exit() bracket.
 */
void arch_do_IRQ(struct pt_regs *regs)
{
	int irq = pt_cause(regs);
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}
+442
arch/hexagon/kernel/vm_init_segtable.S
··· 1 + /* 2 + * Initial page table for Linux kernel under Hexagon VM, 3 + * 4 + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 and 8 + * only version 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301, USA. 19 + */ 20 + 21 + /* 22 + * These tables are pre-computed and linked into kernel. 23 + */ 24 + 25 + #include <asm/vm_mmu.h> 26 + /* #include <asm/iomap.h> */ 27 + 28 + /* 29 + * Start with mapping PA=0 to both VA=0x0 and VA=0xc000000 as 16MB large pages. 30 + * No user mode access, RWX, write-back cache. The entry needs 31 + * to be replicated for all 4 virtual segments mapping to the page. 
32 + */ 33 + 34 + /* "Big Kernel Page" */ 35 + #define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \ 36 + | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \ 37 + | __HEXAGON_C_WB_L2 << 6 \ 38 + | __HVM_PDE_S_16MB) 39 + 40 + /* No cache version */ 41 + 42 + #define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \ 43 + | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \ 44 + | __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 ) 45 + 46 + #define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \ 47 + | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \ 48 + | __HEXAGON_C_DEV << 6 ) 49 + 50 + #define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \ 51 + | __HVM_PDE_S_4KB ) 52 + 53 + #define X __HVM_PDE_S_INVALID 54 + 55 + .p2align 12 56 + .globl swapper_pg_dir 57 + .globl _K_init_segtable 58 + swapper_pg_dir: 59 + /* VA 0x00000000 */ 60 + .word X,X,X,X 61 + .word X,X,X,X 62 + .word X,X,X,X 63 + .word X,X,X,X 64 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 65 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 66 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 67 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 68 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 69 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 70 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 71 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 72 + /* VA 0x40000000 */ 73 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 74 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 75 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 76 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 77 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 78 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 79 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 80 + .word 
X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 81 + /* VA 0x80000000 */ 82 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 83 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 84 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 85 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 86 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 87 + /*0xa8*/.word X,X,X,X 88 + #ifdef CONFIG_COMET_EARLY_UART_DEBUG 89 + UART_PTE_ENTRY: 90 + /*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000) 91 + #else 92 + /*0xa9*/.word X,X,X,X 93 + #endif 94 + /*0xaa*/.word X,X,X,X 95 + /*0xab*/.word X,X,X,X 96 + /*0xac*/.word X,X,X,X 97 + /*0xad*/.word X,X,X,X 98 + /*0xae*/.word X,X,X,X 99 + /*0xaf*/.word X,X,X,X 100 + /*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 101 + .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X 102 + _K_init_segtable: 103 + /* VA 0xC0000000 */ 104 + .word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000) 105 + .word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000) 106 + .word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000) 107 + .word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000) 108 + .word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000) 109 + .word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000) 110 + .word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000) 111 + .word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000) 112 + 113 + .word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000) 114 + .word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000) 115 + .word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000) 116 + .word BKP(0x0b000000), BKP(0x0b400000), 
BKP(0x0b800000), BKP(0x0bc00000) 117 + .word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000) 118 + .word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000) 119 + .word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000) 120 + .word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000) 121 + 122 + .word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000) 123 + .word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000) 124 + .word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000) 125 + .word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000) 126 + .word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000) 127 + .word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000) 128 + .word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000) 129 + .word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000) 130 + 131 + .word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000) 132 + .word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000) 133 + .word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000) 134 + .word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000) 135 + .word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000) 136 + .word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000) 137 + .word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000) 138 + .word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000) 139 + 140 + .word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000) 141 + .word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000) 142 + .word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000) 143 + .word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000) 144 + .word BKP(0x24000000), BKP(0x24400000), 
BKP(0x24800000), BKP(0x24c00000) 145 + .word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000) 146 + .word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000) 147 + .word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000) 148 + 149 + .word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000) 150 + .word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000) 151 + .word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000) 152 + .word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000) 153 + .word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000) 154 + .word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000) 155 + .word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000) 156 + .word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000) 157 + 158 + .word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000) 159 + .word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000) 160 + .word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000) 161 + .word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000) 162 + .word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000) 163 + .word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000) 164 + .word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000) 165 + .word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000) 166 + 167 + .word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000) 168 + .word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000) 169 + .word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000) 170 + .word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000) 171 + .word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000) 172 + .word BKP(0x3d000000), BKP(0x3d400000), 
BKP(0x3d800000), BKP(0x3dc00000) 173 + _K_io_map: 174 + .word X,X,X,X /* 0x3e000000 - device IO early remap */ 175 + .word X,X,X,X /* 0x3f000000 - hypervisor space*/ 176 + 177 + #if 0 178 + /* 179 + * This is in here as an example for devices which need to be mapped really 180 + * early. 181 + */ 182 + .p2align 12 183 + .globl _K_io_kmap 184 + .globl _K_init_devicetable 185 + _K_init_devicetable: /* Should be 4MB worth of entries */ 186 + .word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X 187 + .word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X 188 + .word X,X,X,X 189 + .word X,X,X,X 190 + .word X,X,X,X 191 + .word X,X,X,X 192 + .word X,X,X,X 193 + .word X,X,X,X 194 + .word X,X,X,X 195 + .word X,X,X,X 196 + .word X,X,X,X 197 + .word X,X,X,X 198 + .word X,X,X,X 199 + .word X,X,X,X 200 + .word X,X,X,X 201 + .word X,X,X,X 202 + .word X,X,X,X 203 + .word X,X,X,X 204 + .word X,X,X,X 205 + .word X,X,X,X 206 + .word X,X,X,X 207 + .word X,X,X,X 208 + .word X,X,X,X 209 + .word X,X,X,X 210 + .word X,X,X,X 211 + .word X,X,X,X 212 + .word X,X,X,X 213 + .word X,X,X,X 214 + .word X,X,X,X 215 + .word X,X,X,X 216 + .word X,X,X,X 217 + .word X,X,X,X 218 + .word X,X,X,X 219 + .word X,X,X,X 220 + .word X,X,X,X 221 + .word X,X,X,X 222 + .word X,X,X,X 223 + .word X,X,X,X 224 + .word X,X,X,X 225 + .word X,X,X,X 226 + .word X,X,X,X 227 + .word X,X,X,X 228 + .word X,X,X,X 229 + .word X,X,X,X 230 + .word X,X,X,X 231 + .word X,X,X,X 232 + .word X,X,X,X 233 + .word X,X,X,X 234 + .word X,X,X,X 235 + .word X,X,X,X 236 + .word X,X,X,X 237 + .word X,X,X,X 238 + .word X,X,X,X 239 + .word X,X,X,X 240 + .word X,X,X,X 241 + .word X,X,X,X 242 + .word X,X,X,X 243 + .word X,X,X,X 244 + .word X,X,X,X 245 + .word X,X,X,X 246 + .word X,X,X,X 247 + .word X,X,X,X 248 + .word X,X,X,X 249 + .word X,X,X,X 250 + .word X,X,X,X 251 + .word X,X,X,X 252 + .word X,X,X,X 253 + .word X,X,X,X 254 + .word X,X,X,X 255 + .word X,X,X,X 256 + .word X,X,X,X 257 + .word X,X,X,X 258 + .word X,X,X,X 259 + 
.word X,X,X,X 260 + .word X,X,X,X 261 + .word X,X,X,X 262 + .word X,X,X,X 263 + .word X,X,X,X 264 + .word X,X,X,X 265 + .word X,X,X,X 266 + .word X,X,X,X 267 + .word X,X,X,X 268 + .word X,X,X,X 269 + .word X,X,X,X 270 + .word X,X,X,X 271 + .word X,X,X,X 272 + .word X,X,X,X 273 + .word X,X,X,X 274 + .word X,X,X,X 275 + .word X,X,X,X 276 + .word X,X,X,X 277 + .word X,X,X,X 278 + .word X,X,X,X 279 + .word X,X,X,X 280 + .word X,X,X,X 281 + .word X,X,X,X 282 + .word X,X,X,X 283 + .word X,X,X,X 284 + .word X,X,X,X 285 + .word X,X,X,X 286 + .word X,X,X,X 287 + .word X,X,X,X 288 + .word X,X,X,X 289 + .word X,X,X,X 290 + .word X,X,X,X 291 + .word X,X,X,X 292 + .word X,X,X,X 293 + .word X,X,X,X 294 + .word X,X,X,X 295 + .word X,X,X,X 296 + .word X,X,X,X 297 + .word X,X,X,X 298 + .word X,X,X,X 299 + .word X,X,X,X 300 + .word X,X,X,X 301 + .word X,X,X,X 302 + .word X,X,X,X 303 + .word X,X,X,X 304 + .word X,X,X,X 305 + .word X,X,X,X 306 + .word X,X,X,X 307 + .word X,X,X,X 308 + .word X,X,X,X 309 + .word X,X,X,X 310 + .word X,X,X,X 311 + .word X,X,X,X 312 + .word X,X,X,X 313 + .word X,X,X,X 314 + .word X,X,X,X 315 + .word X,X,X,X 316 + .word X,X,X,X 317 + .word X,X,X,X 318 + .word X,X,X,X 319 + .word X,X,X,X 320 + .word X,X,X,X 321 + .word X,X,X,X 322 + .word X,X,X,X 323 + .word X,X,X,X 324 + .word X,X,X,X 325 + .word X,X,X,X 326 + .word X,X,X,X 327 + .word X,X,X,X 328 + .word X,X,X,X 329 + .word X,X,X,X 330 + .word X,X,X,X 331 + .word X,X,X,X 332 + .word X,X,X,X 333 + .word X,X,X,X 334 + .word X,X,X,X 335 + .word X,X,X,X 336 + .word X,X,X,X 337 + .word X,X,X,X 338 + .word X,X,X,X 339 + .word X,X,X,X 340 + .word X,X,X,X 341 + .word X,X,X,X 342 + .word X,X,X,X 343 + .word X,X,X,X 344 + .word X,X,X,X 345 + .word X,X,X,X 346 + .word X,X,X,X 347 + .word X,X,X,X 348 + .word X,X,X,X 349 + .word X,X,X,X 350 + .word X,X,X,X 351 + .word X,X,X,X 352 + .word X,X,X,X 353 + .word X,X,X,X 354 + .word X,X,X,X 355 + .word X,X,X,X 356 + .word X,X,X,X 357 + .word X,X,X,X 358 + .word X,X,X,X 359 + 
.word X,X,X,X 360 + .word X,X,X,X 361 + .word X,X,X,X 362 + .word X,X,X,X 363 + .word X,X,X,X 364 + .word X,X,X,X 365 + .word X,X,X,X 366 + .word X,X,X,X 367 + .word X,X,X,X 368 + .word X,X,X,X 369 + .word X,X,X,X 370 + .word X,X,X,X 371 + .word X,X,X,X 372 + .word X,X,X,X 373 + .word X,X,X,X 374 + .word X,X,X,X 375 + .word X,X,X,X 376 + .word X,X,X,X 377 + .word X,X,X,X 378 + .word X,X,X,X 379 + .word X,X,X,X 380 + .word X,X,X,X 381 + .word X,X,X,X 382 + .word X,X,X,X 383 + .word X,X,X,X 384 + .word X,X,X,X 385 + .word X,X,X,X 386 + .word X,X,X,X 387 + .word X,X,X,X 388 + .word X,X,X,X 389 + .word X,X,X,X 390 + .word X,X,X,X 391 + .word X,X,X,X 392 + .word X,X,X,X 393 + .word X,X,X,X 394 + .word X,X,X,X 395 + .word X,X,X,X 396 + .word X,X,X,X 397 + .word X,X,X,X 398 + .word X,X,X,X 399 + .word X,X,X,X 400 + .word X,X,X,X 401 + .word X,X,X,X 402 + .word X,X,X,X 403 + .word X,X,X,X 404 + .word X,X,X,X 405 + .word X,X,X,X 406 + .word X,X,X,X 407 + .word X,X,X,X 408 + .word X,X,X,X 409 + .word X,X,X,X 410 + .word X,X,X,X 411 + .word X,X,X,X 412 + .word X,X,X,X 413 + .word X,X,X,X 414 + .word X,X,X,X 415 + .word X,X,X,X 416 + .word X,X,X,X 417 + .word X,X,X,X 418 + .word X,X,X,X 419 + .word X,X,X,X 420 + .word X,X,X,X 421 + .word X,X,X,X 422 + .word X,X,X,X 423 + .word X,X,X,X 424 + .word X,X,X,X 425 + .word X,X,X,X 426 + .word X,X,X,X 427 + .word X,X,X,X 428 + .word X,X,X,X 429 + .word X,X,X,X 430 + .word X,X,X,X 431 + .word X,X,X,X 432 + .word X,X,X,X 433 + .word X,X,X,X 434 + .word X,X,X,X 435 + .word X,X,X,X 436 + .word X,X,X,X 437 + .word X,X,X,X 438 + .word X,X,X,X 439 + .word X,X,X,X 440 + .word X,X,X,X 441 + .word X,X,X,X 442 + #endif
+102
arch/hexagon/kernel/vm_ops.S
··· 1 + /* 2 + * Hexagon VM instruction support 3 + * 4 + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 and 8 + * only version 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301, USA. 19 + */ 20 + 21 + #include <linux/linkage.h> 22 + #include <asm/hexagon_vm.h> 23 + 24 + /* 25 + * C wrappers for virtual machine "instructions". These 26 + * could be, and perhaps some day will be, handled as in-line 27 + * macros, but for tracing/debugging it's handy to have 28 + * a single point of invocation for each of them. 29 + * Conveniently, they take paramters and return values 30 + * consistent with the ABI calling convention. 
31 + */ 32 + 33 + ENTRY(__vmrte) 34 + trap1(#HVM_TRAP1_VMRTE); 35 + jumpr R31; 36 + 37 + ENTRY(__vmsetvec) 38 + trap1(#HVM_TRAP1_VMSETVEC); 39 + jumpr R31; 40 + 41 + ENTRY(__vmsetie) 42 + trap1(#HVM_TRAP1_VMSETIE); 43 + jumpr R31; 44 + 45 + ENTRY(__vmgetie) 46 + trap1(#HVM_TRAP1_VMGETIE); 47 + jumpr R31; 48 + 49 + ENTRY(__vmintop) 50 + trap1(#HVM_TRAP1_VMINTOP); 51 + jumpr R31; 52 + 53 + ENTRY(__vmclrmap) 54 + trap1(#HVM_TRAP1_VMCLRMAP); 55 + jumpr R31; 56 + 57 + ENTRY(__vmnewmap) 58 + r1 = #VM_NEWMAP_TYPE_PGTABLES; 59 + trap1(#HVM_TRAP1_VMNEWMAP); 60 + jumpr R31; 61 + 62 + ENTRY(__vmcache) 63 + trap1(#HVM_TRAP1_VMCACHE); 64 + jumpr R31; 65 + 66 + ENTRY(__vmgettime) 67 + trap1(#HVM_TRAP1_VMGETTIME); 68 + jumpr R31; 69 + 70 + ENTRY(__vmsettime) 71 + trap1(#HVM_TRAP1_VMSETTIME); 72 + jumpr R31; 73 + 74 + ENTRY(__vmwait) 75 + trap1(#HVM_TRAP1_VMWAIT); 76 + jumpr R31; 77 + 78 + ENTRY(__vmyield) 79 + trap1(#HVM_TRAP1_VMYIELD); 80 + jumpr R31; 81 + 82 + ENTRY(__vmstart) 83 + trap1(#HVM_TRAP1_VMSTART); 84 + jumpr R31; 85 + 86 + ENTRY(__vmstop) 87 + trap1(#HVM_TRAP1_VMSTOP); 88 + jumpr R31; 89 + 90 + ENTRY(__vmvpid) 91 + trap1(#HVM_TRAP1_VMVPID); 92 + jumpr R31; 93 + 94 + /* Probably not actually going to use these; see vm_entry.S */ 95 + 96 + ENTRY(__vmsetregs) 97 + trap1(#HVM_TRAP1_VMSETREGS); 98 + jumpr R31; 99 + 100 + ENTRY(__vmgetregs) 101 + trap1(#HVM_TRAP1_VMGETREGS); 102 + jumpr R31;
+95
arch/hexagon/kernel/vm_switch.S
··· 1 + /* 2 + * Context switch support for Hexagon 3 + * 4 + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 and 8 + * only version 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301, USA. 19 + */ 20 + 21 + #include <asm/asm-offsets.h> 22 + 23 + .text 24 + 25 + /* 26 + * The register used as a fast-path thread information pointer 27 + * is determined as a kernel configuration option. If it happens 28 + * to be a callee-save register, we're going to be saving and 29 + * restoring it twice here. 30 + * 31 + * This code anticipates a revised ABI where R20-23 are added 32 + * to the set of callee-save registers, but this should be 33 + * backward compatible to legacy tools. 34 + */ 35 + 36 + 37 + /* 38 + * void switch_to(struct task_struct *prev, 39 + * struct task_struct *next, struct task_struct *last); 40 + */ 41 + .p2align 2 42 + .globl __switch_to 43 + .type __switch_to, @function 44 + 45 + /* 46 + * When we exit the wormhole, we need to store the previous task 47 + * in the new R0's pointer. Technically it should be R2, but they should 48 + * be the same; seems like a legacy thing. In short, don't butcher 49 + * R0, let it go back out unmolested. 50 + */ 51 + 52 + __switch_to: 53 + /* 54 + * Push callee-saves onto "prev" stack. 
55 + * Here, we're sneaky because the LR and FP 56 + * storage of the thread_stack structure 57 + * is automagically allocated by allocframe, 58 + * so we pass struct size less 8. 59 + */ 60 + allocframe(#(_SWITCH_STACK_SIZE - 8)); 61 + memd(R29+#(_SWITCH_R2726))=R27:26; 62 + memd(R29+#(_SWITCH_R2524))=R25:24; 63 + memd(R29+#(_SWITCH_R2322))=R23:22; 64 + memd(R29+#(_SWITCH_R2120))=R21:20; 65 + memd(R29+#(_SWITCH_R1918))=R19:18; 66 + memd(R29+#(_SWITCH_R1716))=R17:16; 67 + /* Stash thread_info pointer in task_struct */ 68 + memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG; 69 + memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29; 70 + /* Switch to "next" stack and restore callee saves from there */ 71 + R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)); 72 + { 73 + R27:26 = memd(R29+#(_SWITCH_R2726)); 74 + R25:24 = memd(R29+#(_SWITCH_R2524)); 75 + } 76 + { 77 + R23:22 = memd(R29+#(_SWITCH_R2322)); 78 + R21:20 = memd(R29+#(_SWITCH_R2120)); 79 + } 80 + { 81 + R19:18 = memd(R29+#(_SWITCH_R1918)); 82 + R17:16 = memd(R29+#(_SWITCH_R1716)); 83 + } 84 + { 85 + /* THREADINFO_REG is currently one of the callee-saved regs 86 + * above, and so be sure to re-load it last. 87 + */ 88 + THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO); 89 + R31:30 = memd(R29+#_SWITCH_FP); 90 + } 91 + { 92 + R29 = add(R29,#_SWITCH_STACK_SIZE); 93 + jumpr R31; 94 + } 95 + .size __switch_to, .-__switch_to
+48
arch/hexagon/kernel/vm_vectors.S
··· 1 + /* 2 + * Event jump tables 3 + * 4 + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 and 8 + * only version 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301, USA. 19 + */ 20 + 21 + #include <asm/hexagon_vm.h> 22 + 23 + .text 24 + 25 + /* This is registered early on to allow angel */ 26 + .global _K_provisional_vec 27 + _K_provisional_vec: 28 + jump 1f; 29 + jump 1f; 30 + jump 1f; 31 + jump 1f; 32 + jump 1f; 33 + trap1(#HVM_TRAP1_VMRTE) 34 + jump 1f; 35 + jump 1f; 36 + 37 + 38 + .global _K_VM_event_vector 39 + _K_VM_event_vector: 40 + 1: 41 + jump 1b; /* Reset */ 42 + jump _K_enter_machcheck; 43 + jump _K_enter_genex; 44 + jump 1b; /* 3 Rsvd */ 45 + jump 1b; /* 4 Rsvd */ 46 + jump _K_enter_trap0; 47 + jump 1b; /* 6 Rsvd */ 48 + jump _K_enter_interrupt;