Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Register definitions for the Hexagon architecture
3 */
4
5
6#ifndef _ASM_REGISTERS_H
7#define _ASM_REGISTERS_H
8
/* Convenient alias: on Hexagon the stack pointer is general register r29. */
#define SP r29
10
11#ifndef __ASSEMBLY__
12
13/* See kernel/entry.S for further documentation. */
14
/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 *
 * NOTE(review): the field order must match what the entry code stores —
 * do not rearrange; see kernel/entry.S.
 */

struct hvm_event_record {
	unsigned long vmel;	/* Event Linkage (return address) */
	unsigned long vmest;	/* Event context - pre-event SSR values */
	unsigned long vmpsp;	/* Previous stack pointer */
	unsigned long vmbadva;	/* Bad virtual address for addressing events */
};
26
/*
 * Saved processor state of an interrupted/trapped context.
 *
 * Each anonymous union overlays two adjacent 32-bit register slots with a
 * single 64-bit alias (low-numbered register in the first word), so a pair
 * can also be accessed as one double word — presumably so the save/restore
 * code can move register pairs with double-word operations; see
 * kernel/entry.S to confirm.
 */
struct pt_regs {
	long restart_r0;	/* R0 checkpoint for syscall restart */
	long syscall_nr;	/* Only used in system calls */
	/* User status register paired with the predicate registers. */
	union {
		struct {
			unsigned long usr;
			unsigned long preds;
		};
		long long int predsusr;
	};
	/* Modifier registers M0/M1. */
	union {
		struct {
			unsigned long m0;
			unsigned long m1;
		};
		long long int m1m0;
	};
	/* Hardware loop 1 registers (sa1/lc1). */
	union {
		struct {
			unsigned long sa1;
			unsigned long lc1;
		};
		long long int lc1sa1;
	};
	/* Hardware loop 0 registers (sa0/lc0). */
	union {
		struct {
			unsigned long sa0;
			unsigned long lc0;
		};
		long long int lc0sa0;
	};
	/* User and kernel global pointers. */
	union {
		struct {
			unsigned long ugp;
			unsigned long gp;
		};
		long long int gpugp;
	};
	/* Circular-buffer start registers CS0/CS1. */
	union {
		struct {
			unsigned long cs0;
			unsigned long cs1;
		};
		long long int cs1cs0;
	};
	/*
	 * Be extremely careful with rearranging these, if at all. Some code
	 * assumes the 32 registers exist exactly like this in memory;
	 * e.g. kernel/ptrace.c
	 * e.g. kernel/signal.c (restore_sigcontext)
	 */
	union {
		struct {
			unsigned long r00;
			unsigned long r01;
		};
		long long int r0100;
	};
	union {
		struct {
			unsigned long r02;
			unsigned long r03;
		};
		long long int r0302;
	};
	union {
		struct {
			unsigned long r04;
			unsigned long r05;
		};
		long long int r0504;
	};
	union {
		struct {
			unsigned long r06;
			unsigned long r07;
		};
		long long int r0706;
	};
	union {
		struct {
			unsigned long r08;
			unsigned long r09;
		};
		long long int r0908;
	};
	union {
		struct {
			unsigned long r10;
			unsigned long r11;
		};
		long long int r1110;
	};
	union {
		struct {
			unsigned long r12;
			unsigned long r13;
		};
		long long int r1312;
	};
	union {
		struct {
			unsigned long r14;
			unsigned long r15;
		};
		long long int r1514;
	};
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		long long int r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		long long int r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		long long int r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		long long int r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		long long int r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		long long int r2726;
	};
	union {
		struct {
			unsigned long r28;
			unsigned long r29;
		};
		long long int r2928;
	};
	union {
		struct {
			unsigned long r30;
			unsigned long r31;
		};
		long long int r3130;
	};
	/* VM dispatch pushes event record onto stack - we can build on it */
	struct hvm_event_record hvmer;
};
193
/* Defines to conveniently access the values */

/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call. On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers back.
 */

/* Event return address (where execution resumes on return-from-event). */
#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
/* Event cause field extracted from the saved event status word. */
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
/* Nonzero if the user-mode bit was set in the saved event status word. */
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
/* Nonzero if interrupts were enabled in the saved event status word. */
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
/* Stack pointer of the interrupted context. */
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
/* Faulting virtual address (valid for addressing events). */
#define pt_badva(regs) ((regs)->hvmer.vmbadva)

/* Toggle the single-step bit in the saved event status word. */
#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))
216
217#define pt_set_rte_sp(regs, sp) do {\
218 pt_psp(regs) = (regs)->SP = (sp);\
219 } while (0)
220
221#define pt_set_kmode(regs) \
222 (regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
223
224#define pt_set_usermode(regs) \
225 (regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
226 | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
227
228#endif /* ifndef __ASSEMBLY */
229
230#endif