/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>
#include <asm/extable.h>

#ifdef __ASSEMBLY__

#define SZL			(BITS_PER_LONG/8)

/*
 * This expands to a sequence of operations with reg incrementing from
 * start to end inclusive, of this form:
 *
 *	op	reg, (offset + (width * reg))(base)
 *
 * Note that offset is not the offset of the first operation unless start
 * is zero (or width is zero).
 */
.macro OP_REGS op, width, start, end, base, offset
	.Lreg=\start
	.rept (\end - \start + 1)
	\op	.Lreg, \offset + \width * .Lreg(\base)
	.Lreg=.Lreg+1
	.endr
.endm
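
/*
 * Illustrative expansion (an added note, not from the original source):
 * on 64-bit, "OP_REGS std, 8, 30, 31, r1, GPR0" should unroll to
 * something like
 *
 *	std	30, GPR0 + 8 * 30(r1)
 *	std	31, GPR0 + 8 * 31(r1)
 */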

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPRS(start, end, base)	OP_REGS std, 8, start, end, base, GPR0
#define REST_GPRS(start, end, base)	OP_REGS ld, 8, start, end, base, GPR0
#define SAVE_NVGPRS(base)		SAVE_GPRS(14, 31, base)
#define REST_NVGPRS(base)		REST_GPRS(14, 31, base)
#else
#define SAVE_GPRS(start, end, base)	OP_REGS stw, 4, start, end, base, GPR0
#define REST_GPRS(start, end, base)	OP_REGS lwz, 4, start, end, base, GPR0
#define SAVE_NVGPRS(base)		SAVE_GPRS(13, 31, base)
#define REST_NVGPRS(base)		REST_GPRS(13, 31, base)
#endif

#define SAVE_GPR(n, base)		SAVE_GPRS(n, n, base)
#define REST_GPR(n, base)		REST_GPRS(n, n, base)
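
/*
 * Example usage (an added sketch, not from the original source):
 *
 *	SAVE_NVGPRS(r1)		- spill the non-volatile GPRs into the
 *				  exception frame at r1
 *	REST_GPR(3, r1)		- reload just r3 from that frame
 */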

#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)

#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)

#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif
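
/*
 * Added note: on little-endian, stxvd2x/lxvd2x access the two doublewords
 * in big-endian order, so the XXSWAPD pairs above are presumably there to
 * keep the in-memory save format identical across endiannesses while
 * leaving the register contents unchanged afterwards.
 */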
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
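
/*
 * Illustrative expansion (an added note, not from the original source;
 * THREAD_EVR0 is used as a hypothetical offset name):
 * SAVE_EVR(14, r10, r11, THREAD_EVR0) should become roughly
 *
 *	evmergehi r10, r10, 14
 *	stw	  r10, THREAD_EVR0 + 4*14(r11)
 *
 * i.e. only the upper halves are stored; the lower 32 bits of the EVRs
 * are the ordinary GPRs.
 */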

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
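
/*
 * Added note: these are all "or Rx,Rx,Rx" forms, which leave the register
 * contents unchanged; the register number encodes an SMT thread-priority
 * hint to the core rather than having any data effect.
 */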

#ifdef CONFIG_PPC64
#define ULONG_SIZE	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)
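
/*
 * Illustrative expansion (an added note, not from the original source):
 * VCPU_GPR(R3) -> (VCPU_GPRS + (3 * ULONG_SIZE)), i.e. the byte offset
 * of the guest's r3 within the vcpu GPR array.
 */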

#ifdef __KERNEL__

/*
 * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
 * version below in the else case of the ifdef.
 */
#ifdef __powerpc64__

#define STACKFRAMESIZE	256
#define __STK_REG(i)	(112 + ((i)-14)*8)
#define STK_REG(i)	__STK_REG(__REG_##i)

#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)
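
/*
 * Example usage (an added sketch, not from the original source): spilling
 * a non-volatile register and an incoming parameter into the stack frame:
 *
 *	std	r14, STK_REG(R14)(r1)
 *	std	r3, STK_PARAM(R3)(r1)
 */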

#ifdef CONFIG_PPC64_ELF_ABI_V2

#define _GLOBAL(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.pushsection ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.popsection; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _GLOBAL(n) \
	.globl n; \
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	a

#endif
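
/*
 * Example usage (an added sketch, not from the original source; my_helper
 * is a hypothetical name):
 *
 *	_GLOBAL(my_helper)
 *		addi	r3,r3,1
 *		blr
 *
 * Callers in assembly would use "bl DOTSYM(my_helper)" so the correct
 * entry symbol is used on every ABI (the dot symbol on ELFv1, the plain
 * name elsewhere).
 */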

/*
 * __kprobes (the C annotation) puts the symbol into the .kprobes.text
 * section, which gets emitted at the end of regular text.
 *
 * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
 * a blacklist. The former is for core kprobe functions/data, the
 * latter is for those that incidentally must be excluded from probing
 * and allows them to be linked at a more optimal location within text.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE_SYMBOL(entry)			\
	.pushsection "_kprobe_blacklist","aw";		\
	PPC_LONG (entry) ;				\
	.popsection
#else
#define _ASM_NOKPROBE_SYMBOL(entry)
#endif
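
/*
 * Example usage (an added sketch; early_entry is a hypothetical label):
 *
 *	_GLOBAL(early_entry)
 *		...
 *	_ASM_NOKPROBE_SYMBOL(early_entry)
 */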

#define FUNC_START(name)	_GLOBAL(name)
#define FUNC_END(name)

/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   the kernel doesn't run at the linked or relocated address.  Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */

/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bcl	20,31,$+4;			\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;
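
/*
 * Illustrative expansion (an added note; some_label is a hypothetical
 * symbol): LOAD_REG_ADDR_PIC(r5, some_label) computes the run-time
 * address of some_label relative to the current instruction, so it works
 * before the kernel has been relocated:
 *
 *	bcl	20,31,$+4		- branch-and-link to the next insn
 * 0:	mflr	r5			- r5 = run-time address of label 0
 *	addis	r5,r5,(some_label - 0b)@ha
 *	addi	r5,r5,(some_label - 0b)@l
 */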

#if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif

.macro __LOAD_REG_IMMEDIATE_32 r, x
	.if (\x) >= 0x8000 || (\x) < -0x8000
		lis \r, (\x)@__AS_ATHIGH
		.if (\x) & 0xffff != 0
			ori \r, \r, (\x)@l
		.endif
	.else
		li \r, (\x)@l
	.endif
.endm

.macro __LOAD_REG_IMMEDIATE r, x
	.if (\x) >= 0x80000000 || (\x) < -0x80000000
		__LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
		sldi	\r, \r, 32
		.if (\x) & 0xffff0000 != 0
			oris \r, \r, (\x)@__AS_ATHIGH
		.endif
		.if (\x) & 0xffff != 0
			ori \r, \r, (\x)@l
		.endif
	.else
		__LOAD_REG_IMMEDIATE_32 \r, \x
	.endif
.endm
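
/*
 * Illustrative expansion (an added note, not from the original source):
 * "__LOAD_REG_IMMEDIATE r5, 0x12345678abcd" should assemble roughly as
 *
 *	li	r5, 0x1234		- bits 32-47, via the 32-bit helper
 *	sldi	r5, r5, 32
 *	oris	r5, r5, 0x5678		- bits 16-31
 *	ori	r5, r5, 0xabcd		- bits 0-15
 */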

#ifdef __powerpc64__

#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr

#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr)	\
	lis	tmp, (expr)@highest;		\
	lis	reg, (expr)@__AS_ATHIGH;	\
	ori	tmp, tmp, (expr)@higher;	\
	ori	reg, reg, (expr)@l;		\
	rldimi	reg, tmp, 32, 0
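
/*
 * Added note: the five instructions above build the constant in two
 * 32-bit halves: tmp collects bits 32-63 (@highest then @higher), reg
 * collects bits 0-31 (@__AS_ATHIGH then @l), and the final rldimi
 * inserts tmp into the upper 32 bits of reg.
 */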

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr

#define LOAD_REG_IMMEDIATE_SYM(reg,expr)	\
	lis	reg,(expr)@ha;			\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE_SYM(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif

/* various errata or part fixups */
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
#define MFTB(dest)			MFTBL(dest)
#endif

#ifdef CONFIG_PPC_8xx
#define MFTBL(dest)			mftb dest
#define MFTBU(dest)			mftbu dest
#else
#define MFTBL(dest)			mfspr dest, SPRN_TBRL
#define MFTBU(dest)			mfspr dest, SPRN_TBRU
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else
#define TLBSYNC		tlbsync; sync
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf	(FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif

/*
 * tlbia is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
	.machine push;				\
	.machine "power4";			\
0:	tlbie	r4;				\
	.machine pop;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif


#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/*
 * The following stops all load and store data streams associated with stream
 * ID (i.e. streams created explicitly).  The embedded and server mnemonics
 * for dcbt are different so this must only be used for server.
 */
#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)	\
	lis	scratch,0x60000000@h;			\
	dcbt	0,scratch,0b01010

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (i.e. always do the addition) on 64-bit BookE.
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
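
/*
 * Added note: tovirt above rotates the top 16 address bits down to the
 * bottom, ORs in the top 16 bits of KERNELBASE (assuming the real
 * address has zeroes there), then rotates them back to the top.
 */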
#else
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd, rs)	addis	rd, rs, -PAGE_OFFSET@h
#define tovirt(rd, rs)	addis	rd, rs, PAGE_OFFSET@h
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#endif

#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7


/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-31 only when really necessary.
 */

#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31


/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

#define RFSCV	.long 0x4c0000a4

/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * Read with the wrong endianness, however, the opcode of this
 * instruction results in a "b . + 8".
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */
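
/*
 * Worked example (an added note, not from the original source):
 * "tdi 0,0,0x48" assembles to 0x08000048; byte-reversed that is
 * 0x48000008, which decodes as "b . + 8".  A wrong-endian fetch thus
 * skips the "b 191f" below and falls into the byte-swapped trampoline,
 * while a right-endian fetch treats the tdi as a nop and branches over
 * the trampoline.
 */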

#ifdef CONFIG_PPC_BOOK3E
#define FIXUP_ENDIAN
#else
/*
 * This version may be used in HV or non-HV context.
 * MSR[EE] must be disabled.
 */
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x00004039; /* li r10,0				*/ \
	.long 0x6401417d; /* mtmsrd r10,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
	.long 0x2400004c; /* rfid				*/ \
191:

/*
 * This version may only be used with MSR[HV]=1
 * - Does not clear MSR[RI], so it is more robust.
 * - Slightly smaller and faster.
 */
#define FIXUP_ENDIAN_HV						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa64b5a7d; /* mthsrr0 r10			*/ \
	.long 0xa64b7b7d; /* mthsrr1 r11			*/ \
	.long 0x2402004c; /* hrfid				*/ \
191:

#endif /* !CONFIG_PPC_BOOK3E */

#endif /* __ASSEMBLY__ */

#define SOFT_MASK_TABLE(_start, _end)			\
	stringify_in_c(.section __soft_mask_table,"a";)	\
	stringify_in_c(.balign 8;)			\
	stringify_in_c(.llong (_start);)		\
	stringify_in_c(.llong (_end);)			\
	stringify_in_c(.previous)

#define RESTART_TABLE(_start, _end, _target)		\
	stringify_in_c(.section __restart_table,"a";)	\
	stringify_in_c(.balign 8;)			\
	stringify_in_c(.llong (_start);)		\
	stringify_in_c(.llong (_end);)			\
	stringify_in_c(.llong (_target);)		\
	stringify_in_c(.previous)

#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg)			\
	lis reg,BUCSR_INIT@h;		\
	ori reg,reg,BUCSR_INIT@l;	\
	mtspr SPRN_BUCSR,reg;		\
	isync;
#else
#define BTB_FLUSH(reg)
#endif /* CONFIG_PPC_FSL_BOOK3E */

#endif /* _ASM_POWERPC_PPC_ASM_H */