/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x) AARCH64_INSN_REG_##x
#define A64_FP AARCH64_INSN_REG_FP
#define A64_LR AARCH64_INSN_REG_LR
#define A64_ZR AARCH64_INSN_REG_ZR
#define A64_SP AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
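/*
 * Throughout this file "sf" selects the operand size: sf = 1 generates
 * the 64-bit (Xn register) form of an instruction, sf = 0 the 32-bit
 * (Wn register) form.
 */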

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
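/*
 * Branch immediates are given in instructions; the "<< 2" converts them
 * to the byte offsets taken by the insn generators (which are passed
 * pc = 0, so the offset is encoded as-is). For example,
 * A64_CBZ(1, A64_R(0), 2) branches two instructions forward if x0 is 0.
 */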

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
	AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
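/* A64_BL also writes the return address to the link register (x30). */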

/* Unconditional branch (register) */
#define A64_BR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
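/* For example, A64_RET(A64_LR) returns to the address held in x30. */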

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD)
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
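/*
 * The address is Xn + Xm. The 8/16/32-bit forms take a W register for
 * the data; the SIGNED_LOAD variants (LDRSB/LDRSH/LDRSW) sign-extend
 * the loaded value into the 64-bit destination, hence the Xt parameter.
 */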

/* Load/store register (immediate offset) */
#define A64_LS_IMM(Rt, Rn, imm, size, type) \
	aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD)
#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD)
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD)
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
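/*
 * "imm" is a byte offset. In this unsigned-offset form it must be
 * non-negative, a multiple of the access size and small enough to fit
 * the scaled 12-bit immediate field.
 */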

/* LDR (literal) */
#define A64_LDR32LIT(Wt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Wt, false)
#define A64_LDR64LIT(Xt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Xt, true)
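/*
 * "offset" is a byte offset relative to the LDR instruction itself
 * (the load is PC-relative) and must be a multiple of 4.
 */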

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
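/*
 * For example, a prologue that saves the frame pointer and link
 * register can emit A64_PUSH(A64_FP, A64_LR, A64_SP), matched by
 * A64_POP(A64_FP, A64_LR, A64_SP) in the epilogue.
 */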

/* Load/store exclusive */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_LDST_STORE_REL_EX)
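/*
 * For STXR/STLXR, Rs receives the exclusive-store status: 0 on success,
 * 1 if the store lost the exclusive monitor and the whole
 * load-exclusive/store-exclusive sequence must be retried (typically
 * LDXR, modify, STLXR, then CBNZ on Rs).
 */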

/* Load-acquire & store-release */
#define A64_LDAR(Rt, Rn, size) \
	aarch64_insn_gen_load_acq_store_rel(Rt, Rn, AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_LOAD_ACQ)
#define A64_STLR(Rt, Rn, size) \
	aarch64_insn_gen_load_acq_store_rel(Rt, Rn, AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_STORE_REL)

/* Rt = [Rn] (load acquire) */
#define A64_LDARB(Wt, Xn) A64_LDAR(Wt, Xn, 8)
#define A64_LDARH(Wt, Xn) A64_LDAR(Wt, Xn, 16)
#define A64_LDAR32(Wt, Xn) A64_LDAR(Wt, Xn, 32)
#define A64_LDAR64(Xt, Xn) A64_LDAR(Xt, Xn, 64)

/* [Rn] = Rt (store release) */
#define A64_STLRB(Wt, Xn) A64_STLR(Wt, Xn, 8)
#define A64_STLRH(Wt, Xn) A64_STLR(Wt, Xn, 16)
#define A64_STLR32(Wt, Xn) A64_STLR(Wt, Xn, 32)
#define A64_STLR64(Xt, Xn) A64_STLR(Xt, Xn, 64)

/*
 * LSE atomics
 *
 * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
 * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
 */
#define A64_ST_OP(sf, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)
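/*
 * Unlike the LD* forms below, these ST* forms return no old value and
 * carry no acquire/release ordering (AARCH64_INSN_MEM_ORDER_NONE).
 */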

#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rt = CAS(Rn, Rt, Rs) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_MEM_ORDER_ACQREL)
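/*
 * CASAL compares [Rn] with Rt and, if they are equal, stores Rs to
 * [Rn]; in either case Rt is updated with the value read from memory.
 */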

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
/* Rn + imm12; set condition flags */
#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
/* Rn - imm12; set condition flags */
#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
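/*
 * Encoding MOV as ADD with a zero immediate (rather than as an ORR
 * alias) keeps it usable when either register is the stack pointer.
 */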

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({ \
	int sz = (sf) ? 64 : 32; \
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
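/*
 * These follow the architectural aliases: LSL #n is UBFM with
 * immr = -n mod size and imms = size - 1 - n, while LSR/ASR are
 * UBFM/SBFM with immr = shift and imms selecting the top bit. For
 * example, A64_LSL(1, Rd, Rn, 8) encodes UBFM Rd, Rn, #56, #55.
 */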

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Sign extend */
#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
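/*
 * A 64-bit constant is typically built with an initial A64_MOVZ (or
 * A64_MOVN for mostly-ones values) followed by an A64_MOVK for each
 * remaining non-zero 16-bit chunk, using shifts 16, 32 and 48.
 */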

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm) \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)

/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
/* Rn & imm; set condition flags */
#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
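/*
 * Logical immediates must be encodable as AArch64 "bitmask immediates"
 * (repeating patterns of a contiguous run of set bits); values that
 * cannot be encoded are rejected by the insn generator.
 */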

/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)

#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)

/* BTI */
#define A64_BTI_C A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
#define A64_NOP A64_HINT(AARCH64_INSN_HINT_NOP)

/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)

/* ADR */
#define A64_ADR(Rd, offset) \
	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)

/* MRS */
#define A64_MRS_TPIDR_EL1(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL1)
#define A64_MRS_TPIDR_EL2(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL2)
#define A64_MRS_SP_EL0(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)

/* Barriers */
#define A64_SB aarch64_insn_get_sb_value()
#define A64_DSB_NSH (aarch64_insn_get_dsb_base_value() | 0x7 << 8)
#define A64_ISB aarch64_insn_get_isb_value()
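/*
 * A64_DSB_NSH ORs the NSH option (0b0111) into the CRm field (bits
 * 11:8) of the base DSB encoding, i.e. a data synchronization barrier
 * limited to the non-shareable domain.
 */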

#endif /* _BPF_JIT_H */