Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/bpf/jit: Isolate classic BPF JIT specifics into a separate header

Break out classic BPF JIT specifics into a separate header in
preparation for eBPF JIT implementation. Note that ppc32 will still need
the classic BPF JIT.

Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Naveen N. Rao and committed by Michael Ellerman
6ac0ba5a cef1e8cd

+143 -121
+2 -119
arch/powerpc/net/bpf_jit.h
··· 1 - /* bpf_jit.h: BPF JIT compiler for PPC64 1 + /* 2 + * bpf_jit.h: BPF JIT compiler for PPC 2 3 * 3 4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation 4 5 * ··· 11 10 #ifndef _BPF_JIT_H 12 11 #define _BPF_JIT_H 13 12 14 - #ifdef CONFIG_PPC64 15 - #define BPF_PPC_STACK_R3_OFF 48 16 - #define BPF_PPC_STACK_LOCALS 32 17 - #define BPF_PPC_STACK_BASIC (48+64) 18 - #define BPF_PPC_STACK_SAVE (18*8) 19 - #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ 20 - BPF_PPC_STACK_SAVE) 21 - #define BPF_PPC_SLOWPATH_FRAME (48+64) 22 - #else 23 - #define BPF_PPC_STACK_R3_OFF 24 24 - #define BPF_PPC_STACK_LOCALS 16 25 - #define BPF_PPC_STACK_BASIC (24+32) 26 - #define BPF_PPC_STACK_SAVE (18*4) 27 - #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ 28 - BPF_PPC_STACK_SAVE) 29 - #define BPF_PPC_SLOWPATH_FRAME (24+32) 30 - #endif 31 - 32 - #define REG_SZ (BITS_PER_LONG/8) 33 - 34 - /* 35 - * Generated code register usage: 36 - * 37 - * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: 38 - * 39 - * skb r3 (Entry parameter) 40 - * A register r4 41 - * X register r5 42 - * addr param r6 43 - * r7-r10 scratch 44 - * skb->data r14 45 - * skb headlen r15 (skb->len - skb->data_len) 46 - * m[0] r16 47 - * m[...] ... 
48 - * m[15] r31 49 - */ 50 - #define r_skb 3 51 - #define r_ret 3 52 - #define r_A 4 53 - #define r_X 5 54 - #define r_addr 6 55 - #define r_scratch1 7 56 - #define r_scratch2 8 57 - #define r_D 14 58 - #define r_HL 15 59 - #define r_M 16 60 - 61 13 #ifndef __ASSEMBLY__ 62 - 63 - /* 64 - * Assembly helpers from arch/powerpc/net/bpf_jit.S: 65 - */ 66 - #define DECLARE_LOAD_FUNC(func) \ 67 - extern u8 func[], func##_negative_offset[], func##_positive_offset[] 68 - 69 - DECLARE_LOAD_FUNC(sk_load_word); 70 - DECLARE_LOAD_FUNC(sk_load_half); 71 - DECLARE_LOAD_FUNC(sk_load_byte); 72 - DECLARE_LOAD_FUNC(sk_load_byte_msh); 73 14 74 15 #ifdef CONFIG_PPC64 75 16 #define FUNCTION_DESCR_SIZE 24 ··· 72 129 #define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) 73 130 #define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) 74 131 #define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) 75 - #endif 76 - 77 - /* Convenience helpers for the above with 'far' offsets: */ 78 - #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ 79 - else { PPC_ADDIS(r, base, IMM_HA(i)); \ 80 - PPC_LBZ(r, r, IMM_L(i)); } } while(0) 81 - 82 - #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ 83 - else { PPC_ADDIS(r, base, IMM_HA(i)); \ 84 - PPC_LD(r, r, IMM_L(i)); } } while(0) 85 - 86 - #define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ 87 - else { PPC_ADDIS(r, base, IMM_HA(i)); \ 88 - PPC_LWZ(r, r, IMM_L(i)); } } while(0) 89 - 90 - #define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ 91 - else { PPC_ADDIS(r, base, IMM_HA(i)); \ 92 - PPC_LHZ(r, r, IMM_L(i)); } } while(0) 93 - 94 - #ifdef CONFIG_PPC64 95 - #define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) 96 - #else 97 - #define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0) 98 - #endif 99 - 100 - #ifdef CONFIG_SMP 101 - #ifdef CONFIG_PPC64 102 - #define 
PPC_BPF_LOAD_CPU(r) \ 103 - do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \ 104 - PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ 105 - } while (0) 106 - #else 107 - #define PPC_BPF_LOAD_CPU(r) \ 108 - do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \ 109 - PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \ 110 - offsetof(struct thread_info, cpu)); \ 111 - } while(0) 112 - #endif 113 - #else 114 - #define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0) 115 132 #endif 116 133 117 134 #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) ··· 176 273 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0) 177 274 #endif 178 275 179 - #define PPC_LHBRX_OFFS(r, base, i) \ 180 - do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) 181 - #ifdef __LITTLE_ENDIAN__ 182 - #define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i) 183 - #else 184 - #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) 185 - #endif 186 - 187 276 static inline bool is_nearbranch(int offset) 188 277 { 189 278 return (offset < 32768) && (offset >= -32768); ··· 211 316 #define COND_EQ (CR0_EQ | COND_CMP_TRUE) 212 317 #define COND_NE (CR0_EQ | COND_CMP_FALSE) 213 318 #define COND_LT (CR0_LT | COND_CMP_TRUE) 214 - 215 - #define SEEN_DATAREF 0x10000 /* might call external helpers */ 216 - #define SEEN_XREG 0x20000 /* X reg is used */ 217 - #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary 218 - * storage */ 219 - #define SEEN_MEM_MSK 0x0ffff 220 - 221 - struct codegen_context { 222 - unsigned int seen; 223 - unsigned int idx; 224 - int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ 225 - }; 226 319 227 320 #endif 228 321
+139
arch/powerpc/net/bpf_jit32.h
··· 1 + /* 2 + * bpf_jit32.h: BPF JIT compiler for PPC 3 + * 4 + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation 5 + * 6 + * Split from bpf_jit.h 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; version 2 11 + * of the License. 12 + */ 13 + #ifndef _BPF_JIT32_H 14 + #define _BPF_JIT32_H 15 + 16 + #include "bpf_jit.h" 17 + 18 + #ifdef CONFIG_PPC64 19 + #define BPF_PPC_STACK_R3_OFF 48 20 + #define BPF_PPC_STACK_LOCALS 32 21 + #define BPF_PPC_STACK_BASIC (48+64) 22 + #define BPF_PPC_STACK_SAVE (18*8) 23 + #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ 24 + BPF_PPC_STACK_SAVE) 25 + #define BPF_PPC_SLOWPATH_FRAME (48+64) 26 + #else 27 + #define BPF_PPC_STACK_R3_OFF 24 28 + #define BPF_PPC_STACK_LOCALS 16 29 + #define BPF_PPC_STACK_BASIC (24+32) 30 + #define BPF_PPC_STACK_SAVE (18*4) 31 + #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ 32 + BPF_PPC_STACK_SAVE) 33 + #define BPF_PPC_SLOWPATH_FRAME (24+32) 34 + #endif 35 + 36 + #define REG_SZ (BITS_PER_LONG/8) 37 + 38 + /* 39 + * Generated code register usage: 40 + * 41 + * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: 42 + * 43 + * skb r3 (Entry parameter) 44 + * A register r4 45 + * X register r5 46 + * addr param r6 47 + * r7-r10 scratch 48 + * skb->data r14 49 + * skb headlen r15 (skb->len - skb->data_len) 50 + * m[0] r16 51 + * m[...] ... 
52 + * m[15] r31 53 + */ 54 + #define r_skb 3 55 + #define r_ret 3 56 + #define r_A 4 57 + #define r_X 5 58 + #define r_addr 6 59 + #define r_scratch1 7 60 + #define r_scratch2 8 61 + #define r_D 14 62 + #define r_HL 15 63 + #define r_M 16 64 + 65 + #ifndef __ASSEMBLY__ 66 + 67 + /* 68 + * Assembly helpers from arch/powerpc/net/bpf_jit.S: 69 + */ 70 + #define DECLARE_LOAD_FUNC(func) \ 71 + extern u8 func[], func##_negative_offset[], func##_positive_offset[] 72 + 73 + DECLARE_LOAD_FUNC(sk_load_word); 74 + DECLARE_LOAD_FUNC(sk_load_half); 75 + DECLARE_LOAD_FUNC(sk_load_byte); 76 + DECLARE_LOAD_FUNC(sk_load_byte_msh); 77 + 78 + #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ 79 + else { PPC_ADDIS(r, base, IMM_HA(i)); \ 80 + PPC_LBZ(r, r, IMM_L(i)); } } while(0) 81 + 82 + #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ 83 + else { PPC_ADDIS(r, base, IMM_HA(i)); \ 84 + PPC_LD(r, r, IMM_L(i)); } } while(0) 85 + 86 + #define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ 87 + else { PPC_ADDIS(r, base, IMM_HA(i)); \ 88 + PPC_LWZ(r, r, IMM_L(i)); } } while(0) 89 + 90 + #define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ 91 + else { PPC_ADDIS(r, base, IMM_HA(i)); \ 92 + PPC_LHZ(r, r, IMM_L(i)); } } while(0) 93 + 94 + #ifdef CONFIG_PPC64 95 + #define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) 96 + #else 97 + #define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0) 98 + #endif 99 + 100 + #ifdef CONFIG_SMP 101 + #ifdef CONFIG_PPC64 102 + #define PPC_BPF_LOAD_CPU(r) \ 103 + do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \ 104 + PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ 105 + } while (0) 106 + #else 107 + #define PPC_BPF_LOAD_CPU(r) \ 108 + do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \ 109 + PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \ 110 + offsetof(struct thread_info, 
cpu)); \ 111 + } while(0) 112 + #endif 113 + #else 114 + #define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0) 115 + #endif 116 + 117 + #define PPC_LHBRX_OFFS(r, base, i) \ 118 + do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) 119 + #ifdef __LITTLE_ENDIAN__ 120 + #define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i) 121 + #else 122 + #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) 123 + #endif 124 + 125 + #define SEEN_DATAREF 0x10000 /* might call external helpers */ 126 + #define SEEN_XREG 0x20000 /* X reg is used */ 127 + #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary 128 + * storage */ 129 + #define SEEN_MEM_MSK 0x0ffff 130 + 131 + struct codegen_context { 132 + unsigned int seen; 133 + unsigned int idx; 134 + int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ 135 + }; 136 + 137 + #endif 138 + 139 + #endif
+1 -1
arch/powerpc/net/bpf_jit_asm.S
··· 10 10 */ 11 11 12 12 #include <asm/ppc_asm.h> 13 - #include "bpf_jit.h" 13 + #include "bpf_jit32.h" 14 14 15 15 /* 16 16 * All of these routines are called directly from generated code,
+1 -1
arch/powerpc/net/bpf_jit_comp.c
··· 16 16 #include <linux/filter.h> 17 17 #include <linux/if_vlan.h> 18 18 19 - #include "bpf_jit.h" 19 + #include "bpf_jit32.h" 20 20 21 21 int bpf_jit_enable __read_mostly; 22 22