Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: filter: split 'struct sk_filter' into socket and bpf parts

clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into
struct sk_filter {
atomic_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
and
struct bpf_prog {
u32 jited:1,
len:31;
struct sock_fprog_kern *orig_prog;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
struct work_struct work;
};
};
so that 'struct bpf_prog' can be used independently of sockets, and the
'unattached' bpf use cases are cleaned up

split SK_RUN_FILTER macro into:
SK_RUN_FILTER to be used with 'struct sk_filter *' and
BPF_PROG_RUN to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains a
__bpf_prog_release(struct bpf_prog *) helper function

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Alexei Starovoitov and committed by David S. Miller
7ae457c1 8fb575ca

+183 -169
+5 -5
Documentation/networking/filter.txt
··· 586 586 extension, PTP dissector/classifier, and much more. They are all internally 587 587 converted by the kernel into the new instruction set representation and run 588 588 in the eBPF interpreter. For in-kernel handlers, this all works transparently 589 - by using sk_unattached_filter_create() for setting up the filter, resp. 590 - sk_unattached_filter_destroy() for destroying it. The macro 591 - SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed 592 - code to run the filter. 'filter' is a pointer to struct sk_filter that we 593 - got from sk_unattached_filter_create(), and 'ctx' the given context (e.g. 589 + by using bpf_prog_create() for setting up the filter, resp. 590 + bpf_prog_destroy() for destroying it. The macro 591 + BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed 592 + code to run the filter. 'filter' is a pointer to struct bpf_prog that we 593 + got from bpf_prog_create(), and 'ctx' the given context (e.g. 594 594 skb pointer). All constraints and restrictions from bpf_check_classic() apply 595 595 before a conversion to the new layout is being done behind the scenes! 596 596
+4 -4
arch/arm/net/bpf_jit_32.c
··· 56 56 #define FLAG_NEED_X_RESET (1 << 0) 57 57 58 58 struct jit_ctx { 59 - const struct sk_filter *skf; 59 + const struct bpf_prog *skf; 60 60 unsigned idx; 61 61 unsigned prologue_bytes; 62 62 int ret0_fp_idx; ··· 465 465 static int build_body(struct jit_ctx *ctx) 466 466 { 467 467 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; 468 - const struct sk_filter *prog = ctx->skf; 468 + const struct bpf_prog *prog = ctx->skf; 469 469 const struct sock_filter *inst; 470 470 unsigned i, load_order, off, condt; 471 471 int imm12; ··· 857 857 } 858 858 859 859 860 - void bpf_jit_compile(struct sk_filter *fp) 860 + void bpf_jit_compile(struct bpf_prog *fp) 861 861 { 862 862 struct jit_ctx ctx; 863 863 unsigned tmp_idx; ··· 926 926 return; 927 927 } 928 928 929 - void bpf_jit_free(struct sk_filter *fp) 929 + void bpf_jit_free(struct bpf_prog *fp) 930 930 { 931 931 if (fp->jited) 932 932 module_free(NULL, fp->bpf_func);
+4 -4
arch/mips/net/bpf_jit.c
··· 131 131 * @target: Memory location for the compiled filter 132 132 */ 133 133 struct jit_ctx { 134 - const struct sk_filter *skf; 134 + const struct bpf_prog *skf; 135 135 unsigned int prologue_bytes; 136 136 u32 idx; 137 137 u32 flags; ··· 789 789 static int build_body(struct jit_ctx *ctx) 790 790 { 791 791 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; 792 - const struct sk_filter *prog = ctx->skf; 792 + const struct bpf_prog *prog = ctx->skf; 793 793 const struct sock_filter *inst; 794 794 unsigned int i, off, load_order, condt; 795 795 u32 k, b_off __maybe_unused; ··· 1369 1369 1370 1370 int bpf_jit_enable __read_mostly; 1371 1371 1372 - void bpf_jit_compile(struct sk_filter *fp) 1372 + void bpf_jit_compile(struct bpf_prog *fp) 1373 1373 { 1374 1374 struct jit_ctx ctx; 1375 1375 unsigned int alloc_size, tmp_idx; ··· 1423 1423 kfree(ctx.offsets); 1424 1424 } 1425 1425 1426 - void bpf_jit_free(struct sk_filter *fp) 1426 + void bpf_jit_free(struct bpf_prog *fp) 1427 1427 { 1428 1428 if (fp->jited) 1429 1429 module_free(NULL, fp->bpf_func);
+4 -4
arch/powerpc/net/bpf_jit_comp.c
··· 25 25 flush_icache_range((unsigned long)start, (unsigned long)end); 26 26 } 27 27 28 - static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, 28 + static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, 29 29 struct codegen_context *ctx) 30 30 { 31 31 int i; ··· 121 121 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 122 122 123 123 /* Assemble the body code between the prologue & epilogue. */ 124 - static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, 124 + static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, 125 125 struct codegen_context *ctx, 126 126 unsigned int *addrs) 127 127 { ··· 569 569 return 0; 570 570 } 571 571 572 - void bpf_jit_compile(struct sk_filter *fp) 572 + void bpf_jit_compile(struct bpf_prog *fp) 573 573 { 574 574 unsigned int proglen; 575 575 unsigned int alloclen; ··· 693 693 return; 694 694 } 695 695 696 - void bpf_jit_free(struct sk_filter *fp) 696 + void bpf_jit_free(struct bpf_prog *fp) 697 697 { 698 698 if (fp->jited) 699 699 module_free(NULL, fp->bpf_func);
+2 -2
arch/s390/net/bpf_jit_comp.c
··· 812 812 return header; 813 813 } 814 814 815 - void bpf_jit_compile(struct sk_filter *fp) 815 + void bpf_jit_compile(struct bpf_prog *fp) 816 816 { 817 817 struct bpf_binary_header *header = NULL; 818 818 unsigned long size, prg_len, lit_len; ··· 875 875 kfree(addrs); 876 876 } 877 877 878 - void bpf_jit_free(struct sk_filter *fp) 878 + void bpf_jit_free(struct bpf_prog *fp) 879 879 { 880 880 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; 881 881 struct bpf_binary_header *header = (void *)addr;
+2 -2
arch/sparc/net/bpf_jit_comp.c
··· 354 354 * emit_jump() calls with adjusted offsets. 355 355 */ 356 356 357 - void bpf_jit_compile(struct sk_filter *fp) 357 + void bpf_jit_compile(struct bpf_prog *fp) 358 358 { 359 359 unsigned int cleanup_addr, proglen, oldproglen = 0; 360 360 u32 temp[8], *prog, *func, seen = 0, pass; ··· 808 808 return; 809 809 } 810 810 811 - void bpf_jit_free(struct sk_filter *fp) 811 + void bpf_jit_free(struct bpf_prog *fp) 812 812 { 813 813 if (fp->jited) 814 814 module_free(NULL, fp->bpf_func);
+6 -6
arch/x86/net/bpf_jit_comp.c
··· 211 211 bool seen_ld_abs; 212 212 }; 213 213 214 - static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, 214 + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 215 215 int oldproglen, struct jit_context *ctx) 216 216 { 217 217 struct bpf_insn *insn = bpf_prog->insnsi; ··· 841 841 /* By design x64 JIT should support all BPF instructions 842 842 * This error will be seen if new instruction was added 843 843 * to interpreter, but not to JIT 844 - * or if there is junk in sk_filter 844 + * or if there is junk in bpf_prog 845 845 */ 846 846 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 847 847 return -EINVAL; ··· 862 862 return proglen; 863 863 } 864 864 865 - void bpf_jit_compile(struct sk_filter *prog) 865 + void bpf_jit_compile(struct bpf_prog *prog) 866 866 { 867 867 } 868 868 869 - void bpf_int_jit_compile(struct sk_filter *prog) 869 + void bpf_int_jit_compile(struct bpf_prog *prog) 870 870 { 871 871 struct bpf_binary_header *header = NULL; 872 872 int proglen, oldproglen = 0; ··· 932 932 933 933 static void bpf_jit_free_deferred(struct work_struct *work) 934 934 { 935 - struct sk_filter *fp = container_of(work, struct sk_filter, work); 935 + struct bpf_prog *fp = container_of(work, struct bpf_prog, work); 936 936 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; 937 937 struct bpf_binary_header *header = (void *)addr; 938 938 ··· 941 941 kfree(fp); 942 942 } 943 943 944 - void bpf_jit_free(struct sk_filter *fp) 944 + void bpf_jit_free(struct bpf_prog *fp) 945 945 { 946 946 if (fp->jited) { 947 947 INIT_WORK(&fp->work, bpf_jit_free_deferred);
+12 -14
drivers/isdn/i4l/isdn_ppp.c
··· 379 379 #endif 380 380 #ifdef CONFIG_IPPP_FILTER 381 381 if (is->pass_filter) { 382 - sk_unattached_filter_destroy(is->pass_filter); 382 + bpf_prog_destroy(is->pass_filter); 383 383 is->pass_filter = NULL; 384 384 } 385 385 386 386 if (is->active_filter) { 387 - sk_unattached_filter_destroy(is->active_filter); 387 + bpf_prog_destroy(is->active_filter); 388 388 is->active_filter = NULL; 389 389 } 390 390 #endif ··· 639 639 fprog.filter = code; 640 640 641 641 if (is->pass_filter) { 642 - sk_unattached_filter_destroy(is->pass_filter); 642 + bpf_prog_destroy(is->pass_filter); 643 643 is->pass_filter = NULL; 644 644 } 645 645 if (fprog.filter != NULL) 646 - err = sk_unattached_filter_create(&is->pass_filter, 647 - &fprog); 646 + err = bpf_prog_create(&is->pass_filter, &fprog); 648 647 else 649 648 err = 0; 650 649 kfree(code); ··· 663 664 fprog.filter = code; 664 665 665 666 if (is->active_filter) { 666 - sk_unattached_filter_destroy(is->active_filter); 667 + bpf_prog_destroy(is->active_filter); 667 668 is->active_filter = NULL; 668 669 } 669 670 if (fprog.filter != NULL) 670 - err = sk_unattached_filter_create(&is->active_filter, 671 - &fprog); 671 + err = bpf_prog_create(&is->active_filter, &fprog); 672 672 else 673 673 err = 0; 674 674 kfree(code); ··· 1172 1174 } 1173 1175 1174 1176 if (is->pass_filter 1175 - && SK_RUN_FILTER(is->pass_filter, skb) == 0) { 1177 + && BPF_PROG_RUN(is->pass_filter, skb) == 0) { 1176 1178 if (is->debug & 0x2) 1177 1179 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); 1178 1180 kfree_skb(skb); 1179 1181 return; 1180 1182 } 1181 1183 if (!(is->active_filter 1182 - && SK_RUN_FILTER(is->active_filter, skb) == 0)) { 1184 + && BPF_PROG_RUN(is->active_filter, skb) == 0)) { 1183 1185 if (is->debug & 0x2) 1184 1186 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); 1185 1187 lp->huptimer = 0; ··· 1318 1320 } 1319 1321 1320 1322 if (ipt->pass_filter 1321 - && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) { 1323 + && 
BPF_PROG_RUN(ipt->pass_filter, skb) == 0) { 1322 1324 if (ipt->debug & 0x4) 1323 1325 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); 1324 1326 kfree_skb(skb); 1325 1327 goto unlock; 1326 1328 } 1327 1329 if (!(ipt->active_filter 1328 - && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) { 1330 + && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) { 1329 1331 if (ipt->debug & 0x4) 1330 1332 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); 1331 1333 lp->huptimer = 0; ··· 1515 1517 } 1516 1518 1517 1519 drop |= is->pass_filter 1518 - && SK_RUN_FILTER(is->pass_filter, skb) == 0; 1520 + && BPF_PROG_RUN(is->pass_filter, skb) == 0; 1519 1521 drop |= is->active_filter 1520 - && SK_RUN_FILTER(is->active_filter, skb) == 0; 1522 + && BPF_PROG_RUN(is->active_filter, skb) == 0; 1521 1523 1522 1524 skb_push(skb, IPPP_MAX_HEADER - 4); 1523 1525 return drop;
+14 -14
drivers/net/ppp/ppp_generic.c
··· 143 143 struct sk_buff_head mrq; /* MP: receive reconstruction queue */ 144 144 #endif /* CONFIG_PPP_MULTILINK */ 145 145 #ifdef CONFIG_PPP_FILTER 146 - struct sk_filter *pass_filter; /* filter for packets to pass */ 147 - struct sk_filter *active_filter;/* filter for pkts to reset idle */ 146 + struct bpf_prog *pass_filter; /* filter for packets to pass */ 147 + struct bpf_prog *active_filter; /* filter for pkts to reset idle */ 148 148 #endif /* CONFIG_PPP_FILTER */ 149 149 struct net *ppp_net; /* the net we belong to */ 150 150 struct ppp_link_stats stats64; /* 64 bit network stats */ ··· 762 762 763 763 ppp_lock(ppp); 764 764 if (ppp->pass_filter) { 765 - sk_unattached_filter_destroy(ppp->pass_filter); 765 + bpf_prog_destroy(ppp->pass_filter); 766 766 ppp->pass_filter = NULL; 767 767 } 768 768 if (fprog.filter != NULL) 769 - err = sk_unattached_filter_create(&ppp->pass_filter, 770 - &fprog); 769 + err = bpf_prog_create(&ppp->pass_filter, 770 + &fprog); 771 771 else 772 772 err = 0; 773 773 kfree(code); ··· 788 788 789 789 ppp_lock(ppp); 790 790 if (ppp->active_filter) { 791 - sk_unattached_filter_destroy(ppp->active_filter); 791 + bpf_prog_destroy(ppp->active_filter); 792 792 ppp->active_filter = NULL; 793 793 } 794 794 if (fprog.filter != NULL) 795 - err = sk_unattached_filter_create(&ppp->active_filter, 796 - &fprog); 795 + err = bpf_prog_create(&ppp->active_filter, 796 + &fprog); 797 797 else 798 798 err = 0; 799 799 kfree(code); ··· 1205 1205 a four-byte PPP header on each packet */ 1206 1206 *skb_push(skb, 2) = 1; 1207 1207 if (ppp->pass_filter && 1208 - SK_RUN_FILTER(ppp->pass_filter, skb) == 0) { 1208 + BPF_PROG_RUN(ppp->pass_filter, skb) == 0) { 1209 1209 if (ppp->debug & 1) 1210 1210 netdev_printk(KERN_DEBUG, ppp->dev, 1211 1211 "PPP: outbound frame " ··· 1215 1215 } 1216 1216 /* if this packet passes the active filter, record the time */ 1217 1217 if (!(ppp->active_filter && 1218 - SK_RUN_FILTER(ppp->active_filter, skb) == 0)) 1218 + 
BPF_PROG_RUN(ppp->active_filter, skb) == 0)) 1219 1219 ppp->last_xmit = jiffies; 1220 1220 skb_pull(skb, 2); 1221 1221 #else ··· 1839 1839 1840 1840 *skb_push(skb, 2) = 0; 1841 1841 if (ppp->pass_filter && 1842 - SK_RUN_FILTER(ppp->pass_filter, skb) == 0) { 1842 + BPF_PROG_RUN(ppp->pass_filter, skb) == 0) { 1843 1843 if (ppp->debug & 1) 1844 1844 netdev_printk(KERN_DEBUG, ppp->dev, 1845 1845 "PPP: inbound frame " ··· 1848 1848 return; 1849 1849 } 1850 1850 if (!(ppp->active_filter && 1851 - SK_RUN_FILTER(ppp->active_filter, skb) == 0)) 1851 + BPF_PROG_RUN(ppp->active_filter, skb) == 0)) 1852 1852 ppp->last_recv = jiffies; 1853 1853 __skb_pull(skb, 2); 1854 1854 } else ··· 2829 2829 #endif /* CONFIG_PPP_MULTILINK */ 2830 2830 #ifdef CONFIG_PPP_FILTER 2831 2831 if (ppp->pass_filter) { 2832 - sk_unattached_filter_destroy(ppp->pass_filter); 2832 + bpf_prog_destroy(ppp->pass_filter); 2833 2833 ppp->pass_filter = NULL; 2834 2834 } 2835 2835 2836 2836 if (ppp->active_filter) { 2837 - sk_unattached_filter_destroy(ppp->active_filter); 2837 + bpf_prog_destroy(ppp->active_filter); 2838 2838 ppp->active_filter = NULL; 2839 2839 } 2840 2840 #endif /* CONFIG_PPP_FILTER */
+7 -7
drivers/net/team/team_mode_loadbalance.c
··· 58 58 }; 59 59 60 60 struct lb_priv { 61 - struct sk_filter __rcu *fp; 61 + struct bpf_prog __rcu *fp; 62 62 lb_select_tx_port_func_t __rcu *select_tx_port_func; 63 63 struct lb_pcpu_stats __percpu *pcpu_stats; 64 64 struct lb_priv_ex *ex; /* priv extension */ ··· 174 174 static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, 175 175 struct sk_buff *skb) 176 176 { 177 - struct sk_filter *fp; 177 + struct bpf_prog *fp; 178 178 uint32_t lhash; 179 179 unsigned char *c; 180 180 181 181 fp = rcu_dereference_bh(lb_priv->fp); 182 182 if (unlikely(!fp)) 183 183 return 0; 184 - lhash = SK_RUN_FILTER(fp, skb); 184 + lhash = BPF_PROG_RUN(fp, skb); 185 185 c = (char *) &lhash; 186 186 return c[0] ^ c[1] ^ c[2] ^ c[3]; 187 187 } ··· 271 271 static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) 272 272 { 273 273 struct lb_priv *lb_priv = get_lb_priv(team); 274 - struct sk_filter *fp = NULL; 275 - struct sk_filter *orig_fp = NULL; 274 + struct bpf_prog *fp = NULL; 275 + struct bpf_prog *orig_fp = NULL; 276 276 struct sock_fprog_kern *fprog = NULL; 277 277 int err; 278 278 ··· 281 281 ctx->data.bin_val.ptr); 282 282 if (err) 283 283 return err; 284 - err = sk_unattached_filter_create(&fp, fprog); 284 + err = bpf_prog_create(&fp, fprog); 285 285 if (err) { 286 286 __fprog_destroy(fprog); 287 287 return err; ··· 300 300 301 301 if (orig_fp) { 302 302 synchronize_rcu(); 303 - sk_unattached_filter_destroy(orig_fp); 303 + bpf_prog_destroy(orig_fp); 304 304 } 305 305 return 0; 306 306 }
+23 -17
include/linux/filter.h
··· 296 296 }) 297 297 298 298 /* Macro to invoke filter function. */ 299 - #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) 299 + #define SK_RUN_FILTER(filter, ctx) \ 300 + (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) 300 301 301 302 struct bpf_insn { 302 303 __u8 code; /* opcode */ ··· 324 323 struct sock; 325 324 struct seccomp_data; 326 325 327 - struct sk_filter { 328 - atomic_t refcnt; 326 + struct bpf_prog { 329 327 u32 jited:1, /* Is our filter JIT'ed? */ 330 328 len:31; /* Number of filter blocks */ 331 329 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 332 - struct rcu_head rcu; 333 330 unsigned int (*bpf_func)(const struct sk_buff *skb, 334 331 const struct bpf_insn *filter); 335 332 union { ··· 337 338 }; 338 339 }; 339 340 340 - static inline unsigned int sk_filter_size(unsigned int proglen) 341 + struct sk_filter { 342 + atomic_t refcnt; 343 + struct rcu_head rcu; 344 + struct bpf_prog *prog; 345 + }; 346 + 347 + #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) 348 + 349 + static inline unsigned int bpf_prog_size(unsigned int proglen) 341 350 { 342 - return max(sizeof(struct sk_filter), 343 - offsetof(struct sk_filter, insns[proglen])); 351 + return max(sizeof(struct bpf_prog), 352 + offsetof(struct bpf_prog, insns[proglen])); 344 353 } 345 354 346 355 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 347 356 348 357 int sk_filter(struct sock *sk, struct sk_buff *skb); 349 358 350 - void sk_filter_select_runtime(struct sk_filter *fp); 351 - void sk_filter_free(struct sk_filter *fp); 359 + void bpf_prog_select_runtime(struct bpf_prog *fp); 360 + void bpf_prog_free(struct bpf_prog *fp); 352 361 353 362 int bpf_convert_filter(struct sock_filter *prog, int len, 354 363 struct bpf_insn *new_prog, int *new_len); 355 364 356 - int sk_unattached_filter_create(struct sk_filter **pfp, 357 - struct sock_fprog_kern *fprog); 358 - void 
sk_unattached_filter_destroy(struct sk_filter *fp); 365 + int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); 366 + void bpf_prog_destroy(struct bpf_prog *fp); 359 367 360 368 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 361 369 int sk_detach_filter(struct sock *sk); ··· 375 369 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); 376 370 377 371 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 378 - void bpf_int_jit_compile(struct sk_filter *fp); 372 + void bpf_int_jit_compile(struct bpf_prog *fp); 379 373 380 374 #define BPF_ANC BIT(15) 381 375 ··· 429 423 #include <linux/linkage.h> 430 424 #include <linux/printk.h> 431 425 432 - void bpf_jit_compile(struct sk_filter *fp); 433 - void bpf_jit_free(struct sk_filter *fp); 426 + void bpf_jit_compile(struct bpf_prog *fp); 427 + void bpf_jit_free(struct bpf_prog *fp); 434 428 435 429 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, 436 430 u32 pass, void *image) ··· 444 438 #else 445 439 #include <linux/slab.h> 446 440 447 - static inline void bpf_jit_compile(struct sk_filter *fp) 441 + static inline void bpf_jit_compile(struct bpf_prog *fp) 448 442 { 449 443 } 450 444 451 - static inline void bpf_jit_free(struct sk_filter *fp) 445 + static inline void bpf_jit_free(struct bpf_prog *fp) 452 446 { 453 447 kfree(fp); 454 448 }
+2 -2
include/linux/isdn_ppp.h
··· 180 180 struct slcompress *slcomp; 181 181 #endif 182 182 #ifdef CONFIG_IPPP_FILTER 183 - struct sk_filter *pass_filter; /* filter for packets to pass */ 184 - struct sk_filter *active_filter; /* filter for pkts to reset idle */ 183 + struct bpf_prog *pass_filter; /* filter for packets to pass */ 184 + struct bpf_prog *active_filter; /* filter for pkts to reset idle */ 185 185 #endif 186 186 unsigned long debug; 187 187 struct isdn_ppp_compressor *compressor,*decompressor;
+2 -2
include/uapi/linux/netfilter/xt_bpf.h
··· 6 6 7 7 #define XT_BPF_MAX_NUM_INSTR 64 8 8 9 - struct sk_filter; 9 + struct bpf_prog; 10 10 11 11 struct xt_bpf_info { 12 12 __u16 bpf_program_num_elem; 13 13 struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR]; 14 14 15 15 /* only used in the kernel */ 16 - struct sk_filter *filter __attribute__((aligned(8))); 16 + struct bpf_prog *filter __attribute__((aligned(8))); 17 17 }; 18 18 19 19 #endif /*_XT_BPF_H */
+14 -16
kernel/bpf/core.c
··· 73 73 } 74 74 75 75 /** 76 - * __sk_run_filter - run a filter on a given context 77 - * @ctx: buffer to run the filter on 78 - * @insn: filter to apply 76 + * __bpf_prog_run - run eBPF program on a given context 77 + * @ctx: is the data we are operating on 78 + * @insn: is the array of eBPF instructions 79 79 * 80 - * Decode and apply filter instructions to the skb->data. Return length to 81 - * keep, 0 for none. @ctx is the data we are operating on, @insn is the 82 - * array of filter instructions. 80 + * Decode and execute eBPF instructions. 83 81 */ 84 - static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn) 82 + static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) 85 83 { 86 84 u64 stack[MAX_BPF_STACK / sizeof(u64)]; 87 85 u64 regs[MAX_BPF_REG], tmp; ··· 506 508 return 0; 507 509 } 508 510 509 - void __weak bpf_int_jit_compile(struct sk_filter *prog) 511 + void __weak bpf_int_jit_compile(struct bpf_prog *prog) 510 512 { 511 513 } 512 514 513 515 /** 514 - * sk_filter_select_runtime - select execution runtime for BPF program 515 - * @fp: sk_filter populated with internal BPF program 516 + * bpf_prog_select_runtime - select execution runtime for BPF program 517 + * @fp: bpf_prog populated with internal BPF program 516 518 * 517 519 * try to JIT internal BPF program, if JIT is not available select interpreter 518 - * BPF program will be executed via SK_RUN_FILTER() macro 520 + * BPF program will be executed via BPF_PROG_RUN() macro 519 521 */ 520 - void sk_filter_select_runtime(struct sk_filter *fp) 522 + void bpf_prog_select_runtime(struct bpf_prog *fp) 521 523 { 522 - fp->bpf_func = (void *) __sk_run_filter; 524 + fp->bpf_func = (void *) __bpf_prog_run; 523 525 524 526 /* Probe if internal BPF can be JITed */ 525 527 bpf_int_jit_compile(fp); 526 528 } 527 - EXPORT_SYMBOL_GPL(sk_filter_select_runtime); 529 + EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 528 530 529 531 /* free internal BPF program */ 530 - void 
sk_filter_free(struct sk_filter *fp) 532 + void bpf_prog_free(struct bpf_prog *fp) 531 533 { 532 534 bpf_jit_free(fp); 533 535 } 534 - EXPORT_SYMBOL_GPL(sk_filter_free); 536 + EXPORT_SYMBOL_GPL(bpf_prog_free);
+5 -5
kernel/seccomp.c
··· 54 54 struct seccomp_filter { 55 55 atomic_t usage; 56 56 struct seccomp_filter *prev; 57 - struct sk_filter *prog; 57 + struct bpf_prog *prog; 58 58 }; 59 59 60 60 /* Limit any path through the tree to 256KB worth of instructions. */ ··· 187 187 * value always takes priority (ignoring the DATA). 188 188 */ 189 189 for (f = current->seccomp.filter; f; f = f->prev) { 190 - u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd); 190 + u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd); 191 191 192 192 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) 193 193 ret = cur_ret; ··· 260 260 if (!filter) 261 261 goto free_prog; 262 262 263 - filter->prog = kzalloc(sk_filter_size(new_len), 263 + filter->prog = kzalloc(bpf_prog_size(new_len), 264 264 GFP_KERNEL|__GFP_NOWARN); 265 265 if (!filter->prog) 266 266 goto free_filter; ··· 273 273 atomic_set(&filter->usage, 1); 274 274 filter->prog->len = new_len; 275 275 276 - sk_filter_select_runtime(filter->prog); 276 + bpf_prog_select_runtime(filter->prog); 277 277 278 278 /* 279 279 * If there is an existing filter, make it the prev and don't drop its ··· 337 337 while (orig && atomic_dec_and_test(&orig->usage)) { 338 338 struct seccomp_filter *freeme = orig; 339 339 orig = orig->prev; 340 - sk_filter_free(freeme->prog); 340 + bpf_prog_free(freeme->prog); 341 341 kfree(freeme); 342 342 } 343 343 }
+12 -12
lib/test_bpf.c
··· 1761 1761 return len + 1; 1762 1762 } 1763 1763 1764 - static struct sk_filter *generate_filter(int which, int *err) 1764 + static struct bpf_prog *generate_filter(int which, int *err) 1765 1765 { 1766 - struct sk_filter *fp; 1766 + struct bpf_prog *fp; 1767 1767 struct sock_fprog_kern fprog; 1768 1768 unsigned int flen = probe_filter_length(tests[which].u.insns); 1769 1769 __u8 test_type = tests[which].aux & TEST_TYPE_MASK; ··· 1773 1773 fprog.filter = tests[which].u.insns; 1774 1774 fprog.len = flen; 1775 1775 1776 - *err = sk_unattached_filter_create(&fp, &fprog); 1776 + *err = bpf_prog_create(&fp, &fprog); 1777 1777 if (tests[which].aux & FLAG_EXPECTED_FAIL) { 1778 1778 if (*err == -EINVAL) { 1779 1779 pr_cont("PASS\n"); ··· 1798 1798 break; 1799 1799 1800 1800 case INTERNAL: 1801 - fp = kzalloc(sk_filter_size(flen), GFP_KERNEL); 1801 + fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL); 1802 1802 if (fp == NULL) { 1803 1803 pr_cont("UNEXPECTED_FAIL no memory left\n"); 1804 1804 *err = -ENOMEM; ··· 1809 1809 memcpy(fp->insnsi, tests[which].u.insns_int, 1810 1810 fp->len * sizeof(struct bpf_insn)); 1811 1811 1812 - sk_filter_select_runtime(fp); 1812 + bpf_prog_select_runtime(fp); 1813 1813 break; 1814 1814 } 1815 1815 ··· 1817 1817 return fp; 1818 1818 } 1819 1819 1820 - static void release_filter(struct sk_filter *fp, int which) 1820 + static void release_filter(struct bpf_prog *fp, int which) 1821 1821 { 1822 1822 __u8 test_type = tests[which].aux & TEST_TYPE_MASK; 1823 1823 1824 1824 switch (test_type) { 1825 1825 case CLASSIC: 1826 - sk_unattached_filter_destroy(fp); 1826 + bpf_prog_destroy(fp); 1827 1827 break; 1828 1828 case INTERNAL: 1829 - sk_filter_free(fp); 1829 + bpf_prog_free(fp); 1830 1830 break; 1831 1831 } 1832 1832 } 1833 1833 1834 - static int __run_one(const struct sk_filter *fp, const void *data, 1834 + static int __run_one(const struct bpf_prog *fp, const void *data, 1835 1835 int runs, u64 *duration) 1836 1836 { 1837 1837 u64 start, finish; 
··· 1840 1840 start = ktime_to_us(ktime_get()); 1841 1841 1842 1842 for (i = 0; i < runs; i++) 1843 - ret = SK_RUN_FILTER(fp, data); 1843 + ret = BPF_PROG_RUN(fp, data); 1844 1844 1845 1845 finish = ktime_to_us(ktime_get()); 1846 1846 ··· 1850 1850 return ret; 1851 1851 } 1852 1852 1853 - static int run_one(const struct sk_filter *fp, struct bpf_test *test) 1853 + static int run_one(const struct bpf_prog *fp, struct bpf_test *test) 1854 1854 { 1855 1855 int err_cnt = 0, i, runs = MAX_TESTRUNS; 1856 1856 ··· 1884 1884 int i, err_cnt = 0, pass_cnt = 0; 1885 1885 1886 1886 for (i = 0; i < ARRAY_SIZE(tests); i++) { 1887 - struct sk_filter *fp; 1887 + struct bpf_prog *fp; 1888 1888 int err; 1889 1889 1890 1890 pr_info("#%d %s ", i, tests[i].descr);
+52 -40
net/core/filter.c
··· 810 810 } 811 811 EXPORT_SYMBOL(bpf_check_classic); 812 812 813 - static int sk_store_orig_filter(struct sk_filter *fp, 814 - const struct sock_fprog *fprog) 813 + static int bpf_prog_store_orig_filter(struct bpf_prog *fp, 814 + const struct sock_fprog *fprog) 815 815 { 816 816 unsigned int fsize = bpf_classic_proglen(fprog); 817 817 struct sock_fprog_kern *fkprog; ··· 831 831 return 0; 832 832 } 833 833 834 - static void sk_release_orig_filter(struct sk_filter *fp) 834 + static void bpf_release_orig_filter(struct bpf_prog *fp) 835 835 { 836 836 struct sock_fprog_kern *fprog = fp->orig_prog; 837 837 ··· 841 841 } 842 842 } 843 843 844 + static void __bpf_prog_release(struct bpf_prog *prog) 845 + { 846 + bpf_release_orig_filter(prog); 847 + bpf_prog_free(prog); 848 + } 849 + 844 850 static void __sk_filter_release(struct sk_filter *fp) 845 851 { 846 - sk_release_orig_filter(fp); 847 - sk_filter_free(fp); 852 + __bpf_prog_release(fp->prog); 853 + kfree(fp); 848 854 } 849 855 850 856 /** ··· 878 872 879 873 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 880 874 { 881 - u32 filter_size = sk_filter_size(fp->len); 875 + u32 filter_size = bpf_prog_size(fp->prog->len); 882 876 883 877 atomic_sub(filter_size, &sk->sk_omem_alloc); 884 878 sk_filter_release(fp); ··· 889 883 */ 890 884 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) 891 885 { 892 - u32 filter_size = sk_filter_size(fp->len); 886 + u32 filter_size = bpf_prog_size(fp->prog->len); 893 887 894 888 /* same check as in sock_kmalloc() */ 895 889 if (filter_size <= sysctl_optmem_max && ··· 901 895 return false; 902 896 } 903 897 904 - static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp) 898 + static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) 905 899 { 906 900 struct sock_filter *old_prog; 907 - struct sk_filter *old_fp; 901 + struct bpf_prog *old_fp; 908 902 int err, new_len, old_len = fp->len; 909 903 910 904 /* We are free to overwrite insns et al right 
here as it ··· 933 927 934 928 /* Expand fp for appending the new filter representation. */ 935 929 old_fp = fp; 936 - fp = krealloc(old_fp, sk_filter_size(new_len), GFP_KERNEL); 930 + fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL); 937 931 if (!fp) { 938 932 /* The old_fp is still around in case we couldn't 939 933 * allocate new memory, so uncharge on that one. ··· 955 949 */ 956 950 goto out_err_free; 957 951 958 - sk_filter_select_runtime(fp); 952 + bpf_prog_select_runtime(fp); 959 953 960 954 kfree(old_prog); 961 955 return fp; ··· 963 957 out_err_free: 964 958 kfree(old_prog); 965 959 out_err: 966 - __sk_filter_release(fp); 960 + __bpf_prog_release(fp); 967 961 return ERR_PTR(err); 968 962 } 969 963 970 - static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp) 964 + static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp) 971 965 { 972 966 int err; 973 967 ··· 976 970 977 971 err = bpf_check_classic(fp->insns, fp->len); 978 972 if (err) { 979 - __sk_filter_release(fp); 973 + __bpf_prog_release(fp); 980 974 return ERR_PTR(err); 981 975 } 982 976 ··· 989 983 * internal BPF translation for the optimized interpreter. 990 984 */ 991 985 if (!fp->jited) 992 - fp = __sk_migrate_filter(fp); 986 + fp = bpf_migrate_filter(fp); 993 987 994 988 return fp; 995 989 } 996 990 997 991 /** 998 - * sk_unattached_filter_create - create an unattached filter 992 + * bpf_prog_create - create an unattached filter 999 993 * @pfp: the unattached filter that is created 1000 994 * @fprog: the filter program 1001 995 * ··· 1004 998 * If an error occurs or there is insufficient memory for the filter 1005 999 * a negative errno code is returned. On success the return is zero. 
1006 1000 */ 1007 - int sk_unattached_filter_create(struct sk_filter **pfp, 1008 - struct sock_fprog_kern *fprog) 1001 + int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) 1009 1002 { 1010 1003 unsigned int fsize = bpf_classic_proglen(fprog); 1011 - struct sk_filter *fp; 1004 + struct bpf_prog *fp; 1012 1005 1013 1006 /* Make sure new filter is there and in the right amounts. */ 1014 1007 if (fprog->filter == NULL) 1015 1008 return -EINVAL; 1016 1009 1017 - fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); 1010 + fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL); 1018 1011 if (!fp) 1019 1012 return -ENOMEM; 1020 1013 1021 1014 memcpy(fp->insns, fprog->filter, fsize); 1022 1015 1023 - atomic_set(&fp->refcnt, 1); 1024 1016 fp->len = fprog->len; 1025 1017 /* Since unattached filters are not copied back to user 1026 1018 * space through sk_get_filter(), we do not need to hold ··· 1026 1022 */ 1027 1023 fp->orig_prog = NULL; 1028 1024 1029 - /* __sk_prepare_filter() already takes care of freeing 1025 + /* bpf_prepare_filter() already takes care of freeing 1030 1026 * memory in case something goes wrong. 
1031 1027 */ 1032 - fp = __sk_prepare_filter(fp); 1028 + fp = bpf_prepare_filter(fp); 1033 1029 if (IS_ERR(fp)) 1034 1030 return PTR_ERR(fp); 1035 1031 1036 1032 *pfp = fp; 1037 1033 return 0; 1038 1034 } 1039 - EXPORT_SYMBOL_GPL(sk_unattached_filter_create); 1035 + EXPORT_SYMBOL_GPL(bpf_prog_create); 1040 1036 1041 - void sk_unattached_filter_destroy(struct sk_filter *fp) 1037 + void bpf_prog_destroy(struct bpf_prog *fp) 1042 1038 { 1043 - __sk_filter_release(fp); 1039 + __bpf_prog_release(fp); 1044 1040 } 1045 - EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy); 1041 + EXPORT_SYMBOL_GPL(bpf_prog_destroy); 1046 1042 1047 1043 /** 1048 1044 * sk_attach_filter - attach a socket filter ··· 1058 1054 { 1059 1055 struct sk_filter *fp, *old_fp; 1060 1056 unsigned int fsize = bpf_classic_proglen(fprog); 1061 - unsigned int sk_fsize = sk_filter_size(fprog->len); 1057 + unsigned int bpf_fsize = bpf_prog_size(fprog->len); 1058 + struct bpf_prog *prog; 1062 1059 int err; 1063 1060 1064 1061 if (sock_flag(sk, SOCK_FILTER_LOCKED)) ··· 1069 1064 if (fprog->filter == NULL) 1070 1065 return -EINVAL; 1071 1066 1072 - fp = kmalloc(sk_fsize, GFP_KERNEL); 1073 - if (!fp) 1067 + prog = kmalloc(bpf_fsize, GFP_KERNEL); 1068 + if (!prog) 1074 1069 return -ENOMEM; 1075 1070 1076 - if (copy_from_user(fp->insns, fprog->filter, fsize)) { 1077 - kfree(fp); 1071 + if (copy_from_user(prog->insns, fprog->filter, fsize)) { 1072 + kfree(prog); 1078 1073 return -EFAULT; 1079 1074 } 1080 1075 1081 - fp->len = fprog->len; 1076 + prog->len = fprog->len; 1082 1077 1083 - err = sk_store_orig_filter(fp, fprog); 1078 + err = bpf_prog_store_orig_filter(prog, fprog); 1084 1079 if (err) { 1085 - kfree(fp); 1080 + kfree(prog); 1086 1081 return -ENOMEM; 1087 1082 } 1088 1083 1089 - /* __sk_prepare_filter() already takes care of freeing 1084 + /* bpf_prepare_filter() already takes care of freeing 1090 1085 * memory in case something goes wrong. 
1091 1086 */ 1092 - fp = __sk_prepare_filter(fp); 1093 - if (IS_ERR(fp)) 1094 - return PTR_ERR(fp); 1087 + prog = bpf_prepare_filter(prog); 1088 + if (IS_ERR(prog)) 1089 + return PTR_ERR(prog); 1090 + 1091 + fp = kmalloc(sizeof(*fp), GFP_KERNEL); 1092 + if (!fp) { 1093 + __bpf_prog_release(prog); 1094 + return -ENOMEM; 1095 + } 1096 + fp->prog = prog; 1095 1097 1096 1098 atomic_set(&fp->refcnt, 0); 1097 1099 ··· 1154 1142 /* We're copying the filter that has been originally attached, 1155 1143 * so no conversion/decode needed anymore. 1156 1144 */ 1157 - fprog = filter->orig_prog; 1145 + fprog = filter->prog->orig_prog; 1158 1146 1159 1147 ret = fprog->len; 1160 1148 if (!len)
+3 -3
net/core/ptp_classifier.c
··· 107 107 #include <linux/filter.h> 108 108 #include <linux/ptp_classify.h> 109 109 110 - static struct sk_filter *ptp_insns __read_mostly; 110 + static struct bpf_prog *ptp_insns __read_mostly; 111 111 112 112 unsigned int ptp_classify_raw(const struct sk_buff *skb) 113 113 { 114 - return SK_RUN_FILTER(ptp_insns, skb); 114 + return BPF_PROG_RUN(ptp_insns, skb); 115 115 } 116 116 EXPORT_SYMBOL_GPL(ptp_classify_raw); 117 117 ··· 189 189 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, 190 190 }; 191 191 192 - BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog)); 192 + BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); 193 193 }
+1 -1
net/core/sock_diag.c
··· 68 68 if (!filter) 69 69 goto out; 70 70 71 - fprog = filter->orig_prog; 71 + fprog = filter->prog->orig_prog; 72 72 flen = bpf_classic_proglen(fprog); 73 73 74 74 attr = nla_reserve(skb, attrtype, flen);
+3 -3
net/netfilter/xt_bpf.c
··· 28 28 program.len = info->bpf_program_num_elem; 29 29 program.filter = info->bpf_program; 30 30 31 - if (sk_unattached_filter_create(&info->filter, &program)) { 31 + if (bpf_prog_create(&info->filter, &program)) { 32 32 pr_info("bpf: check failed: parse error\n"); 33 33 return -EINVAL; 34 34 } ··· 40 40 { 41 41 const struct xt_bpf_info *info = par->matchinfo; 42 42 43 - return SK_RUN_FILTER(info->filter, skb); 43 + return BPF_PROG_RUN(info->filter, skb); 44 44 } 45 45 46 46 static void bpf_mt_destroy(const struct xt_mtdtor_param *par) 47 47 { 48 48 const struct xt_bpf_info *info = par->matchinfo; 49 - sk_unattached_filter_destroy(info->filter); 49 + bpf_prog_destroy(info->filter); 50 50 } 51 51 52 52 static struct xt_match bpf_mt_reg __read_mostly = {
+6 -6
net/sched/cls_bpf.c
··· 30 30 }; 31 31 32 32 struct cls_bpf_prog { 33 - struct sk_filter *filter; 33 + struct bpf_prog *filter; 34 34 struct sock_filter *bpf_ops; 35 35 struct tcf_exts exts; 36 36 struct tcf_result res; ··· 54 54 int ret; 55 55 56 56 list_for_each_entry(prog, &head->plist, link) { 57 - int filter_res = SK_RUN_FILTER(prog->filter, skb); 57 + int filter_res = BPF_PROG_RUN(prog->filter, skb); 58 58 59 59 if (filter_res == 0) 60 60 continue; ··· 92 92 tcf_unbind_filter(tp, &prog->res); 93 93 tcf_exts_destroy(tp, &prog->exts); 94 94 95 - sk_unattached_filter_destroy(prog->filter); 95 + bpf_prog_destroy(prog->filter); 96 96 97 97 kfree(prog->bpf_ops); 98 98 kfree(prog); ··· 161 161 struct sock_filter *bpf_ops, *bpf_old; 162 162 struct tcf_exts exts; 163 163 struct sock_fprog_kern tmp; 164 - struct sk_filter *fp, *fp_old; 164 + struct bpf_prog *fp, *fp_old; 165 165 u16 bpf_size, bpf_len; 166 166 u32 classid; 167 167 int ret; ··· 193 193 tmp.len = bpf_len; 194 194 tmp.filter = bpf_ops; 195 195 196 - ret = sk_unattached_filter_create(&fp, &tmp); 196 + ret = bpf_prog_create(&fp, &tmp); 197 197 if (ret) 198 198 goto errout_free; 199 199 ··· 211 211 tcf_exts_change(tp, &prog->exts, &exts); 212 212 213 213 if (fp_old) 214 - sk_unattached_filter_destroy(fp_old); 214 + bpf_prog_destroy(fp_old); 215 215 if (bpf_old) 216 216 kfree(bpf_old); 217 217