/* include/linux/bpf-cgroup.h at Linux v4.12 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct cgroup_bpf {
        /*
         * Store two sets of bpf_prog pointers, one for programs that are
         * pinned directly to this cgroup, and one for those that are effective
         * when this cgroup is accessed.
         */
        struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
        struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
        bool disallow_override[MAX_BPF_ATTACH_TYPE];
};

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
                        struct bpf_prog *prog, enum bpf_attach_type type,
                        bool overridable);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, bool overridable);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk) {                                        \
                __ret = __cgroup_bpf_run_filter_sk(sk,                         \
                                                 BPF_CGROUP_INET_SOCK_CREATE); \
        }                                                                      \
        __ret;                                                                 \
})

#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
                                      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */
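
/*
 * Illustrative sketch (not part of this header): how callers elsewhere in
 * the kernel might use the attach API and the inline run hooks above. The
 * functions example_attach_egress() and example_xmit() are hypothetical,
 * invented for this sketch; only cgroup_bpf_update() and the
 * BPF_CGROUP_RUN_PROG_* macros come from this header.
 */
#include <linux/bpf-cgroup.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical: attach @prog to @cgrp's egress hook. Passing overridable
 * as false records the program in disallow_override[], so descendant
 * cgroups cannot later replace it. cgroup_bpf_update() takes cgroup_mutex
 * internally. */
static int example_attach_egress(struct cgroup *cgrp, struct bpf_prog *prog)
{
        return cgroup_bpf_update(cgrp, prog, BPF_CGROUP_INET_EGRESS,
                                 false /* not overridable */);
}

/* Hypothetical transmit-path check. With CONFIG_CGROUP_BPF off the macro
 * compiles to 0; with it on, the static key keeps the fast path to a
 * single branch until a program is attached somewhere. A non-zero return
 * means the effective cgroup program rejected the packet. */
static int example_xmit(struct sock *sk, struct sk_buff *skb)
{
        int ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);

        if (ret)
                return ret;     /* e.g. -EPERM: dropped by cgroup program */

        /* ... hand the skb to the normal output path ... */
        return 0;
}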