/* linux/bpf-cgroup.h, as of Linux v4.13 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

/* Static key: true while at least one cgroup-bpf program is attached
 * anywhere in the system, so the hooks below cost a single no-op
 * instruction when the feature is unused.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct cgroup_bpf {
	/*
	 * Store two sets of bpf_prog pointers, one for programs that are
	 * pinned directly to this cgroup, and one for those that are effective
	 * when this cgroup is accessed.
	 */
	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
	/* Set when a program was attached without BPF_F_ALLOW_OVERRIDE,
	 * so descendant cgroups may not replace it.
	 */
	bool disallow_override[MAX_BPF_ATTACH_TYPE];
};

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool overridable);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, bool overridable);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

/* Only run on egress traffic generated by a local full socket:
 * sk_to_full_sk() maps request/timewait minisockets to the listener.
 */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})

/* Note: typeof(sk) resolves against an "sk" variable that must be in
 * scope at the call site; "sk" is not a parameter of this macro.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
#else

/* CONFIG_CGROUP_BPF=n: stub everything out so call sites need no #ifdefs;
 * each hook evaluates to 0 ("allow").
 */
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
				      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */
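For context, a minimal caller-side sketch of how the guarded hooks are meant to be used. It mirrors the pattern of sk_filter_trim_cap() in net/core/filter.c at v4.13, which invokes BPF_CGROUP_RUN_PROG_INET_INGRESS before running the socket filter; the function example_ingress_check() below and its placement are illustrative, not kernel code.

/* Hypothetical caller, for illustration only. A non-zero return from the
 * hook (-EPERM, which __cgroup_bpf_run_filter_skb() returns when the
 * attached program gives a drop verdict) aborts delivery; 0 lets the
 * skb through.
 */
static int example_ingress_check(struct sock *sk, struct sk_buff *skb)
{
	int err;

	/* Costs one no-op via the static branch when no cgroup-bpf
	 * program is attached, and compiles to ({ 0; }) when
	 * CONFIG_CGROUP_BPF=n.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;	/* drop */

	/* ... normal receive path continues ... */
	return 0;
}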