Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tools/lib/bpf/bpf_core_read.h at v5.8
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

/*
 * enum bpf_field_info_kind is passed as a second argument into
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * a field, captured as a first argument. __builtin_preserve_field_info(field,
 * info_kind) returns a __u32 integer and produces a BTF field relocation,
 * which is understood and processed by libbpf during BPF object loading. See
 * selftests/bpf for examples.
 */
enum bpf_field_info_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,
	BPF_FIELD_LSHIFT_U64 = 4,
	BPF_FIELD_RSHIFT_U64 = 5,
};

#define __CORE_RELO(src, field, info)					      \
	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read((void *)dst,					      \
		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assume loading values into low-ordered bytes, so
 * for big-endian we need to adjust the destination pointer accordingly,
 * based on field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)),  \
		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or offset are handled automatically.
 * This version of the macro uses bpf_probe_read() to read the underlying
 * integer storage. The macro functions as an expression that evaluates to
 * the extracted bitfield value; the return code of the underlying
 * bpf_probe_read() call is discarded (on failure, the destination is
 * zeroed, so the result is 0).
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({			      \
	unsigned long long val = 0;					      \
									      \
	__CORE_BITFIELD_PROBE_READ(&val, s, field);			      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	unsigned long long val;						      \
									      \
	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
	case 1: val = *(const unsigned char *)p; break;			      \
	case 2: val = *(const unsigned short *)p; break;		      \
	case 4: val = *(const unsigned int *)p; break;			      \
	case 8: val = *(const unsigned long long *)p; break;		      \
	}								      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
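/*
 * Usage sketch (editor's illustration, not part of the original header),
 * assuming skb points to a kernel struct sk_buff, whose pkt_type field is
 * declared as a bitfield:
 *
 *	unsigned long long pkt_type;
 *
 *	// from probe-style programs, reading via bpf_probe_read():
 *	pkt_type = BPF_CORE_READ_BITFIELD_PROBED(skb, pkt_type);
 *
 *	// from programs with direct memory access (e.g., typed raw
 *	// tracepoints), reading via plain loads:
 *	pkt_type = BPF_CORE_READ_BITFIELD(skb, pkt_type);
 */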
/*
 * Convenience macro to check that a field actually exists in the target
 * kernel's BTF.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 */
#define bpf_core_field_exists(field)					    \
	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)

/*
 * Convenience macro to get byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 */
#define bpf_core_field_size(field)					    \
	__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)

/*
 * bpf_core_read() abstracts away the bpf_probe_read() call and captures the
 * offset relocation for the source address using the
 * __builtin_preserve_access_index() built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression that
 * takes the address of a field within a struct/union. It makes the compiler
 * emit a relocation, which records the BTF type ID describing the root
 * struct/union and an accessor string which describes the exact embedded
 * field that was used to take the address. See the detailed description of
 * this relocation format and its semantics in comments to struct
 * bpf_field_reloc in libbpf_internal.h.
 *
 * This relocation allows libbpf to adjust the BPF instruction to use the
 * correct actual field offset, based on the target kernel BTF type that
 * matches the original (local) BTF used to record the relocation.
 */
#define bpf_core_read(dst, sz, src)					    \
	bpf_probe_read(dst, sz,						    \
		       (const void *)__builtin_preserve_access_index(src))

/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str(),
 * additionally emitting a BPF CO-RE field relocation for the specified
 * source argument.
 */
#define bpf_core_read_str(dst, sz, src)					    \
	bpf_probe_read_str(dst, sz,					    \
			   (const void *)__builtin_preserve_access_index(src))
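/*
 * Usage sketch (editor's illustration, not part of the original header),
 * assuming a BPF program that already holds a struct task_struct pointer
 * named task:
 *
 *	pid_t pid = 0;
 *
 *	if (bpf_core_field_exists(task->pid))
 *		bpf_core_read(&pid, sizeof(pid), &task->pid);
 *
 * bpf_core_field_size(task->pid) can replace sizeof(pid) when the field's
 * size may differ between kernel versions.
 */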
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N

/*
 * return number of provided arguments; used for switch-based variadic macro
 * definitions (see ___last, ___arrow, etc. below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed and N otherwise; used for
 * recursively-defined macros to specify the termination (0) case and the
 * generic (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor)			    \
	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
#define ___rd_last(...)							    \
	___read(bpf_core_read, &__t,					    \
		___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___read_ptrs(src, ...)						    \
	___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)

#define ___core_read0(fn, dst, src, a)					    \
	___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, dst, src, ...)				    \
	___read_ptrs(src, ___nolast(__VA_ARGS__))			    \
	___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t,	    \
		___last(__VA_ARGS__));
#define ___core_read(fn, dst, src, a, ...)				    \
	___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst,		    \
						      src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which the final field is read into user-provided
 * storage. See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...)				    \
	({								    \
		___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__)	    \
	})

/*
 * BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as
 * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
 * the corresponding error code of) bpf_core_read_str() for the final string
 * read.
 */
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...)			    \
	({								    \
		___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
	})
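/*
 * Usage sketch (editor's illustration, not part of the original header),
 * again assuming a struct task_struct pointer named task:
 *
 *	char comm[16];
 *	unsigned long start_stack;
 *	int err;
 *
 *	// no pointer chasing: read task->comm as a string, keeping the
 *	// error code of the final bpf_core_read_str():
 *	err = BPF_CORE_READ_STR_INTO(&comm, task, comm);
 *
 *	// one pointer-chasing step, logically task->mm->start_stack:
 *	err = BPF_CORE_READ_INTO(&start_stack, task, mm, start_stack);
 */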
/*
 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable reads,
 * especially when a read requires several pointer-chasing steps.
 * E.g., what in the non-BPF world (or in BPF with BCC) would be something
 * like:
 *	int x = s->a.b.c->d.e->f->g;
 * can be succinctly achieved using BPF_CORE_READ as:
 *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 *
 * BPF_CORE_READ will decompose the above statement into 4 bpf_core_read()
 * calls (a BPF CO-RE relocatable bpf_probe_read() wrapper), logically
 * equivalent to:
 * 1. const void *__t = s->a.b.c;
 * 2. __t = __t->d.e;
 * 3. __t = __t->f;
 * 4. return __t->g;
 *
 * The equivalence is only logical, because heavy type casting/preservation
 * is involved, and all the reads happen through bpf_probe_read() calls,
 * using __builtin_preserve_access_index() to emit CO-RE relocations.
 *
 * N.B. Only up to 9 "field accessors" are supported, which should be more
 * than enough for any practical purpose.
 */
#define BPF_CORE_READ(src, a, ...)					    \
	({								    \
		___type(src, a, ##__VA_ARGS__) __r;			    \
		BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__);	    \
		__r;							    \
	})

#endif
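/*
 * End-to-end sketch (editor's illustration, not part of the original
 * header): a minimal kprobe program using BPF_CORE_READ(). vmlinux.h is
 * assumed to be generated with bpftool; build and attachment setup are out
 * of scope.
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_core_read.h>
 *
 *	SEC("kprobe/do_exit")
 *	int trace_exit(void *ctx)
 *	{
 *		struct task_struct *task;
 *		pid_t tgid;
 *
 *		task = (struct task_struct *)bpf_get_current_task();
 *		// relocatable equivalent of task->group_leader->tgid:
 *		tgid = BPF_CORE_READ(task, group_leader, tgid);
 *		bpf_printk("exiting tgid: %d", tgid);
 *		return 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */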