Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2/* Copyright (c) 2021 Facebook */
3#ifndef __SKEL_INTERNAL_H
4#define __SKEL_INTERNAL_H
5
6#ifdef __KERNEL__
7#include <linux/fdtable.h>
8#include <linux/mm.h>
9#include <linux/mman.h>
10#include <linux/slab.h>
11#include <linux/bpf.h>
12#else
13#include <unistd.h>
14#include <sys/syscall.h>
15#include <sys/mman.h>
16#include <linux/keyctl.h>
17#include <stdlib.h>
18#include "bpf.h"
19#endif
20
21#ifndef SHA256_DIGEST_LENGTH
22#define SHA256_DIGEST_LENGTH 32
23#endif
24
25#ifndef __NR_bpf
26# if defined(__mips__) && defined(_ABIO32)
27# define __NR_bpf 4355
28# elif defined(__mips__) && defined(_ABIN32)
29# define __NR_bpf 6319
30# elif defined(__mips__) && defined(_ABI64)
31# define __NR_bpf 5315
32# endif
33#endif
34
35/* This file is a base header for auto-generated *.lskel.h files.
36 * Its contents will change and may become part of auto-generation in the future.
37 *
38 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent
39 * and will change from one version of libbpf to another and features
40 * requested during loader program generation.
41 */
/* Per-map descriptor embedded in the generated loader context.
 * Layout is shared with the loader program and is feature dependent
 * (see comment above); it may change between libbpf versions.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;	/* address of initial map data, see skel_prep_map_data() */
};
/* Per-program descriptor; prog_fd is an output of the loader prog. */
struct bpf_prog_desc {
	int prog_fd;
};
52
/* Bits for bpf_loader_ctx::flags. */
enum {
	/* ctx was allocated by the kernel-side skel_alloc(); the generated
	 * loader prog then reads initial values with bpf_probe_read_kernel()
	 * instead of bpf_copy_from_user() (see comment below).
	 */
	BPF_SKEL_KERNEL = (1ULL << 0),
};
56
/* Common header of the generated loader context; passed to the loader
 * program as its ctx_in by bpf_load_and_run().
 */
struct bpf_loader_ctx {
	__u32 sz;		/* total size of the generated ctx, used as ctx_size_in */
	__u32 flags;		/* BPF_SKEL_KERNEL etc. */
	__u32 log_level;	/* verifier log settings forwarded to BPF_PROG_LOAD */
	__u32 log_size;
	__u64 log_buf;
};
64
/* Inputs for bpf_load_and_run(); filled in by generated lskel code. */
struct bpf_load_and_run_opts {
	struct bpf_loader_ctx *ctx;	/* loader context, run as the prog's ctx_in */
	const void *data;		/* blob stored into the single-slot loader map */
	const void *insns;		/* loader program instructions */
	__u32 data_sz;			/* size of 'data'; becomes the map value size */
	__u32 insns_sz;			/* size of 'insns' in bytes */
	const char *errstr;		/* out: static string describing the failure */
	void *signature;		/* user space only: program signature blob */
	__u32 signature_sz;
	__s32 keyring_id;		/* forwarded to attr.keyring_id; presumably the
					 * keyring used to verify 'signature' — confirm */
	void *excl_prog_hash;		/* exclusive prog hash passed to map creation */
	__u32 excl_prog_hash_sz;
};
78
/* Kernel-internal entry point for the bpf syscall; provided elsewhere in the kernel. */
long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

/* Issue a bpf command: directly via kern_sys_bpf() when built into the
 * kernel, via syscall(2) from user space.  Return convention therefore
 * differs: negative error in-kernel vs. -1 with errno in user space.
 */
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return kern_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}
90
91#ifdef __KERNEL__
/* The kernel has no close(2); map close() onto close_fd() so shared
 * lskel code can call close() in both environments.
 */
static inline int close(int fd)
{
	return close_fd(fd);
}
96
/* Kernel side: allocate the zeroed loader context and tag it as
 * kernel-originated so the loader prog uses bpf_probe_read_kernel().
 * Returns NULL on allocation failure.
 * NOTE(review): assumes size >= sizeof(struct bpf_loader_ctx) since the
 * result is treated as a ctx — confirm generated callers guarantee this.
 */
static inline void *skel_alloc(size_t size)
{
	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->flags |= BPF_SKEL_KERNEL;
	return ctx;
}
106
/* Kernel side: release memory obtained from skel_alloc(). */
static inline void skel_free(const void *p)
{
	kfree(p);
}
111
112/* skel->bss/rodata maps are populated the following way:
113 *
114 * For kernel use:
115 * skel_prep_map_data() allocates kernel memory that kernel module can directly access.
116 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
117 * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
118 * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
119 * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
120 * is not necessary.
121 *
122 * For user space:
123 * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
124 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
125 * The loader program will perform copy_from_user() from maps.rodata.initial_value.
126 * skel_finalize_map_data() remaps bpf array map value from the kernel memory into
127 * skel->rodata address.
128 *
129 * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
130 * both kernel and user space. The generated loader program does
131 * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
132 * depending on bpf_loader_ctx->flags.
133 */
/* Kernel side: free the initial-value buffer allocated by
 * skel_prep_map_data(), unless skel_finalize_map_data() already set the
 * sentinel ~0ULL (meaning 'p' now aliases live map memory).
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	if (addr != ~0ULL)
		kvfree(p);
	/* When addr == ~0ULL the 'p' points to
	 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
	 */
}
142
/* Kernel side: copy the initial map value into directly accessible kernel
 * memory.  mmap_sz is used only by the user-space variant.  Returns the
 * allocation (freed later via skel_free_map_data()) or NULL on failure.
 */
static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = kvmalloc(val_sz, GFP_KERNEL);
	if (!addr)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}
153
/* Kernel side: drop the temporary initial-value buffer and repoint the
 * skeleton at the live value inside the (array) map.  Stores ~0ULL into
 * *init_val so skel_free_map_data() knows no kvfree is needed.
 * Returns the map's value address, or NULL if 'fd' is not a usable
 * array map (should not happen after a successful load).
 */
static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	kvfree((void *) (long) *init_val);
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* the addr stays valid, since FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}
176
177#else
178
/* User space: hand back zero-initialized memory for the loader context.
 * Returns NULL on allocation failure; release with skel_free().
 */
static inline void *skel_alloc(size_t size)
{
	void *mem = calloc(1, size);

	return mem;
}
183
/* User space: release memory obtained from skel_alloc(). */
static inline void skel_free(void *p)
{
	free(p);
}
188
189static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
190{
191 munmap(p, sz);
192}
193
194static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
195{
196 void *addr;
197
198 addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
199 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
200 if (addr == (void *) -1)
201 return NULL;
202 memcpy(addr, val, val_sz);
203 return addr;
204}
205
206static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
207{
208 void *addr;
209
210 addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
211 if (addr == (void *) -1)
212 return NULL;
213 return addr;
214}
215#endif
216
/* Close 'fd' only when it is a real (> 0) descriptor; generated lskel code
 * leaves unset fds at 0, so those and negatives yield -EINVAL instead.
 */
static inline int skel_closenz(int fd)
{
	return fd > 0 ? close(fd) : -EINVAL;
}
223
#ifndef offsetofend
/* Offset of the first byte past TYPE::MEMBER.  Used below to size
 * union bpf_attr per command, covering exactly the last field set.
 */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#endif
228
/* BPF_MAP_CREATE wrapper.  attr is zeroed and sized only up to the last
 * field used (excl_prog_hash_size).  The optional exclusive-prog hash
 * restricts which program may use the map.
 * Returns the new map fd or a negative error / -1+errno.
 * NOTE(review): strncpy leaves map_name unterminated if it exactly fills
 * attr.map_name — confirm callers only pass short literals like
 * "__loader.map".
 */
static inline int skel_map_create(enum bpf_map_type map_type,
				  const char *map_name,
				  __u32 key_size,
				  __u32 value_size,
				  __u32 max_entries,
				  const void *excl_prog_hash,
				  __u32 excl_prog_hash_sz)
{
	const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);

	attr.map_type = map_type;
	attr.excl_prog_hash = (unsigned long) excl_prog_hash;
	attr.excl_prog_hash_size = excl_prog_hash_sz;

	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
}
253
/* BPF_MAP_UPDATE_ELEM wrapper: write 'value' at 'key' in map 'fd'.
 * Pointers are passed to the kernel as integers inside bpf_attr.
 */
static inline int skel_map_update_elem(int fd, const void *key,
				       const void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;
	attr.value = (long) value;
	attr.flags = flags;

	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}
268
/* BPF_MAP_DELETE_ELEM wrapper: remove the entry at 'key' from map 'fd'. */
static inline int skel_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long)key;

	return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
}
280
/* BPF_MAP_GET_FD_BY_ID wrapper: obtain a new fd for the map with 'id'. */
static inline int skel_map_get_fd_by_id(__u32 id)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_id = id;

	return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
}
291
/* BPF_RAW_TRACEPOINT_OPEN wrapper: attach prog 'prog_fd' to the raw
 * tracepoint named 'name'.  Returns a link/event fd or negative error.
 */
static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = (long) name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
}
303
/* BPF_LINK_CREATE wrapper: create a bpf link attaching 'prog_fd' to
 * 'target_fd' with the given attach type.  Returns a link fd or error.
 */
static inline int skel_link_create(int prog_fd, int target_fd,
				   enum bpf_attach_type attach_type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
317
/* BPF_OBJ_GET_INFO_BY_FD wrapper: query map info for 'fd', asking the
 * kernel to fill in a SHA256 hash of the (frozen) map contents.
 * The fetched info/hash is discarded; presumably the call is made for its
 * side effect of computing the map hash before signed prog load — confirm.
 * Returns 0 or a negative error / -1+errno.
 */
static inline int skel_obj_get_info_by_fd(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	__u8 sha[SHA256_DIGEST_LENGTH];
	struct bpf_map_info info;
	__u32 info_len = sizeof(info);
	union bpf_attr attr;

	memset(&info, 0, sizeof(info));
	info.hash = (long) &sha;
	info.hash_size = SHA256_DIGEST_LENGTH;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = fd;
	attr.info.info = (long) &info;
	attr.info.info_len = info_len;
	return skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
}
336
/* BPF_MAP_FREEZE wrapper: make map 'fd' read-only from user space. */
static inline int skel_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	return skel_sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
}
#ifdef __KERNEL__
/* In the kernel, skel_sys_bpf() already returned a negative error in 'err';
 * in user space it returned -1 with errno set, so capture -errno instead.
 */
#define set_err
#else
#define set_err err = -errno
#endif
352
/* Core lskel loading sequence:
 *   1. create a single-slot array map ("__loader.map") holding all skeleton
 *      data and store opts->data in it;
 *   2. (user space) freeze the map and fetch its info/hash;
 *   3. load the generated BPF_PROG_TYPE_SYSCALL loader program
 *      ("__loader.prog"), giving it the map via fd_array;
 *   4. run it once via BPF_PROG_RUN with opts->ctx as its context, which
 *      creates/loads the skeleton's real maps and programs.
 * Returns 0 on success or a negative error; opts->errstr points at a
 * static string describing the failing step.  Both temporary fds are
 * closed on every path.
 */
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, keyring_id);
	const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
	int map_fd = -1, prog_fd = -1, key = 0, err;
	union bpf_attr attr;

	/* 4-byte key, one element: the whole data blob lives at key 0 */
	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1,
				       opts->excl_prog_hash, opts->excl_prog_hash_sz);
	if (map_fd < 0) {
		opts->errstr = "failed to create loader map";
		set_err;
		goto out;
	}

	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
	if (err < 0) {
		opts->errstr = "failed to update loader map";
		set_err;
		goto out;
	}

#ifndef __KERNEL__
	/* freeze before hashing so the hashed contents can't change */
	err = skel_map_freeze(map_fd);
	if (err < 0) {
		opts->errstr = "failed to freeze map";
		set_err;
		goto out;
	}
	err = skel_obj_get_info_by_fd(map_fd);
	if (err < 0) {
		opts->errstr = "failed to fetch obj info";
		set_err;
		goto out;
	}
#endif

	memset(&attr, 0, prog_load_attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SYSCALL;
	attr.insns = (long) opts->insns;
	attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
	attr.license = (long) "Dual BSD/GPL";
#ifndef __KERNEL__
	attr.signature = (long) opts->signature;
	attr.signature_size = opts->signature_sz;
#else
	if (opts->signature || opts->signature_sz)
		pr_warn("signatures are not supported from bpf_preload\n");
#endif
	attr.keyring_id = opts->keyring_id;
	memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
	/* loader prog resolves the data map through this one-entry fd array */
	attr.fd_array = (long) &map_fd;
	attr.log_level = opts->ctx->log_level;
	attr.log_size = opts->ctx->log_size;
	attr.log_buf = opts->ctx->log_buf;
	/* loader prog sleeps in bpf_copy_from_user() etc. */
	attr.prog_flags = BPF_F_SLEEPABLE;
	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
	if (prog_fd < 0) {
		opts->errstr = "failed to load loader prog";
		set_err;
		goto out;
	}

	memset(&attr, 0, test_run_attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (long) opts->ctx;
	attr.test.ctx_size_in = opts->ctx->sz;
	err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
	if (err < 0 || (int)attr.test.retval < 0) {
		if (err < 0) {
			opts->errstr = "failed to execute loader prog";
			set_err;
		} else {
			/* prog ran but reported a (negative) error itself */
			opts->errstr = "error returned by loader prog";
			err = (int)attr.test.retval;
#ifndef __KERNEL__
			errno = -err;
#endif
		}
		goto out;
	}
	err = 0;
out:
	if (map_fd >= 0)
		close(map_fd);
	if (prog_fd >= 0)
		close(prog_fd);
	return err;
}
442
443#endif