// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <asm/byteorder.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

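/* Append one instruction to the loader program. Errors are sticky: once
 * gen->error is set, realloc_insn_buf() keeps failing and emit() silently
 * drops further instructions.
 */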
static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
static void emit_signature_match(struct bpf_gen *gen);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
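	/* bpf_probe_read_kernel() with a NULL source fails and zero-fills
	 * the destination, which is what clears the stack area here.
	 */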
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
	if (OPTS_GET(gen->opts, gen_hash, false))
		emit_signature_match(gen);
}

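/* Copy 'size' bytes of 'data' into the data blob, rounding the allocation
 * up to 8 bytes and zero-padding the tail (NULL 'data' reserves zeroed
 * space). Returns the blob-relative offset of the copied data, or 0 on
 * allocation failure (with gen->error set by realloc_data_buf()).
 */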
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}

/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

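/* Translate an access size in bytes to the corresponding BPF_SIZE encoding,
 * or -1 if the size cannot be expressed by a single BPF load/store insn.
 */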
static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

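/* *(blob + off) = *(blob + blob_off), copying 'size' bytes */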
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

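/* *(ctx + ctx_off) = *(blob + blob_off), copying 'size' bytes */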
static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

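/* *(blob + off) = *(ctx + ctx_off), copying 'size' bytes; with
 * check_non_zero a zero ctx value leaves the blob untouched
 */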
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

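/* *(blob + off) = *(stack + stack_off), copying 'size' bytes */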
static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

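/* *(ctx + ctx_off) = *(stack + stack_off), copying 'size' bytes */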
static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

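/* Emit "if (R7 < 0) goto cleanup". The jump offset is counted in
 * instructions, hence the division of the byte offset by 8 and the -1
 * for the jump instruction itself.
 */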
static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

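/* Close the FD in R1 if it is positive. R9 preserves the FD for the
 * debug print; the sys_close() result lands in R0.
 */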
static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns
			       * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

static void compute_sha_update_offsets(struct bpf_gen *gen);

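/* Finalize the loader program: copy prog and map FDs into the context,
 * emit the successful-exit path, optionally patch in the data-blob hash,
 * and hand the generated insns/data back via gen->opts.
 *
 * A rough sketch of the expected call sequence (hypothetical driver code,
 * not part of this file; libbpf's object-load path is the real caller):
 *
 *	bpf_gen__init(gen, log_level, nr_progs, nr_maps);
 *	bpf_gen__load_btf(gen, btf_data, btf_size);
 *	bpf_gen__map_create(gen, ...);		// once per map
 *	bpf_gen__prog_load(gen, ...);		// once per program
 *	err = bpf_gen__finish(gen, nr_progs, nr_maps);
 */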
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	if (OPTS_GET(gen->opts, gen_hash, false))
		compute_sha_update_offsets(gen);

	pr_debug("gen: finish %s\n", errstr(gen->error));
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;

		/* use target endianness for embedded loader */
		if (gen->swapped_endian) {
			struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
			int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);

			for (i = 0; i < insn_cnt; i++)
				bpf_insn_bswap(insn++);
		}
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

/*
 * Fields of bpf_attr are set to values in native byte-order before being
 * written to the target-bound data blob, and may need endian conversion.
 * This macro allows providing the correct value in situ more simply than
 * writing a separate converter for *all fields* of *all records* included
 * in union bpf_attr. Note that sizeof(rval) should match the assignment
 * target to avoid runtime problems.
 */
#define tgt_endian(rval) ({					\
	typeof(rval) _val = (rval);				\
	if (gen->swapped_endian) {				\
		switch (sizeof(_val)) {				\
		case 1: break;					\
		case 2: _val = bswap_16(_val); break;		\
		case 4: _val = bswap_32(_val); break;		\
		case 8: _val = bswap_64(_val); break;		\
		default: pr_warn("unsupported bswap size!\n");	\
		}						\
	}							\
	_val;							\
})

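/* Patch the ld_imm64 placeholders emitted by emit_signature_match() with
 * the SHA-256 of the final data blob: for each hash dword, insn[0].imm
 * receives the low 32 bits and insn[1].imm the high 32 bits.
 */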
static void compute_sha_update_offsets(struct bpf_gen *gen)
{
	__u64 sha[SHA256_DWORD_SIZE];
	__u64 sha_dw;
	int i;

	libbpf_sha256(gen->data_start, gen->data_cur - gen->data_start, (__u8 *)sha);
	for (i = 0; i < SHA256_DWORD_SIZE; i++) {
		struct bpf_insn *insn =
			(struct bpf_insn *)(gen->insn_start + gen->hash_insn_offset[i]);
		sha_dw = tgt_endian(sha[i]);
		insn[0].imm = (__u32)sha_dw;
		insn[1].imm = sha_dw >> 32;
	}
}

void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = tgt_endian(btf_raw_size);
	btf_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
		 btf_data, btf_raw_size, btf_load_attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = tgt_endian(map_type);
	attr.key_size = tgt_endian(key_size);
	attr.value_size = tgt_endian(value_size);
	attr.map_flags = tgt_endian(map_attr->map_flags);
	attr.map_extra = tgt_endian(map_attr->map_extra);
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = tgt_endian(map_attr->numa_node);
	attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
	attr.max_entries = tgt_endian(max_entries);
	attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
	attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
		 map_name, map_idx, map_type, map_attr->btf_value_type_id,
		 map_create_attr, attr_size);

	if (map_attr->btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  map_name, map_idx, map_type, value_size,
		  map_attr->btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

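/* Emit code that compares each SHA-256 dword of the loader's data map
 * against an immediate placeholder (patched later by
 * compute_sha_update_offsets()) and jumps to cleanup with -EINVAL in R7
 * on any mismatch.
 */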
static void emit_signature_match(struct bpf_gen *gen)
{
	__s64 off;
	int i;

	for (i = 0; i < SHA256_DWORD_SIZE; i++) {
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX,
						 0, 0, 0, 0));
		emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, i * sizeof(__u64)));
		gen->hash_insn_offset[i] = gen->insn_cur - gen->insn_start;
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_3, 0, 0, 0, 0, 0));

		off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
		if (is_simm16(off)) {
			emit(gen, BPF_MOV64_IMM(BPF_REG_7, -EINVAL));
			emit(gen, BPF_JMP_REG(BPF_JNE, BPF_REG_2, BPF_REG_3, off));
		} else {
			gen->error = -ERANGE;
			emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
		}
	}
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, bool is_ld64, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->is_ld64 = is_ld64;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
		    !strcmp(kdesc->name, relo->name)) {
			kdesc->ref++;
			return kdesc;
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	kdesc->is_ld64 = relo->is_ld64;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot, 0 for vmlinux */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* jump to insn[insn_idx].off store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

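/* Mask that keeps the dst_reg nibble and clears the src_reg nibble in the
 * byte following the opcode. Which nibble is which depends on the host's
 * bitfield layout, and flips once more when generating for a
 * swapped-endian target.
 */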
static __u32 src_reg_mask(struct bpf_gen *gen)
{
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
	return gen->swapped_endian ? 0xf0 : 0x0f;
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
	return gen->swapped_endian ? 0x0f : 0xf0;
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
		 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask(gen);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

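/* Emit the code that resolves one extern ksym/kfunc relocation at load
 * time. R8 is pointed at the blob copy of the instruction being patched.
 */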
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d %s\n",
		 relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	if (relo->is_ld64) {
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
	} else {
		emit_relo_kfunc_btf(gen, relo, insn);
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	struct ksym_desc *kdesc;
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		/* only close fds for typed ksyms and kfuncs */
		if (kdesc->is_ld64 && !kdesc->typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = kdesc->insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (!kdesc->is_ld64) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
			if (kdesc->off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

/* Convert func, line, and core relo info blobs to target endianness */
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
			    int core_relos, struct bpf_prog_load_opts *load_attr)
{
	struct bpf_func_info *fi = gen->data_start + func_info;
	struct bpf_line_info *li = gen->data_start + line_info;
	struct bpf_core_relo *cr = gen->data_start + core_relos;
	int i;

	for (i = 0; i < load_attr->func_info_cnt; i++)
		bpf_func_info_bswap(fi++);

	for (i = 0; i < load_attr->line_info_cnt; i++)
		bpf_line_info_bswap(li++);

	for (i = 0; i < gen->core_relo_cnt; i++)
		bpf_core_relo_bswap(cr++);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int func_info_tot_sz = load_attr->func_info_cnt *
			       load_attr->func_info_rec_size;
	int line_info_tot_sz = load_attr->line_info_cnt *
			       load_attr->line_info_rec_size;
	int core_relo_tot_sz = gen->core_relo_cnt *
			       sizeof(struct bpf_core_relo);
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
	pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
		 prog_idx, prog_type, insns_off, insn_cnt, license_off);

	/* convert blob insns to target endianness */
	if (gen->swapped_endian) {
		struct bpf_insn *insn = gen->data_start + insns_off;
		int i;

		for (i = 0; i < insn_cnt; i++, insn++)
			bpf_insn_bswap(insn);
	}

	attr.prog_type = tgt_endian(prog_type);
	attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
	attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
	attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
	attr.kern_version = 0;
	attr.insn_cnt = tgt_endian((__u32)insn_cnt);
	attr.prog_flags = tgt_endian(load_attr->prog_flags);

	attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
	attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
	func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
	pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
		 func_info, load_attr->func_info_cnt,
		 load_attr->func_info_rec_size);

	attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
	attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
	line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
	pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
		 line_info, load_attr->line_info_cnt,
		 load_attr->line_info_rec_size);

	attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
	attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
	core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
	pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
		 core_relos, gen->core_relo_cnt,
		 sizeof(struct bpf_core_relo));

	/* convert all info blobs to target endianness */
	if (gen->swapped_endian)
		info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: prog_load: attr: off %d size %d\n",
		 prog_load_attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
		 map_idx, value, value_size, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;
	int tgt_slot;

	memset(&attr, 0, attr_size);

	tgt_slot = tgt_endian(slot);
	key = add_data(gen, &tgt_slot, sizeof(tgt_slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
		 outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
		 map_idx, map_freeze_attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}