/* samples/bpf/bpf_load.c at v4.14-rc7 */
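/*
 * Minimal ELF loader used by the kernel's samples/bpf programs: it
 * parses an object file emitted by LLVM, creates the maps described in
 * the "maps" section, patches the resulting map fds into the BPF
 * instruction stream via relocations, and loads/attaches each program
 * section according to its name prefix (kprobe/, tracepoint/, xdp,
 * socket, cgroup/, ...).
 */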
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <libelf.h>
#include <gelf.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <poll.h>
#include <ctype.h>
#include <assert.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"

#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count = 0;

static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}
	return 0;
}

static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	bool is_sockops = strncmp(event, "sockops", 7) == 0;
	bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else if (is_sockops) {
		prog_type = BPF_PROG_TYPE_SOCK_OPS;
	} else if (is_sk_skb) {
		prog_type = BPF_PROG_TYPE_SK_SKB;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket || is_sockops || is_sk_skb) {
		if (is_socket)
			event += 6;
		else
			event += 7;
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_kprobe || is_kretprobe) {
		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		if (isdigit(*event))
			return populate_prog_array(event, fd);

		snprintf(buf, sizeof(buf),
			 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
			 is_kprobe ? 'p' : 'r', event, event);
		err = system(buf);
		if (err < 0) {
			printf("failed to create kprobe '%s' error '%s'\n",
			       event, strerror(errno));
			return -1;
		}

		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}

	close(efd);

	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);

	return 0;
}

static int load_maps(struct bpf_map_data *maps, int nr_maps,
		     fixup_map_cb fixup_map)
{
	int i, numa_node;

	for (i = 0; i < nr_maps; i++) {
		if (fixup_map) {
			fixup_map(&maps[i], i);
			/* Allow userspace to assign map FD prior to creation */
			if (maps[i].fd != -1) {
				map_fd[i] = maps[i].fd;
				continue;
			}
		}

		numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?
			maps[i].def.numa_node : -1;

		if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		    maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
			int inner_map_fd = map_fd[maps[i].def.inner_map_idx];

			map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type,
							maps[i].def.key_size,
							inner_map_fd,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		} else {
			map_fd[i] = bpf_create_map_node(maps[i].def.type,
							maps[i].def.key_size,
							maps[i].def.value_size,
							maps[i].def.max_entries,
							maps[i].def.map_flags,
							numa_node);
		}
		if (map_fd[i] < 0) {
			printf("failed to create a map: %d %s\n",
			       errno, strerror(errno));
			return 1;
		}
		maps[i].fd = map_fd[i];

		if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
			prog_array_fd = map_fd[i];
	}
	return 0;
}

static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}

static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
				GElf_Shdr *shdr, struct bpf_insn *insn,
				struct bpf_map_data *maps, int nr_maps)
{
	int i, nrels;

	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		bool match = false;
		int map_idx;

		gelf_getrel(data, i, &rel);

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);

		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);

		if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			printf("invalid relo for insn[%d].code 0x%x\n",
			       insn_idx, insn[insn_idx].code);
			return 1;
		}
		insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;

		/* Match FD relocation against recorded map_data[] offset */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].elf_offset == sym.st_value) {
				match = true;
				break;
			}
		}
		if (match) {
			insn[insn_idx].imm = maps[map_idx].fd;
		} else {
			printf("invalid relo for insn[%d] no map_data match\n",
			       insn_idx);
			return 1;
		}
	}

	return 0;
}

static int cmp_symbols(const void *l, const void *r)
{
	const GElf_Sym *lsym = (const GElf_Sym *)l;
	const GElf_Sym *rsym = (const GElf_Sym *)r;

	if (lsym->st_value < rsym->st_value)
		return -1;
	else if (lsym->st_value > rsym->st_value)
		return 1;
	else
		return 0;
}

static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
				 Elf *elf, Elf_Data *symbols, int strtabidx)
{
	int map_sz_elf, map_sz_copy;
	bool validate_zero = false;
	Elf_Data *data_maps;
	int i, nr_maps;
	GElf_Sym *sym;
	Elf_Scn *scn;

	if (maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	/* Get data for maps section via elf index */
	scn = elf_getscn(elf, maps_shndx);
	if (scn)
		data_maps = elf_getdata(scn, NULL);
	if (!scn || !data_maps) {
		printf("Failed to get Elf_Data from maps section %d\n",
		       maps_shndx);
		return -EINVAL;
	}

	/* For each map get corresponding symbol table entry */
	sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
	for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		assert(nr_maps < MAX_MAPS+1);
		if (!gelf_getsym(symbols, i, &sym[nr_maps]))
			continue;
		if (sym[nr_maps].st_shndx != maps_shndx)
			continue;
		/* Only increment if symbol is in the maps section */
		nr_maps++;
	}

	/* Align to map_fd[] order, via sort on offset in sym.st_value */
	qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);

	/* Keeping compatible with ELF maps section changes
	 * ------------------------------------------------
	 * The size of struct bpf_map_def is known by the loader code,
	 * but the struct stored in the ELF file can be different.
	 *
	 * Unfortunately sym[i].st_size is zero.  To calculate the
	 * struct size stored in the ELF file, assume all structs have
	 * the same size, and simply divide by the number of map
	 * symbols.
	 */
	map_sz_elf = data_maps->d_size / nr_maps;
	map_sz_copy = sizeof(struct bpf_map_def);
	if (map_sz_elf < map_sz_copy) {
		/*
		 * Backward compat, loading older ELF file with
		 * smaller struct, keeping remaining bytes zero.
		 */
		map_sz_copy = map_sz_elf;
	} else if (map_sz_elf > map_sz_copy) {
		/*
		 * Forward compat, loading newer ELF file with larger
		 * struct with unknown features. Assume zero means
		 * feature not used.  Thus, validate rest of struct
		 * data is zero.
		 */
		validate_zero = true;
	}

	/* Memcpy relevant part of ELF maps data to loader maps */
	for (i = 0; i < nr_maps; i++) {
		unsigned char *addr, *end;
		struct bpf_map_def *def;
		const char *map_name;
		size_t offset;

		map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
		maps[i].name = strdup(map_name);
		if (!maps[i].name) {
			printf("strdup(%s): %s(%d)\n", map_name,
			       strerror(errno), errno);
			free(sym);
			return -errno;
		}

		/* Symbol value is offset into ELF maps section data area */
		offset = sym[i].st_value;
		def = (struct bpf_map_def *)(data_maps->d_buf + offset);
		maps[i].elf_offset = offset;
		memset(&maps[i].def, 0, sizeof(struct bpf_map_def));
		memcpy(&maps[i].def, def, map_sz_copy);

		/* Verify no newer features were requested */
		if (validate_zero) {
			addr = (unsigned char*) def + map_sz_copy;
			end = (unsigned char*) def + map_sz_elf;
			for (; addr < end; addr++) {
				if (*addr != 0) {
					free(sym);
					return -EFBIG;
				}
			}
		}
	}

	free(sym);
	return nr_maps;
}

static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
{
	int fd, i, ret, maps_shndx = -1, strtabidx = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
	char *shname, *shname_prog;
	int nr_maps = 0;

	/* reset global variables */
	kern_version = 0;
	memset(license, 0, sizeof(license));
	memset(processed_sec, 0, sizeof(processed_sec));

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);

	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;

	/* clear all kprobes */
	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			int j;

			maps_shndx = i;
			data_maps = data;
			for (j = 0; j < MAX_MAPS; j++)
				map_data[j].fd = -1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			strtabidx = shdr.sh_link;
			symbols = data;
		}
	}

	ret = 1;

	if (!symbols) {
		printf("missing SHT_SYMTAB section\n");
		goto done;
	}

	if (data_maps) {
		nr_maps = load_elf_maps_section(map_data, maps_shndx,
						elf, symbols, strtabidx);
		if (nr_maps < 0) {
			printf("Error: Failed loading ELF maps (errno:%d):%s\n",
			       nr_maps, strerror(-nr_maps));
			ret = 1;
			goto done;
		}
		if (load_maps(map_data, nr_maps, fixup_map))
			goto done;
		map_data_count = nr_maps;

		processed_sec[maps_shndx] = true;
	}

	/* process all relo sections, and rewrite bpf insns for maps */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			/* locate prog sec that need map fixup (relocations) */
			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			if (shdr_prog.sh_type != SHT_PROGBITS ||
			    !(shdr_prog.sh_flags & SHF_EXECINSTR))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;
			processed_sec[i] = true; /* relo section */

			if (parse_relo_and_apply(data, symbols, &shdr, insns,
						 map_data, nr_maps))
				continue;
		}
	}

	/* load programs */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "tracepoint/", 11) == 0 ||
		    memcmp(shname, "xdp", 3) == 0 ||
		    memcmp(shname, "perf_event", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0 ||
		    memcmp(shname, "cgroup/", 7) == 0 ||
		    memcmp(shname, "sockops", 7) == 0 ||
		    memcmp(shname, "sk_skb", 6) == 0) {
			ret = load_and_attach(shname, data->d_buf,
					      data->d_size);
			if (ret != 0)
				goto done;
		}
	}

	ret = 0;
done:
	close(fd);
	return ret;
}

int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}

int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}

void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		/* leave room for the NUL terminator; reading a full
		 * sizeof(buf) would make buf[sz] write past the array
		 */
		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}

#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;

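/*
 * Kernel symbol table: load_kallsyms() snapshots /proc/kallsyms into
 * syms[], sorted by address, so ksym_search() can resolve an
 * instruction pointer (e.g. from a stack trace) back to its symbol
 * name with a binary search.
 */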
static int ksym_cmp(const void *p1, const void *p2)
{
	/* compare explicitly instead of returning the subtraction:
	 * a 64-bit address difference truncated to int can misorder
	 */
	const struct ksym *s1 = p1, *s2 = p2;

	if (s1->addr < s2->addr)
		return -1;
	return s1->addr > s2->addr;
}

int load_kallsyms(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char func[256], buf[256];
	char symbol;
	void *addr;
	int i = 0;

	if (!f)
		return -ENOENT;

	while (!feof(f)) {
		if (!fgets(buf, sizeof(buf), f))
			break;
		if (sscanf(buf, "%p %c %255s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
		i++;
	}
	sym_cnt = i;
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
	return 0;
}

struct ksym *ksym_search(long key)
{
	int start = 0, end = sym_cnt;

	while (start < end) {
		size_t mid = start + (end - start) / 2;

		/* compare directly rather than via an int difference,
		 * which could truncate on 64-bit addresses
		 */
		if (key < syms[mid].addr)
			end = mid;
		else if (key > syms[mid].addr)
			start = mid + 1;
		else
			return &syms[mid];
	}

	if (start >= 1 && syms[start - 1].addr < key &&
	    key < syms[start].addr)
		/* valid ksym */
		return &syms[start - 1];

	/* out of range. return _stext */
	return &syms[0];
}

int set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifinfo;
		char attrbuf[64];
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		printf("open netlink socket: %s\n", strerror(errno));
		return -1;
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		printf("bind to netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* start nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = 1/*IFLA_XDP_FD*/;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		printf("send to netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		printf("recv from netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != getpid()) {
			printf("Wrong pid %d, expected %d\n",
%d\n", 761 nh->nlmsg_pid, getpid()); 762 goto cleanup; 763 } 764 if (nh->nlmsg_seq != seq) { 765 printf("Wrong seq %d, expected %d\n", 766 nh->nlmsg_seq, seq); 767 goto cleanup; 768 } 769 switch (nh->nlmsg_type) { 770 case NLMSG_ERROR: 771 err = (struct nlmsgerr *)NLMSG_DATA(nh); 772 if (!err->error) 773 continue; 774 printf("nlmsg error %s\n", strerror(-err->error)); 775 goto cleanup; 776 case NLMSG_DONE: 777 break; 778 } 779 } 780 781 ret = 0; 782 783cleanup: 784 close(sock); 785 return ret; 786}