Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: add initial suite for selftests

Add a start of a test suite for kernel selftests. This moves test_verifier
and test_maps over to tools/testing/selftests/bpf/ along with various
code improvements and also adds a script for invoking test_bpf module.
The test suite can simply be run via the selftest framework, e.g.:

# cd tools/testing/selftests/bpf/
# make
# make run_tests

Both test_verifier and test_maps were kind of misplaced in the samples/bpf/
directory, and we had been looking into adding them to selftests for a while
now, so they can be picked up by the kbuild bot et al. and hopefully also get
more exposure and thus new test case additions.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Daniel Borkmann and committed by
David S. Miller
5aa5bd14 1a776b9c

+1106 -775
+2 -1
MAINTAINERS
··· 2521 2521 L: linux-kernel@vger.kernel.org 2522 2522 S: Supported 2523 2523 F: kernel/bpf/ 2524 + F: tools/testing/selftests/bpf/ 2525 + F: lib/test_bpf.c 2524 2526 2525 2527 BROADCOM B44 10/100 ETHERNET DRIVER 2526 2528 M: Michael Chan <michael.chan@broadcom.com> ··· 8415 8413 F: tools/net/ 8416 8414 F: tools/testing/selftests/net/ 8417 8415 F: lib/random32.c 8418 - F: lib/test_bpf.c 8419 8416 8420 8417 NETWORKING [IPv4/IPv6] 8421 8418 M: "David S. Miller" <davem@davemloft.net>
-3
samples/bpf/Makefile
··· 2 2 obj- := dummy.o 3 3 4 4 # List of programs to build 5 - hostprogs-y := test_verifier test_maps 6 5 hostprogs-y += sock_example 7 6 hostprogs-y += fds_example 8 7 hostprogs-y += sockex1 ··· 27 28 hostprogs-y += trace_event 28 29 hostprogs-y += sampleip 29 30 30 - test_verifier-objs := test_verifier.o libbpf.o 31 - test_maps-objs := test_maps.o libbpf.o 32 31 sock_example-objs := sock_example.o libbpf.o 33 32 fds_example-objs := bpf_load.o libbpf.o fds_example.o 34 33 sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
-503
samples/bpf/test_maps.c
··· 1 - /* 2 - * Testsuite for eBPF maps 3 - * 4 - * Copyright (c) 2014 PLUMgrid, http://plumgrid.com 5 - * Copyright (c) 2016 Facebook 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of version 2 of the GNU General Public 9 - * License as published by the Free Software Foundation. 10 - */ 11 - #include <stdio.h> 12 - #include <unistd.h> 13 - #include <linux/bpf.h> 14 - #include <errno.h> 15 - #include <string.h> 16 - #include <assert.h> 17 - #include <sys/wait.h> 18 - #include <stdlib.h> 19 - #include "libbpf.h" 20 - 21 - static int map_flags; 22 - 23 - /* sanity tests for map API */ 24 - static void test_hashmap_sanity(int i, void *data) 25 - { 26 - long long key, next_key, value; 27 - int map_fd; 28 - 29 - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 30 - 2, map_flags); 31 - if (map_fd < 0) { 32 - printf("failed to create hashmap '%s'\n", strerror(errno)); 33 - exit(1); 34 - } 35 - 36 - key = 1; 37 - value = 1234; 38 - /* insert key=1 element */ 39 - assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); 40 - 41 - value = 0; 42 - /* BPF_NOEXIST means: add new element if it doesn't exist */ 43 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 44 - /* key=1 already exists */ 45 - errno == EEXIST); 46 - 47 - assert(bpf_update_elem(map_fd, &key, &value, -1) == -1 && errno == EINVAL); 48 - 49 - /* check that key=1 can be found */ 50 - assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234); 51 - 52 - key = 2; 53 - /* check that key=2 is not found */ 54 - assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT); 55 - 56 - /* BPF_EXIST means: update existing element */ 57 - assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 && 58 - /* key=2 is not there */ 59 - errno == ENOENT); 60 - 61 - /* insert key=2 element */ 62 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); 63 - 64 - /* key=1 and key=2 were 
inserted, check that key=0 cannot be inserted 65 - * due to max_entries limit 66 - */ 67 - key = 0; 68 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 69 - errno == E2BIG); 70 - 71 - /* update existing element, thought the map is full */ 72 - key = 1; 73 - assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); 74 - key = 2; 75 - assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); 76 - key = 1; 77 - assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); 78 - 79 - /* check that key = 0 doesn't exist */ 80 - key = 0; 81 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); 82 - 83 - /* iterate over two elements */ 84 - assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && 85 - (next_key == 1 || next_key == 2)); 86 - assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && 87 - (next_key == 1 || next_key == 2)); 88 - assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && 89 - errno == ENOENT); 90 - 91 - /* delete both elements */ 92 - key = 1; 93 - assert(bpf_delete_elem(map_fd, &key) == 0); 94 - key = 2; 95 - assert(bpf_delete_elem(map_fd, &key) == 0); 96 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); 97 - 98 - key = 0; 99 - /* check that map is empty */ 100 - assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 && 101 - errno == ENOENT); 102 - close(map_fd); 103 - } 104 - 105 - /* sanity tests for percpu map API */ 106 - static void test_percpu_hashmap_sanity(int task, void *data) 107 - { 108 - long long key, next_key; 109 - int expected_key_mask = 0; 110 - unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 111 - long long value[nr_cpus]; 112 - int map_fd, i; 113 - 114 - map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), 115 - sizeof(value[0]), 2, map_flags); 116 - if (map_fd < 0) { 117 - printf("failed to create hashmap '%s'\n", strerror(errno)); 118 - exit(1); 119 - } 120 - 121 - for (i = 0; i < nr_cpus; i++) 122 - value[i] = i + 100; 123 - key = 1; 124 - /* 
insert key=1 element */ 125 - assert(!(expected_key_mask & key)); 126 - assert(bpf_update_elem(map_fd, &key, value, BPF_ANY) == 0); 127 - expected_key_mask |= key; 128 - 129 - /* BPF_NOEXIST means: add new element if it doesn't exist */ 130 - assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 && 131 - /* key=1 already exists */ 132 - errno == EEXIST); 133 - 134 - /* -1 is an invalid flag */ 135 - assert(bpf_update_elem(map_fd, &key, value, -1) == -1 && 136 - errno == EINVAL); 137 - 138 - /* check that key=1 can be found. value could be 0 if the lookup 139 - * was run from a different cpu. 140 - */ 141 - value[0] = 1; 142 - assert(bpf_lookup_elem(map_fd, &key, value) == 0 && value[0] == 100); 143 - 144 - key = 2; 145 - /* check that key=2 is not found */ 146 - assert(bpf_lookup_elem(map_fd, &key, value) == -1 && errno == ENOENT); 147 - 148 - /* BPF_EXIST means: update existing element */ 149 - assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == -1 && 150 - /* key=2 is not there */ 151 - errno == ENOENT); 152 - 153 - /* insert key=2 element */ 154 - assert(!(expected_key_mask & key)); 155 - assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0); 156 - expected_key_mask |= key; 157 - 158 - /* key=1 and key=2 were inserted, check that key=0 cannot be inserted 159 - * due to max_entries limit 160 - */ 161 - key = 0; 162 - assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 && 163 - errno == E2BIG); 164 - 165 - /* check that key = 0 doesn't exist */ 166 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); 167 - 168 - /* iterate over two elements */ 169 - while (!bpf_get_next_key(map_fd, &key, &next_key)) { 170 - assert((expected_key_mask & next_key) == next_key); 171 - expected_key_mask &= ~next_key; 172 - 173 - assert(bpf_lookup_elem(map_fd, &next_key, value) == 0); 174 - for (i = 0; i < nr_cpus; i++) 175 - assert(value[i] == i + 100); 176 - 177 - key = next_key; 178 - } 179 - assert(errno == ENOENT); 180 - 181 - /* 
Update with BPF_EXIST */ 182 - key = 1; 183 - assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == 0); 184 - 185 - /* delete both elements */ 186 - key = 1; 187 - assert(bpf_delete_elem(map_fd, &key) == 0); 188 - key = 2; 189 - assert(bpf_delete_elem(map_fd, &key) == 0); 190 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); 191 - 192 - key = 0; 193 - /* check that map is empty */ 194 - assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 && 195 - errno == ENOENT); 196 - close(map_fd); 197 - } 198 - 199 - static void test_arraymap_sanity(int i, void *data) 200 - { 201 - int key, next_key, map_fd; 202 - long long value; 203 - 204 - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 205 - 2, 0); 206 - if (map_fd < 0) { 207 - printf("failed to create arraymap '%s'\n", strerror(errno)); 208 - exit(1); 209 - } 210 - 211 - key = 1; 212 - value = 1234; 213 - /* insert key=1 element */ 214 - assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); 215 - 216 - value = 0; 217 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 218 - errno == EEXIST); 219 - 220 - /* check that key=1 can be found */ 221 - assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234); 222 - 223 - key = 0; 224 - /* check that key=0 is also found and zero initialized */ 225 - assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0); 226 - 227 - 228 - /* key=0 and key=1 were inserted, check that key=2 cannot be inserted 229 - * due to max_entries limit 230 - */ 231 - key = 2; 232 - assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 && 233 - errno == E2BIG); 234 - 235 - /* check that key = 2 doesn't exist */ 236 - assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT); 237 - 238 - /* iterate over two elements */ 239 - assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && 240 - next_key == 0); 241 - assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && 242 - next_key == 1); 243 - 
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && 244 - errno == ENOENT); 245 - 246 - /* delete shouldn't succeed */ 247 - key = 1; 248 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL); 249 - 250 - close(map_fd); 251 - } 252 - 253 - static void test_percpu_arraymap_many_keys(void) 254 - { 255 - unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 256 - unsigned nr_keys = 20000; 257 - long values[nr_cpus]; 258 - int key, map_fd, i; 259 - 260 - map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 261 - sizeof(values[0]), nr_keys, 0); 262 - if (map_fd < 0) { 263 - printf("failed to create per-cpu arraymap '%s'\n", 264 - strerror(errno)); 265 - exit(1); 266 - } 267 - 268 - for (i = 0; i < nr_cpus; i++) 269 - values[i] = i + 10; 270 - 271 - for (key = 0; key < nr_keys; key++) 272 - assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0); 273 - 274 - for (key = 0; key < nr_keys; key++) { 275 - for (i = 0; i < nr_cpus; i++) 276 - values[i] = 0; 277 - assert(bpf_lookup_elem(map_fd, &key, values) == 0); 278 - for (i = 0; i < nr_cpus; i++) 279 - assert(values[i] == i + 10); 280 - } 281 - 282 - close(map_fd); 283 - } 284 - 285 - static void test_percpu_arraymap_sanity(int i, void *data) 286 - { 287 - unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 288 - long values[nr_cpus]; 289 - int key, next_key, map_fd; 290 - 291 - map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 292 - sizeof(values[0]), 2, 0); 293 - if (map_fd < 0) { 294 - printf("failed to create arraymap '%s'\n", strerror(errno)); 295 - exit(1); 296 - } 297 - 298 - for (i = 0; i < nr_cpus; i++) 299 - values[i] = i + 100; 300 - 301 - key = 1; 302 - /* insert key=1 element */ 303 - assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0); 304 - 305 - values[0] = 0; 306 - assert(bpf_update_elem(map_fd, &key, values, BPF_NOEXIST) == -1 && 307 - errno == EEXIST); 308 - 309 - /* check that key=1 can be found */ 310 - assert(bpf_lookup_elem(map_fd, &key, values) == 
0 && values[0] == 100); 311 - 312 - key = 0; 313 - /* check that key=0 is also found and zero initialized */ 314 - assert(bpf_lookup_elem(map_fd, &key, values) == 0 && 315 - values[0] == 0 && values[nr_cpus - 1] == 0); 316 - 317 - 318 - /* check that key=2 cannot be inserted due to max_entries limit */ 319 - key = 2; 320 - assert(bpf_update_elem(map_fd, &key, values, BPF_EXIST) == -1 && 321 - errno == E2BIG); 322 - 323 - /* check that key = 2 doesn't exist */ 324 - assert(bpf_lookup_elem(map_fd, &key, values) == -1 && errno == ENOENT); 325 - 326 - /* iterate over two elements */ 327 - assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && 328 - next_key == 0); 329 - assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && 330 - next_key == 1); 331 - assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && 332 - errno == ENOENT); 333 - 334 - /* delete shouldn't succeed */ 335 - key = 1; 336 - assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL); 337 - 338 - close(map_fd); 339 - } 340 - 341 - #define MAP_SIZE (32 * 1024) 342 - static void test_map_large(void) 343 - { 344 - struct bigkey { 345 - int a; 346 - char b[116]; 347 - long long c; 348 - } key; 349 - int map_fd, i, value; 350 - 351 - /* allocate 4Mbyte of memory */ 352 - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 353 - MAP_SIZE, map_flags); 354 - if (map_fd < 0) { 355 - printf("failed to create large map '%s'\n", strerror(errno)); 356 - exit(1); 357 - } 358 - 359 - for (i = 0; i < MAP_SIZE; i++) { 360 - key = (struct bigkey) {.c = i}; 361 - value = i; 362 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); 363 - } 364 - key.c = -1; 365 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 366 - errno == E2BIG); 367 - 368 - /* iterate through all elements */ 369 - for (i = 0; i < MAP_SIZE; i++) 370 - assert(bpf_get_next_key(map_fd, &key, &key) == 0); 371 - assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT); 372 - 
373 - key.c = 0; 374 - assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0); 375 - key.a = 1; 376 - assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT); 377 - 378 - close(map_fd); 379 - } 380 - 381 - /* fork N children and wait for them to complete */ 382 - static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data) 383 - { 384 - pid_t pid[tasks]; 385 - int i; 386 - 387 - for (i = 0; i < tasks; i++) { 388 - pid[i] = fork(); 389 - if (pid[i] == 0) { 390 - fn(i, data); 391 - exit(0); 392 - } else if (pid[i] == -1) { 393 - printf("couldn't spawn #%d process\n", i); 394 - exit(1); 395 - } 396 - } 397 - for (i = 0; i < tasks; i++) { 398 - int status; 399 - 400 - assert(waitpid(pid[i], &status, 0) == pid[i]); 401 - assert(status == 0); 402 - } 403 - } 404 - 405 - static void test_map_stress(void) 406 - { 407 - run_parallel(100, test_hashmap_sanity, NULL); 408 - run_parallel(100, test_percpu_hashmap_sanity, NULL); 409 - run_parallel(100, test_arraymap_sanity, NULL); 410 - run_parallel(100, test_percpu_arraymap_sanity, NULL); 411 - } 412 - 413 - #define TASKS 1024 414 - #define DO_UPDATE 1 415 - #define DO_DELETE 0 416 - static void do_work(int fn, void *data) 417 - { 418 - int map_fd = ((int *)data)[0]; 419 - int do_update = ((int *)data)[1]; 420 - int i; 421 - int key, value; 422 - 423 - for (i = fn; i < MAP_SIZE; i += TASKS) { 424 - key = value = i; 425 - if (do_update) { 426 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); 427 - assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); 428 - } else { 429 - assert(bpf_delete_elem(map_fd, &key) == 0); 430 - } 431 - } 432 - } 433 - 434 - static void test_map_parallel(void) 435 - { 436 - int i, map_fd, key = 0, value = 0; 437 - int data[2]; 438 - 439 - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 440 - MAP_SIZE, map_flags); 441 - if (map_fd < 0) { 442 - printf("failed to create map for parallel test '%s'\n", 443 - 
strerror(errno)); 444 - exit(1); 445 - } 446 - 447 - data[0] = map_fd; 448 - data[1] = DO_UPDATE; 449 - /* use the same map_fd in children to add elements to this map 450 - * child_0 adds key=0, key=1024, key=2048, ... 451 - * child_1 adds key=1, key=1025, key=2049, ... 452 - * child_1023 adds key=1023, ... 453 - */ 454 - run_parallel(TASKS, do_work, data); 455 - 456 - /* check that key=0 is already there */ 457 - assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 458 - errno == EEXIST); 459 - 460 - /* check that all elements were inserted */ 461 - key = -1; 462 - for (i = 0; i < MAP_SIZE; i++) 463 - assert(bpf_get_next_key(map_fd, &key, &key) == 0); 464 - assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT); 465 - 466 - /* another check for all elements */ 467 - for (i = 0; i < MAP_SIZE; i++) { 468 - key = MAP_SIZE - i - 1; 469 - assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && 470 - value == key); 471 - } 472 - 473 - /* now let's delete all elemenets in parallel */ 474 - data[1] = DO_DELETE; 475 - run_parallel(TASKS, do_work, data); 476 - 477 - /* nothing should be left */ 478 - key = -1; 479 - assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT); 480 - } 481 - 482 - static void run_all_tests(void) 483 - { 484 - test_hashmap_sanity(0, NULL); 485 - test_percpu_hashmap_sanity(0, NULL); 486 - test_arraymap_sanity(0, NULL); 487 - test_percpu_arraymap_sanity(0, NULL); 488 - test_percpu_arraymap_many_keys(); 489 - 490 - test_map_large(); 491 - test_map_parallel(); 492 - test_map_stress(); 493 - } 494 - 495 - int main(void) 496 - { 497 - map_flags = 0; 498 - run_all_tests(); 499 - map_flags = BPF_F_NO_PREALLOC; 500 - run_all_tests(); 501 - printf("test_maps: OK\n"); 502 - return 0; 503 - }
+386 -267
samples/bpf/test_verifier.c tools/testing/selftests/bpf/test_verifier.c
··· 7 7 * modify it under the terms of version 2 of the GNU General Public 8 8 * License as published by the Free Software Foundation. 9 9 */ 10 + 10 11 #include <stdio.h> 11 12 #include <unistd.h> 12 - #include <linux/bpf.h> 13 13 #include <errno.h> 14 - #include <linux/unistd.h> 15 14 #include <string.h> 16 - #include <linux/filter.h> 17 - #include <linux/bpf_perf_event.h> 18 15 #include <stddef.h> 19 16 #include <stdbool.h> 17 + #include <sched.h> 18 + 20 19 #include <sys/resource.h> 21 - #include "libbpf.h" 22 20 23 - #define MAX_INSNS 512 24 - #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) 21 + #include <linux/unistd.h> 22 + #include <linux/filter.h> 23 + #include <linux/bpf_perf_event.h> 24 + #include <linux/bpf.h> 25 25 26 - #define MAX_FIXUPS 8 26 + #include "../../../include/linux/filter.h" 27 + 28 + #include "bpf_sys.h" 29 + 30 + #ifndef ARRAY_SIZE 31 + # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 32 + #endif 33 + 34 + #define MAX_INSNS 512 35 + #define MAX_FIXUPS 8 27 36 28 37 struct bpf_test { 29 38 const char *descr; 30 39 struct bpf_insn insns[MAX_INSNS]; 31 - int fixup[MAX_FIXUPS]; 32 - int prog_array_fixup[MAX_FIXUPS]; 33 - int test_val_map_fixup[MAX_FIXUPS]; 40 + int fixup_map1[MAX_FIXUPS]; 41 + int fixup_map2[MAX_FIXUPS]; 42 + int fixup_prog[MAX_FIXUPS]; 34 43 const char *errstr; 35 44 const char *errstr_unpriv; 36 45 enum { ··· 54 45 * actually the end of the structure. 
55 46 */ 56 47 #define MAX_ENTRIES 11 57 - struct test_val { 58 - unsigned index; 59 - int foo[MAX_ENTRIES]; 60 - }; 61 48 62 - struct other_val { 63 - unsigned int action[32]; 49 + struct test_val { 50 + unsigned int index; 51 + int foo[MAX_ENTRIES]; 64 52 }; 65 53 66 54 static struct bpf_test tests[] = { ··· 294 288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 295 289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 296 290 BPF_LD_MAP_FD(BPF_REG_1, 0), 297 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 291 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 292 + BPF_FUNC_map_lookup_elem), 298 293 BPF_EXIT_INSN(), 299 294 }, 300 - .fixup = {2}, 295 + .fixup_map1 = { 2 }, 301 296 .errstr = "invalid indirect read from stack", 302 297 .result = REJECT, 303 298 }, ··· 315 308 { 316 309 "invalid argument register", 317 310 .insns = { 318 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), 319 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), 311 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 312 + BPF_FUNC_get_cgroup_classid), 313 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 314 + BPF_FUNC_get_cgroup_classid), 320 315 BPF_EXIT_INSN(), 321 316 }, 322 317 .errstr = "R1 !read_ok", ··· 329 320 "non-invalid argument register", 330 321 .insns = { 331 322 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 332 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), 323 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 324 + BPF_FUNC_get_cgroup_classid), 333 325 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6), 334 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), 326 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 327 + BPF_FUNC_get_cgroup_classid), 335 328 BPF_EXIT_INSN(), 336 329 }, 337 330 .result = ACCEPT, ··· 344 333 .insns = { 345 334 /* spill R1(ctx) into stack */ 346 335 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 347 - 348 336 /* fill it back into R2 */ 349 337 BPF_LDX_MEM(BPF_DW, BPF_REG_2, 
BPF_REG_10, -8), 350 - 351 338 /* should be able to access R0 = *(R2 + 8) */ 352 339 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ 353 340 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), ··· 373 364 .insns = { 374 365 /* spill R1(ctx) into stack */ 375 366 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 376 - 377 367 /* mess up with R1 pointer on stack */ 378 368 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), 379 - 380 369 /* fill back into R0 should fail */ 381 370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 382 - 383 371 BPF_EXIT_INSN(), 384 372 }, 385 373 .errstr_unpriv = "attempt to corrupt spilled", ··· 490 484 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), 491 485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 492 486 BPF_LD_MAP_FD(BPF_REG_1, 0), 493 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem), 487 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 488 + BPF_FUNC_map_delete_elem), 494 489 BPF_EXIT_INSN(), 495 490 }, 496 491 .errstr = "fd 0 is not pointing to valid bpf_map", ··· 504 497 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 505 498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 506 499 BPF_LD_MAP_FD(BPF_REG_1, 0), 507 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 500 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 501 + BPF_FUNC_map_lookup_elem), 508 502 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 509 503 BPF_EXIT_INSN(), 510 504 }, 511 - .fixup = {3}, 505 + .fixup_map1 = { 3 }, 512 506 .errstr = "R0 invalid mem access 'map_value_or_null'", 513 507 .result = REJECT, 514 508 }, ··· 520 512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 521 513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 522 514 BPF_LD_MAP_FD(BPF_REG_1, 0), 523 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 515 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 516 + BPF_FUNC_map_lookup_elem), 524 517 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 525 518 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), 526 519 BPF_EXIT_INSN(), 527 520 }, 528 - .fixup = {3}, 521 + .fixup_map1 = { 3 }, 529 522 .errstr = 
"misaligned access", 530 523 .result = REJECT, 531 524 }, ··· 537 528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 538 529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 539 530 BPF_LD_MAP_FD(BPF_REG_1, 0), 540 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 531 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 532 + BPF_FUNC_map_lookup_elem), 541 533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 542 534 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 543 535 BPF_EXIT_INSN(), 544 536 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), 545 537 BPF_EXIT_INSN(), 546 538 }, 547 - .fixup = {3}, 539 + .fixup_map1 = { 3 }, 548 540 .errstr = "R0 invalid mem access", 549 541 .errstr_unpriv = "R0 leaks addr", 550 542 .result = REJECT, ··· 630 620 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), 631 621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56), 632 622 BPF_LD_MAP_FD(BPF_REG_1, 0), 633 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem), 623 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 624 + BPF_FUNC_map_delete_elem), 634 625 BPF_EXIT_INSN(), 635 626 }, 636 - .fixup = {24}, 627 + .fixup_map1 = { 24 }, 637 628 .errstr_unpriv = "R1 pointer comparison", 638 629 .result_unpriv = REJECT, 639 630 .result = ACCEPT, ··· 775 764 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 776 765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 777 766 BPF_LD_MAP_FD(BPF_REG_1, 0), 778 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 767 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 768 + BPF_FUNC_map_lookup_elem), 779 769 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 780 770 BPF_EXIT_INSN(), 781 771 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), ··· 784 772 offsetof(struct __sk_buff, pkt_type)), 785 773 BPF_EXIT_INSN(), 786 774 }, 787 - .fixup = {4}, 775 + .fixup_map1 = { 4 }, 788 776 .errstr = "different pointers", 789 777 .errstr_unpriv = "R1 pointer comparison", 790 778 .result = REJECT, ··· 800 788 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 801 789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 802 790 BPF_LD_MAP_FD(BPF_REG_1, 0), 803 - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 791 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 792 + BPF_FUNC_map_lookup_elem), 804 793 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 805 794 BPF_EXIT_INSN(), 806 795 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 807 796 BPF_JMP_IMM(BPF_JA, 0, 0, -12), 808 797 }, 809 - .fixup = {6}, 798 + .fixup_map1 = { 6 }, 810 799 .errstr = "different pointers", 811 800 .errstr_unpriv = "R1 pointer comparison", 812 801 .result = REJECT, ··· 824 811 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 825 812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 826 813 BPF_LD_MAP_FD(BPF_REG_1, 0), 827 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 814 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 815 + BPF_FUNC_map_lookup_elem), 828 816 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 829 817 BPF_EXIT_INSN(), 830 818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 831 819 BPF_JMP_IMM(BPF_JA, 0, 0, -13), 832 820 }, 833 - .fixup = {7}, 821 + .fixup_map1 = { 7 }, 834 822 .errstr = "different pointers", 835 823 .errstr_unpriv = "R1 pointer comparison", 836 824 .result = REJECT, ··· 1054 1040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1055 1041 BPF_MOV64_IMM(BPF_REG_2, 8), 1056 1042 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 1057 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk), 1043 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1044 + BPF_FUNC_trace_printk), 1058 1045 BPF_MOV64_IMM(BPF_REG_0, 0), 1059 1046 BPF_EXIT_INSN(), 1060 1047 }, ··· 1072 1057 BPF_LD_MAP_FD(BPF_REG_1, 0), 1073 1058 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 1074 1059 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 1075 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), 1060 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1061 + BPF_FUNC_map_update_elem), 1076 1062 BPF_MOV64_IMM(BPF_REG_0, 0), 1077 1063 BPF_EXIT_INSN(), 1078 1064 }, 1079 - .fixup = {3}, 1065 + .fixup_map1 = { 3 }, 1080 1066 .errstr_unpriv = "R4 leaks addr", 1081 1067 .result_unpriv = REJECT, 1082 1068 .result = 
ACCEPT, ··· 1089 1073 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1090 1074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1091 1075 BPF_LD_MAP_FD(BPF_REG_1, 0), 1092 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1076 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1077 + BPF_FUNC_map_lookup_elem), 1093 1078 BPF_MOV64_IMM(BPF_REG_0, 0), 1094 1079 BPF_EXIT_INSN(), 1095 1080 }, 1096 - .fixup = {3}, 1081 + .fixup_map1 = { 3 }, 1097 1082 .errstr = "invalid indirect read from stack off -8+0 size 8", 1098 1083 .result = REJECT, 1099 1084 }, ··· 1164 1147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1165 1148 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1166 1149 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1167 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), 1150 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1151 + BPF_FUNC_get_hash_recalc), 1168 1152 BPF_EXIT_INSN(), 1169 1153 }, 1170 1154 .result = ACCEPT, ··· 1179 1161 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1180 1162 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), 1181 1163 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1182 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), 1164 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1165 + BPF_FUNC_get_hash_recalc), 1183 1166 BPF_EXIT_INSN(), 1184 1167 }, 1185 1168 .result = REJECT, ··· 1194 1175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1195 1176 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1196 1177 BPF_MOV64_IMM(BPF_REG_0, 1), 1197 - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0), 1178 + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, 1179 + BPF_REG_0, -8, 0), 1198 1180 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1199 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), 1181 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1182 + BPF_FUNC_get_hash_recalc), 1200 1183 BPF_EXIT_INSN(), 1201 1184 }, 1202 1185 .result = REJECT, ··· 1258 1237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1259 1238 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1260 1239 BPF_LD_MAP_FD(BPF_REG_1, 0), 1261 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1240 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1241 + BPF_FUNC_map_lookup_elem), 1262 1242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1263 1243 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 1264 1244 BPF_EXIT_INSN(), 1265 1245 }, 1266 - .fixup = {3}, 1246 + .fixup_map1 = { 3 }, 1267 1247 .errstr_unpriv = "R0 leaks addr", 1268 1248 .result_unpriv = REJECT, 1269 1249 .result = ACCEPT, ··· 1285 1263 .insns = { 1286 1264 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 1287 1265 BPF_LD_MAP_FD(BPF_REG_2, 0), 1288 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 1266 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1267 + BPF_FUNC_tail_call), 1289 1268 BPF_MOV64_IMM(BPF_REG_0, 0), 1290 1269 BPF_EXIT_INSN(), 1291 1270 }, 1292 - .prog_array_fixup = {1}, 1271 + .fixup_prog = { 1 }, 1293 1272 .errstr_unpriv = "R3 leaks addr into helper", 1294 1273 .result_unpriv = REJECT, 1295 1274 .result = ACCEPT, ··· 1304 1281 BPF_MOV64_IMM(BPF_REG_0, 0), 1305 1282 BPF_EXIT_INSN(), 1306 1283 }, 1307 - .fixup = {1}, 1284 + .fixup_map1 = { 1 }, 1308 1285 .errstr_unpriv = "R1 pointer comparison", 1309 1286 .result_unpriv = REJECT, 1310 1287 .result = ACCEPT, ··· 1393 1370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1394 1371 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1395 1372 BPF_MOV64_IMM(BPF_REG_4, -8), 1396 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1373 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1374 + BPF_FUNC_skb_load_bytes), 1397 1375 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1398 1376 BPF_EXIT_INSN(), 1399 1377 }, ··· 1410 1386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1411 1387 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1412 1388 BPF_MOV64_IMM(BPF_REG_4, ~0), 1413 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1389 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1390 + BPF_FUNC_skb_load_bytes), 1414 
1391 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1415 1392 BPF_EXIT_INSN(), 1416 1393 }, ··· 1427 1402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1428 1403 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1429 1404 BPF_MOV64_IMM(BPF_REG_4, 0), 1430 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1405 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1406 + BPF_FUNC_skb_load_bytes), 1431 1407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1432 1408 BPF_EXIT_INSN(), 1433 1409 }, ··· 1444 1418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1445 1419 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1446 1420 BPF_MOV64_IMM(BPF_REG_4, 8), 1447 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1421 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1422 + BPF_FUNC_skb_load_bytes), 1448 1423 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1449 1424 BPF_EXIT_INSN(), 1450 1425 }, ··· 1461 1434 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), 1462 1435 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1463 1436 BPF_MOV64_IMM(BPF_REG_4, 8), 1464 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1437 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1438 + BPF_FUNC_skb_load_bytes), 1465 1439 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1466 1440 BPF_EXIT_INSN(), 1467 1441 }, ··· 1475 1447 BPF_MOV64_IMM(BPF_REG_2, 4), 1476 1448 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1477 1449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 1478 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ 1479 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ 1450 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 1451 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 1480 1452 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1481 1453 BPF_MOV64_IMM(BPF_REG_4, 8), 1482 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1483 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ 1484 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ 
1454 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1455 + BPF_FUNC_skb_load_bytes), 1456 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 1457 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 1485 1458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 1486 1459 offsetof(struct __sk_buff, mark)), 1487 1460 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, ··· 1499 1470 BPF_MOV64_IMM(BPF_REG_2, 4), 1500 1471 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1501 1472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1502 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ 1473 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1503 1474 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1504 1475 BPF_MOV64_IMM(BPF_REG_4, 8), 1505 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1506 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */ 1476 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1477 + BPF_FUNC_skb_load_bytes), 1478 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1507 1479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 1508 1480 offsetof(struct __sk_buff, mark)), 1509 1481 BPF_EXIT_INSN(), ··· 1519 1489 BPF_MOV64_IMM(BPF_REG_2, 4), 1520 1490 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1521 1491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 1522 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ 1523 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ 1524 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ 1492 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 1493 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1494 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 1525 1495 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1526 1496 BPF_MOV64_IMM(BPF_REG_4, 8), 1527 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1528 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ 1529 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ 1530 - BPF_LDX_MEM(BPF_DW, 
BPF_REG_3, BPF_REG_6, 0), /* fill ctx into R3 */ 1497 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1498 + BPF_FUNC_skb_load_bytes), 1499 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 1500 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 1501 + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 1531 1502 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 1532 1503 offsetof(struct __sk_buff, mark)), 1533 1504 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, ··· 1549 1518 BPF_MOV64_IMM(BPF_REG_2, 4), 1550 1519 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1551 1520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 1552 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */ 1553 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */ 1554 - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */ 1521 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 1522 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1523 + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 1555 1524 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1556 1525 BPF_MOV64_IMM(BPF_REG_4, 8), 1557 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1558 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */ 1559 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */ 1560 - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill data into R3 */ 1526 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1527 + BPF_FUNC_skb_load_bytes), 1528 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 1529 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 1530 + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 1561 1531 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 1562 1532 offsetof(struct __sk_buff, mark)), 1563 1533 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, ··· 1578 1546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), 1579 1547 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1580 1548 BPF_MOV64_IMM(BPF_REG_4, 8), 1581 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1549 + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1550 + BPF_FUNC_skb_load_bytes), 1582 1551 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1583 1552 BPF_EXIT_INSN(), 1584 1553 }, ··· 1595 1562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 1596 1563 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1597 1564 BPF_MOV64_IMM(BPF_REG_4, 8), 1598 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1565 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1566 + BPF_FUNC_skb_load_bytes), 1599 1567 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1600 1568 BPF_EXIT_INSN(), 1601 1569 }, ··· 1612 1578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), 1613 1579 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1614 1580 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 1615 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1581 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1582 + BPF_FUNC_skb_load_bytes), 1616 1583 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1617 1584 BPF_EXIT_INSN(), 1618 1585 }, ··· 1629 1594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 1630 1595 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1631 1596 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 1632 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1597 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1598 + BPF_FUNC_skb_load_bytes), 1633 1599 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1634 1600 BPF_EXIT_INSN(), 1635 1601 }, ··· 1646 1610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 1647 1611 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1648 1612 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 1649 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1613 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1614 + BPF_FUNC_skb_load_bytes), 1650 1615 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1651 1616 BPF_EXIT_INSN(), 1652 1617 }, ··· 1663 1626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 1664 1627 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1665 1628 BPF_MOV64_IMM(BPF_REG_4, 0), 1666 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1629 + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1630 + BPF_FUNC_skb_load_bytes), 1667 1631 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1668 1632 BPF_EXIT_INSN(), 1669 1633 }, ··· 1680 1642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 1681 1643 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1682 1644 BPF_MOV64_IMM(BPF_REG_4, 512), 1683 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 1645 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1646 + BPF_FUNC_skb_load_bytes), 1684 1647 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1685 1648 BPF_EXIT_INSN(), 1686 1649 }, ··· 1902 1863 BPF_LD_MAP_FD(BPF_REG_1, 0), 1903 1864 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 1904 1865 BPF_MOV64_IMM(BPF_REG_4, 0), 1905 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), 1866 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1867 + BPF_FUNC_map_update_elem), 1906 1868 BPF_MOV64_IMM(BPF_REG_0, 0), 1907 1869 BPF_EXIT_INSN(), 1908 1870 }, 1909 - .fixup = {5}, 1871 + .fixup_map1 = { 5 }, 1910 1872 .result_unpriv = ACCEPT, 1911 1873 .result = ACCEPT, 1912 1874 .prog_type = BPF_PROG_TYPE_XDP, ··· 1918 1878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1919 1879 offsetof(struct xdp_md, data)), 1920 1880 BPF_LD_MAP_FD(BPF_REG_1, 0), 1921 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1881 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1882 + BPF_FUNC_map_lookup_elem), 1922 1883 BPF_MOV64_IMM(BPF_REG_0, 0), 1923 1884 BPF_EXIT_INSN(), 1924 1885 }, 1925 - .fixup = {1}, 1886 + .fixup_map1 = { 1 }, 1926 1887 .result = REJECT, 1927 1888 .errstr = "invalid access to packet", 1928 1889 .prog_type = BPF_PROG_TYPE_XDP, ··· 1946 1905 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 1947 1906 BPF_LD_MAP_FD(BPF_REG_1, 0), 1948 1907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 1949 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1908 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1909 + BPF_FUNC_map_lookup_elem), 1950 1910 BPF_MOV64_IMM(BPF_REG_0, 0), 1951 1911 
BPF_EXIT_INSN(), 1952 1912 }, 1953 - .fixup = {11}, 1913 + .fixup_map1 = { 11 }, 1954 1914 .result = ACCEPT, 1955 1915 .prog_type = BPF_PROG_TYPE_XDP, 1956 1916 }, ··· 1968 1926 BPF_MOV64_IMM(BPF_REG_0, 0), 1969 1927 BPF_EXIT_INSN(), 1970 1928 BPF_LD_MAP_FD(BPF_REG_1, 0), 1971 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1929 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1930 + BPF_FUNC_map_lookup_elem), 1972 1931 BPF_MOV64_IMM(BPF_REG_0, 0), 1973 1932 BPF_EXIT_INSN(), 1974 1933 }, 1975 - .fixup = {7}, 1934 + .fixup_map1 = { 7 }, 1976 1935 .result = REJECT, 1977 1936 .errstr = "invalid access to packet", 1978 1937 .prog_type = BPF_PROG_TYPE_XDP, ··· 1990 1947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 1991 1948 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 1992 1949 BPF_LD_MAP_FD(BPF_REG_1, 0), 1993 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1950 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1951 + BPF_FUNC_map_lookup_elem), 1994 1952 BPF_MOV64_IMM(BPF_REG_0, 0), 1995 1953 BPF_EXIT_INSN(), 1996 1954 }, 1997 - .fixup = {6}, 1955 + .fixup_map1 = { 6 }, 1998 1956 .result = REJECT, 1999 1957 .errstr = "invalid access to packet", 2000 1958 .prog_type = BPF_PROG_TYPE_XDP, ··· 2013 1969 BPF_LD_MAP_FD(BPF_REG_1, 0), 2014 1970 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 2015 1971 BPF_MOV64_IMM(BPF_REG_4, 0), 2016 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), 1972 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1973 + BPF_FUNC_map_update_elem), 2017 1974 BPF_MOV64_IMM(BPF_REG_0, 0), 2018 1975 BPF_EXIT_INSN(), 2019 1976 }, 2020 - .fixup = {5}, 1977 + .fixup_map1 = { 5 }, 2021 1978 .result = ACCEPT, 2022 1979 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2023 1980 }, ··· 2028 1983 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2029 1984 offsetof(struct __sk_buff, data)), 2030 1985 BPF_LD_MAP_FD(BPF_REG_1, 0), 2031 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1986 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
1987 + BPF_FUNC_map_lookup_elem), 2032 1988 BPF_MOV64_IMM(BPF_REG_0, 0), 2033 1989 BPF_EXIT_INSN(), 2034 1990 }, 2035 - .fixup = {1}, 1991 + .fixup_map1 = { 1 }, 2036 1992 .result = REJECT, 2037 1993 .errstr = "invalid access to packet", 2038 1994 .prog_type = BPF_PROG_TYPE_SCHED_CLS, ··· 2056 2010 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 2057 2011 BPF_LD_MAP_FD(BPF_REG_1, 0), 2058 2012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 2059 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2013 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2014 + BPF_FUNC_map_lookup_elem), 2060 2015 BPF_MOV64_IMM(BPF_REG_0, 0), 2061 2016 BPF_EXIT_INSN(), 2062 2017 }, 2063 - .fixup = {11}, 2018 + .fixup_map1 = { 11 }, 2064 2019 .result = ACCEPT, 2065 2020 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2066 2021 }, ··· 2078 2031 BPF_MOV64_IMM(BPF_REG_0, 0), 2079 2032 BPF_EXIT_INSN(), 2080 2033 BPF_LD_MAP_FD(BPF_REG_1, 0), 2081 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2034 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2035 + BPF_FUNC_map_lookup_elem), 2082 2036 BPF_MOV64_IMM(BPF_REG_0, 0), 2083 2037 BPF_EXIT_INSN(), 2084 2038 }, 2085 - .fixup = {7}, 2039 + .fixup_map1 = { 7 }, 2086 2040 .result = REJECT, 2087 2041 .errstr = "invalid access to packet", 2088 2042 .prog_type = BPF_PROG_TYPE_SCHED_CLS, ··· 2100 2052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 2101 2053 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 2102 2054 BPF_LD_MAP_FD(BPF_REG_1, 0), 2103 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2055 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2056 + BPF_FUNC_map_lookup_elem), 2104 2057 BPF_MOV64_IMM(BPF_REG_0, 0), 2105 2058 BPF_EXIT_INSN(), 2106 2059 }, 2107 - .fixup = {6}, 2060 + .fixup_map1 = { 6 }, 2108 2061 .result = REJECT, 2109 2062 .errstr = "invalid access to packet", 2110 2063 .prog_type = BPF_PROG_TYPE_SCHED_CLS, ··· 2124 2075 BPF_MOV64_IMM(BPF_REG_2, 0), 2125 2076 BPF_MOV64_IMM(BPF_REG_4, 42), 2126 2077 
BPF_MOV64_IMM(BPF_REG_5, 0), 2127 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes), 2078 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2079 + BPF_FUNC_skb_store_bytes), 2128 2080 BPF_MOV64_IMM(BPF_REG_0, 0), 2129 2081 BPF_EXIT_INSN(), 2130 2082 }, ··· 2145 2095 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), 2146 2096 BPF_MOV64_IMM(BPF_REG_2, 0), 2147 2097 BPF_MOV64_IMM(BPF_REG_4, 4), 2148 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), 2098 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2099 + BPF_FUNC_skb_load_bytes), 2149 2100 BPF_MOV64_IMM(BPF_REG_0, 0), 2150 2101 BPF_EXIT_INSN(), 2151 2102 }, ··· 2170 2119 BPF_MOV64_IMM(BPF_REG_3, 0), 2171 2120 BPF_MOV64_IMM(BPF_REG_4, 0), 2172 2121 BPF_MOV64_IMM(BPF_REG_5, 0), 2173 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2122 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2123 + BPF_FUNC_csum_diff), 2174 2124 BPF_MOV64_IMM(BPF_REG_0, 0), 2175 2125 BPF_EXIT_INSN(), 2176 2126 }, ··· 2194 2142 BPF_MOV64_IMM(BPF_REG_3, 0), 2195 2143 BPF_MOV64_IMM(BPF_REG_4, 0), 2196 2144 BPF_MOV64_IMM(BPF_REG_5, 0), 2197 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2145 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2146 + BPF_FUNC_csum_diff), 2198 2147 BPF_MOV64_IMM(BPF_REG_0, 0), 2199 2148 BPF_EXIT_INSN(), 2200 2149 }, ··· 2219 2166 BPF_MOV64_IMM(BPF_REG_3, 0), 2220 2167 BPF_MOV64_IMM(BPF_REG_4, 0), 2221 2168 BPF_MOV64_IMM(BPF_REG_5, 0), 2222 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2169 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2170 + BPF_FUNC_csum_diff), 2223 2171 BPF_MOV64_IMM(BPF_REG_0, 0), 2224 2172 BPF_EXIT_INSN(), 2225 2173 }, ··· 2244 2190 BPF_MOV64_IMM(BPF_REG_3, 0), 2245 2191 BPF_MOV64_IMM(BPF_REG_4, 0), 2246 2192 BPF_MOV64_IMM(BPF_REG_5, 0), 2247 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2193 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2194 + BPF_FUNC_csum_diff), 2248 2195 BPF_MOV64_IMM(BPF_REG_0, 0), 2249 2196 
BPF_EXIT_INSN(), 2250 2197 }, ··· 2269 2214 BPF_MOV64_IMM(BPF_REG_3, 0), 2270 2215 BPF_MOV64_IMM(BPF_REG_4, 0), 2271 2216 BPF_MOV64_IMM(BPF_REG_5, 0), 2272 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2217 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2218 + BPF_FUNC_csum_diff), 2273 2219 BPF_MOV64_IMM(BPF_REG_0, 0), 2274 2220 BPF_EXIT_INSN(), 2275 2221 }, ··· 2294 2238 BPF_MOV64_IMM(BPF_REG_3, 0), 2295 2239 BPF_MOV64_IMM(BPF_REG_4, 0), 2296 2240 BPF_MOV64_IMM(BPF_REG_5, 0), 2297 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2241 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2242 + BPF_FUNC_csum_diff), 2298 2243 BPF_MOV64_IMM(BPF_REG_0, 0), 2299 2244 BPF_EXIT_INSN(), 2300 2245 }, ··· 2319 2262 BPF_MOV64_IMM(BPF_REG_3, 0), 2320 2263 BPF_MOV64_IMM(BPF_REG_4, 0), 2321 2264 BPF_MOV64_IMM(BPF_REG_5, 0), 2322 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2265 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2266 + BPF_FUNC_csum_diff), 2323 2267 BPF_MOV64_IMM(BPF_REG_0, 0), 2324 2268 BPF_EXIT_INSN(), 2325 2269 }, ··· 2343 2285 BPF_MOV64_IMM(BPF_REG_3, 0), 2344 2286 BPF_MOV64_IMM(BPF_REG_4, 0), 2345 2287 BPF_MOV64_IMM(BPF_REG_5, 0), 2346 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), 2288 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2289 + BPF_FUNC_csum_diff), 2347 2290 BPF_MOV64_IMM(BPF_REG_0, 0), 2348 2291 BPF_EXIT_INSN(), 2349 2292 }, ··· 2359 2300 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2360 2301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2361 2302 BPF_LD_MAP_FD(BPF_REG_1, 0), 2362 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2303 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2304 + BPF_FUNC_map_lookup_elem), 2363 2305 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2364 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2306 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2307 + offsetof(struct test_val, foo)), 2365 2308 BPF_EXIT_INSN(), 2366 2309 }, 2367 - .test_val_map_fixup = {3}, 2310 + 
.fixup_map2 = { 3 }, 2368 2311 .errstr_unpriv = "R0 leaks addr", 2369 2312 .result_unpriv = REJECT, 2370 2313 .result = ACCEPT, ··· 2378 2317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2379 2318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2380 2319 BPF_LD_MAP_FD(BPF_REG_1, 0), 2381 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2320 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2321 + BPF_FUNC_map_lookup_elem), 2382 2322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 2383 2323 BPF_MOV64_IMM(BPF_REG_1, 4), 2384 2324 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 2385 2325 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2386 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2326 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2327 + offsetof(struct test_val, foo)), 2387 2328 BPF_EXIT_INSN(), 2388 2329 }, 2389 - .test_val_map_fixup = {3}, 2390 - .errstr_unpriv = "R0 leaks addr", 2330 + .fixup_map2 = { 3 }, 2331 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2391 2332 .result_unpriv = REJECT, 2392 2333 .result = ACCEPT, 2393 2334 }, ··· 2400 2337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2401 2338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2402 2339 BPF_LD_MAP_FD(BPF_REG_1, 0), 2403 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2340 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2341 + BPF_FUNC_map_lookup_elem), 2404 2342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 2405 2343 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 2406 2344 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), 2407 2345 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 2408 2346 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2409 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2347 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2348 + offsetof(struct test_val, foo)), 2410 2349 BPF_EXIT_INSN(), 2411 2350 }, 2412 - .test_val_map_fixup = {3}, 2413 - .errstr_unpriv = "R0 leaks addr", 2351 + .fixup_map2 = { 3 }, 2352 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2414 2353 .result_unpriv = 
REJECT, 2415 2354 .result = ACCEPT, 2416 2355 }, ··· 2423 2358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2424 2359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2425 2360 BPF_LD_MAP_FD(BPF_REG_1, 0), 2426 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2361 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2362 + BPF_FUNC_map_lookup_elem), 2427 2363 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 2428 2364 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 2429 2365 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), ··· 2434 2368 BPF_MOV32_IMM(BPF_REG_1, 0), 2435 2369 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 2436 2370 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2437 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2371 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2372 + offsetof(struct test_val, foo)), 2438 2373 BPF_EXIT_INSN(), 2439 2374 }, 2440 - .test_val_map_fixup = {3}, 2441 - .errstr_unpriv = "R0 leaks addr", 2375 + .fixup_map2 = { 3 }, 2376 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2442 2377 .result_unpriv = REJECT, 2443 2378 .result = ACCEPT, 2444 2379 }, ··· 2450 2383 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2451 2384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2452 2385 BPF_LD_MAP_FD(BPF_REG_1, 0), 2453 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2386 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2387 + BPF_FUNC_map_lookup_elem), 2454 2388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2455 2389 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, 2456 2390 offsetof(struct test_val, foo)), 2457 2391 BPF_EXIT_INSN(), 2458 2392 }, 2459 - .test_val_map_fixup = {3}, 2393 + .fixup_map2 = { 3 }, 2460 2394 .errstr = "invalid access to map value, value_size=48 off=48 size=8", 2461 2395 .result = REJECT, 2462 2396 }, ··· 2468 2400 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2469 2401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2470 2402 BPF_LD_MAP_FD(BPF_REG_1, 0), 2471 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2403 + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2404 + BPF_FUNC_map_lookup_elem), 2472 2405 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 2473 2406 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), 2474 2407 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 2475 2408 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2476 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2409 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2410 + offsetof(struct test_val, foo)), 2477 2411 BPF_EXIT_INSN(), 2478 2412 }, 2479 - .test_val_map_fixup = {3}, 2413 + .fixup_map2 = { 3 }, 2414 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2480 2415 .errstr = "R0 min value is outside of the array range", 2416 + .result_unpriv = REJECT, 2481 2417 .result = REJECT, 2482 2418 }, 2483 2419 { ··· 2491 2419 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2492 2420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2493 2421 BPF_LD_MAP_FD(BPF_REG_1, 0), 2494 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2422 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2423 + BPF_FUNC_map_lookup_elem), 2495 2424 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 2496 2425 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 2497 2426 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 2498 2427 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2499 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2428 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2429 + offsetof(struct test_val, foo)), 2500 2430 BPF_EXIT_INSN(), 2501 2431 }, 2502 - .test_val_map_fixup = {3}, 2432 + .fixup_map2 = { 3 }, 2433 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2503 2434 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 2435 + .result_unpriv = REJECT, 2504 2436 .result = REJECT, 2505 2437 }, 2506 2438 { ··· 2514 2438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2515 2439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2516 2440 BPF_LD_MAP_FD(BPF_REG_1, 0), 2517 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2441 + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2442 + BPF_FUNC_map_lookup_elem), 2518 2443 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 2519 2444 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 2520 2445 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), ··· 2523 2446 BPF_MOV32_IMM(BPF_REG_1, 0), 2524 2447 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 2525 2448 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2526 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2449 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2450 + offsetof(struct test_val, foo)), 2527 2451 BPF_EXIT_INSN(), 2528 2452 }, 2529 - .test_val_map_fixup = {3}, 2453 + .fixup_map2 = { 3 }, 2454 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2530 2455 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 2456 + .result_unpriv = REJECT, 2531 2457 .result = REJECT, 2532 2458 }, 2533 2459 { ··· 2540 2460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2541 2461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2542 2462 BPF_LD_MAP_FD(BPF_REG_1, 0), 2543 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2463 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2464 + BPF_FUNC_map_lookup_elem), 2544 2465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 2545 2466 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 2546 2467 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), ··· 2549 2468 BPF_MOV32_IMM(BPF_REG_1, 0), 2550 2469 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 2551 2470 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 2552 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 2471 + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 2472 + offsetof(struct test_val, foo)), 2553 2473 BPF_EXIT_INSN(), 2554 2474 }, 2555 - .test_val_map_fixup = {3}, 2475 + .fixup_map2 = { 3 }, 2476 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2556 2477 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 2478 + .result_unpriv = REJECT, 2557 2479 .result = REJECT, 2558 2480 }, 2559 2481 { ··· 2566 2482 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2567 2483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2568 2484 BPF_LD_MAP_FD(BPF_REG_1, 0), 2569 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2485 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2486 + BPF_FUNC_map_lookup_elem), 2570 2487 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 2571 2488 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 2572 2489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2573 2490 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2574 2491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2575 2492 BPF_LD_MAP_FD(BPF_REG_1, 0), 2576 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 2493 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2494 + BPF_FUNC_map_lookup_elem), 2577 2495 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 2578 2496 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), 2579 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct test_val, foo)), 2497 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2498 + offsetof(struct test_val, foo)), 2580 2499 BPF_EXIT_INSN(), 2581 2500 }, 2582 - .test_val_map_fixup = {3, 11}, 2501 + .fixup_map2 = { 3, 11 }, 2502 + .errstr_unpriv = "R0 pointer arithmetic prohibited", 2583 2503 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 2504 + .result_unpriv = REJECT, 2584 2505 .result = REJECT, 2585 2506 }, 2586 2507 }; 2587 2508 2588 - static int probe_filter_length(struct bpf_insn *fp) 2509 + static int probe_filter_length(const struct bpf_insn *fp) 2589 2510 { 2590 - int len = 0; 2511 + int len; 2591 2512 2592 2513 for (len = MAX_INSNS - 1; len > 0; --len) 2593 2514 if (fp[len].code != 0 || fp[len].imm != 0) 2594 2515 break; 2595 - 2596 2516 return len + 1; 2597 2517 } 2598 2518 2599 - static int create_map(size_t val_size, int num) 2519 + static int create_map(uint32_t size_value, uint32_t max_elem) 2600 2520 { 2601 - int map_fd; 2521 + int fd; 2602 2522 2603 - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, 2604 - sizeof(long long), val_size, 
num, 0); 2605 - if (map_fd < 0) 2606 - printf("failed to create map '%s'\n", strerror(errno)); 2523 + fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long), 2524 + size_value, max_elem, BPF_F_NO_PREALLOC); 2525 + if (fd < 0) 2526 + printf("Failed to create hash map '%s'!\n", strerror(errno)); 2607 2527 2608 - return map_fd; 2528 + return fd; 2609 2529 } 2610 2530 2611 2531 static int create_prog_array(void) 2612 2532 { 2613 - int map_fd; 2533 + int fd; 2614 2534 2615 - map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, 2616 - sizeof(int), sizeof(int), 4, 0); 2617 - if (map_fd < 0) 2618 - printf("failed to create prog_array '%s'\n", strerror(errno)); 2535 + fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 2536 + sizeof(int), 4, 0); 2537 + if (fd < 0) 2538 + printf("Failed to create prog array '%s'!\n", strerror(errno)); 2619 2539 2620 - return map_fd; 2540 + return fd; 2621 2541 } 2622 2542 2623 - static int test(void) 2543 + static char bpf_vlog[32768]; 2544 + 2545 + static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, 2546 + int *fd_f1, int *fd_f2, int *fd_f3) 2624 2547 { 2625 - int prog_fd, i, pass_cnt = 0, err_cnt = 0; 2548 + int *fixup_map1 = test->fixup_map1; 2549 + int *fixup_map2 = test->fixup_map2; 2550 + int *fixup_prog = test->fixup_prog; 2551 + 2552 + /* Allocating HTs with 1 elem is fine here, since we only test 2553 + * for verifier and not do a runtime lookup, so the only thing 2554 + * that really matters is value size in this case. 
2555 + */ 2556 + if (*fixup_map1) { 2557 + *fd_f1 = create_map(sizeof(long long), 1); 2558 + do { 2559 + prog[*fixup_map1].imm = *fd_f1; 2560 + fixup_map1++; 2561 + } while (*fixup_map1); 2562 + } 2563 + 2564 + if (*fixup_map2) { 2565 + *fd_f2 = create_map(sizeof(struct test_val), 1); 2566 + do { 2567 + prog[*fixup_map2].imm = *fd_f2; 2568 + fixup_map2++; 2569 + } while (*fixup_map2); 2570 + } 2571 + 2572 + if (*fixup_prog) { 2573 + *fd_f3 = create_prog_array(); 2574 + do { 2575 + prog[*fixup_prog].imm = *fd_f3; 2576 + fixup_prog++; 2577 + } while (*fixup_prog); 2578 + } 2579 + } 2580 + 2581 + static void do_test_single(struct bpf_test *test, bool unpriv, 2582 + int *passes, int *errors) 2583 + { 2584 + struct bpf_insn *prog = test->insns; 2585 + int prog_len = probe_filter_length(prog); 2586 + int prog_type = test->prog_type; 2587 + int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; 2588 + int fd_prog, expected_ret; 2589 + const char *expected_err; 2590 + 2591 + do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 2592 + 2593 + fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 2594 + prog, prog_len * sizeof(struct bpf_insn), 2595 + "GPL", bpf_vlog, sizeof(bpf_vlog)); 2596 + 2597 + expected_ret = unpriv && test->result_unpriv != UNDEF ? 2598 + test->result_unpriv : test->result; 2599 + expected_err = unpriv && test->errstr_unpriv ? 
2600 + test->errstr_unpriv : test->errstr; 2601 + if (expected_ret == ACCEPT) { 2602 + if (fd_prog < 0) { 2603 + printf("FAIL\nFailed to load prog '%s'!\n", 2604 + strerror(errno)); 2605 + goto fail_log; 2606 + } 2607 + } else { 2608 + if (fd_prog >= 0) { 2609 + printf("FAIL\nUnexpected success to load!\n"); 2610 + goto fail_log; 2611 + } 2612 + if (!strstr(bpf_vlog, expected_err)) { 2613 + printf("FAIL\nUnexpected error message!\n"); 2614 + goto fail_log; 2615 + } 2616 + } 2617 + 2618 + (*passes)++; 2619 + printf("OK\n"); 2620 + close_fds: 2621 + close(fd_prog); 2622 + close(fd_f1); 2623 + close(fd_f2); 2624 + close(fd_f3); 2625 + sched_yield(); 2626 + return; 2627 + fail_log: 2628 + (*errors)++; 2629 + printf("%s", bpf_vlog); 2630 + goto close_fds; 2631 + } 2632 + 2633 + static int do_test(bool unpriv, unsigned int from, unsigned int to) 2634 + { 2635 + int i, passes = 0, errors = 0; 2636 + 2637 + for (i = from; i < to; i++) { 2638 + struct bpf_test *test = &tests[i]; 2639 + 2640 + /* Program types that are not supported by non-root we 2641 + * skip right away. 2642 + */ 2643 + if (unpriv && test->prog_type) 2644 + continue; 2645 + 2646 + printf("#%d %s ", i, test->descr); 2647 + do_test_single(test, unpriv, &passes, &errors); 2648 + } 2649 + 2650 + printf("Summary: %d PASSED, %d FAILED\n", passes, errors); 2651 + return errors ? 
-errors : 0; 2652 + } 2653 + 2654 + int main(int argc, char **argv) 2655 + { 2656 + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 2657 + struct rlimit rlim = { 1 << 20, 1 << 20 }; 2658 + unsigned int from = 0, to = ARRAY_SIZE(tests); 2626 2659 bool unpriv = geteuid() != 0; 2627 2660 2628 - for (i = 0; i < ARRAY_SIZE(tests); i++) { 2629 - struct bpf_insn *prog = tests[i].insns; 2630 - int prog_type = tests[i].prog_type; 2631 - int prog_len = probe_filter_length(prog); 2632 - int *fixup = tests[i].fixup; 2633 - int *prog_array_fixup = tests[i].prog_array_fixup; 2634 - int *test_val_map_fixup = tests[i].test_val_map_fixup; 2635 - int expected_result; 2636 - const char *expected_errstr; 2637 - int map_fd = -1, prog_array_fd = -1, test_val_map_fd = -1; 2661 + if (argc == 3) { 2662 + unsigned int l = atoi(argv[argc - 2]); 2663 + unsigned int u = atoi(argv[argc - 1]); 2638 2664 2639 - if (*fixup) { 2640 - map_fd = create_map(sizeof(long long), 1024); 2641 - 2642 - do { 2643 - prog[*fixup].imm = map_fd; 2644 - fixup++; 2645 - } while (*fixup); 2665 + if (l < to && u < to) { 2666 + from = l; 2667 + to = u + 1; 2646 2668 } 2647 - if (*prog_array_fixup) { 2648 - prog_array_fd = create_prog_array(); 2669 + } else if (argc == 2) { 2670 + unsigned int t = atoi(argv[argc - 1]); 2649 2671 2650 - do { 2651 - prog[*prog_array_fixup].imm = prog_array_fd; 2652 - prog_array_fixup++; 2653 - } while (*prog_array_fixup); 2672 + if (t < to) { 2673 + from = t; 2674 + to = t + 1; 2654 2675 } 2655 - if (*test_val_map_fixup) { 2656 - /* Unprivileged can't create a hash map.*/ 2657 - if (unpriv) 2658 - continue; 2659 - test_val_map_fd = create_map(sizeof(struct test_val), 2660 - 256); 2661 - do { 2662 - prog[*test_val_map_fixup].imm = test_val_map_fd; 2663 - test_val_map_fixup++; 2664 - } while (*test_val_map_fixup); 2665 - } 2666 - 2667 - printf("#%d %s ", i, tests[i].descr); 2668 - 2669 - prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER, 2670 - prog, prog_len * 
sizeof(struct bpf_insn), 2671 - "GPL", 0); 2672 - 2673 - if (unpriv && tests[i].result_unpriv != UNDEF) 2674 - expected_result = tests[i].result_unpriv; 2675 - else 2676 - expected_result = tests[i].result; 2677 - 2678 - if (unpriv && tests[i].errstr_unpriv) 2679 - expected_errstr = tests[i].errstr_unpriv; 2680 - else 2681 - expected_errstr = tests[i].errstr; 2682 - 2683 - if (expected_result == ACCEPT) { 2684 - if (prog_fd < 0) { 2685 - printf("FAIL\nfailed to load prog '%s'\n", 2686 - strerror(errno)); 2687 - printf("%s", bpf_log_buf); 2688 - err_cnt++; 2689 - goto fail; 2690 - } 2691 - } else { 2692 - if (prog_fd >= 0) { 2693 - printf("FAIL\nunexpected success to load\n"); 2694 - printf("%s", bpf_log_buf); 2695 - err_cnt++; 2696 - goto fail; 2697 - } 2698 - if (strstr(bpf_log_buf, expected_errstr) == 0) { 2699 - printf("FAIL\nunexpected error message: %s", 2700 - bpf_log_buf); 2701 - err_cnt++; 2702 - goto fail; 2703 - } 2704 - } 2705 - 2706 - pass_cnt++; 2707 - printf("OK\n"); 2708 - fail: 2709 - if (map_fd >= 0) 2710 - close(map_fd); 2711 - if (prog_array_fd >= 0) 2712 - close(prog_array_fd); 2713 - if (test_val_map_fd >= 0) 2714 - close(test_val_map_fd); 2715 - close(prog_fd); 2716 - 2717 2676 } 2718 - printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); 2719 2677 2720 - return 0; 2721 - } 2722 - 2723 - int main(void) 2724 - { 2725 - struct rlimit r = {1 << 20, 1 << 20}; 2726 - 2727 - setrlimit(RLIMIT_MEMLOCK, &r); 2728 - return test(); 2678 + setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf); 2679 + return do_test(unpriv, from, to); 2729 2680 }
+24
tools/include/linux/filter.h
··· 218 218 .off = OFF, \ 219 219 .imm = IMM }) 220 220 221 + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ 222 + 223 + #define BPF_LD_IMM64(DST, IMM) \ 224 + BPF_LD_IMM64_RAW(DST, 0, IMM) 225 + 226 + #define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ 227 + ((struct bpf_insn) { \ 228 + .code = BPF_LD | BPF_DW | BPF_IMM, \ 229 + .dst_reg = DST, \ 230 + .src_reg = SRC, \ 231 + .off = 0, \ 232 + .imm = (__u32) (IMM) }), \ 233 + ((struct bpf_insn) { \ 234 + .code = 0, /* zero is reserved opcode */ \ 235 + .dst_reg = 0, \ 236 + .src_reg = 0, \ 237 + .off = 0, \ 238 + .imm = ((__u64) (IMM)) >> 32 }) 239 + 240 + /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ 241 + 242 + #define BPF_LD_MAP_FD(DST, MAP_FD) \ 243 + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) 244 + 221 245 /* Program exit */ 222 246 223 247 #define BPF_EXIT_INSN() \
+2 -1
tools/testing/selftests/Makefile
··· 1 - TARGETS = breakpoints 1 + TARGETS = bpf 2 + TARGETS += breakpoints 2 3 TARGETS += capabilities 3 4 TARGETS += cpu-hotplug 4 5 TARGETS += efivarfs
+2
tools/testing/selftests/bpf/.gitignore
··· 1 + test_verifier 2 + test_maps
+13
tools/testing/selftests/bpf/Makefile
··· 1 + CFLAGS += -Wall -O2 2 + 3 + test_objs = test_verifier test_maps 4 + 5 + TEST_PROGS := test_verifier test_maps test_kmod.sh 6 + TEST_FILES := $(test_objs) 7 + 8 + all: $(test_objs) 9 + 10 + include ../lib.mk 11 + 12 + clean: 13 + $(RM) $(test_objs)
+108
tools/testing/selftests/bpf/bpf_sys.h
··· 1 + #ifndef __BPF_SYS__ 2 + #define __BPF_SYS__ 3 + 4 + #include <stdint.h> 5 + #include <stdlib.h> 6 + 7 + #include <sys/syscall.h> 8 + 9 + #include <linux/bpf.h> 10 + 11 + static inline __u64 bpf_ptr_to_u64(const void *ptr) 12 + { 13 + return (__u64)(unsigned long) ptr; 14 + } 15 + 16 + static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) 17 + { 18 + #ifdef __NR_bpf 19 + return syscall(__NR_bpf, cmd, attr, size); 20 + #else 21 + fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); 22 + errno = ENOSYS; 23 + return -1; 24 + #endif 25 + } 26 + 27 + static inline int bpf_map_lookup(int fd, const void *key, void *value) 28 + { 29 + union bpf_attr attr = {}; 30 + 31 + attr.map_fd = fd; 32 + attr.key = bpf_ptr_to_u64(key); 33 + attr.value = bpf_ptr_to_u64(value); 34 + 35 + return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 36 + } 37 + 38 + static inline int bpf_map_update(int fd, const void *key, const void *value, 39 + uint64_t flags) 40 + { 41 + union bpf_attr attr = {}; 42 + 43 + attr.map_fd = fd; 44 + attr.key = bpf_ptr_to_u64(key); 45 + attr.value = bpf_ptr_to_u64(value); 46 + attr.flags = flags; 47 + 48 + return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); 49 + } 50 + 51 + static inline int bpf_map_delete(int fd, const void *key) 52 + { 53 + union bpf_attr attr = {}; 54 + 55 + attr.map_fd = fd; 56 + attr.key = bpf_ptr_to_u64(key); 57 + 58 + return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 59 + } 60 + 61 + static inline int bpf_map_next_key(int fd, const void *key, void *next_key) 62 + { 63 + union bpf_attr attr = {}; 64 + 65 + attr.map_fd = fd; 66 + attr.key = bpf_ptr_to_u64(key); 67 + attr.next_key = bpf_ptr_to_u64(next_key); 68 + 69 + return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); 70 + } 71 + 72 + static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key, 73 + uint32_t size_value, uint32_t max_elem, 74 + uint32_t flags) 75 + { 76 + union bpf_attr attr = {}; 77 + 78 + attr.map_type = type; 79 + 
attr.key_size = size_key; 80 + attr.value_size = size_value; 81 + attr.max_entries = max_elem; 82 + attr.map_flags = flags; 83 + 84 + return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 85 + } 86 + 87 + static inline int bpf_prog_load(enum bpf_prog_type type, 88 + const struct bpf_insn *insns, size_t size_insns, 89 + const char *license, char *log, size_t size_log) 90 + { 91 + union bpf_attr attr = {}; 92 + 93 + attr.prog_type = type; 94 + attr.insns = bpf_ptr_to_u64(insns); 95 + attr.insn_cnt = size_insns / sizeof(struct bpf_insn); 96 + attr.license = bpf_ptr_to_u64(license); 97 + 98 + if (size_log > 0) { 99 + attr.log_buf = bpf_ptr_to_u64(log); 100 + attr.log_size = size_log; 101 + attr.log_level = 1; 102 + log[0] = 0; 103 + } 104 + 105 + return bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 106 + } 107 + 108 + #endif /* __BPF_SYS__ */
+5
tools/testing/selftests/bpf/config
··· 1 + CONFIG_BPF=y 2 + CONFIG_BPF_SYSCALL=y 3 + CONFIG_NET_CLS_BPF=m 4 + CONFIG_BPF_EVENTS=y 5 + CONFIG_TEST_BPF=m
+39
tools/testing/selftests/bpf/test_kmod.sh
··· 1 + #!/bin/bash 2 + 3 + SRC_TREE=../../../../ 4 + 5 + test_run() 6 + { 7 + sysctl -w net.core.bpf_jit_enable=$1 2>&1 > /dev/null 8 + sysctl -w net.core.bpf_jit_harden=$2 2>&1 > /dev/null 9 + 10 + echo "[ JIT enabled:$1 hardened:$2 ]" 11 + dmesg -C 12 + insmod $SRC_TREE/lib/test_bpf.ko 2> /dev/null 13 + if [ $? -ne 0 ]; then 14 + rc=1 15 + fi 16 + rmmod test_bpf 2> /dev/null 17 + dmesg | grep FAIL 18 + } 19 + 20 + test_save() 21 + { 22 + JE=`sysctl -n net.core.bpf_jit_enable` 23 + JH=`sysctl -n net.core.bpf_jit_harden` 24 + } 25 + 26 + test_restore() 27 + { 28 + sysctl -w net.core.bpf_jit_enable=$JE 2>&1 > /dev/null 29 + sysctl -w net.core.bpf_jit_harden=$JH 2>&1 > /dev/null 30 + } 31 + 32 + rc=0 33 + test_save 34 + test_run 0 0 35 + test_run 1 0 36 + test_run 1 1 37 + test_run 1 2 38 + test_restore 39 + exit $rc
+525
tools/testing/selftests/bpf/test_maps.c
··· 1 + /* 2 + * Testsuite for eBPF maps 3 + * 4 + * Copyright (c) 2014 PLUMgrid, http://plumgrid.com 5 + * Copyright (c) 2016 Facebook 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of version 2 of the GNU General Public 9 + * License as published by the Free Software Foundation. 10 + */ 11 + 12 + #include <stdio.h> 13 + #include <unistd.h> 14 + #include <errno.h> 15 + #include <string.h> 16 + #include <assert.h> 17 + #include <stdlib.h> 18 + 19 + #include <sys/wait.h> 20 + #include <sys/resource.h> 21 + 22 + #include <linux/bpf.h> 23 + 24 + #include "bpf_sys.h" 25 + 26 + static int map_flags; 27 + 28 + static void test_hashmap(int task, void *data) 29 + { 30 + long long key, next_key, value; 31 + int fd; 32 + 33 + fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 34 + 2, map_flags); 35 + if (fd < 0) { 36 + printf("Failed to create hashmap '%s'!\n", strerror(errno)); 37 + exit(1); 38 + } 39 + 40 + key = 1; 41 + value = 1234; 42 + /* Insert key=1 element. */ 43 + assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 44 + 45 + value = 0; 46 + /* BPF_NOEXIST means add new element if it doesn't exist. */ 47 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 48 + /* key=1 already exists. */ 49 + errno == EEXIST); 50 + 51 + /* -1 is an invalid flag. */ 52 + assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL); 53 + 54 + /* Check that key=1 can be found. */ 55 + assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 56 + 57 + key = 2; 58 + /* Check that key=2 is not found. */ 59 + assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 60 + 61 + /* BPF_EXIST means update existing element. */ 62 + assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 63 + /* key=2 is not there. */ 64 + errno == ENOENT); 65 + 66 + /* Insert key=2 element. 
*/ 67 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 68 + 69 + /* key=1 and key=2 were inserted, check that key=0 cannot be 70 + * inserted due to max_entries limit. 71 + */ 72 + key = 0; 73 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 74 + errno == E2BIG); 75 + 76 + /* Update existing element, though the map is full. */ 77 + key = 1; 78 + assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 79 + key = 2; 80 + assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 81 + key = 1; 82 + assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 83 + 84 + /* Check that key = 0 doesn't exist. */ 85 + key = 0; 86 + assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 87 + 88 + /* Iterate over two elements. */ 89 + assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 90 + (next_key == 1 || next_key == 2)); 91 + assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 92 + (next_key == 1 || next_key == 2)); 93 + assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 94 + errno == ENOENT); 95 + 96 + /* Delete both elements. */ 97 + key = 1; 98 + assert(bpf_map_delete(fd, &key) == 0); 99 + key = 2; 100 + assert(bpf_map_delete(fd, &key) == 0); 101 + assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 102 + 103 + key = 0; 104 + /* Check that map is empty. 
*/ 105 + assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 106 + errno == ENOENT); 107 + 108 + close(fd); 109 + } 110 + 111 + static void test_hashmap_percpu(int task, void *data) 112 + { 113 + unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 114 + long long value[nr_cpus]; 115 + long long key, next_key; 116 + int expected_key_mask = 0; 117 + int fd, i; 118 + 119 + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), 120 + sizeof(value[0]), 2, map_flags); 121 + if (fd < 0) { 122 + printf("Failed to create hashmap '%s'!\n", strerror(errno)); 123 + exit(1); 124 + } 125 + 126 + for (i = 0; i < nr_cpus; i++) 127 + value[i] = i + 100; 128 + 129 + key = 1; 130 + /* Insert key=1 element. */ 131 + assert(!(expected_key_mask & key)); 132 + assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0); 133 + expected_key_mask |= key; 134 + 135 + /* BPF_NOEXIST means add new element if it doesn't exist. */ 136 + assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 137 + /* key=1 already exists. */ 138 + errno == EEXIST); 139 + 140 + /* -1 is an invalid flag. */ 141 + assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL); 142 + 143 + /* Check that key=1 can be found. Value could be 0 if the lookup 144 + * was run from a different CPU. 145 + */ 146 + value[0] = 1; 147 + assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100); 148 + 149 + key = 2; 150 + /* Check that key=2 is not found. */ 151 + assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT); 152 + 153 + /* BPF_EXIST means update existing element. */ 154 + assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 && 155 + /* key=2 is not there. */ 156 + errno == ENOENT); 157 + 158 + /* Insert key=2 element. */ 159 + assert(!(expected_key_mask & key)); 160 + assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0); 161 + expected_key_mask |= key; 162 + 163 + /* key=1 and key=2 were inserted, check that key=0 cannot be 164 + * inserted due to max_entries limit. 
165 + */ 166 + key = 0; 167 + assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 168 + errno == E2BIG); 169 + 170 + /* Check that key = 0 doesn't exist. */ 171 + assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 172 + 173 + /* Iterate over two elements. */ 174 + while (!bpf_map_next_key(fd, &key, &next_key)) { 175 + assert((expected_key_mask & next_key) == next_key); 176 + expected_key_mask &= ~next_key; 177 + 178 + assert(bpf_map_lookup(fd, &next_key, value) == 0); 179 + 180 + for (i = 0; i < nr_cpus; i++) 181 + assert(value[i] == i + 100); 182 + 183 + key = next_key; 184 + } 185 + assert(errno == ENOENT); 186 + 187 + /* Update with BPF_EXIST. */ 188 + key = 1; 189 + assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0); 190 + 191 + /* Delete both elements. */ 192 + key = 1; 193 + assert(bpf_map_delete(fd, &key) == 0); 194 + key = 2; 195 + assert(bpf_map_delete(fd, &key) == 0); 196 + assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 197 + 198 + key = 0; 199 + /* Check that map is empty. */ 200 + assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 201 + errno == ENOENT); 202 + 203 + close(fd); 204 + } 205 + 206 + static void test_arraymap(int task, void *data) 207 + { 208 + int key, next_key, fd; 209 + long long value; 210 + 211 + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 212 + 2, 0); 213 + if (fd < 0) { 214 + printf("Failed to create arraymap '%s'!\n", strerror(errno)); 215 + exit(1); 216 + } 217 + 218 + key = 1; 219 + value = 1234; 220 + /* Insert key=1 element. */ 221 + assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 222 + 223 + value = 0; 224 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 225 + errno == EEXIST); 226 + 227 + /* Check that key=1 can be found. */ 228 + assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 229 + 230 + key = 0; 231 + /* Check that key=0 is also found and zero initialized. 
*/ 232 + assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 233 + 234 + /* key=0 and key=1 were inserted, check that key=2 cannot be inserted 235 + * due to max_entries limit. 236 + */ 237 + key = 2; 238 + assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 239 + errno == E2BIG); 240 + 241 + /* Check that key = 2 doesn't exist. */ 242 + assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 243 + 244 + /* Iterate over two elements. */ 245 + assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 246 + next_key == 0); 247 + assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 248 + next_key == 1); 249 + assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 250 + errno == ENOENT); 251 + 252 + /* Delete shouldn't succeed. */ 253 + key = 1; 254 + assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 255 + 256 + close(fd); 257 + } 258 + 259 + static void test_arraymap_percpu(int task, void *data) 260 + { 261 + unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 262 + int key, next_key, fd, i; 263 + long values[nr_cpus]; 264 + 265 + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 266 + sizeof(values[0]), 2, 0); 267 + if (fd < 0) { 268 + printf("Failed to create arraymap '%s'!\n", strerror(errno)); 269 + exit(1); 270 + } 271 + 272 + for (i = 0; i < nr_cpus; i++) 273 + values[i] = i + 100; 274 + 275 + key = 1; 276 + /* Insert key=1 element. */ 277 + assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 278 + 279 + values[0] = 0; 280 + assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 && 281 + errno == EEXIST); 282 + 283 + /* Check that key=1 can be found. */ 284 + assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100); 285 + 286 + key = 0; 287 + /* Check that key=0 is also found and zero initialized. */ 288 + assert(bpf_map_lookup(fd, &key, values) == 0 && 289 + values[0] == 0 && values[nr_cpus - 1] == 0); 290 + 291 + /* Check that key=2 cannot be inserted due to max_entries limit. 
*/ 292 + key = 2; 293 + assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 && 294 + errno == E2BIG); 295 + 296 + /* Check that key = 2 doesn't exist. */ 297 + assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT); 298 + 299 + /* Iterate over two elements. */ 300 + assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 301 + next_key == 0); 302 + assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 303 + next_key == 1); 304 + assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 305 + errno == ENOENT); 306 + 307 + /* Delete shouldn't succeed. */ 308 + key = 1; 309 + assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 310 + 311 + close(fd); 312 + } 313 + 314 + static void test_arraymap_percpu_many_keys(void) 315 + { 316 + unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 317 + unsigned int nr_keys = 20000; 318 + long values[nr_cpus]; 319 + int key, fd, i; 320 + 321 + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 322 + sizeof(values[0]), nr_keys, 0); 323 + if (fd < 0) { 324 + printf("Failed to create per-cpu arraymap '%s'!\n", 325 + strerror(errno)); 326 + exit(1); 327 + } 328 + 329 + for (i = 0; i < nr_cpus; i++) 330 + values[i] = i + 10; 331 + 332 + for (key = 0; key < nr_keys; key++) 333 + assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 334 + 335 + for (key = 0; key < nr_keys; key++) { 336 + for (i = 0; i < nr_cpus; i++) 337 + values[i] = 0; 338 + 339 + assert(bpf_map_lookup(fd, &key, values) == 0); 340 + 341 + for (i = 0; i < nr_cpus; i++) 342 + assert(values[i] == i + 10); 343 + } 344 + 345 + close(fd); 346 + } 347 + 348 + #define MAP_SIZE (32 * 1024) 349 + 350 + static void test_map_large(void) 351 + { 352 + struct bigkey { 353 + int a; 354 + char b[116]; 355 + long long c; 356 + } key; 357 + int fd, i, value; 358 + 359 + fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 360 + MAP_SIZE, map_flags); 361 + if (fd < 0) { 362 + printf("Failed to create large map '%s'!\n", strerror(errno)); 
363 + exit(1); 364 + } 365 + 366 + for (i = 0; i < MAP_SIZE; i++) { 367 + key = (struct bigkey) { .c = i }; 368 + value = i; 369 + 370 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 371 + } 372 + 373 + key.c = -1; 374 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 375 + errno == E2BIG); 376 + 377 + /* Iterate through all elements. */ 378 + for (i = 0; i < MAP_SIZE; i++) 379 + assert(bpf_map_next_key(fd, &key, &key) == 0); 380 + assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 381 + 382 + key.c = 0; 383 + assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 384 + key.a = 1; 385 + assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 386 + 387 + close(fd); 388 + } 389 + 390 + static void run_parallel(int tasks, void (*fn)(int task, void *data), 391 + void *data) 392 + { 393 + pid_t pid[tasks]; 394 + int i; 395 + 396 + for (i = 0; i < tasks; i++) { 397 + pid[i] = fork(); 398 + if (pid[i] == 0) { 399 + fn(i, data); 400 + exit(0); 401 + } else if (pid[i] == -1) { 402 + printf("Couldn't spawn #%d process!\n", i); 403 + exit(1); 404 + } 405 + } 406 + 407 + for (i = 0; i < tasks; i++) { 408 + int status; 409 + 410 + assert(waitpid(pid[i], &status, 0) == pid[i]); 411 + assert(status == 0); 412 + } 413 + } 414 + 415 + static void test_map_stress(void) 416 + { 417 + run_parallel(100, test_hashmap, NULL); 418 + run_parallel(100, test_hashmap_percpu, NULL); 419 + 420 + run_parallel(100, test_arraymap, NULL); 421 + run_parallel(100, test_arraymap_percpu, NULL); 422 + } 423 + 424 + #define TASKS 1024 425 + 426 + #define DO_UPDATE 1 427 + #define DO_DELETE 0 428 + 429 + static void do_work(int fn, void *data) 430 + { 431 + int do_update = ((int *)data)[1]; 432 + int fd = ((int *)data)[0]; 433 + int i, key, value; 434 + 435 + for (i = fn; i < MAP_SIZE; i += TASKS) { 436 + key = value = i; 437 + 438 + if (do_update) { 439 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 440 + 
assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 441 + } else { 442 + assert(bpf_map_delete(fd, &key) == 0); 443 + } 444 + } 445 + } 446 + 447 + static void test_map_parallel(void) 448 + { 449 + int i, fd, key = 0, value = 0; 450 + int data[2]; 451 + 452 + fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 453 + MAP_SIZE, map_flags); 454 + if (fd < 0) { 455 + printf("Failed to create map for parallel test '%s'!\n", 456 + strerror(errno)); 457 + exit(1); 458 + } 459 + 460 + /* Use the same fd in children to add elements to this map: 461 + * child_0 adds key=0, key=1024, key=2048, ... 462 + * child_1 adds key=1, key=1025, key=2049, ... 463 + * child_1023 adds key=1023, ... 464 + */ 465 + data[0] = fd; 466 + data[1] = DO_UPDATE; 467 + run_parallel(TASKS, do_work, data); 468 + 469 + /* Check that key=0 is already there. */ 470 + assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 471 + errno == EEXIST); 472 + 473 + /* Check that all elements were inserted. */ 474 + key = -1; 475 + for (i = 0; i < MAP_SIZE; i++) 476 + assert(bpf_map_next_key(fd, &key, &key) == 0); 477 + assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 478 + 479 + /* Another check for all elements */ 480 + for (i = 0; i < MAP_SIZE; i++) { 481 + key = MAP_SIZE - i - 1; 482 + 483 + assert(bpf_map_lookup(fd, &key, &value) == 0 && 484 + value == key); 485 + } 486 + 487 + /* Now let's delete all elemenets in parallel. */ 488 + data[1] = DO_DELETE; 489 + run_parallel(TASKS, do_work, data); 490 + 491 + /* Nothing should be left. 
*/ 492 + key = -1; 493 + assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 494 + } 495 + 496 + static void run_all_tests(void) 497 + { 498 + test_hashmap(0, NULL); 499 + test_hashmap_percpu(0, NULL); 500 + 501 + test_arraymap(0, NULL); 502 + test_arraymap_percpu(0, NULL); 503 + 504 + test_arraymap_percpu_many_keys(); 505 + 506 + test_map_large(); 507 + test_map_parallel(); 508 + test_map_stress(); 509 + } 510 + 511 + int main(void) 512 + { 513 + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 514 + 515 + setrlimit(RLIMIT_MEMLOCK, &rinf); 516 + 517 + map_flags = 0; 518 + run_all_tests(); 519 + 520 + map_flags = BPF_F_NO_PREALLOC; 521 + run_all_tests(); 522 + 523 + printf("test_maps: OK\n"); 524 + return 0; 525 + }