Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bpf-selftests-improve-and-use-library'

Mickaël Salaün says:

====================
Improve BPF selftests and use the library (net-next tree)

This series brings some fixes to selftests, adds the ability to test
unprivileged BPF programs as root and replaces bpf_sys.h with calls to the BPF
library.

This is intended for the net-next tree and applies on top of c0e4dadb3494 ("net: dsa:
mv88e6xxx: Move forward declaration to where it is needed").

Changes since v4:
* align text for function calls as requested by Daniel Borkmann
(bpf_load_program and bpf_map_update_elem)
* rebase

Changes since v3:
* keep the bzero() calls

Changes since v2:
* use the patches from two previous series (unprivileged tests and bpf_sys.h
replacement)
* include one more stdint.h
* rebase on net-next
* add this cover letter

Changes since v1:
* exclude patches not intended for the net-next tree
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+301 -300
+22 -1
tools/include/uapi/linux/bpf.h
··· 63 63 __s32 imm; /* signed immediate constant */ 64 64 }; 65 65 66 + /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ 67 + struct bpf_lpm_trie_key { 68 + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ 69 + __u8 data[0]; /* Arbitrary size */ 70 + }; 71 + 66 72 /* BPF syscall commands, see bpf(2) man-page for details. */ 67 73 enum bpf_cmd { 68 74 BPF_MAP_CREATE, ··· 95 89 BPF_MAP_TYPE_CGROUP_ARRAY, 96 90 BPF_MAP_TYPE_LRU_HASH, 97 91 BPF_MAP_TYPE_LRU_PERCPU_HASH, 92 + BPF_MAP_TYPE_LPM_TRIE, 98 93 }; 99 94 100 95 enum bpf_prog_type { ··· 437 430 * @xdp_md: pointer to xdp_md 438 431 * @delta: An positive/negative integer to be added to xdp_md.data 439 432 * Return: 0 on success or negative on error 433 + * 434 + * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) 435 + * Copy a NUL terminated string from unsafe address. In case the string 436 + * length is smaller than size, the target is not padded with further NUL 437 + * bytes. In case the string length is larger than size, just count-1 438 + * bytes are copied and the last byte is set to NUL. 439 + * @dst: destination address 440 + * @size: maximum number of bytes to copy, including the trailing NUL 441 + * @unsafe_ptr: unsafe address 442 + * Return: 443 + * > 0 length of the string including the trailing NUL on success 444 + * < 0 error 440 445 */ 441 446 #define __BPF_FUNC_MAPPER(FN) \ 442 447 FN(unspec), \ ··· 495 476 FN(set_hash_invalid), \ 496 477 FN(get_numa_node_id), \ 497 478 FN(skb_change_head), \ 498 - FN(xdp_adjust_head), 479 + FN(xdp_adjust_head), \ 480 + FN(probe_read_str), 499 481 500 482 /* integer value in 'imm' field of BPF_CALL instruction selects which helper 501 483 * function eBPF program intends to call ··· 522 502 /* BPF_FUNC_l4_csum_replace flags. */ 523 503 #define BPF_F_PSEUDO_HDR (1ULL << 4) 524 504 #define BPF_F_MARK_MANGLED_0 (1ULL << 5) 505 + #define BPF_F_MARK_ENFORCE (1ULL << 6) 525 506 526 507 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. 
*/ 527 508 #define BPF_F_INGRESS (1ULL << 0)
+13 -7
tools/lib/bpf/bpf.c
··· 42 42 # endif 43 43 #endif 44 44 45 - static __u64 ptr_to_u64(void *ptr) 45 + static __u64 ptr_to_u64(const void *ptr) 46 46 { 47 47 return (__u64) (unsigned long) ptr; 48 48 } ··· 50 50 static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, 51 51 unsigned int size) 52 52 { 53 + #ifdef __NR_bpf 53 54 return syscall(__NR_bpf, cmd, attr, size); 55 + #else 56 + fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); 57 + errno = ENOSYS; 58 + return -1; 59 + #endif 54 60 } 55 61 56 62 int bpf_create_map(enum bpf_map_type map_type, int key_size, ··· 75 69 return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 76 70 } 77 71 78 - int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, 79 - size_t insns_cnt, char *license, 72 + int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, 73 + size_t insns_cnt, const char *license, 80 74 __u32 kern_version, char *log_buf, size_t log_buf_sz) 81 75 { 82 76 int fd; ··· 104 98 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 105 99 } 106 100 107 - int bpf_map_update_elem(int fd, void *key, void *value, 101 + int bpf_map_update_elem(int fd, const void *key, const void *value, 108 102 __u64 flags) 109 103 { 110 104 union bpf_attr attr; ··· 118 112 return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); 119 113 } 120 114 121 - int bpf_map_lookup_elem(int fd, void *key, void *value) 115 + int bpf_map_lookup_elem(int fd, const void *key, void *value) 122 116 { 123 117 union bpf_attr attr; 124 118 ··· 130 124 return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 131 125 } 132 126 133 - int bpf_map_delete_elem(int fd, void *key) 127 + int bpf_map_delete_elem(int fd, const void *key) 134 128 { 135 129 union bpf_attr attr; 136 130 ··· 141 135 return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 142 136 } 143 137 144 - int bpf_map_get_next_key(int fd, void *key, void *next_key) 138 + int bpf_map_get_next_key(int fd, const void *key, void *next_key) 145 139 { 146 140 union bpf_attr 
attr; 147 141
+6 -6
tools/lib/bpf/bpf.h
··· 28 28 29 29 /* Recommend log buffer size */ 30 30 #define BPF_LOG_BUF_SIZE 65536 31 - int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, 32 - size_t insns_cnt, char *license, 31 + int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, 32 + size_t insns_cnt, const char *license, 33 33 __u32 kern_version, char *log_buf, 34 34 size_t log_buf_sz); 35 35 36 - int bpf_map_update_elem(int fd, void *key, void *value, 36 + int bpf_map_update_elem(int fd, const void *key, const void *value, 37 37 __u64 flags); 38 38 39 - int bpf_map_lookup_elem(int fd, void *key, void *value); 40 - int bpf_map_delete_elem(int fd, void *key); 41 - int bpf_map_get_next_key(int fd, void *key, void *next_key); 39 + int bpf_map_lookup_elem(int fd, const void *key, void *value); 40 + int bpf_map_delete_elem(int fd, const void *key); 41 + int bpf_map_get_next_key(int fd, const void *key, void *next_key); 42 42 int bpf_obj_pin(int fd, const char *pathname); 43 43 int bpf_obj_get(const char *pathname); 44 44 int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+1
tools/testing/selftests/bpf/.gitignore
··· 2 2 test_maps 3 3 test_lru_map 4 4 test_lpm_map 5 + test_tag
+3 -1
tools/testing/selftests/bpf/Makefile
··· 1 - CFLAGS += -Wall -O2 -I../../../../usr/include 1 + CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I../../../lib 2 2 3 3 test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map 4 4 ··· 6 6 TEST_FILES := $(test_objs) 7 7 8 8 all: $(test_objs) 9 + 10 + $(test_objs): ../../../lib/bpf/bpf.o 9 11 10 12 include ../lib.mk 11 13
-108
tools/testing/selftests/bpf/bpf_sys.h
··· 1 - #ifndef __BPF_SYS__ 2 - #define __BPF_SYS__ 3 - 4 - #include <stdint.h> 5 - #include <stdlib.h> 6 - 7 - #include <sys/syscall.h> 8 - 9 - #include <linux/bpf.h> 10 - 11 - static inline __u64 bpf_ptr_to_u64(const void *ptr) 12 - { 13 - return (__u64)(unsigned long) ptr; 14 - } 15 - 16 - static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size) 17 - { 18 - #ifdef __NR_bpf 19 - return syscall(__NR_bpf, cmd, attr, size); 20 - #else 21 - fprintf(stderr, "No bpf syscall, kernel headers too old?\n"); 22 - errno = ENOSYS; 23 - return -1; 24 - #endif 25 - } 26 - 27 - static inline int bpf_map_lookup(int fd, const void *key, void *value) 28 - { 29 - union bpf_attr attr = {}; 30 - 31 - attr.map_fd = fd; 32 - attr.key = bpf_ptr_to_u64(key); 33 - attr.value = bpf_ptr_to_u64(value); 34 - 35 - return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 36 - } 37 - 38 - static inline int bpf_map_update(int fd, const void *key, const void *value, 39 - uint64_t flags) 40 - { 41 - union bpf_attr attr = {}; 42 - 43 - attr.map_fd = fd; 44 - attr.key = bpf_ptr_to_u64(key); 45 - attr.value = bpf_ptr_to_u64(value); 46 - attr.flags = flags; 47 - 48 - return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); 49 - } 50 - 51 - static inline int bpf_map_delete(int fd, const void *key) 52 - { 53 - union bpf_attr attr = {}; 54 - 55 - attr.map_fd = fd; 56 - attr.key = bpf_ptr_to_u64(key); 57 - 58 - return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 59 - } 60 - 61 - static inline int bpf_map_next_key(int fd, const void *key, void *next_key) 62 - { 63 - union bpf_attr attr = {}; 64 - 65 - attr.map_fd = fd; 66 - attr.key = bpf_ptr_to_u64(key); 67 - attr.next_key = bpf_ptr_to_u64(next_key); 68 - 69 - return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); 70 - } 71 - 72 - static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key, 73 - uint32_t size_value, uint32_t max_elem, 74 - uint32_t flags) 75 - { 76 - union bpf_attr attr = {}; 77 - 78 - attr.map_type = type; 79 - 
attr.key_size = size_key; 80 - attr.value_size = size_value; 81 - attr.max_entries = max_elem; 82 - attr.map_flags = flags; 83 - 84 - return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 85 - } 86 - 87 - static inline int bpf_prog_load(enum bpf_prog_type type, 88 - const struct bpf_insn *insns, size_t size_insns, 89 - const char *license, char *log, size_t size_log) 90 - { 91 - union bpf_attr attr = {}; 92 - 93 - attr.prog_type = type; 94 - attr.insns = bpf_ptr_to_u64(insns); 95 - attr.insn_cnt = size_insns / sizeof(struct bpf_insn); 96 - attr.license = bpf_ptr_to_u64(license); 97 - 98 - if (size_log > 0) { 99 - attr.log_buf = bpf_ptr_to_u64(log); 100 - attr.log_size = size_log; 101 - attr.log_level = 1; 102 - log[0] = 0; 103 - } 104 - 105 - return bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 106 - } 107 - 108 - #endif /* __BPF_SYS__ */
+19 -19
tools/testing/selftests/bpf/test_lpm_map.c
··· 22 22 #include <sys/time.h> 23 23 #include <sys/resource.h> 24 24 25 - #include "bpf_sys.h" 25 + #include <bpf/bpf.h> 26 26 #include "bpf_util.h" 27 27 28 28 struct tlpm_node { ··· 182 182 key = alloca(sizeof(*key) + keysize); 183 183 memset(key, 0, sizeof(*key) + keysize); 184 184 185 - map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, 185 + map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, 186 186 sizeof(*key) + keysize, 187 187 keysize + 1, 188 188 4096, ··· 198 198 199 199 key->prefixlen = value[keysize]; 200 200 memcpy(key->data, value, keysize); 201 - r = bpf_map_update(map, key, value, 0); 201 + r = bpf_map_update_elem(map, key, value, 0); 202 202 assert(!r); 203 203 } 204 204 ··· 210 210 211 211 key->prefixlen = 8 * keysize; 212 212 memcpy(key->data, data, keysize); 213 - r = bpf_map_lookup(map, key, value); 213 + r = bpf_map_lookup_elem(map, key, value); 214 214 assert(!r || errno == ENOENT); 215 215 assert(!t == !!r); 216 216 ··· 252 252 key_ipv4 = alloca(key_size_ipv4); 253 253 key_ipv6 = alloca(key_size_ipv6); 254 254 255 - map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, 255 + map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, 256 256 key_size_ipv4, sizeof(value), 257 257 100, BPF_F_NO_PREALLOC); 258 258 assert(map_fd_ipv4 >= 0); 259 259 260 - map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, 260 + map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, 261 261 key_size_ipv6, sizeof(value), 262 262 100, BPF_F_NO_PREALLOC); 263 263 assert(map_fd_ipv6 >= 0); ··· 266 266 value = 1; 267 267 key_ipv4->prefixlen = 16; 268 268 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); 269 - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); 269 + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); 270 270 271 271 value = 2; 272 272 key_ipv4->prefixlen = 24; 273 273 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); 274 - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); 274 + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 
0) == 0); 275 275 276 276 value = 3; 277 277 key_ipv4->prefixlen = 24; 278 278 inet_pton(AF_INET, "192.168.128.0", key_ipv4->data); 279 - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); 279 + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); 280 280 281 281 value = 5; 282 282 key_ipv4->prefixlen = 24; 283 283 inet_pton(AF_INET, "192.168.1.0", key_ipv4->data); 284 - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); 284 + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); 285 285 286 286 value = 4; 287 287 key_ipv4->prefixlen = 23; 288 288 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); 289 - assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0); 289 + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); 290 290 291 291 value = 0xdeadbeef; 292 292 key_ipv6->prefixlen = 64; 293 293 inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data); 294 - assert(bpf_map_update(map_fd_ipv6, key_ipv6, &value, 0) == 0); 294 + assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0); 295 295 296 296 /* Set tprefixlen to maximum for lookups */ 297 297 key_ipv4->prefixlen = 32; ··· 299 299 300 300 /* Test some lookups that should come back with a value */ 301 301 inet_pton(AF_INET, "192.168.128.23", key_ipv4->data); 302 - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); 302 + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); 303 303 assert(value == 3); 304 304 305 305 inet_pton(AF_INET, "192.168.0.1", key_ipv4->data); 306 - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0); 306 + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); 307 307 assert(value == 2); 308 308 309 309 inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data); 310 - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); 310 + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); 311 311 assert(value == 0xdeadbeef); 312 312 313 313 
inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data); 314 - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0); 314 + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); 315 315 assert(value == 0xdeadbeef); 316 316 317 317 /* Test some lookups that should not match any entry */ 318 318 inet_pton(AF_INET, "10.0.0.1", key_ipv4->data); 319 - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && 319 + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 && 320 320 errno == ENOENT); 321 321 322 322 inet_pton(AF_INET, "11.11.11.11", key_ipv4->data); 323 - assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 && 323 + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 && 324 324 errno == ENOENT); 325 325 326 326 inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data); 327 - assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == -1 && 327 + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 && 328 328 errno == ENOENT); 329 329 330 330 close(map_fd_ipv4);
+76 -62
tools/testing/selftests/bpf/test_lru_map.c
··· 18 18 #include <sys/wait.h> 19 19 #include <sys/resource.h> 20 20 21 - #include "bpf_sys.h" 21 + #include <bpf/bpf.h> 22 22 #include "bpf_util.h" 23 23 24 24 #define LOCAL_FREE_TARGET (128) ··· 30 30 { 31 31 int map_fd; 32 32 33 - map_fd = bpf_map_create(map_type, sizeof(unsigned long long), 33 + map_fd = bpf_create_map(map_type, sizeof(unsigned long long), 34 34 sizeof(unsigned long long), size, map_flags); 35 35 36 36 if (map_fd == -1) 37 - perror("bpf_map_create"); 37 + perror("bpf_create_map"); 38 38 39 39 return map_fd; 40 40 } ··· 45 45 unsigned long long value0[nr_cpus], value1[nr_cpus]; 46 46 int ret; 47 47 48 - while (!bpf_map_next_key(map1, &next_key, &next_key)) { 49 - assert(!bpf_map_lookup(map1, &next_key, value1)); 50 - ret = bpf_map_lookup(map0, &next_key, value0); 48 + while (!bpf_map_get_next_key(map1, &next_key, &next_key)) { 49 + assert(!bpf_map_lookup_elem(map1, &next_key, value1)); 50 + ret = bpf_map_lookup_elem(map0, &next_key, value0); 51 51 if (ret) { 52 52 printf("key:%llu not found from map. 
%s(%d)\n", 53 53 next_key, strerror(errno), errno); ··· 119 119 /* insert key=1 element */ 120 120 121 121 key = 1; 122 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 123 - assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); 122 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 123 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 124 + BPF_NOEXIST)); 124 125 125 126 /* BPF_NOEXIST means: add new element if it doesn't exist */ 126 - assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 && 127 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1 127 128 /* key=1 already exists */ 128 - errno == EEXIST); 129 + && errno == EEXIST); 129 130 130 - assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 && 131 + assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 && 131 132 errno == EINVAL); 132 133 133 134 /* insert key=2 element */ 134 135 135 136 /* check that key=2 is not found */ 136 137 key = 2; 137 - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && 138 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 138 139 errno == ENOENT); 139 140 140 141 /* BPF_EXIST means: update existing element */ 141 - assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 && 142 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 && 142 143 /* key=2 is not there */ 143 144 errno == ENOENT); 144 145 145 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 146 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 146 147 147 148 /* insert key=3 element */ 148 149 149 150 /* check that key=3 is not found */ 150 151 key = 3; 151 - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && 152 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 152 153 errno == ENOENT); 153 154 154 155 /* check that key=1 can be found and mark the ref bit to 155 156 * stop LRU from removing key=1 156 157 */ 157 158 key = 
1; 158 - assert(!bpf_map_lookup(lru_map_fd, &key, value)); 159 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 159 160 assert(value[0] == 1234); 160 161 161 162 key = 3; 162 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 163 - assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); 163 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 164 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 165 + BPF_NOEXIST)); 164 166 165 167 /* key=2 has been removed from the LRU */ 166 168 key = 2; 167 - assert(bpf_map_lookup(lru_map_fd, &key, value) == -1); 169 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1); 168 170 169 171 assert(map_equal(lru_map_fd, expected_map_fd)); 170 172 ··· 219 217 /* Insert 1 to tgt_free (+tgt_free keys) */ 220 218 end_key = 1 + tgt_free; 221 219 for (key = 1; key < end_key; key++) 222 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 220 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 221 + BPF_NOEXIST)); 223 222 224 223 /* Lookup 1 to tgt_free/2 */ 225 224 end_key = 1 + batch_size; 226 225 for (key = 1; key < end_key; key++) { 227 - assert(!bpf_map_lookup(lru_map_fd, &key, value)); 228 - assert(!bpf_map_update(expected_map_fd, &key, value, 229 - BPF_NOEXIST)); 226 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 227 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 228 + BPF_NOEXIST)); 230 229 } 231 230 232 231 /* Insert 1+tgt_free to 2*tgt_free ··· 237 234 key = 1 + tgt_free; 238 235 end_key = key + tgt_free; 239 236 for (; key < end_key; key++) { 240 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 241 - assert(!bpf_map_update(expected_map_fd, &key, value, 242 - BPF_NOEXIST)); 237 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 238 + BPF_NOEXIST)); 239 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 240 + BPF_NOEXIST)); 243 241 } 244 242 245 243 assert(map_equal(lru_map_fd, expected_map_fd)); 
··· 305 301 /* Insert 1 to tgt_free (+tgt_free keys) */ 306 302 end_key = 1 + tgt_free; 307 303 for (key = 1; key < end_key; key++) 308 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 304 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 305 + BPF_NOEXIST)); 309 306 310 - /* Any bpf_map_update will require to acquire a new node 307 + /* Any bpf_map_update_elem will require to acquire a new node 311 308 * from LRU first. 312 309 * 313 310 * The local list is running out of free nodes. ··· 321 316 */ 322 317 key = 1; 323 318 if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 324 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 325 - assert(!bpf_map_delete(lru_map_fd, &key)); 319 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 320 + BPF_NOEXIST)); 321 + assert(!bpf_map_delete_elem(lru_map_fd, &key)); 326 322 } else { 327 - assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST)); 323 + assert(bpf_map_update_elem(lru_map_fd, &key, value, 324 + BPF_EXIST)); 328 325 } 329 326 330 327 /* Re-insert 1 to tgt_free/2 again and do a lookup ··· 335 328 end_key = 1 + batch_size; 336 329 value[0] = 4321; 337 330 for (key = 1; key < end_key; key++) { 338 - assert(bpf_map_lookup(lru_map_fd, &key, value)); 339 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 340 - assert(!bpf_map_lookup(lru_map_fd, &key, value)); 331 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value)); 332 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 333 + BPF_NOEXIST)); 334 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 341 335 assert(value[0] == 4321); 342 - assert(!bpf_map_update(expected_map_fd, &key, value, 343 - BPF_NOEXIST)); 336 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 337 + BPF_NOEXIST)); 344 338 } 345 339 346 340 value[0] = 1234; ··· 352 344 /* These newly added but not referenced keys will be 353 345 * gone during the next LRU shrink. 
354 346 */ 355 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 347 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 348 + BPF_NOEXIST)); 356 349 357 350 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */ 358 351 end_key = key + tgt_free; 359 352 for (; key < end_key; key++) { 360 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 361 - assert(!bpf_map_update(expected_map_fd, &key, value, 362 - BPF_NOEXIST)); 353 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 354 + BPF_NOEXIST)); 355 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 356 + BPF_NOEXIST)); 363 357 } 364 358 365 359 assert(map_equal(lru_map_fd, expected_map_fd)); ··· 411 401 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ 412 402 end_key = 1 + (2 * tgt_free); 413 403 for (key = 1; key < end_key; key++) 414 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 404 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 405 + BPF_NOEXIST)); 415 406 416 407 /* Lookup key 1 to tgt_free*3/2 */ 417 408 end_key = tgt_free + batch_size; 418 409 for (key = 1; key < end_key; key++) { 419 - assert(!bpf_map_lookup(lru_map_fd, &key, value)); 420 - assert(!bpf_map_update(expected_map_fd, &key, value, 421 - BPF_NOEXIST)); 410 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 411 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 412 + BPF_NOEXIST)); 422 413 } 423 414 424 415 /* Add 1+2*tgt_free to tgt_free*5/2 ··· 428 417 key = 2 * tgt_free + 1; 429 418 end_key = key + batch_size; 430 419 for (; key < end_key; key++) { 431 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 432 - assert(!bpf_map_update(expected_map_fd, &key, value, 433 - BPF_NOEXIST)); 420 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 421 + BPF_NOEXIST)); 422 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 423 + BPF_NOEXIST)); 434 424 } 435 425 436 426 assert(map_equal(lru_map_fd, expected_map_fd)); ··· 469 457 value[0] = 1234; 470 458 471 459 
for (key = 1; key <= 2 * tgt_free; key++) 472 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 460 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 461 + BPF_NOEXIST)); 473 462 474 463 key = 1; 475 - assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 464 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 476 465 477 466 for (key = 1; key <= tgt_free; key++) { 478 - assert(!bpf_map_lookup(lru_map_fd, &key, value)); 479 - assert(!bpf_map_update(expected_map_fd, &key, value, 480 - BPF_NOEXIST)); 467 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 468 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 469 + BPF_NOEXIST)); 481 470 } 482 471 483 472 for (; key <= 2 * tgt_free; key++) { 484 - assert(!bpf_map_delete(lru_map_fd, &key)); 485 - assert(bpf_map_delete(lru_map_fd, &key)); 473 + assert(!bpf_map_delete_elem(lru_map_fd, &key)); 474 + assert(bpf_map_delete_elem(lru_map_fd, &key)); 486 475 } 487 476 488 477 end_key = key + 2 * tgt_free; 489 478 for (; key < end_key; key++) { 490 - assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 491 - assert(!bpf_map_update(expected_map_fd, &key, value, 492 - BPF_NOEXIST)); 479 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, 480 + BPF_NOEXIST)); 481 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 482 + BPF_NOEXIST)); 493 483 } 494 484 495 485 assert(map_equal(lru_map_fd, expected_map_fd)); ··· 507 493 unsigned long long key, value[nr_cpus]; 508 494 509 495 /* Ensure the last key inserted by previous CPU can be found */ 510 - assert(!bpf_map_lookup(map_fd, &last_key, value)); 496 + assert(!bpf_map_lookup_elem(map_fd, &last_key, value)); 511 497 512 498 value[0] = 1234; 513 499 514 500 key = last_key + 1; 515 - assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); 516 - assert(!bpf_map_lookup(map_fd, &key, value)); 501 + assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); 502 + assert(!bpf_map_lookup_elem(map_fd, &key, 
value)); 517 503 518 504 /* Cannot find the last key because it was removed by LRU */ 519 - assert(bpf_map_lookup(map_fd, &last_key, value)); 505 + assert(bpf_map_lookup_elem(map_fd, &last_key, value)); 520 506 } 521 507 522 508 /* Test map with only one element */ ··· 537 523 538 524 value[0] = 1234; 539 525 key = 0; 540 - assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); 526 + assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); 541 527 542 528 while (sched_next_online(0, &next_cpu) != -1) { 543 529 pid_t pid;
+83 -79
tools/testing/selftests/bpf/test_maps.c
··· 21 21 22 22 #include <linux/bpf.h> 23 23 24 - #include "bpf_sys.h" 24 + #include <bpf/bpf.h> 25 25 #include "bpf_util.h" 26 26 27 27 static int map_flags; ··· 31 31 long long key, next_key, value; 32 32 int fd; 33 33 34 - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 34 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 35 35 2, map_flags); 36 36 if (fd < 0) { 37 37 printf("Failed to create hashmap '%s'!\n", strerror(errno)); ··· 41 41 key = 1; 42 42 value = 1234; 43 43 /* Insert key=1 element. */ 44 - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 44 + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 45 45 46 46 value = 0; 47 47 /* BPF_NOEXIST means add new element if it doesn't exist. */ 48 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 48 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 49 49 /* key=1 already exists. */ 50 50 errno == EEXIST); 51 51 52 52 /* -1 is an invalid flag. */ 53 - assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL); 53 + assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 && 54 + errno == EINVAL); 54 55 55 56 /* Check that key=1 can be found. */ 56 - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 57 + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234); 57 58 58 59 key = 2; 59 60 /* Check that key=2 is not found. */ 60 - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 61 + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); 61 62 62 63 /* BPF_EXIST means update existing element. */ 63 - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 64 + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 && 64 65 /* key=2 is not there. */ 65 66 errno == ENOENT); 66 67 67 68 /* Insert key=2 element. 
*/ 68 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 69 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0); 69 70 70 71 /* key=1 and key=2 were inserted, check that key=0 cannot be 71 72 * inserted due to max_entries limit. 72 73 */ 73 74 key = 0; 74 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 75 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 75 76 errno == E2BIG); 76 77 77 78 /* Update existing element, though the map is full. */ 78 79 key = 1; 79 - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 80 + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); 80 81 key = 2; 81 - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 82 + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 82 83 key = 1; 83 - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 84 + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 84 85 85 86 /* Check that key = 0 doesn't exist. */ 86 87 key = 0; 87 - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 88 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); 88 89 89 90 /* Iterate over two elements. */ 90 - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 91 + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && 91 92 (next_key == 1 || next_key == 2)); 92 - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 93 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && 93 94 (next_key == 1 || next_key == 2)); 94 - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 95 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && 95 96 errno == ENOENT); 96 97 97 98 /* Delete both elements. 
*/ 98 99 key = 1; 99 - assert(bpf_map_delete(fd, &key) == 0); 100 + assert(bpf_map_delete_elem(fd, &key) == 0); 100 101 key = 2; 101 - assert(bpf_map_delete(fd, &key) == 0); 102 - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 102 + assert(bpf_map_delete_elem(fd, &key) == 0); 103 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); 103 104 104 105 key = 0; 105 106 /* Check that map is empty. */ 106 - assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 107 + assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && 107 108 errno == ENOENT); 108 109 109 110 close(fd); ··· 118 117 int expected_key_mask = 0; 119 118 int fd, i; 120 119 121 - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), 120 + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), 122 121 sizeof(value[0]), 2, map_flags); 123 122 if (fd < 0) { 124 123 printf("Failed to create hashmap '%s'!\n", strerror(errno)); ··· 131 130 key = 1; 132 131 /* Insert key=1 element. */ 133 132 assert(!(expected_key_mask & key)); 134 - assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0); 133 + assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0); 135 134 expected_key_mask |= key; 136 135 137 136 /* BPF_NOEXIST means add new element if it doesn't exist. */ 138 - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 137 + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 && 139 138 /* key=1 already exists. */ 140 139 errno == EEXIST); 141 140 142 141 /* -1 is an invalid flag. */ 143 - assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL); 142 + assert(bpf_map_update_elem(fd, &key, value, -1) == -1 && 143 + errno == EINVAL); 144 144 145 145 /* Check that key=1 can be found. Value could be 0 if the lookup 146 146 * was run from a different CPU. 
147 147 */ 148 148 value[0] = 1; 149 - assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100); 149 + assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100); 150 150 151 151 key = 2; 152 152 /* Check that key=2 is not found. */ 153 - assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT); 153 + assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT); 154 154 155 155 /* BPF_EXIST means update existing element. */ 156 - assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 && 156 + assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 && 157 157 /* key=2 is not there. */ 158 158 errno == ENOENT); 159 159 160 160 /* Insert key=2 element. */ 161 161 assert(!(expected_key_mask & key)); 162 - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0); 162 + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0); 163 163 expected_key_mask |= key; 164 164 165 165 /* key=1 and key=2 were inserted, check that key=0 cannot be 166 166 * inserted due to max_entries limit. 167 167 */ 168 168 key = 0; 169 - assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 169 + assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 && 170 170 errno == E2BIG); 171 171 172 172 /* Check that key = 0 doesn't exist. */ 173 - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 173 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); 174 174 175 175 /* Iterate over two elements. */ 176 - while (!bpf_map_next_key(fd, &key, &next_key)) { 176 + while (!bpf_map_get_next_key(fd, &key, &next_key)) { 177 177 assert((expected_key_mask & next_key) == next_key); 178 178 expected_key_mask &= ~next_key; 179 179 180 - assert(bpf_map_lookup(fd, &next_key, value) == 0); 180 + assert(bpf_map_lookup_elem(fd, &next_key, value) == 0); 181 181 182 182 for (i = 0; i < nr_cpus; i++) 183 183 assert(value[i] == i + 100); ··· 189 187 190 188 /* Update with BPF_EXIST. 
*/ 191 189 key = 1; 192 - assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0); 190 + assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0); 193 191 194 192 /* Delete both elements. */ 195 193 key = 1; 196 - assert(bpf_map_delete(fd, &key) == 0); 194 + assert(bpf_map_delete_elem(fd, &key) == 0); 197 195 key = 2; 198 - assert(bpf_map_delete(fd, &key) == 0); 199 - assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 196 + assert(bpf_map_delete_elem(fd, &key) == 0); 197 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); 200 198 201 199 key = 0; 202 200 /* Check that map is empty. */ 203 - assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 201 + assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && 204 202 errno == ENOENT); 205 203 206 204 close(fd); ··· 211 209 int key, next_key, fd; 212 210 long long value; 213 211 214 - fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 212 + fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 215 213 2, 0); 216 214 if (fd < 0) { 217 215 printf("Failed to create arraymap '%s'!\n", strerror(errno)); ··· 221 219 key = 1; 222 220 value = 1234; 223 221 /* Insert key=1 element. */ 224 - assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 222 + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 225 223 226 224 value = 0; 227 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 225 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 228 226 errno == EEXIST); 229 227 230 228 /* Check that key=1 can be found. */ 231 - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 229 + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234); 232 230 233 231 key = 0; 234 232 /* Check that key=0 is also found and zero initialized. 
*/ 235 - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 233 + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0); 236 234 237 235 /* key=0 and key=1 were inserted, check that key=2 cannot be inserted 238 236 * due to max_entries limit. 239 237 */ 240 238 key = 2; 241 - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 239 + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 && 242 240 errno == E2BIG); 243 241 244 242 /* Check that key = 2 doesn't exist. */ 245 - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 243 + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); 246 244 247 245 /* Iterate over two elements. */ 248 - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 246 + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && 249 247 next_key == 0); 250 - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 248 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && 251 249 next_key == 1); 252 - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 250 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && 253 251 errno == ENOENT); 254 252 255 253 /* Delete shouldn't succeed. */ 256 254 key = 1; 257 - assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 255 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL); 258 256 259 257 close(fd); 260 258 } ··· 265 263 int key, next_key, fd, i; 266 264 long values[nr_cpus]; 267 265 268 - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 266 + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 269 267 sizeof(values[0]), 2, 0); 270 268 if (fd < 0) { 271 269 printf("Failed to create arraymap '%s'!\n", strerror(errno)); ··· 277 275 278 276 key = 1; 279 277 /* Insert key=1 element. 
*/ 280 - assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 278 + assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); 281 279 282 280 values[0] = 0; 283 - assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 && 281 + assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 && 284 282 errno == EEXIST); 285 283 286 284 /* Check that key=1 can be found. */ 287 - assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100); 285 + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100); 288 286 289 287 key = 0; 290 288 /* Check that key=0 is also found and zero initialized. */ 291 - assert(bpf_map_lookup(fd, &key, values) == 0 && 289 + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && 292 290 values[0] == 0 && values[nr_cpus - 1] == 0); 293 291 294 292 /* Check that key=2 cannot be inserted due to max_entries limit. */ 295 293 key = 2; 296 - assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 && 294 + assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 && 297 295 errno == E2BIG); 298 296 299 297 /* Check that key = 2 doesn't exist. */ 300 - assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT); 298 + assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT); 301 299 302 300 /* Iterate over two elements. */ 303 - assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 301 + assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && 304 302 next_key == 0); 305 - assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 303 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && 306 304 next_key == 1); 307 - assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 305 + assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && 308 306 errno == ENOENT); 309 307 310 308 /* Delete shouldn't succeed. 
*/ 311 309 key = 1; 312 - assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 310 + assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL); 313 311 314 312 close(fd); 315 313 } ··· 321 319 long values[nr_cpus]; 322 320 int key, fd, i; 323 321 324 - fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 322 + fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 325 323 sizeof(values[0]), nr_keys, 0); 326 324 if (fd < 0) { 327 325 printf("Failed to create per-cpu arraymap '%s'!\n", ··· 333 331 values[i] = i + 10; 334 332 335 333 for (key = 0; key < nr_keys; key++) 336 - assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 334 + assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); 337 335 338 336 for (key = 0; key < nr_keys; key++) { 339 337 for (i = 0; i < nr_cpus; i++) 340 338 values[i] = 0; 341 339 342 - assert(bpf_map_lookup(fd, &key, values) == 0); 340 + assert(bpf_map_lookup_elem(fd, &key, values) == 0); 343 341 344 342 for (i = 0; i < nr_cpus; i++) 345 343 assert(values[i] == i + 10); ··· 359 357 } key; 360 358 int fd, i, value; 361 359 362 - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 360 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 363 361 MAP_SIZE, map_flags); 364 362 if (fd < 0) { 365 363 printf("Failed to create large map '%s'!\n", strerror(errno)); ··· 370 368 key = (struct bigkey) { .c = i }; 371 369 value = i; 372 370 373 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 371 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0); 374 372 } 375 373 376 374 key.c = -1; 377 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 375 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 378 376 errno == E2BIG); 379 377 380 378 /* Iterate through all elements. 
*/ 381 379 for (i = 0; i < MAP_SIZE; i++) 382 - assert(bpf_map_next_key(fd, &key, &key) == 0); 383 - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 380 + assert(bpf_map_get_next_key(fd, &key, &key) == 0); 381 + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); 384 382 385 383 key.c = 0; 386 - assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 384 + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0); 387 385 key.a = 1; 388 - assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 386 + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); 389 387 390 388 close(fd); 391 389 } ··· 439 437 key = value = i; 440 438 441 439 if (do_update) { 442 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 443 - assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 440 + assert(bpf_map_update_elem(fd, &key, &value, 441 + BPF_NOEXIST) == 0); 442 + assert(bpf_map_update_elem(fd, &key, &value, 443 + BPF_EXIST) == 0); 444 444 } else { 445 - assert(bpf_map_delete(fd, &key) == 0); 445 + assert(bpf_map_delete_elem(fd, &key) == 0); 446 446 } 447 447 } 448 448 } ··· 454 450 int i, fd, key = 0, value = 0; 455 451 int data[2]; 456 452 457 - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 453 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 458 454 MAP_SIZE, map_flags); 459 455 if (fd < 0) { 460 456 printf("Failed to create map for parallel test '%s'!\n", ··· 472 468 run_parallel(TASKS, do_work, data); 473 469 474 470 /* Check that key=0 is already there. */ 475 - assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 471 + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && 476 472 errno == EEXIST); 477 473 478 474 /* Check that all elements were inserted. 
*/ 479 475 key = -1; 480 476 for (i = 0; i < MAP_SIZE; i++) 481 - assert(bpf_map_next_key(fd, &key, &key) == 0); 482 - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 477 + assert(bpf_map_get_next_key(fd, &key, &key) == 0); 478 + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); 483 479 484 480 /* Another check for all elements */ 485 481 for (i = 0; i < MAP_SIZE; i++) { 486 482 key = MAP_SIZE - i - 1; 487 483 488 - assert(bpf_map_lookup(fd, &key, &value) == 0 && 484 + assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && 489 485 value == key); 490 486 } 491 487 ··· 495 491 496 492 /* Nothing should be left. */ 497 493 key = -1; 498 - assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 494 + assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); 499 495 } 500 496 501 497 static void run_all_tests(void)
+6 -5
tools/testing/selftests/bpf/test_tag.c
··· 1 + #include <stdint.h> 1 2 #include <stdio.h> 2 3 #include <stdlib.h> 3 4 #include <ctype.h> ··· 17 16 #include <linux/bpf.h> 18 17 #include <linux/if_alg.h> 19 18 20 - #include "../../../include/linux/filter.h" 19 + #include <bpf/bpf.h> 21 20 22 - #include "bpf_sys.h" 21 + #include "../../../include/linux/filter.h" 23 22 24 23 static struct bpf_insn prog[BPF_MAXINSNS]; 25 24 ··· 56 55 int fd_prog; 57 56 58 57 bpf_filler(insns, fd_map); 59 - fd_prog = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, prog, insns * 60 - sizeof(struct bpf_insn), "", NULL, 0); 58 + fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0, 59 + NULL, 0); 61 60 assert(fd_prog > 0); 62 61 if (fd_map > 0) 63 62 bpf_filler(insns, 0); ··· 188 187 int i, fd_map; 189 188 190 189 setrlimit(RLIMIT_MEMLOCK, &rinf); 191 - fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(int), 190 + fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), 192 191 sizeof(int), 1, BPF_F_NO_PREALLOC); 193 192 assert(fd_map > 0); 194 193
+72 -12
tools/testing/selftests/bpf/test_verifier.c
··· 8 8 * License as published by the Free Software Foundation. 9 9 */ 10 10 11 + #include <stdint.h> 11 12 #include <stdio.h> 13 + #include <stdlib.h> 12 14 #include <unistd.h> 13 15 #include <errno.h> 14 16 #include <string.h> ··· 18 16 #include <stdbool.h> 19 17 #include <sched.h> 20 18 19 + #include <sys/capability.h> 21 20 #include <sys/resource.h> 22 21 23 22 #include <linux/unistd.h> ··· 26 23 #include <linux/bpf_perf_event.h> 27 24 #include <linux/bpf.h> 28 25 29 - #include "../../../include/linux/filter.h" 26 + #include <bpf/bpf.h> 30 27 31 - #include "bpf_sys.h" 28 + #include "../../../include/linux/filter.h" 32 29 33 30 #ifndef ARRAY_SIZE 34 31 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ··· 4467 4464 { 4468 4465 int fd; 4469 4466 4470 - fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long), 4467 + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long), 4471 4468 size_value, max_elem, BPF_F_NO_PREALLOC); 4472 4469 if (fd < 0) 4473 4470 printf("Failed to create hash map '%s'!\n", strerror(errno)); ··· 4479 4476 { 4480 4477 int fd; 4481 4478 4482 - fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 4479 + fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 4483 4480 sizeof(int), 4, 0); 4484 4481 if (fd < 0) 4485 4482 printf("Failed to create prog array '%s'!\n", strerror(errno)); ··· 4537 4534 4538 4535 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 4539 4536 4540 - fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 4541 - prog, prog_len * sizeof(struct bpf_insn), 4542 - "GPL", bpf_vlog, sizeof(bpf_vlog)); 4537 + fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 4538 + prog, prog_len, "GPL", 0, bpf_vlog, 4539 + sizeof(bpf_vlog)); 4543 4540 4544 4541 expected_ret = unpriv && test->result_unpriv != UNDEF ? 
4545 4542 test->result_unpriv : test->result; ··· 4577 4574 goto close_fds; 4578 4575 } 4579 4576 4577 + static bool is_admin(void) 4578 + { 4579 + cap_t caps; 4580 + cap_flag_value_t sysadmin = CAP_CLEAR; 4581 + const cap_value_t cap_val = CAP_SYS_ADMIN; 4582 + 4583 + if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { 4584 + perror("cap_get_flag"); 4585 + return false; 4586 + } 4587 + caps = cap_get_proc(); 4588 + if (!caps) { 4589 + perror("cap_get_proc"); 4590 + return false; 4591 + } 4592 + if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin)) 4593 + perror("cap_get_flag"); 4594 + if (cap_free(caps)) 4595 + perror("cap_free"); 4596 + return (sysadmin == CAP_SET); 4597 + } 4598 + 4599 + static int set_admin(bool admin) 4600 + { 4601 + cap_t caps; 4602 + const cap_value_t cap_val = CAP_SYS_ADMIN; 4603 + int ret = -1; 4604 + 4605 + caps = cap_get_proc(); 4606 + if (!caps) { 4607 + perror("cap_get_proc"); 4608 + return -1; 4609 + } 4610 + if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val, 4611 + admin ? CAP_SET : CAP_CLEAR)) { 4612 + perror("cap_set_flag"); 4613 + goto out; 4614 + } 4615 + if (cap_set_proc(caps)) { 4616 + perror("cap_set_proc"); 4617 + goto out; 4618 + } 4619 + ret = 0; 4620 + out: 4621 + if (cap_free(caps)) 4622 + perror("cap_free"); 4623 + return ret; 4624 + } 4625 + 4580 4626 static int do_test(bool unpriv, unsigned int from, unsigned int to) 4581 4627 { 4582 4628 int i, passes = 0, errors = 0; ··· 4636 4584 /* Program types that are not supported by non-root we 4637 4585 * skip right away. 
4638 4586 */ 4639 - if (unpriv && test->prog_type) 4640 - continue; 4587 + if (!test->prog_type) { 4588 + if (!unpriv) 4589 + set_admin(false); 4590 + printf("#%d/u %s ", i, test->descr); 4591 + do_test_single(test, true, &passes, &errors); 4592 + if (!unpriv) 4593 + set_admin(true); 4594 + } 4641 4595 4642 - printf("#%d %s ", i, test->descr); 4643 - do_test_single(test, unpriv, &passes, &errors); 4596 + if (!unpriv) { 4597 + printf("#%d/p %s ", i, test->descr); 4598 + do_test_single(test, false, &passes, &errors); 4599 + } 4644 4600 } 4645 4601 4646 4602 printf("Summary: %d PASSED, %d FAILED\n", passes, errors); ··· 4660 4600 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 4661 4601 struct rlimit rlim = { 1 << 20, 1 << 20 }; 4662 4602 unsigned int from = 0, to = ARRAY_SIZE(tests); 4663 - bool unpriv = geteuid() != 0; 4603 + bool unpriv = !is_admin(); 4664 4604 4665 4605 if (argc == 3) { 4666 4606 unsigned int l = atoi(argv[argc - 2]);