// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include <bpf_util.h>
#include <test_maps.h>

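/* Fill the map with max_entries elements via one bpf_map_update_batch()
 * call: key i + 1 maps to value i + 2 (for per-CPU maps, CPU j's slot
 * holds i + 2 + j).
 */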
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
			     void *values, bool is_pcpu)
{
	typedef BPF_DECLARE_PERCPU(int, value);
	value *v = NULL;
	int i, j, err;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);

	if (is_pcpu)
		v = (value *)values;

	for (i = 0; i < max_entries; i++) {
		keys[i] = i + 1;
		if (is_pcpu)
			for (j = 0; j < bpf_num_possible_cpus(); j++)
				bpf_percpu(v[i], j) = i + 2 + j;
		else
			((int *)values)[i] = i + 2;
	}

	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
	CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}

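/* Check that every value returned by a batch lookup matches its key
 * (value == key + 1, plus the CPU index for per-CPU maps) and that no
 * entry of the batch output was skipped.
 */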
static void map_batch_verify(int *visited, __u32 max_entries,
			     int *keys, void *values, bool is_pcpu)
{
	typedef BPF_DECLARE_PERCPU(int, value);
	value *v = NULL;
	int i, j;

	if (is_pcpu)
		v = (value *)values;

	memset(visited, 0, max_entries * sizeof(*visited));
	for (i = 0; i < max_entries; i++) {
		if (is_pcpu) {
			for (j = 0; j < bpf_num_possible_cpus(); j++) {
				CHECK(keys[i] + 1 + j != bpf_percpu(v[i], j),
				      "key/value checking",
				      "error: i %d j %d key %d value %d\n",
				      i, j, keys[i], bpf_percpu(v[i], j));
			}
		} else {
			CHECK(keys[i] + 1 != ((int *)values)[i],
			      "key/value checking",
			      "error: i %d key %d value %d\n", i, keys[i],
			      ((int *)values)[i]);
		}

		visited[i] = 1;
	}
	for (i = 0; i < max_entries; i++) {
		CHECK(visited[i] != 1, "visited checking",
		      "error: keys array at index %d missing\n", i);
	}
}

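/* Core test: exercises bpf_map_lookup_batch(), bpf_map_delete_batch() and
 * bpf_map_lookup_and_delete_batch() on a hash map (per-CPU when is_pcpu is
 * true): an empty map, count == 0, a whole-map batch, and then batch sizes
 * from 1 to max_entries - 1.
 */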
void __test_map_lookup_and_delete_batch(bool is_pcpu)
{
	__u32 batch, count, total, total_success;
	typedef BPF_DECLARE_PERCPU(int, value);
	int map_fd, *keys, *visited, key;
	const __u32 max_entries = 10;
	value pcpu_values[max_entries];
	int err, step, value_size;
	bool nospace_err;
	void *values;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);

	map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH,
				"hash_map", sizeof(int), sizeof(int), max_entries, NULL);
	CHECK(map_fd == -1,
	      "bpf_map_create()", "error:%s\n", strerror(errno));

	value_size = is_pcpu ? sizeof(value) : sizeof(int);
	keys = malloc(max_entries * sizeof(int));
	if (is_pcpu)
		values = pcpu_values;
	else
		values = malloc(max_entries * sizeof(int));
	visited = malloc(max_entries * sizeof(int));
	CHECK(!keys || !values || !visited, "malloc()",
	      "error:%s\n", strerror(errno));

	/* test 1: lookup/delete an empty hash table, -ENOENT */
	count = max_entries;
	err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
					      values, &count, &opts);
	CHECK((err && errno != ENOENT), "empty map",
	      "error: %s\n", strerror(errno));

	/* populate the map */
	map_batch_update(map_fd, max_entries, keys, values, is_pcpu);

	/* test 2: lookup/delete with count = 0, success */
	count = 0;
	err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
					      values, &count, &opts);
	CHECK(err, "count = 0", "error: %s\n", strerror(errno));

	/* test 3: lookup/delete with count = max_entries, success */
	memset(keys, 0, max_entries * sizeof(*keys));
	memset(values, 0, max_entries * value_size);
	count = max_entries;
	err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
					      values, &count, &opts);
	CHECK((err && errno != ENOENT), "count = max_entries",
	      "error: %s\n", strerror(errno));
	CHECK(count != max_entries, "count = max_entries",
	      "count = %u, max_entries = %u\n", count, max_entries);
	map_batch_verify(visited, max_entries, keys, values, is_pcpu);

	/* bpf_map_get_next_key() should return -ENOENT for an empty map. */
	err = bpf_map_get_next_key(map_fd, NULL, &key);
	CHECK(!err, "bpf_map_get_next_key()", "error: %s\n", strerror(errno));

	/* test 4: lookup/delete in a loop with various steps. */
	total_success = 0;
	for (step = 1; step < max_entries; step++) {
		map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
		memset(keys, 0, max_entries * sizeof(*keys));
		memset(values, 0, max_entries * value_size);
		total = 0;
		/* iteratively look up elements, 'step' elements each time */
		count = step;
		nospace_err = false;
		while (true) {
			err = bpf_map_lookup_batch(map_fd,
						   total ? &batch : NULL,
						   &batch, keys + total,
						   values + total * value_size,
						   &count, &opts);
			/* It is possible that we are failing because the
			 * buffer is not big enough. In such cases, just exit
			 * and go on with larger steps. Note that a buffer
			 * sized for max_entries should always work.
			 */
			if (err && errno == ENOSPC) {
				nospace_err = true;
				break;
			}

			CHECK((err && errno != ENOENT), "lookup with steps",
			      "error: %s\n", strerror(errno));

			total += count;
			if (err)
				break;
		}
		if (nospace_err == true)
			continue;

		CHECK(total != max_entries, "lookup with steps",
		      "total = %u, max_entries = %u\n", total, max_entries);
		map_batch_verify(visited, max_entries, keys, values, is_pcpu);

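		/* now delete the looked-up keys in batches of at most 'step' */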
		total = 0;
		count = step;
		while (total < max_entries) {
			if (max_entries - total < step)
				count = max_entries - total;
			err = bpf_map_delete_batch(map_fd,
						   keys + total,
						   &count, &opts);
			CHECK((err && errno != ENOENT), "delete batch",
			      "error: %s\n", strerror(errno));
			total += count;
			if (err)
				break;
		}
		CHECK(total != max_entries, "delete with steps",
		      "total = %u, max_entries = %u\n", total, max_entries);

		/* check that the map is now empty: errno == ENOENT */
		err = bpf_map_get_next_key(map_fd, NULL, &key);
		CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
		      "error: %s\n", strerror(errno));

		/* repopulate the map, then iteratively lookup-and-delete
		 * elements, 'step' elements each time
		 */
		map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
		memset(keys, 0, max_entries * sizeof(*keys));
		memset(values, 0, max_entries * value_size);
		total = 0;
		count = step;
		nospace_err = false;
		while (true) {
			err = bpf_map_lookup_and_delete_batch(map_fd,
							      total ? &batch : NULL,
							      &batch, keys + total,
							      values + total * value_size,
							      &count, &opts);
			/* It is possible that we are failing because the
			 * buffer is not big enough. In such cases, just exit
			 * and go on with larger steps. Note that a buffer
			 * sized for max_entries should always work.
			 */
			if (err && errno == ENOSPC) {
				nospace_err = true;
				break;
			}

			CHECK((err && errno != ENOENT), "lookup with steps",
			      "error: %s\n", strerror(errno));

			total += count;
			if (err)
				break;
		}

		if (nospace_err == true)
			continue;

		CHECK(total != max_entries, "lookup/delete with steps",
		      "total = %u, max_entries = %u\n", total, max_entries);

		map_batch_verify(visited, max_entries, keys, values, is_pcpu);
		err = bpf_map_get_next_key(map_fd, NULL, &key);
		CHECK(!err, "bpf_map_get_next_key()", "error: %s\n",
		      strerror(errno));

		total_success++;
	}

	CHECK(total_success == 0, "check total_success",
	      "unexpected failure\n");
	free(keys);
	free(visited);
	if (!is_pcpu)
		free(values);
}

void htab_map_batch_ops(void)
{
	__test_map_lookup_and_delete_batch(false);
	printf("test_%s:PASS\n", __func__);
}

void htab_percpu_map_batch_ops(void)
{
	__test_map_lookup_and_delete_batch(true);
	printf("test_%s:PASS\n", __func__);
}

void test_htab_map_batch_ops(void)
{
	htab_map_batch_ops();
	htab_percpu_map_batch_ops();
}