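/* Verifier alignment-tracking tests (kernel v4.15).
 *
 * Each program below is loaded through bpf_verify_program() with
 * strict alignment checking requested and a verbose verifier log;
 * the log is then scanned for the expected register states listed
 * in each test's .matches[] table.
 */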
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_MATCHES	16

struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv2"},
			{2, "R3=inv4"},
			{3, "R3=inv8"},
			{4, "R3=inv16"},
			{5, "R3=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv1"},
			{2, "R3=inv2"},
			{3, "R3=inv4"},
			{4, "R3=inv8"},
			{5, "R3=inv16"},
			{6, "R3=inv1"},
			{7, "R4=inv32"},
			{8, "R4=inv16"},
			{9, "R4=inv8"},
			{10, "R4=inv4"},
			{11, "R4=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv4"},
			{2, "R3=inv8"},
			{3, "R3=inv10"},
			{4, "R4=inv8"},
			{5, "R4=inv12"},
			{6, "R4=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv7"},
			{2, "R3=inv7"},
			{3, "R3=inv14"},
			{4, "R3=inv56"},
		},
	},

	/* Tests using unknown values */
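
/* PREP_PKT_POINTERS loads skb->data into R2 and skb->data_end into R3;
 * LOAD_UNKNOWN additionally checks that at least 8 bytes of packet are
 * present and reads one byte into DST_REG, so the tests below start
 * from a value the verifier only knows to lie in [0, 255].
 */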
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header. */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* ptr & const => unknown & const */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
			/* ptr << const => unknown << const */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
			/* R5 bitwise operator &= on pointer prohibited */
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};
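
/* insns[] arrays are zero-padded out to MAX_INSNS; find the real
 * program length by scanning backwards for the last non-zero
 * instruction.
 */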
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];
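
/* Load one test program with strict alignment checking requested and
 * a verbose verifier log, check the accept/reject outcome against
 * test->result, then verify that each expected register state in
 * test->matches[] appears on the expected line of the log.
 */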
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, 1, "GPL", 0,
				     bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];

			if (!m.match)
				break;
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			if (!strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

static int do_test(unsigned int from, unsigned int to)
{
	int all_pass = 0;
	int all_fail = 0;
	unsigned int i;

	for (i = from; i < to; i++) {
		struct bpf_align_test *test = &tests[i];
		int fail;

		printf("Test %3d: %s ... ",
		       i, test->descr);
		fail = do_test_single(test);
		if (fail) {
			all_fail++;
			printf("FAIL\n");
		} else {
			all_pass++;
			printf("PASS\n");
		}
	}
	printf("Results: %d pass %d fail\n",
	       all_pass, all_fail);
	return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}
	return do_test(from, to);
}