/* Linux v4.16 BPF verifier alignment selftest (scrape header removed) */
/* Verifier tests for register pointer-alignment tracking.
 *
 * Each test loads a small BPF program through bpf_verify_program() and
 * compares selected lines of the verifier's log output against expected
 * register-state substrings.
 */
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_MATCHES	16

/* One expected substring of a numbered verifier-log line. */
struct bpf_reg_match {
	unsigned int line;	/* instruction index the log line refers to */
	const char *match;	/* substring expected somewhere in that line */
};

struct bpf_align_test {
	const char *descr;	/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];
	/* UNDEF (the zero-initialized default) is treated like ACCEPT by
	 * the runner: only REJECT expects the load to fail.
	 */
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv1"},
			{2, "R3_w=inv2"},
			{3, "R3_w=inv4"},
			{4, "R3_w=inv8"},
			{5, "R3_w=inv16"},
			{6, "R3_w=inv1"},
			{7, "R4_w=inv32"},
			{8, "R4_w=inv16"},
			{9, "R4_w=inv8"},
			{10, "R4_w=inv4"},
			{11, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv10"},
			{4, "R4_w=inv8"},
			{5, "R4_w=inv12"},
			{6, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv7"},
			{3, "R3_w=inv14"},
			{4, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */

/* Load skb->data into R2 and skb->data_end into R3. */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

/* Bounds-check 8 bytes of packet, then load one (unknown-valued) byte
 * from the start of the packet into DST_REG; exits early if the packet
 * is too short.
 */
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * it's total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
580 */ 581 {7, "R2=pkt(id=0,off=0,r=8,imm=0)"}, 582 {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, 583 /* Adding 14 makes R6 be (4n+2) */ 584 {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, 585 /* Subtracting from packet pointer overflows ubounds */ 586 {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"}, 587 /* New unknown value in R7 is (4n), >= 76 */ 588 {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, 589 /* Adding it to packet pointer gives nice bounds again */ 590 {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, 591 /* At the time the word size load is performed from R5, 592 * its total fixed offset is NET_IP_ALIGN + reg->off (0) 593 * which is 2. Then the variable offset is (4n+2), so 594 * the total offset is 4-byte aligned and meets the 595 * load's requirements. 596 */ 597 {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, 598 }, 599 }, 600}; 601 602static int probe_filter_length(const struct bpf_insn *fp) 603{ 604 int len; 605 606 for (len = MAX_INSNS - 1; len > 0; --len) 607 if (fp[len].code != 0 || fp[len].imm != 0) 608 break; 609 return len + 1; 610} 611 612static char bpf_vlog[32768]; 613 614static int do_test_single(struct bpf_align_test *test) 615{ 616 struct bpf_insn *prog = test->insns; 617 int prog_type = test->prog_type; 618 char bpf_vlog_copy[32768]; 619 const char *line_ptr; 620 int cur_line = -1; 621 int prog_len, i; 622 int fd_prog; 623 int ret; 624 625 prog_len = probe_filter_length(prog); 626 fd_prog = bpf_verify_program(prog_type ? 
: BPF_PROG_TYPE_SOCKET_FILTER, 627 prog, prog_len, 1, "GPL", 0, 628 bpf_vlog, sizeof(bpf_vlog), 2); 629 if (fd_prog < 0 && test->result != REJECT) { 630 printf("Failed to load program.\n"); 631 printf("%s", bpf_vlog); 632 ret = 1; 633 } else if (fd_prog >= 0 && test->result == REJECT) { 634 printf("Unexpected success to load!\n"); 635 printf("%s", bpf_vlog); 636 ret = 1; 637 close(fd_prog); 638 } else { 639 ret = 0; 640 /* We make a local copy so that we can strtok() it */ 641 strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy)); 642 line_ptr = strtok(bpf_vlog_copy, "\n"); 643 for (i = 0; i < MAX_MATCHES; i++) { 644 struct bpf_reg_match m = test->matches[i]; 645 646 if (!m.match) 647 break; 648 while (line_ptr) { 649 cur_line = -1; 650 sscanf(line_ptr, "%u: ", &cur_line); 651 if (cur_line == m.line) 652 break; 653 line_ptr = strtok(NULL, "\n"); 654 } 655 if (!line_ptr) { 656 printf("Failed to find line %u for match: %s\n", 657 m.line, m.match); 658 ret = 1; 659 printf("%s", bpf_vlog); 660 break; 661 } 662 if (!strstr(line_ptr, m.match)) { 663 printf("Failed to find match %u: %s\n", 664 m.line, m.match); 665 ret = 1; 666 printf("%s", bpf_vlog); 667 break; 668 } 669 } 670 if (fd_prog >= 0) 671 close(fd_prog); 672 } 673 return ret; 674} 675 676static int do_test(unsigned int from, unsigned int to) 677{ 678 int all_pass = 0; 679 int all_fail = 0; 680 unsigned int i; 681 682 for (i = from; i < to; i++) { 683 struct bpf_align_test *test = &tests[i]; 684 int fail; 685 686 printf("Test %3d: %s ... ", 687 i, test->descr); 688 fail = do_test_single(test); 689 if (fail) { 690 all_fail++; 691 printf("FAIL\n"); 692 } else { 693 all_pass++; 694 printf("PASS\n"); 695 } 696 } 697 printf("Results: %d pass %d fail\n", 698 all_pass, all_fail); 699 return all_fail ? 
EXIT_FAILURE : EXIT_SUCCESS; 700} 701 702int main(int argc, char **argv) 703{ 704 unsigned int from = 0, to = ARRAY_SIZE(tests); 705 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 706 707 setrlimit(RLIMIT_MEMLOCK, &rinf); 708 709 if (argc == 3) { 710 unsigned int l = atoi(argv[argc - 2]); 711 unsigned int u = atoi(argv[argc - 1]); 712 713 if (l < to && u < to) { 714 from = l; 715 to = u + 1; 716 } 717 } else if (argc == 2) { 718 unsigned int t = atoi(argv[argc - 1]); 719 720 if (t < to) { 721 from = t; 722 to = t + 1; 723 } 724 } 725 return do_test(from, to); 726}