at v4.17 722 lines 24 kB view raw
1#include <asm/types.h> 2#include <linux/types.h> 3#include <stdint.h> 4#include <stdio.h> 5#include <stdlib.h> 6#include <unistd.h> 7#include <errno.h> 8#include <string.h> 9#include <stddef.h> 10#include <stdbool.h> 11 12#include <linux/unistd.h> 13#include <linux/filter.h> 14#include <linux/bpf_perf_event.h> 15#include <linux/bpf.h> 16 17#include <bpf/bpf.h> 18 19#include "../../../include/linux/filter.h" 20#include "bpf_rlimit.h" 21 22#ifndef ARRAY_SIZE 23# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 24#endif 25 26#define MAX_INSNS 512 27#define MAX_MATCHES 16 28 29struct bpf_reg_match { 30 unsigned int line; 31 const char *match; 32}; 33 34struct bpf_align_test { 35 const char *descr; 36 struct bpf_insn insns[MAX_INSNS]; 37 enum { 38 UNDEF, 39 ACCEPT, 40 REJECT 41 } result; 42 enum bpf_prog_type prog_type; 43 /* Matches must be in order of increasing line */ 44 struct bpf_reg_match matches[MAX_MATCHES]; 45}; 46 47static struct bpf_align_test tests[] = { 48 /* Four tests of known constants. These aren't staggeringly 49 * interesting since we track exact values now. 50 */ 51 { 52 .descr = "mov", 53 .insns = { 54 BPF_MOV64_IMM(BPF_REG_3, 2), 55 BPF_MOV64_IMM(BPF_REG_3, 4), 56 BPF_MOV64_IMM(BPF_REG_3, 8), 57 BPF_MOV64_IMM(BPF_REG_3, 16), 58 BPF_MOV64_IMM(BPF_REG_3, 32), 59 BPF_MOV64_IMM(BPF_REG_0, 0), 60 BPF_EXIT_INSN(), 61 }, 62 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 63 .matches = { 64 {1, "R1=ctx(id=0,off=0,imm=0)"}, 65 {1, "R10=fp0"}, 66 {1, "R3_w=inv2"}, 67 {2, "R3_w=inv4"}, 68 {3, "R3_w=inv8"}, 69 {4, "R3_w=inv16"}, 70 {5, "R3_w=inv32"}, 71 }, 72 }, 73 { 74 .descr = "shift", 75 .insns = { 76 BPF_MOV64_IMM(BPF_REG_3, 1), 77 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 78 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 79 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 80 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 81 BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4), 82 BPF_MOV64_IMM(BPF_REG_4, 32), 83 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 84 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 85 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 86 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 87 BPF_MOV64_IMM(BPF_REG_0, 0), 88 BPF_EXIT_INSN(), 89 }, 90 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 91 .matches = { 92 {1, "R1=ctx(id=0,off=0,imm=0)"}, 93 {1, "R10=fp0"}, 94 {1, "R3_w=inv1"}, 95 {2, "R3_w=inv2"}, 96 {3, "R3_w=inv4"}, 97 {4, "R3_w=inv8"}, 98 {5, "R3_w=inv16"}, 99 {6, "R3_w=inv1"}, 100 {7, "R4_w=inv32"}, 101 {8, "R4_w=inv16"}, 102 {9, "R4_w=inv8"}, 103 {10, "R4_w=inv4"}, 104 {11, "R4_w=inv2"}, 105 }, 106 }, 107 { 108 .descr = "addsub", 109 .insns = { 110 BPF_MOV64_IMM(BPF_REG_3, 4), 111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4), 112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2), 113 BPF_MOV64_IMM(BPF_REG_4, 8), 114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), 116 BPF_MOV64_IMM(BPF_REG_0, 0), 117 BPF_EXIT_INSN(), 118 }, 119 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 120 .matches = { 121 {1, "R1=ctx(id=0,off=0,imm=0)"}, 122 {1, "R10=fp0"}, 123 {1, "R3_w=inv4"}, 124 {2, "R3_w=inv8"}, 125 {3, "R3_w=inv10"}, 126 {4, "R4_w=inv8"}, 127 {5, "R4_w=inv12"}, 128 {6, "R4_w=inv14"}, 129 }, 130 }, 131 { 132 .descr = "mul", 133 .insns = { 134 BPF_MOV64_IMM(BPF_REG_3, 7), 135 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1), 136 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2), 137 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4), 138 BPF_MOV64_IMM(BPF_REG_0, 0), 139 BPF_EXIT_INSN(), 140 }, 141 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 142 .matches = { 143 {1, "R1=ctx(id=0,off=0,imm=0)"}, 144 {1, "R10=fp0"}, 145 {1, "R3_w=inv7"}, 146 {2, "R3_w=inv7"}, 147 {3, "R3_w=inv14"}, 148 {4, "R3_w=inv56"}, 149 }, 150 }, 151 152 /* Tests using unknown values */ 153#define PREP_PKT_POINTERS \ 154 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ 155 offsetof(struct __sk_buff, data)), \ 156 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ 157 offsetof(struct __sk_buff, data_end)) 158 159#define LOAD_UNKNOWN(DST_REG) \ 160 PREP_PKT_POINTERS, \ 161 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \ 162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \ 163 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \ 164 BPF_EXIT_INSN(), \ 165 BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0) 166 167 { 168 .descr = "unknown shift", 169 .insns = { 170 LOAD_UNKNOWN(BPF_REG_3), 171 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 172 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 173 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 174 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), 175 LOAD_UNKNOWN(BPF_REG_4), 176 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5), 177 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 178 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 179 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 180 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), 181 BPF_MOV64_IMM(BPF_REG_0, 0), 182 BPF_EXIT_INSN(), 183 }, 184 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 185 .matches = { 186 {7, "R0=pkt(id=0,off=8,r=8,imm=0)"}, 187 {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 188 {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, 189 {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 190 {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, 191 {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, 192 {18, "R3=pkt_end(id=0,off=0,imm=0)"}, 193 {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 194 {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, 195 {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, 196 {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, 197 {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 198 {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, 199 }, 200 }, 201 { 202 .descr = "unknown mul", 203 .insns = { 204 LOAD_UNKNOWN(BPF_REG_3), 205 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 206 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1), 207 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 208 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), 209 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 210 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4), 211 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), 212 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8), 213 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), 214 BPF_MOV64_IMM(BPF_REG_0, 0), 215 BPF_EXIT_INSN(), 216 }, 217 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 218 .matches = { 219 {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 220 {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 221 {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 222 {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 223 {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, 224 {12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 225 {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 226 {14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 227 {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, 228 {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, 229 }, 230 }, 231 { 232 .descr = "packet const offset", 233 .insns = { 234 PREP_PKT_POINTERS, 235 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 236 237 BPF_MOV64_IMM(BPF_REG_0, 0), 238 239 /* Skip over ethernet header. */ 240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 243 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 244 BPF_EXIT_INSN(), 245 246 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0), 247 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1), 248 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2), 249 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3), 250 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0), 251 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2), 252 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 253 254 BPF_MOV64_IMM(BPF_REG_0, 0), 255 BPF_EXIT_INSN(), 256 }, 257 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 258 .matches = { 259 {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, 260 {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, 261 {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, 262 {10, "R2=pkt(id=0,off=0,r=18,imm=0)"}, 263 {10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, 264 {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, 265 {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, 266 {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, 267 }, 268 }, 269 { 270 .descr = "packet variable offset", 271 .insns = { 272 LOAD_UNKNOWN(BPF_REG_6), 273 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 274 275 /* First, add a constant to the R5 packet pointer, 276 * then a variable with a known alignment. 277 */ 278 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 280 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 281 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 283 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 284 BPF_EXIT_INSN(), 285 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 286 287 /* Now, test in the other direction. Adding first 288 * the variable offset to R5, then the constant. 289 */ 290 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 291 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 293 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 295 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 296 BPF_EXIT_INSN(), 297 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 298 299 /* Test multiple accumulations of unknown values 300 * into a packet pointer. 301 */ 302 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 304 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), 306 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 307 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 309 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 310 BPF_EXIT_INSN(), 311 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), 312 313 BPF_MOV64_IMM(BPF_REG_0, 0), 314 BPF_EXIT_INSN(), 315 }, 316 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 317 .matches = { 318 /* Calculated offset in R6 has unknown value, but known 319 * alignment of 4. 320 */ 321 {8, "R2=pkt(id=0,off=0,r=8,imm=0)"}, 322 {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 323 /* Offset is added to packet pointer R5, resulting in 324 * known fixed offset, and variable offset from R6. 325 */ 326 {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 327 /* At the time the word size load is performed from R5, 328 * it's total offset is NET_IP_ALIGN + reg->off (0) + 329 * reg->aux_off (14) which is 16. Then the variable 330 * offset is considered using reg->aux_off_align which 331 * is 4 and meets the load's requirements. 332 */ 333 {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, 334 {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, 335 /* Variable offset is added to R5 packet pointer, 336 * resulting in auxiliary alignment of 4. 337 */ 338 {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 339 /* Constant offset is added to R5, resulting in 340 * reg->off of 14. 341 */ 342 {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 343 /* At the time the word size load is performed from R5, 344 * its total fixed offset is NET_IP_ALIGN + reg->off 345 * (14) which is 16. Then the variable offset is 4-byte 346 * aligned, so the total offset is 4-byte aligned and 347 * meets the load's requirements. 348 */ 349 {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, 350 {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, 351 /* Constant offset is added to R5 packet pointer, 352 * resulting in reg->off value of 14. 353 */ 354 {26, "R5_w=pkt(id=0,off=14,r=8"}, 355 /* Variable offset is added to R5, resulting in a 356 * variable offset of (4n). 357 */ 358 {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 359 /* Constant is added to R5 again, setting reg->off to 18. */ 360 {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 361 /* And once more we add a variable; resulting var_off 362 * is still (4n), fixed offset is not changed. 363 * Also, we create a new reg->id. 364 */ 365 {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"}, 366 /* At the time the word size load is performed from R5, 367 * its total fixed offset is NET_IP_ALIGN + reg->off (18) 368 * which is 20. Then the variable offset is (4n), so 369 * the total offset is 4-byte aligned and meets the 370 * load's requirements. 371 */ 372 {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"}, 373 {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"}, 374 }, 375 }, 376 { 377 .descr = "packet variable offset 2", 378 .insns = { 379 /* Create an unknown offset, (4n+2)-aligned */ 380 LOAD_UNKNOWN(BPF_REG_6), 381 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), 383 /* Add it to the packet pointer */ 384 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 385 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 386 /* Check bounds and perform a read */ 387 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 389 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 390 BPF_EXIT_INSN(), 391 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), 392 /* Make a (4n) offset from the value we just read */ 393 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff), 394 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 395 /* Add it to the packet pointer */ 396 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 397 /* Check bounds and perform a read */ 398 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 400 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 401 BPF_EXIT_INSN(), 402 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), 403 BPF_MOV64_IMM(BPF_REG_0, 0), 404 BPF_EXIT_INSN(), 405 }, 406 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 407 .matches = { 408 /* Calculated offset in R6 has unknown value, but known 409 * alignment of 4. 410 */ 411 {8, "R2=pkt(id=0,off=0,r=8,imm=0)"}, 412 {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 413 /* Adding 14 makes R6 be (4n+2) */ 414 {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, 415 /* Packet pointer has (4n+2) offset */ 416 {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, 417 {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, 418 /* At the time the word size load is performed from R5, 419 * its total fixed offset is NET_IP_ALIGN + reg->off (0) 420 * which is 2. Then the variable offset is (4n+2), so 421 * the total offset is 4-byte aligned and meets the 422 * load's requirements. 423 */ 424 {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, 425 /* Newly read value in R6 was shifted left by 2, so has 426 * known alignment of 4. 427 */ 428 {18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 429 /* Added (4n) to packet pointer's (4n+2) var_off, giving 430 * another (4n+2). 431 */ 432 {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, 433 {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, 434 /* At the time the word size load is performed from R5, 435 * its total fixed offset is NET_IP_ALIGN + reg->off (0) 436 * which is 2. Then the variable offset is (4n+2), so 437 * the total offset is 4-byte aligned and meets the 438 * load's requirements. 439 */ 440 {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, 441 }, 442 }, 443 { 444 .descr = "dubious pointer arithmetic", 445 .insns = { 446 PREP_PKT_POINTERS, 447 BPF_MOV64_IMM(BPF_REG_0, 0), 448 /* (ptr - ptr) << 2 */ 449 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 450 BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2), 451 BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2), 452 /* We have a (4n) value. Let's make a packet offset 453 * out of it. First add 14, to make it a (4n+2) 454 */ 455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 456 /* Then make sure it's nonnegative */ 457 BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1), 458 BPF_EXIT_INSN(), 459 /* Add it to packet pointer */ 460 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 461 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 462 /* Check bounds and perform a read */ 463 BPF_MOV64_REG(BPF_REG_4, BPF_REG_6), 464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 465 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 466 BPF_EXIT_INSN(), 467 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0), 468 BPF_EXIT_INSN(), 469 }, 470 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 471 .result = REJECT, 472 .matches = { 473 {4, "R5_w=pkt_end(id=0,off=0,imm=0)"}, 474 /* (ptr - ptr) << 2 == unknown, (4n) */ 475 {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"}, 476 /* (4n) + 14 == (4n+2). We blow our bounds, because 477 * the add could overflow. 478 */ 479 {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"}, 480 /* Checked s>=0 */ 481 {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, 482 /* packet pointer + nonnegative (4n+2) */ 483 {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, 484 {13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, 485 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. 486 * We checked the bounds, but it might have been able 487 * to overflow if the packet pointer started in the 488 * upper half of the address space. 489 * So we did not get a 'range' on R6, and the access 490 * attempt will fail. 491 */ 492 {15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, 493 } 494 }, 495 { 496 .descr = "variable subtraction", 497 .insns = { 498 /* Create an unknown offset, (4n+2)-aligned */ 499 LOAD_UNKNOWN(BPF_REG_6), 500 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), 501 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), 503 /* Create another unknown, (4n)-aligned, and subtract 504 * it from the first one 505 */ 506 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), 507 BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7), 508 /* Bounds-check the result */ 509 BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1), 510 BPF_EXIT_INSN(), 511 /* Add it to the packet pointer */ 512 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 513 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), 514 /* Check bounds and perform a read */ 515 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 517 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 518 BPF_EXIT_INSN(), 519 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), 520 BPF_EXIT_INSN(), 521 }, 522 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 523 .matches = { 524 /* Calculated offset in R6 has unknown value, but known 525 * alignment of 4. 526 */ 527 {7, "R2=pkt(id=0,off=0,r=8,imm=0)"}, 528 {9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 529 /* Adding 14 makes R6 be (4n+2) */ 530 {10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, 531 /* New unknown value in R7 is (4n) */ 532 {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, 533 /* Subtracting it from R6 blows our unsigned bounds */ 534 {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"}, 535 /* Checked s>= 0 */ 536 {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, 537 /* At the time the word size load is performed from R5, 538 * its total fixed offset is NET_IP_ALIGN + reg->off (0) 539 * which is 2. Then the variable offset is (4n+2), so 540 * the total offset is 4-byte aligned and meets the 541 * load's requirements. 542 */ 543 {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, 544 }, 545 }, 546 { 547 .descr = "pointer variable subtraction", 548 .insns = { 549 /* Create an unknown offset, (4n+2)-aligned and bounded 550 * to [14,74] 551 */ 552 LOAD_UNKNOWN(BPF_REG_6), 553 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), 554 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf), 555 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), 556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), 557 /* Subtract it from the packet pointer */ 558 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 559 BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6), 560 /* Create another unknown, (4n)-aligned and >= 74. 561 * That in fact means >= 76, since 74 % 4 == 2 562 */ 563 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), 564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76), 565 /* Add it to the packet pointer */ 566 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7), 567 /* Check bounds and perform a read */ 568 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), 569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 570 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), 571 BPF_EXIT_INSN(), 572 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), 573 BPF_EXIT_INSN(), 574 }, 575 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 576 .matches = { 577 /* Calculated offset in R6 has unknown value, but known 578 * alignment of 4. 579 */ 580 {7, "R2=pkt(id=0,off=0,r=8,imm=0)"}, 581 {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, 582 /* Adding 14 makes R6 be (4n+2) */ 583 {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, 584 /* Subtracting from packet pointer overflows ubounds */ 585 {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"}, 586 /* New unknown value in R7 is (4n), >= 76 */ 587 {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, 588 /* Adding it to packet pointer gives nice bounds again */ 589 {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, 590 /* At the time the word size load is performed from R5, 591 * its total fixed offset is NET_IP_ALIGN + reg->off (0) 592 * which is 2. Then the variable offset is (4n+2), so 593 * the total offset is 4-byte aligned and meets the 594 * load's requirements. 595 */ 596 {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, 597 }, 598 }, 599}; 600 601static int probe_filter_length(const struct bpf_insn *fp) 602{ 603 int len; 604 605 for (len = MAX_INSNS - 1; len > 0; --len) 606 if (fp[len].code != 0 || fp[len].imm != 0) 607 break; 608 return len + 1; 609} 610 611static char bpf_vlog[32768]; 612 613static int do_test_single(struct bpf_align_test *test) 614{ 615 struct bpf_insn *prog = test->insns; 616 int prog_type = test->prog_type; 617 char bpf_vlog_copy[32768]; 618 const char *line_ptr; 619 int cur_line = -1; 620 int prog_len, i; 621 int fd_prog; 622 int ret; 623 624 prog_len = probe_filter_length(prog); 625 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 626 prog, prog_len, 1, "GPL", 0, 627 bpf_vlog, sizeof(bpf_vlog), 2); 628 if (fd_prog < 0 && test->result != REJECT) { 629 printf("Failed to load program.\n"); 630 printf("%s", bpf_vlog); 631 ret = 1; 632 } else if (fd_prog >= 0 && test->result == REJECT) { 633 printf("Unexpected success to load!\n"); 634 printf("%s", bpf_vlog); 635 ret = 1; 636 close(fd_prog); 637 } else { 638 ret = 0; 639 /* We make a local copy so that we can strtok() it */ 640 strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy)); 641 line_ptr = strtok(bpf_vlog_copy, "\n"); 642 for (i = 0; i < MAX_MATCHES; i++) { 643 struct bpf_reg_match m = test->matches[i]; 644 645 if (!m.match) 646 break; 647 while (line_ptr) { 648 cur_line = -1; 649 sscanf(line_ptr, "%u: ", &cur_line); 650 if (cur_line == m.line) 651 break; 652 line_ptr = strtok(NULL, "\n"); 653 } 654 if (!line_ptr) { 655 printf("Failed to find line %u for match: %s\n", 656 m.line, m.match); 657 ret = 1; 658 printf("%s", bpf_vlog); 659 break; 660 } 661 if (!strstr(line_ptr, m.match)) { 662 printf("Failed to find match %u: %s\n", 663 m.line, m.match); 664 ret = 1; 665 printf("%s", bpf_vlog); 666 break; 667 } 668 } 669 if (fd_prog >= 0) 670 close(fd_prog); 671 } 672 return ret; 673} 674 675static int do_test(unsigned int from, unsigned int to) 676{ 677 int all_pass = 0; 678 int all_fail = 0; 679 unsigned int i; 680 681 for (i = from; i < to; i++) { 682 struct bpf_align_test *test = &tests[i]; 683 int fail; 684 685 printf("Test %3d: %s ... ", 686 i, test->descr); 687 fail = do_test_single(test); 688 if (fail) { 689 all_fail++; 690 printf("FAIL\n"); 691 } else { 692 all_pass++; 693 printf("PASS\n"); 694 } 695 } 696 printf("Results: %d pass %d fail\n", 697 all_pass, all_fail); 698 return all_fail ? EXIT_FAILURE : EXIT_SUCCESS; 699} 700 701int main(int argc, char **argv) 702{ 703 unsigned int from = 0, to = ARRAY_SIZE(tests); 704 705 if (argc == 3) { 706 unsigned int l = atoi(argv[argc - 2]); 707 unsigned int u = atoi(argv[argc - 1]); 708 709 if (l < to && u < to) { 710 from = l; 711 to = u + 1; 712 } 713 } else if (argc == 2) { 714 unsigned int t = atoi(argv[argc - 1]); 715 716 if (t < to) { 717 from = t; 718 to = t + 1; 719 } 720 } 721 return do_test(from, to); 722}