Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'filter-next'

Daniel Borkmann says:

====================
BPF updates

These were still in my queue. Please see individual patches for
details.

I have rebased these on top of current net-next with Andrew's
gcc union fixup [1] applied to avoid dealing with an unnecessary
merge conflict.

[1] http://patchwork.ozlabs.org/patch/351577/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+457 -268
+14
Documentation/networking/filter.txt
··· 833 833 descends all possible paths. It simulates execution of every insn and observes 834 834 the state change of registers and stack. 835 835 836 + Testing 837 + ------- 838 + 839 + Next to the BPF toolchain, the kernel also ships a test module that contains 840 + various test cases for classic and internal BPF that can be executed against 841 + the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and 842 + enabled via Kconfig: 843 + 844 + CONFIG_TEST_BPF=m 845 + 846 + After the module has been built and installed, the test suite can be executed 847 + via insmod or modprobe against 'test_bpf' module. Results of the test cases 848 + including timings in nsec can be found in the kernel log (dmesg). 849 + 836 850 Misc 837 851 ---- 838 852
+2 -2
drivers/isdn/i4l/isdn_ppp.c
··· 634 634 #ifdef CONFIG_IPPP_FILTER 635 635 case PPPIOCSPASS: 636 636 { 637 - struct sock_fprog fprog; 637 + struct sock_fprog_kern fprog; 638 638 struct sock_filter *code; 639 639 int err, len = get_filter(argp, &code); 640 640 ··· 653 653 } 654 654 case PPPIOCSACTIVE: 655 655 { 656 - struct sock_fprog fprog; 656 + struct sock_fprog_kern fprog; 657 657 struct sock_filter *code; 658 658 int err, len = get_filter(argp, &code); 659 659
+2 -2
drivers/net/ppp/ppp_generic.c
··· 757 757 758 758 err = get_filter(argp, &code); 759 759 if (err >= 0) { 760 - struct sock_fprog fprog = { 760 + struct sock_fprog_kern fprog = { 761 761 .len = err, 762 762 .filter = code, 763 763 }; ··· 778 778 779 779 err = get_filter(argp, &code); 780 780 if (err >= 0) { 781 - struct sock_fprog fprog = { 781 + struct sock_fprog_kern fprog = { 782 782 .len = err, 783 783 .filter = code, 784 784 };
+5 -5
drivers/net/team/team_mode_loadbalance.c
··· 49 49 struct lb_priv_ex { 50 50 struct team *team; 51 51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; 52 - struct sock_fprog *orig_fprog; 52 + struct sock_fprog_kern *orig_fprog; 53 53 struct { 54 54 unsigned int refresh_interval; /* in tenths of second */ 55 55 struct delayed_work refresh_dw; ··· 241 241 return 0; 242 242 } 243 243 244 - static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, 244 + static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len, 245 245 const void *data) 246 246 { 247 - struct sock_fprog *fprog; 247 + struct sock_fprog_kern *fprog; 248 248 struct sock_filter *filter = (struct sock_filter *) data; 249 249 250 250 if (data_len % sizeof(struct sock_filter)) ··· 262 262 return 0; 263 263 } 264 264 265 - static void __fprog_destroy(struct sock_fprog *fprog) 265 + static void __fprog_destroy(struct sock_fprog_kern *fprog) 266 266 { 267 267 kfree(fprog->filter); 268 268 kfree(fprog); ··· 273 273 struct lb_priv *lb_priv = get_lb_priv(team); 274 274 struct sk_filter *fp = NULL; 275 275 struct sk_filter *orig_fp; 276 - struct sock_fprog *fprog = NULL; 276 + struct sock_fprog_kern *fprog = NULL; 277 277 int err; 278 278 279 279 if (ctx->data.bin_val.len) {
+1 -4
include/linux/filter.h
··· 37 37 #define BPF_CALL 0x80 /* function call */ 38 38 #define BPF_EXIT 0x90 /* function return */ 39 39 40 - /* Placeholder/dummy for 0 */ 41 - #define BPF_0 0 42 - 43 40 /* Register numbers */ 44 41 enum { 45 42 BPF_REG_0 = 0, ··· 188 191 struct sock_filter_int *new_prog, int *new_len); 189 192 190 193 int sk_unattached_filter_create(struct sk_filter **pfp, 191 - struct sock_fprog *fprog); 194 + struct sock_fprog_kern *fprog); 192 195 void sk_unattached_filter_destroy(struct sk_filter *fp); 193 196 194 197 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+327 -155
lib/test_bpf.c
··· 22 22 #include <linux/netdevice.h> 23 23 #include <linux/if_vlan.h> 24 24 25 + /* General test specific settings */ 25 26 #define MAX_SUBTESTS 3 27 + #define MAX_TESTRUNS 10000 26 28 #define MAX_DATA 128 27 29 #define MAX_INSNS 512 28 30 #define MAX_K 0xffffFFFF 29 31 30 - /* define few constants used to init test 'skb' */ 32 + /* Few constants used to init test 'skb' */ 31 33 #define SKB_TYPE 3 32 34 #define SKB_MARK 0x1234aaaa 33 35 #define SKB_HASH 0x1234aaab ··· 38 36 #define SKB_DEV_IFINDEX 577 39 37 #define SKB_DEV_TYPE 588 40 38 41 - /* redefine REGs to make tests less verbose */ 42 - #define R0 BPF_REG_0 43 - #define R1 BPF_REG_1 44 - #define R2 BPF_REG_2 45 - #define R3 BPF_REG_3 46 - #define R4 BPF_REG_4 47 - #define R5 BPF_REG_5 48 - #define R6 BPF_REG_6 49 - #define R7 BPF_REG_7 50 - #define R8 BPF_REG_8 51 - #define R9 BPF_REG_9 52 - #define R10 BPF_REG_10 39 + /* Redefine REGs to make tests less verbose */ 40 + #define R0 BPF_REG_0 41 + #define R1 BPF_REG_1 42 + #define R2 BPF_REG_2 43 + #define R3 BPF_REG_3 44 + #define R4 BPF_REG_4 45 + #define R5 BPF_REG_5 46 + #define R6 BPF_REG_6 47 + #define R7 BPF_REG_7 48 + #define R8 BPF_REG_8 49 + #define R9 BPF_REG_9 50 + #define R10 BPF_REG_10 51 + 52 + /* Flags that can be passed to test cases */ 53 + #define FLAG_NO_DATA BIT(0) 54 + #define FLAG_EXPECTED_FAIL BIT(1) 55 + 56 + enum { 57 + CLASSIC = BIT(6), /* Old BPF instructions only. */ 58 + INTERNAL = BIT(7), /* Extended instruction set. 
*/ 59 + }; 60 + 61 + #define TEST_TYPE_MASK (CLASSIC | INTERNAL) 53 62 54 63 struct bpf_test { 55 64 const char *descr; ··· 68 55 struct sock_filter insns[MAX_INSNS]; 69 56 struct sock_filter_int insns_int[MAX_INSNS]; 70 57 } u; 71 - enum { 72 - NO_DATA, 73 - EXPECTED_FAIL, 74 - SKB, 75 - SKB_INT 76 - } data_type; 58 + __u8 aux; 77 59 __u8 data[MAX_DATA]; 78 60 struct { 79 61 int data_size; ··· 92 84 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1), 93 85 BPF_STMT(BPF_RET | BPF_A, 0) 94 86 }, 95 - SKB, 87 + CLASSIC, 96 88 { 10, 20, 30, 40, 50 }, 97 89 { { 2, 10 }, { 3, 20 }, { 4, 30 } }, 98 90 }, ··· 104 96 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), 105 97 BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */ 106 98 }, 107 - SKB, 99 + CLASSIC, 108 100 { 10, 20, 30, 40, 50 }, 109 101 { { 1, 2 }, { 3, 6 }, { 4, 8 } }, 110 102 }, ··· 119 111 BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3), 120 112 BPF_STMT(BPF_RET | BPF_A, 0) 121 113 }, 122 - 0, 114 + CLASSIC | FLAG_NO_DATA, 123 115 { }, 124 116 { { 0, 0xfffffffd } } 125 117 }, ··· 137 129 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), 138 130 BPF_STMT(BPF_RET | BPF_A, 0) 139 131 }, 140 - 0, 132 + CLASSIC | FLAG_NO_DATA, 141 133 { }, 142 134 { { 0, 0x40000001 } } 143 135 }, ··· 153 145 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), 154 146 BPF_STMT(BPF_RET | BPF_A, 0) 155 147 }, 156 - 0, 148 + CLASSIC | FLAG_NO_DATA, 157 149 { }, 158 150 { { 0, 0x800000ff }, { 1, 0x800000ff } }, 159 151 }, ··· 164 156 BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K), 165 157 BPF_STMT(BPF_RET | BPF_K, 1) 166 158 }, 167 - SKB, 159 + CLASSIC, 168 160 { }, 169 161 { { 1, 0 }, { 10, 0 }, { 60, 0 } }, 170 162 }, ··· 174 166 BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000), 175 167 BPF_STMT(BPF_RET | BPF_K, 1) 176 168 }, 177 - SKB, 169 + CLASSIC, 178 170 { }, 179 171 { { 1, 0 }, { 10, 0 }, { 60, 0 } }, 180 172 }, ··· 187 179 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), 188 180 BPF_STMT(BPF_RET | BPF_A, 0) 189 181 }, 190 - SKB, 182 + CLASSIC, 191 183 { 1, 2, 3 }, 192 184 { { 1, 0 }, { 2, 3 } }, 193 185 
}, ··· 201 193 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), 202 194 BPF_STMT(BPF_RET | BPF_A, 0) 203 195 }, 204 - SKB, 196 + CLASSIC, 205 197 { 1, 2, 3, 0xff }, 206 198 { { 1, 1 }, { 3, 3 }, { 4, 0xff } }, 207 199 }, ··· 214 206 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), 215 207 BPF_STMT(BPF_RET | BPF_A, 0) 216 208 }, 217 - SKB, 209 + CLASSIC, 218 210 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, 219 211 { { 15, 0 }, { 16, 3 } }, 220 212 }, ··· 228 220 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), 229 221 BPF_STMT(BPF_RET | BPF_A, 0) 230 222 }, 231 - SKB, 223 + CLASSIC, 232 224 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, 233 225 { { 14, 0 }, { 15, 1 }, { 17, 3 } }, 234 226 }, ··· 249 241 BPF_STMT(BPF_RET | BPF_K, 1), 250 242 BPF_STMT(BPF_RET | BPF_A, 0) 251 243 }, 252 - SKB, 244 + CLASSIC, 253 245 { }, 254 246 { { 1, 3 }, { 10, 3 } }, 255 247 }, ··· 260 252 SKF_AD_OFF + SKF_AD_MARK), 261 253 BPF_STMT(BPF_RET | BPF_A, 0) 262 254 }, 263 - SKB, 255 + CLASSIC, 264 256 { }, 265 257 { { 1, SKB_MARK}, { 10, SKB_MARK} }, 266 258 }, ··· 271 263 SKF_AD_OFF + SKF_AD_RXHASH), 272 264 BPF_STMT(BPF_RET | BPF_A, 0) 273 265 }, 274 - SKB, 266 + CLASSIC, 275 267 { }, 276 268 { { 1, SKB_HASH}, { 10, SKB_HASH} }, 277 269 }, ··· 282 274 SKF_AD_OFF + SKF_AD_QUEUE), 283 275 BPF_STMT(BPF_RET | BPF_A, 0) 284 276 }, 285 - SKB, 277 + CLASSIC, 286 278 { }, 287 279 { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } }, 288 280 }, ··· 301 293 BPF_STMT(BPF_MISC | BPF_TXA, 0), 302 294 BPF_STMT(BPF_RET | BPF_A, 0) 303 295 }, 304 - SKB, 296 + CLASSIC, 305 297 { 10, 20, 30 }, 306 298 { { 10, ETH_P_IP }, { 100, ETH_P_IP } }, 307 299 }, ··· 312 304 SKF_AD_OFF + SKF_AD_VLAN_TAG), 313 305 BPF_STMT(BPF_RET | BPF_A, 0) 314 306 }, 315 - SKB, 307 + CLASSIC, 316 308 { }, 317 309 { 318 310 { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }, ··· 326 318 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT), 327 319 BPF_STMT(BPF_RET | BPF_A, 0) 328 320 }, 329 - SKB, 321 + CLASSIC, 330 322 { }, 331 323 { 332 324 { 1, !!(SKB_VLAN_TCI & 
VLAN_TAG_PRESENT) }, ··· 340 332 SKF_AD_OFF + SKF_AD_IFINDEX), 341 333 BPF_STMT(BPF_RET | BPF_A, 0) 342 334 }, 343 - SKB, 335 + CLASSIC, 344 336 { }, 345 337 { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } }, 346 338 }, ··· 351 343 SKF_AD_OFF + SKF_AD_HATYPE), 352 344 BPF_STMT(BPF_RET | BPF_A, 0) 353 345 }, 354 - SKB, 346 + CLASSIC, 355 347 { }, 356 348 { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } }, 357 349 }, ··· 366 358 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), 367 359 BPF_STMT(BPF_RET | BPF_A, 0) 368 360 }, 369 - SKB, 361 + CLASSIC, 370 362 { }, 371 363 { { 1, 0 }, { 10, 0 } }, 372 364 }, ··· 380 372 SKF_AD_OFF + SKF_AD_NLATTR), 381 373 BPF_STMT(BPF_RET | BPF_A, 0) 382 374 }, 383 - SKB, 375 + CLASSIC, 384 376 { 0xff, 4, 0, 2, 0, 4, 0, 3, 0 }, 385 377 { { 4, 0 }, { 20, 5 } }, 386 378 }, ··· 414 406 SKF_AD_OFF + SKF_AD_NLATTR_NEST), 415 407 BPF_STMT(BPF_RET | BPF_A, 0) 416 408 }, 417 - SKB, 409 + CLASSIC, 418 410 { 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 }, 419 411 { { 4, 0 }, { 20, 9 } }, 420 412 }, ··· 433 425 SKF_AD_OFF + SKF_AD_PAY_OFFSET), 434 426 BPF_STMT(BPF_RET | BPF_A, 0) 435 427 }, 436 - SKB, 428 + CLASSIC, 437 429 /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800), 438 430 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request, 439 431 * id 9737, seq 1, length 64 ··· 454 446 SKF_AD_OFF + SKF_AD_ALU_XOR_X), 455 447 BPF_STMT(BPF_RET | BPF_A, 0) 456 448 }, 457 - SKB, 449 + CLASSIC, 458 450 { }, 459 451 { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } }, 460 452 }, ··· 476 468 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), 477 469 BPF_STMT(BPF_RET | BPF_A, 0) 478 470 }, 479 - SKB, 471 + CLASSIC, 480 472 { }, 481 473 { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } } 482 474 }, ··· 489 481 BPF_STMT(BPF_RET | BPF_K, 1), 490 482 BPF_STMT(BPF_RET | BPF_K, MAX_K) 491 483 }, 492 - SKB, 484 + CLASSIC, 493 485 { 3, 3, 3, 3, 3 }, 494 486 { { 1, 0 }, { 3, 1 }, { 4, MAX_K } }, 495 487 }, ··· 502 494 BPF_STMT(BPF_RET | BPF_K, 1), 503 495 BPF_STMT(BPF_RET | 
BPF_K, MAX_K) 504 496 }, 505 - SKB, 497 + CLASSIC, 506 498 { 4, 4, 4, 3, 3 }, 507 499 { { 2, 0 }, { 3, 1 }, { 4, MAX_K } }, 508 500 }, ··· 521 513 BPF_STMT(BPF_RET | BPF_K, 40), 522 514 BPF_STMT(BPF_RET | BPF_K, MAX_K) 523 515 }, 524 - SKB, 516 + CLASSIC, 525 517 { 1, 2, 3, 4, 5 }, 526 518 { { 1, 20 }, { 3, 40 }, { 5, MAX_K } }, 527 519 }, ··· 553 545 BPF_STMT(BPF_RET | BPF_K, 30), 554 546 BPF_STMT(BPF_RET | BPF_K, MAX_K) 555 547 }, 556 - SKB, 548 + CLASSIC, 557 549 { 0, 0xAA, 0x55, 1 }, 558 550 { { 4, 10 }, { 5, 20 }, { 6, MAX_K } }, 559 551 }, ··· 585 577 { 0x06, 0, 0, 0x0000ffff }, 586 578 { 0x06, 0, 0, 0x00000000 }, 587 579 }, 588 - SKB, 580 + CLASSIC, 589 581 /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800) 590 582 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.], 591 583 * seq 1305692979:1305693027, ack 3650467037, win 65535, ··· 643 635 { 0x06, 0, 0, 0x0000ffff }, 644 636 { 0x06, 0, 0, 0x00000000 }, 645 637 }, 646 - SKB, 638 + CLASSIC, 647 639 { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, 648 640 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76, 649 641 0x08, 0x00, ··· 662 654 BPF_STMT(BPF_MISC | BPF_TXA, 0), 663 655 BPF_STMT(BPF_RET | BPF_A, 0) 664 656 }, 665 - SKB, 666 - {}, 657 + CLASSIC, 658 + { }, 667 659 { {1, 0}, {2, 0} }, 668 660 }, 669 661 { ··· 678 670 BPF_ALU64_REG(BPF_MOV, R0, R1), 679 671 BPF_EXIT_INSN(), 680 672 }, 681 - SKB_INT, 673 + INTERNAL, 682 674 { }, 683 675 { { 0, 0xfffffffd } } 684 676 }, ··· 694 686 BPF_ALU64_IMM(BPF_MOV, R0, 1), 695 687 BPF_EXIT_INSN(), 696 688 }, 697 - SKB_INT, 689 + INTERNAL, 698 690 { }, 699 691 { { 0, 1 } } 700 692 }, ··· 711 703 BPF_ALU32_IMM(BPF_MOV, R0, 1), 712 704 BPF_EXIT_INSN(), 713 705 }, 714 - SKB_INT, 706 + INTERNAL, 715 707 { }, 716 708 { { 0, 1 } } 717 709 }, ··· 728 720 BPF_ALU32_IMM(BPF_MOV, R0, 1), 729 721 BPF_EXIT_INSN(), 730 722 }, 731 - SKB_INT, 723 + INTERNAL, 732 724 { }, 733 725 { { 0, 1 } } 734 726 }, ··· 890 882 BPF_ALU64_REG(BPF_MOV, R0, R9), 891 883 BPF_EXIT_INSN(), 892 884 }, 893 - 
SKB_INT, 885 + INTERNAL, 894 886 { }, 895 887 { { 0, 2957380 } } 896 888 }, ··· 1036 1028 BPF_ALU32_REG(BPF_MOV, R0, R9), 1037 1029 BPF_EXIT_INSN(), 1038 1030 }, 1039 - SKB_INT, 1031 + INTERNAL, 1040 1032 { }, 1041 1033 { { 0, 2957380 } } 1042 1034 }, ··· 1169 1161 BPF_ALU64_REG(BPF_SUB, R0, R9), 1170 1162 BPF_EXIT_INSN(), 1171 1163 }, 1172 - SKB_INT, 1164 + INTERNAL, 1173 1165 { }, 1174 1166 { { 0, 11 } } 1175 1167 }, ··· 1235 1227 BPF_ALU64_IMM(BPF_MOV, R0, 1), 1236 1228 BPF_EXIT_INSN(), 1237 1229 }, 1238 - SKB_INT, 1230 + INTERNAL, 1239 1231 { }, 1240 1232 { { 0, 1 } } 1241 1233 }, ··· 1297 1289 BPF_ALU64_REG(BPF_MOV, R0, R2), 1298 1290 BPF_EXIT_INSN(), 1299 1291 }, 1300 - SKB_INT, 1292 + INTERNAL, 1301 1293 { }, 1302 1294 { { 0, 0x35d97ef2 } } 1303 1295 }, ··· 1317 1309 BPF_ALU64_IMM(BPF_MOV, R0, -1), 1318 1310 BPF_EXIT_INSN(), 1319 1311 }, 1320 - SKB_INT, 1312 + INTERNAL, 1321 1313 { }, 1322 1314 { { 0, -1 } } 1323 1315 }, ··· 1334 1326 BPF_LD_IND(BPF_B, R8, -70), 1335 1327 BPF_EXIT_INSN(), 1336 1328 }, 1337 - SKB_INT, 1329 + INTERNAL, 1338 1330 { 10, 20, 30, 40, 50 }, 1339 1331 { { 4, 0 }, { 5, 10 } } 1340 1332 }, ··· 1347 1339 BPF_ALU32_REG(BPF_DIV, R0, R7), 1348 1340 BPF_EXIT_INSN(), 1349 1341 }, 1350 - SKB_INT, 1342 + INTERNAL, 1351 1343 { 10, 20, 30, 40, 50 }, 1352 1344 { { 3, 0 }, { 4, 0 } } 1353 1345 }, ··· 1356 1348 .u.insns = { 1357 1349 BPF_STMT(BPF_LD | BPF_IMM, 1), 1358 1350 }, 1359 - EXPECTED_FAIL, 1351 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1360 1352 { }, 1361 1353 { } 1362 1354 }, ··· 1366 1358 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0), 1367 1359 BPF_STMT(BPF_RET | BPF_K, 0) 1368 1360 }, 1369 - EXPECTED_FAIL, 1361 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1370 1362 { }, 1371 1363 { } 1372 1364 }, ··· 1377 1369 BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0), 1378 1370 BPF_STMT(BPF_RET | BPF_K, 0) 1379 1371 }, 1380 - EXPECTED_FAIL, 1372 + CLASSIC | FLAG_EXPECTED_FAIL, 1381 1373 { }, 1382 1374 { } 1383 1375 }, ··· 1387 1379 BPF_STMT(BPF_STX, 16), 
1388 1380 BPF_STMT(BPF_RET | BPF_K, 0) 1389 1381 }, 1390 - EXPECTED_FAIL, 1382 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1391 1383 { }, 1392 1384 { } 1393 1385 }, 1386 + { 1387 + "JUMPS + HOLES", 1388 + .u.insns = { 1389 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1390 + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15), 1391 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1392 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1393 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1394 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1395 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1396 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1397 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1398 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1399 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1400 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1401 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1402 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1403 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1404 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4), 1405 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1406 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2), 1407 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1408 + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15), 1409 + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), 1410 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1411 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1412 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1413 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1414 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1415 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1416 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1417 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1418 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1419 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1420 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1421 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1422 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1423 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3), 1424 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2), 1425 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1426 + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 
15), 1427 + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), 1428 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1429 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1430 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1431 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1432 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1433 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1434 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1435 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1436 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1437 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1438 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1439 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1440 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1441 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3), 1442 + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2), 1443 + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), 1444 + BPF_STMT(BPF_RET | BPF_A, 0), 1445 + BPF_STMT(BPF_RET | BPF_A, 0), 1446 + }, 1447 + CLASSIC, 1448 + { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8, 0x90, 0xe2, 1449 + 0xba, 0x0a, 0x56, 0xb4, 0x08, 0x00, 0x45, 0x00, 1450 + 0x00, 0x28, 0x00, 0x00, 0x20, 0x00, 0x40, 0x11, 1451 + 0x00, 0x00, 0xc0, 0xa8, 0x33, 0x01, 0xc0, 0xa8, 1452 + 0x33, 0x02, 0xbb, 0xb6, 0xa9, 0xfa, 0x00, 0x14, 1453 + 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 1454 + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 1455 + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 1456 + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 1457 + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 1458 + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc }, 1459 + { { 88, 0x001b } } 1460 + }, 1461 + { 1462 + "check: RET X", 1463 + .u.insns = { 1464 + BPF_STMT(BPF_RET | BPF_X, 0), 1465 + }, 1466 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1467 + { }, 1468 + { }, 1469 + }, 1470 + { 1471 + "check: LDX + RET X", 1472 + .u.insns = { 1473 + BPF_STMT(BPF_LDX | BPF_IMM, 42), 1474 + BPF_STMT(BPF_RET | BPF_X, 0), 1475 + }, 1476 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1477 + { }, 1478 + { }, 1479 + }, 1394 1480 }; 1395 1481 1396 - 
static int get_length(struct sock_filter *fp) 1397 - { 1398 - int len = 0; 1482 + static struct net_device dev; 1399 1483 1400 - while (fp->code != 0 || fp->k != 0) { 1401 - fp++; 1402 - len++; 1403 - } 1404 - 1405 - return len; 1406 - } 1407 - 1408 - struct net_device dev; 1409 - struct sk_buff *populate_skb(char *buf, int size) 1484 + static struct sk_buff *populate_skb(char *buf, int size) 1410 1485 { 1411 1486 struct sk_buff *skb; 1412 1487 ··· 1501 1410 return NULL; 1502 1411 1503 1412 memcpy(__skb_put(skb, size), buf, size); 1413 + 1414 + /* Initialize a fake skb with test pattern. */ 1504 1415 skb_reset_mac_header(skb); 1505 1416 skb->protocol = htons(ETH_P_IP); 1506 1417 skb->pkt_type = SKB_TYPE; ··· 1518 1425 return skb; 1519 1426 } 1520 1427 1521 - static int run_one(struct sk_filter *fp, struct bpf_test *t) 1428 + static void *generate_test_data(struct bpf_test *test, int sub) 1522 1429 { 1523 - u64 start, finish, res, cnt = 100000; 1524 - int err_cnt = 0, err, i, j; 1525 - u32 ret = 0; 1526 - void *data; 1430 + if (test->aux & FLAG_NO_DATA) 1431 + return NULL; 1527 1432 1528 - for (i = 0; i < MAX_SUBTESTS; i++) { 1529 - if (t->test[i].data_size == 0 && 1530 - t->test[i].result == 0) 1531 - break; 1532 - if (t->data_type == SKB || 1533 - t->data_type == SKB_INT) { 1534 - data = populate_skb(t->data, t->test[i].data_size); 1535 - if (!data) 1536 - return -ENOMEM; 1537 - } else { 1538 - data = NULL; 1433 + /* Test case expects an skb, so populate one. Various 1434 + * subtests generate skbs of different sizes based on 1435 + * the same data. 
1436 + */ 1437 + return populate_skb(test->data, test->test[sub].data_size); 1438 + } 1439 + 1440 + static void release_test_data(const struct bpf_test *test, void *data) 1441 + { 1442 + if (test->aux & FLAG_NO_DATA) 1443 + return; 1444 + 1445 + kfree_skb(data); 1446 + } 1447 + 1448 + static int probe_filter_length(struct sock_filter *fp) 1449 + { 1450 + int len = 0; 1451 + 1452 + while (fp->code != 0 || fp->k != 0) { 1453 + fp++; 1454 + len++; 1455 + } 1456 + 1457 + return len; 1458 + } 1459 + 1460 + static struct sk_filter *generate_filter(int which, int *err) 1461 + { 1462 + struct sk_filter *fp; 1463 + struct sock_fprog_kern fprog; 1464 + unsigned int flen = probe_filter_length(tests[which].u.insns); 1465 + __u8 test_type = tests[which].aux & TEST_TYPE_MASK; 1466 + 1467 + switch (test_type) { 1468 + case CLASSIC: 1469 + fprog.filter = tests[which].u.insns; 1470 + fprog.len = flen; 1471 + 1472 + *err = sk_unattached_filter_create(&fp, &fprog); 1473 + if (tests[which].aux & FLAG_EXPECTED_FAIL) { 1474 + if (*err == -EINVAL) { 1475 + pr_cont("PASS\n"); 1476 + /* Verifier rejected filter as expected. */ 1477 + *err = 0; 1478 + return NULL; 1479 + } else { 1480 + pr_cont("UNEXPECTED_PASS\n"); 1481 + /* Verifier didn't reject the test that's 1482 + * bad enough, just return! 1483 + */ 1484 + *err = -EINVAL; 1485 + return NULL; 1486 + } 1487 + } 1488 + /* We don't expect to fail. 
*/ 1489 + if (*err) { 1490 + pr_cont("FAIL to attach err=%d len=%d\n", 1491 + *err, fprog.len); 1492 + return NULL; 1493 + } 1494 + break; 1495 + 1496 + case INTERNAL: 1497 + fp = kzalloc(sk_filter_size(flen), GFP_KERNEL); 1498 + if (fp == NULL) { 1499 + pr_cont("UNEXPECTED_FAIL no memory left\n"); 1500 + *err = -ENOMEM; 1501 + return NULL; 1539 1502 } 1540 1503 1541 - start = ktime_to_us(ktime_get()); 1542 - for (j = 0; j < cnt; j++) 1543 - ret = SK_RUN_FILTER(fp, data); 1544 - finish = ktime_to_us(ktime_get()); 1504 + fp->len = flen; 1505 + memcpy(fp->insnsi, tests[which].u.insns_int, 1506 + fp->len * sizeof(struct sock_filter_int)); 1545 1507 1546 - res = (finish - start) * 1000; 1547 - do_div(res, cnt); 1508 + sk_filter_select_runtime(fp); 1509 + break; 1510 + } 1548 1511 1549 - err = ret != t->test[i].result; 1550 - if (!err) 1551 - pr_cont("%lld ", res); 1512 + *err = 0; 1513 + return fp; 1514 + } 1552 1515 1553 - if (t->data_type == SKB || t->data_type == SKB_INT) 1554 - kfree_skb(data); 1516 + static void release_filter(struct sk_filter *fp, int which) 1517 + { 1518 + __u8 test_type = tests[which].aux & TEST_TYPE_MASK; 1555 1519 1556 - if (err) { 1557 - pr_cont("ret %d != %d ", ret, t->test[i].result); 1520 + switch (test_type) { 1521 + case CLASSIC: 1522 + sk_unattached_filter_destroy(fp); 1523 + break; 1524 + case INTERNAL: 1525 + sk_filter_free(fp); 1526 + break; 1527 + } 1528 + } 1529 + 1530 + static int __run_one(const struct sk_filter *fp, const void *data, 1531 + int runs, u64 *duration) 1532 + { 1533 + u64 start, finish; 1534 + int ret, i; 1535 + 1536 + start = ktime_to_us(ktime_get()); 1537 + 1538 + for (i = 0; i < runs; i++) 1539 + ret = SK_RUN_FILTER(fp, data); 1540 + 1541 + finish = ktime_to_us(ktime_get()); 1542 + 1543 + *duration = (finish - start) * 1000ULL; 1544 + do_div(*duration, runs); 1545 + 1546 + return ret; 1547 + } 1548 + 1549 + static int run_one(const struct sk_filter *fp, struct bpf_test *test) 1550 + { 1551 + int err_cnt = 0, i, 
runs = MAX_TESTRUNS; 1552 + 1553 + for (i = 0; i < MAX_SUBTESTS; i++) { 1554 + void *data; 1555 + u64 duration; 1556 + u32 ret; 1557 + 1558 + if (test->test[i].data_size == 0 && 1559 + test->test[i].result == 0) 1560 + break; 1561 + 1562 + data = generate_test_data(test, i); 1563 + ret = __run_one(fp, data, runs, &duration); 1564 + release_test_data(test, data); 1565 + 1566 + if (ret == test->test[i].result) { 1567 + pr_cont("%lld ", duration); 1568 + } else { 1569 + pr_cont("ret %d != %d ", ret, 1570 + test->test[i].result); 1558 1571 err_cnt++; 1559 1572 } 1560 1573 } ··· 1670 1471 1671 1472 static __init int test_bpf(void) 1672 1473 { 1673 - struct sk_filter *fp, *fp_ext = NULL; 1674 - struct sock_fprog fprog; 1675 - int err, i, err_cnt = 0; 1474 + int i, err_cnt = 0, pass_cnt = 0; 1676 1475 1677 1476 for (i = 0; i < ARRAY_SIZE(tests); i++) { 1477 + struct sk_filter *fp; 1478 + int err; 1479 + 1678 1480 pr_info("#%d %s ", i, tests[i].descr); 1679 1481 1680 - fprog.filter = tests[i].u.insns; 1681 - fprog.len = get_length(fprog.filter); 1482 + fp = generate_filter(i, &err); 1483 + if (fp == NULL) { 1484 + if (err == 0) { 1485 + pass_cnt++; 1486 + continue; 1487 + } 1682 1488 1683 - if (tests[i].data_type == SKB_INT) { 1684 - fp_ext = kzalloc(4096, GFP_KERNEL); 1685 - if (!fp_ext) 1686 - return -ENOMEM; 1687 - fp = fp_ext; 1688 - memcpy(fp_ext->insns, tests[i].u.insns_int, 1689 - fprog.len * 8); 1690 - fp->len = fprog.len; 1691 - sk_filter_select_runtime(fp); 1692 - } else { 1693 - err = sk_unattached_filter_create(&fp, &fprog); 1694 - if (tests[i].data_type == EXPECTED_FAIL) { 1695 - if (err == -EINVAL) { 1696 - pr_cont("PASS\n"); 1697 - continue; 1698 - } else { 1699 - pr_cont("UNEXPECTED_PASS\n"); 1700 - /* verifier didn't reject the test 1701 - * that's bad enough, just return 1702 - */ 1703 - return -EINVAL; 1704 - } 1705 - } 1706 - if (err) { 1707 - pr_cont("FAIL to attach err=%d len=%d\n", 1708 - err, fprog.len); 1709 - return err; 1710 - } 1489 + return 
err; 1711 1490 } 1712 - 1713 1491 err = run_one(fp, &tests[i]); 1714 - 1715 - if (tests[i].data_type != SKB_INT) 1716 - sk_unattached_filter_destroy(fp); 1717 - else 1718 - sk_filter_free(fp); 1492 + release_filter(fp, i); 1719 1493 1720 1494 if (err) { 1721 - pr_cont("FAIL %d\n", err); 1495 + pr_cont("FAIL (%d times)\n", err); 1722 1496 err_cnt++; 1723 1497 } else { 1724 1498 pr_cont("PASS\n"); 1499 + pass_cnt++; 1725 1500 } 1726 1501 } 1727 1502 1728 - if (err_cnt) 1729 - return -EINVAL; 1730 - else 1731 - return 0; 1503 + pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); 1504 + return err_cnt ? -EINVAL : 0; 1732 1505 } 1733 1506 1734 1507 static int __init test_bpf_init(void) ··· 1714 1543 1715 1544 module_init(test_bpf_init); 1716 1545 module_exit(test_bpf_exit); 1546 + 1717 1547 MODULE_LICENSE("GPL");
+100 -95
net/core/filter.c
··· 160 160 static const void *jumptable[256] = { 161 161 [0 ... 255] = &&default_label, 162 162 /* Now overwrite non-defaults ... */ 163 - #define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C 164 - DL(ALU, ADD, X), 165 - DL(ALU, ADD, K), 166 - DL(ALU, SUB, X), 167 - DL(ALU, SUB, K), 168 - DL(ALU, AND, X), 169 - DL(ALU, AND, K), 170 - DL(ALU, OR, X), 171 - DL(ALU, OR, K), 172 - DL(ALU, LSH, X), 173 - DL(ALU, LSH, K), 174 - DL(ALU, RSH, X), 175 - DL(ALU, RSH, K), 176 - DL(ALU, XOR, X), 177 - DL(ALU, XOR, K), 178 - DL(ALU, MUL, X), 179 - DL(ALU, MUL, K), 180 - DL(ALU, MOV, X), 181 - DL(ALU, MOV, K), 182 - DL(ALU, DIV, X), 183 - DL(ALU, DIV, K), 184 - DL(ALU, MOD, X), 185 - DL(ALU, MOD, K), 186 - DL(ALU, NEG, 0), 187 - DL(ALU, END, TO_BE), 188 - DL(ALU, END, TO_LE), 189 - DL(ALU64, ADD, X), 190 - DL(ALU64, ADD, K), 191 - DL(ALU64, SUB, X), 192 - DL(ALU64, SUB, K), 193 - DL(ALU64, AND, X), 194 - DL(ALU64, AND, K), 195 - DL(ALU64, OR, X), 196 - DL(ALU64, OR, K), 197 - DL(ALU64, LSH, X), 198 - DL(ALU64, LSH, K), 199 - DL(ALU64, RSH, X), 200 - DL(ALU64, RSH, K), 201 - DL(ALU64, XOR, X), 202 - DL(ALU64, XOR, K), 203 - DL(ALU64, MUL, X), 204 - DL(ALU64, MUL, K), 205 - DL(ALU64, MOV, X), 206 - DL(ALU64, MOV, K), 207 - DL(ALU64, ARSH, X), 208 - DL(ALU64, ARSH, K), 209 - DL(ALU64, DIV, X), 210 - DL(ALU64, DIV, K), 211 - DL(ALU64, MOD, X), 212 - DL(ALU64, MOD, K), 213 - DL(ALU64, NEG, 0), 214 - DL(JMP, CALL, 0), 215 - DL(JMP, JA, 0), 216 - DL(JMP, JEQ, X), 217 - DL(JMP, JEQ, K), 218 - DL(JMP, JNE, X), 219 - DL(JMP, JNE, K), 220 - DL(JMP, JGT, X), 221 - DL(JMP, JGT, K), 222 - DL(JMP, JGE, X), 223 - DL(JMP, JGE, K), 224 - DL(JMP, JSGT, X), 225 - DL(JMP, JSGT, K), 226 - DL(JMP, JSGE, X), 227 - DL(JMP, JSGE, K), 228 - DL(JMP, JSET, X), 229 - DL(JMP, JSET, K), 230 - DL(JMP, EXIT, 0), 231 - DL(STX, MEM, B), 232 - DL(STX, MEM, H), 233 - DL(STX, MEM, W), 234 - DL(STX, MEM, DW), 235 - DL(STX, XADD, W), 236 - DL(STX, XADD, DW), 237 - DL(ST, MEM, B), 238 - DL(ST, MEM, H), 239 - 
DL(ST, MEM, W), 240 - DL(ST, MEM, DW), 241 - DL(LDX, MEM, B), 242 - DL(LDX, MEM, H), 243 - DL(LDX, MEM, W), 244 - DL(LDX, MEM, DW), 245 - DL(LD, ABS, W), 246 - DL(LD, ABS, H), 247 - DL(LD, ABS, B), 248 - DL(LD, IND, W), 249 - DL(LD, IND, H), 250 - DL(LD, IND, B), 251 - #undef DL 163 + /* 32 bit ALU operations */ 164 + [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X, 165 + [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K, 166 + [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X, 167 + [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K, 168 + [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X, 169 + [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K, 170 + [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X, 171 + [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K, 172 + [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X, 173 + [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K, 174 + [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X, 175 + [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K, 176 + [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X, 177 + [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K, 178 + [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X, 179 + [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K, 180 + [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X, 181 + [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K, 182 + [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X, 183 + [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K, 184 + [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X, 185 + [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K, 186 + [BPF_ALU | BPF_NEG] = &&ALU_NEG, 187 + [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE, 188 + [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE, 189 + /* 64 bit ALU operations */ 190 + [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X, 191 + [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K, 192 + [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X, 193 + [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K, 194 + [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X, 195 + [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K, 196 + [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X, 197 + [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K, 198 
+ [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X, 199 + [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K, 200 + [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X, 201 + [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K, 202 + [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X, 203 + [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K, 204 + [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X, 205 + [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K, 206 + [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X, 207 + [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K, 208 + [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X, 209 + [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K, 210 + [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X, 211 + [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K, 212 + [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X, 213 + [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K, 214 + [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, 215 + /* Call instruction */ 216 + [BPF_JMP | BPF_CALL] = &&JMP_CALL, 217 + /* Jumps */ 218 + [BPF_JMP | BPF_JA] = &&JMP_JA, 219 + [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, 220 + [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K, 221 + [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X, 222 + [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K, 223 + [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X, 224 + [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K, 225 + [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X, 226 + [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K, 227 + [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X, 228 + [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K, 229 + [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X, 230 + [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K, 231 + [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X, 232 + [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K, 233 + /* Program return */ 234 + [BPF_JMP | BPF_EXIT] = &&JMP_EXIT, 235 + /* Store instructions */ 236 + [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B, 237 + [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H, 238 + [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W, 239 + [BPF_STX | BPF_MEM | BPF_DW] = 
&&STX_MEM_DW, 240 + [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W, 241 + [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW, 242 + [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B, 243 + [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H, 244 + [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W, 245 + [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW, 246 + /* Load instructions */ 247 + [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B, 248 + [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H, 249 + [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W, 250 + [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW, 251 + [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W, 252 + [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H, 253 + [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B, 254 + [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, 255 + [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, 256 + [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, 252 257 }; 253 258 void *ptr; 254 259 int off; ··· 295 290 ALU(XOR, ^) 296 291 ALU(MUL, *) 297 292 #undef ALU 298 - ALU_NEG_0: 293 + ALU_NEG: 299 294 A = (u32) -A; 300 295 CONT; 301 - ALU64_NEG_0: 296 + ALU64_NEG: 302 297 A = -A; 303 298 CONT; 304 299 ALU_MOV_X: ··· 387 382 CONT; 388 383 389 384 /* CALL */ 390 - JMP_CALL_0: 385 + JMP_CALL: 391 386 /* Function call scratches BPF_R1-BPF_R5 registers, 392 387 * preserves BPF_R6-BPF_R9, and stores return value 393 388 * into BPF_R0. ··· 397 392 CONT; 398 393 399 394 /* JMP */ 400 - JMP_JA_0: 395 + JMP_JA: 401 396 insn += insn->off; 402 397 CONT; 403 398 JMP_JEQ_X: ··· 484 479 CONT_JMP; 485 480 } 486 481 CONT; 487 - JMP_EXIT_0: 482 + JMP_EXIT: 488 483 return BPF_R0; 489 484 490 485 /* STX and ST and LDX*/ ··· 1585 1580 * a negative errno code is returned. On success the return is zero. 1586 1581 */ 1587 1582 int sk_unattached_filter_create(struct sk_filter **pfp, 1588 - struct sock_fprog *fprog) 1583 + struct sock_fprog_kern *fprog) 1589 1584 { 1590 1585 unsigned int fsize = sk_filter_proglen(fprog); 1591 1586 struct sk_filter *fp;
+1 -1
net/core/ptp_classifier.c
··· 133 133 { 0x16, 0, 0, 0x00000000 }, 134 134 { 0x06, 0, 0, 0x00000000 }, 135 135 }; 136 - struct sock_fprog ptp_prog = { 136 + struct sock_fprog_kern ptp_prog = { 137 137 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, 138 138 }; 139 139
+3 -2
net/netfilter/xt_bpf.c
··· 23 23 static int bpf_mt_check(const struct xt_mtchk_param *par) 24 24 { 25 25 struct xt_bpf_info *info = par->matchinfo; 26 - struct sock_fprog program; 26 + struct sock_fprog_kern program; 27 27 28 28 program.len = info->bpf_program_num_elem; 29 - program.filter = (struct sock_filter __user *) info->bpf_program; 29 + program.filter = info->bpf_program; 30 + 30 31 if (sk_unattached_filter_create(&info->filter, &program)) { 31 32 pr_info("bpf: check failed: parse error\n"); 32 33 return -EINVAL;
+2 -2
net/sched/cls_bpf.c
··· 160 160 { 161 161 struct sock_filter *bpf_ops, *bpf_old; 162 162 struct tcf_exts exts; 163 - struct sock_fprog tmp; 163 + struct sock_fprog_kern tmp; 164 164 struct sk_filter *fp, *fp_old; 165 165 u16 bpf_size, bpf_len; 166 166 u32 classid; ··· 191 191 memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); 192 192 193 193 tmp.len = bpf_len; 194 - tmp.filter = (struct sock_filter __user *) bpf_ops; 194 + tmp.filter = bpf_ops; 195 195 196 196 ret = sk_unattached_filter_create(&fp, &tmp); 197 197 if (ret)