Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add asm tests for pkt vs pkt_end comparison.

Add a few assembly tests for packet comparison.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20201111031213.25109-4-alexei.starovoitov@gmail.com

authored by

Alexei Starovoitov and committed by
Daniel Borkmann
cb62d340 9cc873e8

+42
+42
tools/testing/selftests/bpf/verifier/ctx_skb.c
··· 1089 1089 .errstr_unpriv = "R1 leaks addr", 1090 1090 .result = REJECT, 1091 1091 }, 1092 + { 1093 + "pkt > pkt_end taken check", 1094 + .insns = { 1095 + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end) 1096 + offsetof(struct __sk_buff, data_end)), 1097 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data) 1098 + offsetof(struct __sk_buff, data)), 1099 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4 1100 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42 1101 + BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0 1102 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8 1103 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14 1104 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4 1105 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1), // 8. if r3 > r2 goto 10 1106 + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9), // 9. r2 = *(u16 *)(r1 + 9) -- BPF_H is a half-word (16-bit) load 1107 + BPF_MOV64_IMM(BPF_REG_0, 0), // 10. r0 = 0 1108 + BPF_EXIT_INSN(), // 11. exit 1109 + }, 1110 + .result = ACCEPT, 1111 + .prog_type = BPF_PROG_TYPE_SK_SKB, 1112 + }, 1113 + { 1114 + "pkt_end < pkt taken check", 1115 + .insns = { 1116 + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end) 1117 + offsetof(struct __sk_buff, data_end)), 1118 + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data) 1119 + offsetof(struct __sk_buff, data)), 1120 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4 1121 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42 1122 + BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0 1123 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8 1124 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14 1125 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4 1126 + BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1), // 8. if r2 < r3 goto 10 1127 + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9), // 9. r2 = *(u16 *)(r1 + 9) -- BPF_H is a half-word (16-bit) load 1128 + BPF_MOV64_IMM(BPF_REG_0, 0), // 10. 
r0 = 0 1129 + BPF_EXIT_INSN(), // 11. exit 1130 + }, 1131 + .result = ACCEPT, 1132 + .prog_type = BPF_PROG_TYPE_SK_SKB, 1133 + },