Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf, tests: Add more LD_IMM64 tests

This patch adds new tests for the two-instruction LD_IMM64. The new tests
verify the operation with immediate values of different byte patterns.
Mainly intended to cover JITs that want to be clever when loading 64-bit
constants.

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20211007143006.634308-1-johan.almbladh@anyfinetworks.com

Authored by Johan Almbladh; committed by Daniel Borkmann.
0eb4ef88 bbf731b3

+117 -3
+117 -3
lib/test_bpf.c
··· 2134 2134 * of the immediate value. This is often the case if the native instruction 2135 2135 * immediate field width is narrower than 32 bits. 2136 2136 */ 2137 - static int bpf_fill_ld_imm64(struct bpf_test *self) 2137 + static int bpf_fill_ld_imm64_magn(struct bpf_test *self) 2138 2138 { 2139 2139 int block = 64; /* Increase for more tests per MSB position */ 2140 2140 int len = 3 + 8 * 63 * block * 2; ··· 2178 2178 BUG_ON(i != len); 2179 2179 2180 2180 return 0; 2181 + } 2182 + 2183 + /* 2184 + * Test the two-instruction 64-bit immediate load operation for different 2185 + * combinations of bytes. Each byte in the 64-bit word is constructed as 2186 + * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG. 2187 + * All patterns (base1, mask1) and (base2, mask2) bytes are tested. 2188 + */ 2189 + static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self, 2190 + u8 base1, u8 mask1, 2191 + u8 base2, u8 mask2) 2192 + { 2193 + struct bpf_insn *insn; 2194 + int len = 3 + 8 * BIT(8); 2195 + int pattern, index; 2196 + u32 rand = 1; 2197 + int i = 0; 2198 + 2199 + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); 2200 + if (!insn) 2201 + return -ENOMEM; 2202 + 2203 + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); 2204 + 2205 + for (pattern = 0; pattern < BIT(8); pattern++) { 2206 + u64 imm = 0; 2207 + 2208 + for (index = 0; index < 8; index++) { 2209 + int byte; 2210 + 2211 + if (pattern & BIT(index)) 2212 + byte = (base1 & mask1) | (rand & ~mask1); 2213 + else 2214 + byte = (base2 & mask2) | (rand & ~mask2); 2215 + imm = (imm << 8) | byte; 2216 + } 2217 + 2218 + /* Update our LCG */ 2219 + rand = rand * 1664525 + 1013904223; 2220 + 2221 + /* Perform operation */ 2222 + i += __bpf_ld_imm64(&insn[i], R1, imm); 2223 + 2224 + /* Load reference */ 2225 + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm); 2226 + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32)); 2227 + insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32); 2228 + insn[i++] = BPF_ALU64_REG(BPF_OR, 
R2, R3); 2229 + 2230 + /* Check result */ 2231 + insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1); 2232 + insn[i++] = BPF_EXIT_INSN(); 2233 + } 2234 + 2235 + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); 2236 + insn[i++] = BPF_EXIT_INSN(); 2237 + 2238 + self->u.ptr.insns = insn; 2239 + self->u.ptr.len = len; 2240 + BUG_ON(i != len); 2241 + 2242 + return 0; 2243 + } 2244 + 2245 + static int bpf_fill_ld_imm64_checker(struct bpf_test *self) 2246 + { 2247 + return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff); 2248 + } 2249 + 2250 + static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self) 2251 + { 2252 + return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80); 2253 + } 2254 + 2255 + static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self) 2256 + { 2257 + return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff); 2258 + } 2259 + 2260 + static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self) 2261 + { 2262 + return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff); 2181 2263 } 2182 2264 2183 2265 /* ··· 12483 12401 .fill_helper = bpf_fill_alu32_mod_reg, 12484 12402 .nr_testruns = NR_PATTERN_RUNS, 12485 12403 }, 12486 - /* LD_IMM64 immediate magnitudes */ 12404 + /* LD_IMM64 immediate magnitudes and byte patterns */ 12487 12405 { 12488 12406 "LD_IMM64: all immediate value magnitudes", 12489 12407 { }, 12490 12408 INTERNAL | FLAG_NO_DATA, 12491 12409 { }, 12492 12410 { { 0, 1 } }, 12493 - .fill_helper = bpf_fill_ld_imm64, 12411 + .fill_helper = bpf_fill_ld_imm64_magn, 12412 + }, 12413 + { 12414 + "LD_IMM64: checker byte patterns", 12415 + { }, 12416 + INTERNAL | FLAG_NO_DATA, 12417 + { }, 12418 + { { 0, 1 } }, 12419 + .fill_helper = bpf_fill_ld_imm64_checker, 12420 + }, 12421 + { 12422 + "LD_IMM64: random positive and zero byte patterns", 12423 + { }, 12424 + INTERNAL | FLAG_NO_DATA, 12425 + { }, 12426 + { { 0, 1 } }, 12427 + .fill_helper = bpf_fill_ld_imm64_pos_zero, 12428 + }, 12429 + { 12430 + "LD_IMM64: random negative and zero byte patterns", 12431 + { 
}, 12432 + INTERNAL | FLAG_NO_DATA, 12433 + { }, 12434 + { { 0, 1 } }, 12435 + .fill_helper = bpf_fill_ld_imm64_neg_zero, 12436 + }, 12437 + { 12438 + "LD_IMM64: random positive and negative byte patterns", 12439 + { }, 12440 + INTERNAL | FLAG_NO_DATA, 12441 + { }, 12442 + { { 0, 1 } }, 12443 + .fill_helper = bpf_fill_ld_imm64_pos_neg, 12494 12444 }, 12495 12445 /* 64-bit ATOMIC register combinations */ 12496 12446 {