Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: bpf: add tests for shifts by zero

There are currently no tests for ALU64 shift operations when the shift
amount is 0. This adds 6 new tests to make sure they are equivalent
to a no-op. The x32 JIT had such bugs that could have been caught by
these tests.

Cc: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

Authored by Luke Nelson; committed by Daniel Borkmann.
ac8786c7 6fa632e7

+85
+85
tools/testing/selftests/bpf/verifier/basic_instr.c
··· 91 91 .result = ACCEPT, 92 92 }, 93 93 { 94 + "lsh64 by 0 imm", 95 + .insns = { 96 + BPF_LD_IMM64(BPF_REG_0, 1), 97 + BPF_LD_IMM64(BPF_REG_1, 1), 98 + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0), 99 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1), 100 + BPF_MOV64_IMM(BPF_REG_0, 2), 101 + BPF_EXIT_INSN(), 102 + }, 103 + .result = ACCEPT, 104 + .retval = 1, 105 + }, 106 + { 107 + "rsh64 by 0 imm", 108 + .insns = { 109 + BPF_LD_IMM64(BPF_REG_0, 1), 110 + BPF_LD_IMM64(BPF_REG_1, 0x100000000LL), 111 + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 112 + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0), 113 + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), 114 + BPF_MOV64_IMM(BPF_REG_0, 2), 115 + BPF_EXIT_INSN(), 116 + }, 117 + .result = ACCEPT, 118 + .retval = 1, 119 + }, 120 + { 121 + "arsh64 by 0 imm", 122 + .insns = { 123 + BPF_LD_IMM64(BPF_REG_0, 1), 124 + BPF_LD_IMM64(BPF_REG_1, 0x100000000LL), 125 + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 126 + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0), 127 + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), 128 + BPF_MOV64_IMM(BPF_REG_0, 2), 129 + BPF_EXIT_INSN(), 130 + }, 131 + .result = ACCEPT, 132 + .retval = 1, 133 + }, 134 + { 135 + "lsh64 by 0 reg", 136 + .insns = { 137 + BPF_LD_IMM64(BPF_REG_0, 1), 138 + BPF_LD_IMM64(BPF_REG_1, 1), 139 + BPF_LD_IMM64(BPF_REG_2, 0), 140 + BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), 141 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1), 142 + BPF_MOV64_IMM(BPF_REG_0, 2), 143 + BPF_EXIT_INSN(), 144 + }, 145 + .result = ACCEPT, 146 + .retval = 1, 147 + }, 148 + { 149 + "rsh64 by 0 reg", 150 + .insns = { 151 + BPF_LD_IMM64(BPF_REG_0, 1), 152 + BPF_LD_IMM64(BPF_REG_1, 0x100000000LL), 153 + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 154 + BPF_LD_IMM64(BPF_REG_3, 0), 155 + BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3), 156 + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), 157 + BPF_MOV64_IMM(BPF_REG_0, 2), 158 + BPF_EXIT_INSN(), 159 + }, 160 + .result = ACCEPT, 161 + .retval = 1, 162 + }, 163 + { 164 + "arsh64 by 0 reg", 165 + 
.insns = { 166 + BPF_LD_IMM64(BPF_REG_0, 1), 167 + BPF_LD_IMM64(BPF_REG_1, 0x100000000LL), 168 + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 169 + BPF_LD_IMM64(BPF_REG_3, 0), 170 + BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3), 171 + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), 172 + BPF_MOV64_IMM(BPF_REG_0, 2), 173 + BPF_EXIT_INSN(), 174 + }, 175 + .result = ACCEPT, 176 + .retval = 1, 177 + }, 178 + { 94 179 "invalid 64-bit BPF_END", 95 180 .insns = { 96 181 BPF_MOV32_IMM(BPF_REG_0, 0),