Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"The pull requests are getting smaller, that's progress I suppose :-)

1) Fix infinite loop in CIPSO option parsing, from Yujuan Qi.

2) Fix remote checksum handling in VXLAN and GUE tunneling drivers,
from Koichiro Den.

3) Missing u64_stats_init() calls in several drivers, from Florian
Fainelli.

4) TCP can set the congestion window to an invalid ssthresh value
after congestion window reductions, from Yuchung Cheng.

5) Fix BPF jit branch generation on s390, from Daniel Borkmann.

6) Correct MIPS ebpf JIT merge, from David Daney.

7) Correct byte order test in BPF test_verifier.c, from Daniel
Borkmann.

8) Fix various crashes and leaks in ASIX driver, from Dean Jenkins.

9) Handle SCTP checksums properly in mlx4 driver, from Davide
Caratti.

10) We can potentially enter tcp_connect() with a cached route
already, due to fastopen, so we have to explicitly invalidate it.

11) skb_warn_bad_offload() can bark in legitimate situations, fix from
Willem de Bruijn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (52 commits)
net: avoid skb_warn_bad_offload false positives on UFO
qmi_wwan: fix NULL deref on disconnect
ppp: fix xmit recursion detection on ppp channels
rds: Reintroduce statistics counting
tcp: fastopen: tcp_connect() must refresh the route
net: sched: set xt_tgchk_param par.net properly in ipt_init_target
net: dsa: mediatek: add adjust link support for user ports
net/mlx4_en: don't set CHECKSUM_COMPLETE on SCTP packets
qed: Fix a memory allocation failure test in 'qed_mcp_cmd_init()'
hysdn: fix to a race condition in put_log_buffer
s390/qeth: fix L3 next-hop in xmit qeth hdr
asix: Fix small memory leak in ax88772_unbind()
asix: Ensure asix_rx_fixup_info members are all reset
asix: Add rx->ax_skb = NULL after usbnet_skb_return()
bpf: fix selftest/bpf/test_pkt_md_access on s390x
netvsc: fix race on sub channel creation
bpf: fix byte order test in test_verifier
xgene: Always get clk source, but ignore if it's missing for SGMII ports
MIPS: Add missing file for eBPF JIT.
bpf, s390: fix build for libbpf and selftest suite
...

+2490 -186
+1950
arch/mips/net/ebpf_jit.c
··· 1 + /* 2 + * Just-In-Time compiler for eBPF filters on MIPS 3 + * 4 + * Copyright (c) 2017 Cavium, Inc. 5 + * 6 + * Based on code from: 7 + * 8 + * Copyright (c) 2014 Imagination Technologies Ltd. 9 + * Author: Markos Chandras <markos.chandras@imgtec.com> 10 + * 11 + * This program is free software; you can redistribute it and/or modify it 12 + * under the terms of the GNU General Public License as published by the 13 + * Free Software Foundation; version 2 of the License. 14 + */ 15 + 16 + #include <linux/bitops.h> 17 + #include <linux/errno.h> 18 + #include <linux/filter.h> 19 + #include <linux/bpf.h> 20 + #include <linux/slab.h> 21 + #include <asm/bitops.h> 22 + #include <asm/byteorder.h> 23 + #include <asm/cacheflush.h> 24 + #include <asm/cpu-features.h> 25 + #include <asm/uasm.h> 26 + 27 + /* Registers used by JIT */ 28 + #define MIPS_R_ZERO 0 29 + #define MIPS_R_AT 1 30 + #define MIPS_R_V0 2 /* BPF_R0 */ 31 + #define MIPS_R_V1 3 32 + #define MIPS_R_A0 4 /* BPF_R1 */ 33 + #define MIPS_R_A1 5 /* BPF_R2 */ 34 + #define MIPS_R_A2 6 /* BPF_R3 */ 35 + #define MIPS_R_A3 7 /* BPF_R4 */ 36 + #define MIPS_R_A4 8 /* BPF_R5 */ 37 + #define MIPS_R_T4 12 /* BPF_AX */ 38 + #define MIPS_R_T5 13 39 + #define MIPS_R_T6 14 40 + #define MIPS_R_T7 15 41 + #define MIPS_R_S0 16 /* BPF_R6 */ 42 + #define MIPS_R_S1 17 /* BPF_R7 */ 43 + #define MIPS_R_S2 18 /* BPF_R8 */ 44 + #define MIPS_R_S3 19 /* BPF_R9 */ 45 + #define MIPS_R_S4 20 /* BPF_TCC */ 46 + #define MIPS_R_S5 21 47 + #define MIPS_R_S6 22 48 + #define MIPS_R_S7 23 49 + #define MIPS_R_T8 24 50 + #define MIPS_R_T9 25 51 + #define MIPS_R_SP 29 52 + #define MIPS_R_RA 31 53 + 54 + /* eBPF flags */ 55 + #define EBPF_SAVE_S0 BIT(0) 56 + #define EBPF_SAVE_S1 BIT(1) 57 + #define EBPF_SAVE_S2 BIT(2) 58 + #define EBPF_SAVE_S3 BIT(3) 59 + #define EBPF_SAVE_S4 BIT(4) 60 + #define EBPF_SAVE_RA BIT(5) 61 + #define EBPF_SEEN_FP BIT(6) 62 + #define EBPF_SEEN_TC BIT(7) 63 + #define EBPF_TCC_IN_V1 BIT(8) 64 + 65 + /* 66 + * For the mips64 
ISA, we need to track the value range or type for 67 + * each JIT register. The BPF machine requires zero extended 32-bit 68 + * values, but the mips64 ISA requires sign extended 32-bit values. 69 + * At each point in the BPF program we track the state of every 70 + * register so that we can zero extend or sign extend as the BPF 71 + * semantics require. 72 + */ 73 + enum reg_val_type { 74 + /* uninitialized */ 75 + REG_UNKNOWN, 76 + /* not known to be 32-bit compatible. */ 77 + REG_64BIT, 78 + /* 32-bit compatible, no truncation needed for 64-bit ops. */ 79 + REG_64BIT_32BIT, 80 + /* 32-bit compatible, need truncation for 64-bit ops. */ 81 + REG_32BIT, 82 + /* 32-bit zero extended. */ 83 + REG_32BIT_ZERO_EX, 84 + /* 32-bit no sign/zero extension needed. */ 85 + REG_32BIT_POS 86 + }; 87 + 88 + /* 89 + * high bit of offsets indicates if long branch conversion done at 90 + * this insn. 91 + */ 92 + #define OFFSETS_B_CONV BIT(31) 93 + 94 + /** 95 + * struct jit_ctx - JIT context 96 + * @skf: The sk_filter 97 + * @stack_size: eBPF stack size 98 + * @tmp_offset: eBPF $sp offset to 8-byte temporary memory 99 + * @idx: Instruction index 100 + * @flags: JIT flags 101 + * @offsets: Instruction offsets 102 + * @target: Memory location for the compiled filter 103 + * @reg_val_types Packed enum reg_val_type for each register. 
104 + */ 105 + struct jit_ctx { 106 + const struct bpf_prog *skf; 107 + int stack_size; 108 + int tmp_offset; 109 + u32 idx; 110 + u32 flags; 111 + u32 *offsets; 112 + u32 *target; 113 + u64 *reg_val_types; 114 + unsigned int long_b_conversion:1; 115 + unsigned int gen_b_offsets:1; 116 + }; 117 + 118 + static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) 119 + { 120 + *rvt &= ~(7ull << (reg * 3)); 121 + *rvt |= ((u64)type << (reg * 3)); 122 + } 123 + 124 + static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, 125 + int index, int reg) 126 + { 127 + return (ctx->reg_val_types[index] >> (reg * 3)) & 7; 128 + } 129 + 130 + /* Simply emit the instruction if the JIT memory space has been allocated */ 131 + #define emit_instr(ctx, func, ...) \ 132 + do { \ 133 + if ((ctx)->target != NULL) { \ 134 + u32 *p = &(ctx)->target[ctx->idx]; \ 135 + uasm_i_##func(&p, ##__VA_ARGS__); \ 136 + } \ 137 + (ctx)->idx++; \ 138 + } while (0) 139 + 140 + static unsigned int j_target(struct jit_ctx *ctx, int target_idx) 141 + { 142 + unsigned long target_va, base_va; 143 + unsigned int r; 144 + 145 + if (!ctx->target) 146 + return 0; 147 + 148 + base_va = (unsigned long)ctx->target; 149 + target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV); 150 + 151 + if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful)) 152 + return (unsigned int)-1; 153 + r = target_va & 0x0ffffffful; 154 + return r; 155 + } 156 + 157 + /* Compute the immediate value for PC-relative branches. */ 158 + static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) 159 + { 160 + if (!ctx->gen_b_offsets) 161 + return 0; 162 + 163 + /* 164 + * We want a pc-relative branch. tgt is the instruction offset 165 + * we want to jump to. 
166 + 167 + * Branch on MIPS: 168 + * I: target_offset <- sign_extend(offset) 169 + * I+1: PC += target_offset (delay slot) 170 + * 171 + * ctx->idx currently points to the branch instruction 172 + * but the offset is added to the delay slot so we need 173 + * to subtract 4. 174 + */ 175 + return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) - 176 + (ctx->idx * 4) - 4; 177 + } 178 + 179 + int bpf_jit_enable __read_mostly; 180 + 181 + enum which_ebpf_reg { 182 + src_reg, 183 + src_reg_no_fp, 184 + dst_reg, 185 + dst_reg_fp_ok 186 + }; 187 + 188 + /* 189 + * For eBPF, the register mapping naturally falls out of the 190 + * requirements of eBPF and the MIPS n64 ABI. We don't maintain a 191 + * separate frame pointer, so BPF_REG_10 relative accesses are 192 + * adjusted to be $sp relative. 193 + */ 194 + int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn, 195 + enum which_ebpf_reg w) 196 + { 197 + int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ? 198 + insn->src_reg : insn->dst_reg; 199 + 200 + switch (ebpf_reg) { 201 + case BPF_REG_0: 202 + return MIPS_R_V0; 203 + case BPF_REG_1: 204 + return MIPS_R_A0; 205 + case BPF_REG_2: 206 + return MIPS_R_A1; 207 + case BPF_REG_3: 208 + return MIPS_R_A2; 209 + case BPF_REG_4: 210 + return MIPS_R_A3; 211 + case BPF_REG_5: 212 + return MIPS_R_A4; 213 + case BPF_REG_6: 214 + ctx->flags |= EBPF_SAVE_S0; 215 + return MIPS_R_S0; 216 + case BPF_REG_7: 217 + ctx->flags |= EBPF_SAVE_S1; 218 + return MIPS_R_S1; 219 + case BPF_REG_8: 220 + ctx->flags |= EBPF_SAVE_S2; 221 + return MIPS_R_S2; 222 + case BPF_REG_9: 223 + ctx->flags |= EBPF_SAVE_S3; 224 + return MIPS_R_S3; 225 + case BPF_REG_10: 226 + if (w == dst_reg || w == src_reg_no_fp) 227 + goto bad_reg; 228 + ctx->flags |= EBPF_SEEN_FP; 229 + /* 230 + * Needs special handling, return something that 231 + * cannot be clobbered just in case. 
232 + */ 233 + return MIPS_R_ZERO; 234 + case BPF_REG_AX: 235 + return MIPS_R_T4; 236 + default: 237 + bad_reg: 238 + WARN(1, "Illegal bpf reg: %d\n", ebpf_reg); 239 + return -EINVAL; 240 + } 241 + } 242 + /* 243 + * eBPF stack frame will be something like: 244 + * 245 + * Entry $sp ------> +--------------------------------+ 246 + * | $ra (optional) | 247 + * +--------------------------------+ 248 + * | $s0 (optional) | 249 + * +--------------------------------+ 250 + * | $s1 (optional) | 251 + * +--------------------------------+ 252 + * | $s2 (optional) | 253 + * +--------------------------------+ 254 + * | $s3 (optional) | 255 + * +--------------------------------+ 256 + * | $s4 (optional) | 257 + * +--------------------------------+ 258 + * | tmp-storage (if $ra saved) | 259 + * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10 260 + * | BPF_REG_10 relative storage | 261 + * | MAX_BPF_STACK (optional) | 262 + * | . | 263 + * | . | 264 + * | . | 265 + * $sp --------> +--------------------------------+ 266 + * 267 + * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized 268 + * area is not allocated. 269 + */ 270 + static int gen_int_prologue(struct jit_ctx *ctx) 271 + { 272 + int stack_adjust = 0; 273 + int store_offset; 274 + int locals_size; 275 + 276 + if (ctx->flags & EBPF_SAVE_RA) 277 + /* 278 + * If RA we are doing a function call and may need 279 + * extra 8-byte tmp area. 280 + */ 281 + stack_adjust += 16; 282 + if (ctx->flags & EBPF_SAVE_S0) 283 + stack_adjust += 8; 284 + if (ctx->flags & EBPF_SAVE_S1) 285 + stack_adjust += 8; 286 + if (ctx->flags & EBPF_SAVE_S2) 287 + stack_adjust += 8; 288 + if (ctx->flags & EBPF_SAVE_S3) 289 + stack_adjust += 8; 290 + if (ctx->flags & EBPF_SAVE_S4) 291 + stack_adjust += 8; 292 + 293 + BUILD_BUG_ON(MAX_BPF_STACK & 7); 294 + locals_size = (ctx->flags & EBPF_SEEN_FP) ? 
MAX_BPF_STACK : 0; 295 + 296 + stack_adjust += locals_size; 297 + ctx->tmp_offset = locals_size; 298 + 299 + ctx->stack_size = stack_adjust; 300 + 301 + /* 302 + * First instruction initializes the tail call count (TCC). 303 + * On tail call we skip this instruction, and the TCC is 304 + * passed in $v1 from the caller. 305 + */ 306 + emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); 307 + if (stack_adjust) 308 + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); 309 + else 310 + return 0; 311 + 312 + store_offset = stack_adjust - 8; 313 + 314 + if (ctx->flags & EBPF_SAVE_RA) { 315 + emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); 316 + store_offset -= 8; 317 + } 318 + if (ctx->flags & EBPF_SAVE_S0) { 319 + emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); 320 + store_offset -= 8; 321 + } 322 + if (ctx->flags & EBPF_SAVE_S1) { 323 + emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); 324 + store_offset -= 8; 325 + } 326 + if (ctx->flags & EBPF_SAVE_S2) { 327 + emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); 328 + store_offset -= 8; 329 + } 330 + if (ctx->flags & EBPF_SAVE_S3) { 331 + emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); 332 + store_offset -= 8; 333 + } 334 + if (ctx->flags & EBPF_SAVE_S4) { 335 + emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); 336 + store_offset -= 8; 337 + } 338 + 339 + if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) 340 + emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); 341 + 342 + return 0; 343 + } 344 + 345 + static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) 346 + { 347 + const struct bpf_prog *prog = ctx->skf; 348 + int stack_adjust = ctx->stack_size; 349 + int store_offset = stack_adjust - 8; 350 + int r0 = MIPS_R_V0; 351 + 352 + if (dest_reg == MIPS_R_RA && 353 + get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) 354 + /* Don't let zero extended value escape. 
*/ 355 + emit_instr(ctx, sll, r0, r0, 0); 356 + 357 + if (ctx->flags & EBPF_SAVE_RA) { 358 + emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 359 + store_offset -= 8; 360 + } 361 + if (ctx->flags & EBPF_SAVE_S0) { 362 + emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); 363 + store_offset -= 8; 364 + } 365 + if (ctx->flags & EBPF_SAVE_S1) { 366 + emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); 367 + store_offset -= 8; 368 + } 369 + if (ctx->flags & EBPF_SAVE_S2) { 370 + emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); 371 + store_offset -= 8; 372 + } 373 + if (ctx->flags & EBPF_SAVE_S3) { 374 + emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); 375 + store_offset -= 8; 376 + } 377 + if (ctx->flags & EBPF_SAVE_S4) { 378 + emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); 379 + store_offset -= 8; 380 + } 381 + emit_instr(ctx, jr, dest_reg); 382 + 383 + if (stack_adjust) 384 + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); 385 + else 386 + emit_instr(ctx, nop); 387 + 388 + return 0; 389 + } 390 + 391 + static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, 392 + struct jit_ctx *ctx) 393 + { 394 + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { 395 + emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm); 396 + } else { 397 + int lower = (s16)(insn->imm & 0xffff); 398 + int upper = insn->imm - lower; 399 + 400 + emit_instr(ctx, lui, reg, upper >> 16); 401 + emit_instr(ctx, addiu, reg, reg, lower); 402 + } 403 + 404 + } 405 + 406 + static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, 407 + int idx) 408 + { 409 + int upper_bound, lower_bound; 410 + int dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 411 + 412 + if (dst < 0) 413 + return dst; 414 + 415 + switch (BPF_OP(insn->code)) { 416 + case BPF_MOV: 417 + case BPF_ADD: 418 + upper_bound = S16_MAX; 419 + lower_bound = S16_MIN; 420 + break; 421 + case BPF_SUB: 422 + upper_bound = -(int)S16_MIN; 423 + lower_bound = -(int)S16_MAX; 424 + 
break; 425 + case BPF_AND: 426 + case BPF_OR: 427 + case BPF_XOR: 428 + upper_bound = 0xffff; 429 + lower_bound = 0; 430 + break; 431 + case BPF_RSH: 432 + case BPF_LSH: 433 + case BPF_ARSH: 434 + /* Shift amounts are truncated, no need for bounds */ 435 + upper_bound = S32_MAX; 436 + lower_bound = S32_MIN; 437 + break; 438 + default: 439 + return -EINVAL; 440 + } 441 + 442 + /* 443 + * Immediate move clobbers the register, so no sign/zero 444 + * extension needed. 445 + */ 446 + if (BPF_CLASS(insn->code) == BPF_ALU64 && 447 + BPF_OP(insn->code) != BPF_MOV && 448 + get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT) 449 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 450 + /* BPF_ALU | BPF_LSH doesn't need separate sign extension */ 451 + if (BPF_CLASS(insn->code) == BPF_ALU && 452 + BPF_OP(insn->code) != BPF_LSH && 453 + BPF_OP(insn->code) != BPF_MOV && 454 + get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT) 455 + emit_instr(ctx, sll, dst, dst, 0); 456 + 457 + if (insn->imm >= lower_bound && insn->imm <= upper_bound) { 458 + /* single insn immediate case */ 459 + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { 460 + case BPF_ALU64 | BPF_MOV: 461 + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm); 462 + break; 463 + case BPF_ALU64 | BPF_AND: 464 + case BPF_ALU | BPF_AND: 465 + emit_instr(ctx, andi, dst, dst, insn->imm); 466 + break; 467 + case BPF_ALU64 | BPF_OR: 468 + case BPF_ALU | BPF_OR: 469 + emit_instr(ctx, ori, dst, dst, insn->imm); 470 + break; 471 + case BPF_ALU64 | BPF_XOR: 472 + case BPF_ALU | BPF_XOR: 473 + emit_instr(ctx, xori, dst, dst, insn->imm); 474 + break; 475 + case BPF_ALU64 | BPF_ADD: 476 + emit_instr(ctx, daddiu, dst, dst, insn->imm); 477 + break; 478 + case BPF_ALU64 | BPF_SUB: 479 + emit_instr(ctx, daddiu, dst, dst, -insn->imm); 480 + break; 481 + case BPF_ALU64 | BPF_RSH: 482 + emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f); 483 + break; 484 + case BPF_ALU | BPF_RSH: 485 + emit_instr(ctx, srl, dst, dst, 
insn->imm & 0x1f); 486 + break; 487 + case BPF_ALU64 | BPF_LSH: 488 + emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f); 489 + break; 490 + case BPF_ALU | BPF_LSH: 491 + emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f); 492 + break; 493 + case BPF_ALU64 | BPF_ARSH: 494 + emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f); 495 + break; 496 + case BPF_ALU | BPF_ARSH: 497 + emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f); 498 + break; 499 + case BPF_ALU | BPF_MOV: 500 + emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm); 501 + break; 502 + case BPF_ALU | BPF_ADD: 503 + emit_instr(ctx, addiu, dst, dst, insn->imm); 504 + break; 505 + case BPF_ALU | BPF_SUB: 506 + emit_instr(ctx, addiu, dst, dst, -insn->imm); 507 + break; 508 + default: 509 + return -EINVAL; 510 + } 511 + } else { 512 + /* multi insn immediate case */ 513 + if (BPF_OP(insn->code) == BPF_MOV) { 514 + gen_imm_to_reg(insn, dst, ctx); 515 + } else { 516 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 517 + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { 518 + case BPF_ALU64 | BPF_AND: 519 + case BPF_ALU | BPF_AND: 520 + emit_instr(ctx, and, dst, dst, MIPS_R_AT); 521 + break; 522 + case BPF_ALU64 | BPF_OR: 523 + case BPF_ALU | BPF_OR: 524 + emit_instr(ctx, or, dst, dst, MIPS_R_AT); 525 + break; 526 + case BPF_ALU64 | BPF_XOR: 527 + case BPF_ALU | BPF_XOR: 528 + emit_instr(ctx, xor, dst, dst, MIPS_R_AT); 529 + break; 530 + case BPF_ALU64 | BPF_ADD: 531 + emit_instr(ctx, daddu, dst, dst, MIPS_R_AT); 532 + break; 533 + case BPF_ALU64 | BPF_SUB: 534 + emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT); 535 + break; 536 + case BPF_ALU | BPF_ADD: 537 + emit_instr(ctx, addu, dst, dst, MIPS_R_AT); 538 + break; 539 + case BPF_ALU | BPF_SUB: 540 + emit_instr(ctx, subu, dst, dst, MIPS_R_AT); 541 + break; 542 + default: 543 + return -EINVAL; 544 + } 545 + } 546 + } 547 + 548 + return 0; 549 + } 550 + 551 + static void * __must_check 552 + ool_skb_header_pointer(const struct sk_buff *skb, int offset, 553 + int len, void 
*buffer) 554 + { 555 + return skb_header_pointer(skb, offset, len, buffer); 556 + } 557 + 558 + static int size_to_len(const struct bpf_insn *insn) 559 + { 560 + switch (BPF_SIZE(insn->code)) { 561 + case BPF_B: 562 + return 1; 563 + case BPF_H: 564 + return 2; 565 + case BPF_W: 566 + return 4; 567 + case BPF_DW: 568 + return 8; 569 + } 570 + return 0; 571 + } 572 + 573 + static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) 574 + { 575 + if (value >= 0xffffffffffff8000ull || value < 0x8000ull) { 576 + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value); 577 + } else if (value >= 0xffffffff80000000ull || 578 + (value < 0x80000000 && value > 0xffff)) { 579 + emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16)); 580 + emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff)); 581 + } else { 582 + int i; 583 + bool seen_part = false; 584 + int needed_shift = 0; 585 + 586 + for (i = 0; i < 4; i++) { 587 + u64 part = (value >> (16 * (3 - i))) & 0xffff; 588 + 589 + if (seen_part && needed_shift > 0 && (part || i == 3)) { 590 + emit_instr(ctx, dsll_safe, dst, dst, needed_shift); 591 + needed_shift = 0; 592 + } 593 + if (part) { 594 + if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) { 595 + emit_instr(ctx, lui, dst, (s32)(s16)part); 596 + needed_shift = -16; 597 + } else { 598 + emit_instr(ctx, ori, dst, 599 + seen_part ? 
dst : MIPS_R_ZERO, 600 + (unsigned int)part); 601 + } 602 + seen_part = true; 603 + } 604 + if (seen_part) 605 + needed_shift += 16; 606 + } 607 + } 608 + } 609 + 610 + static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) 611 + { 612 + int off, b_off; 613 + 614 + ctx->flags |= EBPF_SEEN_TC; 615 + /* 616 + * if (index >= array->map.max_entries) 617 + * goto out; 618 + */ 619 + off = offsetof(struct bpf_array, map.max_entries); 620 + emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1); 621 + emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2); 622 + b_off = b_imm(this_idx + 1, ctx); 623 + emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); 624 + /* 625 + * if (--TCC < 0) 626 + * goto out; 627 + */ 628 + /* Delay slot */ 629 + emit_instr(ctx, daddiu, MIPS_R_T5, 630 + (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); 631 + b_off = b_imm(this_idx + 1, ctx); 632 + emit_instr(ctx, bltz, MIPS_R_T5, b_off); 633 + /* 634 + * prog = array->ptrs[index]; 635 + * if (prog == NULL) 636 + * goto out; 637 + */ 638 + /* Delay slot */ 639 + emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3); 640 + emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1); 641 + off = offsetof(struct bpf_array, ptrs); 642 + emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8); 643 + b_off = b_imm(this_idx + 1, ctx); 644 + emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off); 645 + /* Delay slot */ 646 + emit_instr(ctx, nop); 647 + 648 + /* goto *(prog->bpf_func + 4); */ 649 + off = offsetof(struct bpf_prog, bpf_func); 650 + emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT); 651 + /* All systems are go... 
propagate TCC */ 652 + emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO); 653 + /* Skip first instruction (TCC initialization) */ 654 + emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4); 655 + return build_int_epilogue(ctx, MIPS_R_T9); 656 + } 657 + 658 + static bool use_bbit_insns(void) 659 + { 660 + switch (current_cpu_type()) { 661 + case CPU_CAVIUM_OCTEON: 662 + case CPU_CAVIUM_OCTEON_PLUS: 663 + case CPU_CAVIUM_OCTEON2: 664 + case CPU_CAVIUM_OCTEON3: 665 + return true; 666 + default: 667 + return false; 668 + } 669 + } 670 + 671 + static bool is_bad_offset(int b_off) 672 + { 673 + return b_off > 0x1ffff || b_off < -0x20000; 674 + } 675 + 676 + /* Returns the number of insn slots consumed. */ 677 + static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, 678 + int this_idx, int exit_idx) 679 + { 680 + int src, dst, r, td, ts, mem_off, b_off; 681 + bool need_swap, did_move, cmp_eq; 682 + unsigned int target; 683 + u64 t64; 684 + s64 t64s; 685 + 686 + switch (insn->code) { 687 + case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ 688 + case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ 689 + case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */ 690 + case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */ 691 + case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */ 692 + case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */ 693 + case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */ 694 + case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */ 695 + case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */ 696 + case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */ 697 + case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */ 698 + case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */ 699 + case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */ 700 + case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */ 701 + case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */ 702 + case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */ 703 + case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */ 704 + case BPF_ALU | BPF_ARSH | BPF_K: /* 
ALU64_IMM */ 705 + r = gen_imm_insn(insn, ctx, this_idx); 706 + if (r < 0) 707 + return r; 708 + break; 709 + case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */ 710 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 711 + if (dst < 0) 712 + return dst; 713 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 714 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 715 + if (insn->imm == 1) /* Mult by 1 is a nop */ 716 + break; 717 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 718 + emit_instr(ctx, dmultu, MIPS_R_AT, dst); 719 + emit_instr(ctx, mflo, dst); 720 + break; 721 + case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ 722 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 723 + if (dst < 0) 724 + return dst; 725 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 726 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 727 + emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst); 728 + break; 729 + case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */ 730 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 731 + if (dst < 0) 732 + return dst; 733 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 734 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 735 + /* sign extend */ 736 + emit_instr(ctx, sll, dst, dst, 0); 737 + } 738 + if (insn->imm == 1) /* Mult by 1 is a nop */ 739 + break; 740 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 741 + emit_instr(ctx, multu, dst, MIPS_R_AT); 742 + emit_instr(ctx, mflo, dst); 743 + break; 744 + case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ 745 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 746 + if (dst < 0) 747 + return dst; 748 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 749 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 750 + /* sign extend */ 751 + emit_instr(ctx, sll, dst, dst, 0); 752 + } 753 + emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst); 754 + break; 755 + case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */ 756 + case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */ 757 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 758 + if (dst 
< 0) 759 + return dst; 760 + if (insn->imm == 0) { /* Div by zero */ 761 + b_off = b_imm(exit_idx, ctx); 762 + if (is_bad_offset(b_off)) 763 + return -E2BIG; 764 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 765 + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); 766 + } 767 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 768 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) 769 + /* sign extend */ 770 + emit_instr(ctx, sll, dst, dst, 0); 771 + if (insn->imm == 1) { 772 + /* div by 1 is a nop, mod by 1 is zero */ 773 + if (BPF_OP(insn->code) == BPF_MOD) 774 + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); 775 + break; 776 + } 777 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 778 + emit_instr(ctx, divu, dst, MIPS_R_AT); 779 + if (BPF_OP(insn->code) == BPF_DIV) 780 + emit_instr(ctx, mflo, dst); 781 + else 782 + emit_instr(ctx, mfhi, dst); 783 + break; 784 + case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */ 785 + case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */ 786 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 787 + if (dst < 0) 788 + return dst; 789 + if (insn->imm == 0) { /* Div by zero */ 790 + b_off = b_imm(exit_idx, ctx); 791 + if (is_bad_offset(b_off)) 792 + return -E2BIG; 793 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 794 + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); 795 + } 796 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 797 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 798 + 799 + if (insn->imm == 1) { 800 + /* div by 1 is a nop, mod by 1 is zero */ 801 + if (BPF_OP(insn->code) == BPF_MOD) 802 + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); 803 + break; 804 + } 805 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 806 + emit_instr(ctx, ddivu, dst, MIPS_R_AT); 807 + if (BPF_OP(insn->code) == BPF_DIV) 808 + emit_instr(ctx, mflo, dst); 809 + else 810 + emit_instr(ctx, mfhi, dst); 811 + break; 812 + case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */ 813 + case BPF_ALU64 | BPF_ADD | 
BPF_X: /* ALU64_REG */ 814 + case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */ 815 + case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */ 816 + case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */ 817 + case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */ 818 + case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */ 819 + case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */ 820 + case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */ 821 + case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */ 822 + case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */ 823 + case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */ 824 + src = ebpf_to_mips_reg(ctx, insn, src_reg); 825 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 826 + if (src < 0 || dst < 0) 827 + return -EINVAL; 828 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 829 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 830 + did_move = false; 831 + if (insn->src_reg == BPF_REG_10) { 832 + if (BPF_OP(insn->code) == BPF_MOV) { 833 + emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); 834 + did_move = true; 835 + } else { 836 + emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK); 837 + src = MIPS_R_AT; 838 + } 839 + } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 840 + int tmp_reg = MIPS_R_AT; 841 + 842 + if (BPF_OP(insn->code) == BPF_MOV) { 843 + tmp_reg = dst; 844 + did_move = true; 845 + } 846 + emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO); 847 + emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); 848 + src = MIPS_R_AT; 849 + } 850 + switch (BPF_OP(insn->code)) { 851 + case BPF_MOV: 852 + if (!did_move) 853 + emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); 854 + break; 855 + case BPF_ADD: 856 + emit_instr(ctx, daddu, dst, dst, src); 857 + break; 858 + case BPF_SUB: 859 + emit_instr(ctx, dsubu, dst, dst, src); 860 + break; 861 + case BPF_XOR: 862 + emit_instr(ctx, xor, dst, dst, src); 863 + break; 864 + case BPF_OR: 865 + emit_instr(ctx, or, dst, dst, src); 866 + break; 867 + case BPF_AND: 
868 + emit_instr(ctx, and, dst, dst, src); 869 + break; 870 + case BPF_MUL: 871 + emit_instr(ctx, dmultu, dst, src); 872 + emit_instr(ctx, mflo, dst); 873 + break; 874 + case BPF_DIV: 875 + case BPF_MOD: 876 + b_off = b_imm(exit_idx, ctx); 877 + if (is_bad_offset(b_off)) 878 + return -E2BIG; 879 + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); 880 + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); 881 + emit_instr(ctx, ddivu, dst, src); 882 + if (BPF_OP(insn->code) == BPF_DIV) 883 + emit_instr(ctx, mflo, dst); 884 + else 885 + emit_instr(ctx, mfhi, dst); 886 + break; 887 + case BPF_LSH: 888 + emit_instr(ctx, dsllv, dst, dst, src); 889 + break; 890 + case BPF_RSH: 891 + emit_instr(ctx, dsrlv, dst, dst, src); 892 + break; 893 + case BPF_ARSH: 894 + emit_instr(ctx, dsrav, dst, dst, src); 895 + break; 896 + default: 897 + pr_err("ALU64_REG NOT HANDLED\n"); 898 + return -EINVAL; 899 + } 900 + break; 901 + case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */ 902 + case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */ 903 + case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */ 904 + case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */ 905 + case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */ 906 + case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */ 907 + case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */ 908 + case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */ 909 + case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ 910 + case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ 911 + case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ 912 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 913 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 914 + if (src < 0 || dst < 0) 915 + return -EINVAL; 916 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 917 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 918 + /* sign extend */ 919 + emit_instr(ctx, sll, dst, dst, 0); 920 + } 921 + did_move = false; 922 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 923 + if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { 924 + int tmp_reg = MIPS_R_AT; 
925 + 926 + if (BPF_OP(insn->code) == BPF_MOV) { 927 + tmp_reg = dst; 928 + did_move = true; 929 + } 930 + /* sign extend */ 931 + emit_instr(ctx, sll, tmp_reg, src, 0); 932 + src = MIPS_R_AT; 933 + } 934 + switch (BPF_OP(insn->code)) { 935 + case BPF_MOV: 936 + if (!did_move) 937 + emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); 938 + break; 939 + case BPF_ADD: 940 + emit_instr(ctx, addu, dst, dst, src); 941 + break; 942 + case BPF_SUB: 943 + emit_instr(ctx, subu, dst, dst, src); 944 + break; 945 + case BPF_XOR: 946 + emit_instr(ctx, xor, dst, dst, src); 947 + break; 948 + case BPF_OR: 949 + emit_instr(ctx, or, dst, dst, src); 950 + break; 951 + case BPF_AND: 952 + emit_instr(ctx, and, dst, dst, src); 953 + break; 954 + case BPF_MUL: 955 + emit_instr(ctx, mul, dst, dst, src); 956 + break; 957 + case BPF_DIV: 958 + case BPF_MOD: 959 + b_off = b_imm(exit_idx, ctx); 960 + if (is_bad_offset(b_off)) 961 + return -E2BIG; 962 + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); 963 + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); 964 + emit_instr(ctx, divu, dst, src); 965 + if (BPF_OP(insn->code) == BPF_DIV) 966 + emit_instr(ctx, mflo, dst); 967 + else 968 + emit_instr(ctx, mfhi, dst); 969 + break; 970 + case BPF_LSH: 971 + emit_instr(ctx, sllv, dst, dst, src); 972 + break; 973 + case BPF_RSH: 974 + emit_instr(ctx, srlv, dst, dst, src); 975 + break; 976 + default: 977 + pr_err("ALU_REG NOT HANDLED\n"); 978 + return -EINVAL; 979 + } 980 + break; 981 + case BPF_JMP | BPF_EXIT: 982 + if (this_idx + 1 < exit_idx) { 983 + b_off = b_imm(exit_idx, ctx); 984 + if (is_bad_offset(b_off)) 985 + return -E2BIG; 986 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 987 + emit_instr(ctx, nop); 988 + } 989 + break; 990 + case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ 991 + case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ 992 + cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); 993 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 994 + if (dst < 0) 995 + return dst; 996 + if (insn->imm == 0) 
{ 997 + src = MIPS_R_ZERO; 998 + } else { 999 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1000 + src = MIPS_R_AT; 1001 + } 1002 + goto jeq_common; 1003 + case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ 1004 + case BPF_JMP | BPF_JNE | BPF_X: 1005 + case BPF_JMP | BPF_JSGT | BPF_X: 1006 + case BPF_JMP | BPF_JSGE | BPF_X: 1007 + case BPF_JMP | BPF_JGT | BPF_X: 1008 + case BPF_JMP | BPF_JGE | BPF_X: 1009 + case BPF_JMP | BPF_JSET | BPF_X: 1010 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1011 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1012 + if (src < 0 || dst < 0) 1013 + return -EINVAL; 1014 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 1015 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 1016 + if (td == REG_32BIT && ts != REG_32BIT) { 1017 + emit_instr(ctx, sll, MIPS_R_AT, src, 0); 1018 + src = MIPS_R_AT; 1019 + } else if (ts == REG_32BIT && td != REG_32BIT) { 1020 + emit_instr(ctx, sll, MIPS_R_AT, dst, 0); 1021 + dst = MIPS_R_AT; 1022 + } 1023 + if (BPF_OP(insn->code) == BPF_JSET) { 1024 + emit_instr(ctx, and, MIPS_R_AT, dst, src); 1025 + cmp_eq = false; 1026 + dst = MIPS_R_AT; 1027 + src = MIPS_R_ZERO; 1028 + } else if (BPF_OP(insn->code) == BPF_JSGT) { 1029 + emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); 1030 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1031 + b_off = b_imm(exit_idx, ctx); 1032 + if (is_bad_offset(b_off)) 1033 + return -E2BIG; 1034 + emit_instr(ctx, blez, MIPS_R_AT, b_off); 1035 + emit_instr(ctx, nop); 1036 + return 2; /* We consumed the exit. 
*/ 1037 + } 1038 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1039 + if (is_bad_offset(b_off)) 1040 + return -E2BIG; 1041 + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); 1042 + emit_instr(ctx, nop); 1043 + break; 1044 + } else if (BPF_OP(insn->code) == BPF_JSGE) { 1045 + emit_instr(ctx, slt, MIPS_R_AT, dst, src); 1046 + cmp_eq = true; 1047 + dst = MIPS_R_AT; 1048 + src = MIPS_R_ZERO; 1049 + } else if (BPF_OP(insn->code) == BPF_JGT) { 1050 + /* dst or src could be AT */ 1051 + emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); 1052 + emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1053 + /* SP known to be non-zero, movz becomes boolean not */ 1054 + emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); 1055 + emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); 1056 + emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); 1057 + cmp_eq = true; 1058 + dst = MIPS_R_AT; 1059 + src = MIPS_R_ZERO; 1060 + } else if (BPF_OP(insn->code) == BPF_JGE) { 1061 + emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1062 + cmp_eq = true; 1063 + dst = MIPS_R_AT; 1064 + src = MIPS_R_ZERO; 1065 + } else { /* JNE/JEQ case */ 1066 + cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); 1067 + } 1068 + jeq_common: 1069 + /* 1070 + * If the next insn is EXIT and we are jumping around 1071 + * only it, invert the sense of the compare and 1072 + * conditionally jump to the exit. Poor man's branch 1073 + * chaining.
1074 + */ 1075 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1076 + b_off = b_imm(exit_idx, ctx); 1077 + if (is_bad_offset(b_off)) { 1078 + target = j_target(ctx, exit_idx); 1079 + if (target == (unsigned int)-1) 1080 + return -E2BIG; 1081 + cmp_eq = !cmp_eq; 1082 + b_off = 4 * 3; 1083 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1084 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1085 + ctx->long_b_conversion = 1; 1086 + } 1087 + } 1088 + 1089 + if (cmp_eq) 1090 + emit_instr(ctx, bne, dst, src, b_off); 1091 + else 1092 + emit_instr(ctx, beq, dst, src, b_off); 1093 + emit_instr(ctx, nop); 1094 + if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { 1095 + emit_instr(ctx, j, target); 1096 + emit_instr(ctx, nop); 1097 + } 1098 + return 2; /* We consumed the exit. */ 1099 + } 1100 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1101 + if (is_bad_offset(b_off)) { 1102 + target = j_target(ctx, this_idx + insn->off + 1); 1103 + if (target == (unsigned int)-1) 1104 + return -E2BIG; 1105 + cmp_eq = !cmp_eq; 1106 + b_off = 4 * 3; 1107 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1108 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1109 + ctx->long_b_conversion = 1; 1110 + } 1111 + } 1112 + 1113 + if (cmp_eq) 1114 + emit_instr(ctx, beq, dst, src, b_off); 1115 + else 1116 + emit_instr(ctx, bne, dst, src, b_off); 1117 + emit_instr(ctx, nop); 1118 + if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { 1119 + emit_instr(ctx, j, target); 1120 + emit_instr(ctx, nop); 1121 + } 1122 + break; 1123 + case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ 1124 + case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ 1125 + cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); 1126 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1127 + if (dst < 0) 1128 + return dst; 1129 + 1130 + if (insn->imm == 0) { 1131 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1132 + b_off = b_imm(exit_idx, ctx); 1133 + if (is_bad_offset(b_off)) 1134 + return -E2BIG; 1135 + if (cmp_eq) 1136 + 
emit_instr(ctx, bltz, dst, b_off); 1137 + else 1138 + emit_instr(ctx, blez, dst, b_off); 1139 + emit_instr(ctx, nop); 1140 + return 2; /* We consumed the exit. */ 1141 + } 1142 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1143 + if (is_bad_offset(b_off)) 1144 + return -E2BIG; 1145 + if (cmp_eq) 1146 + emit_instr(ctx, bgez, dst, b_off); 1147 + else 1148 + emit_instr(ctx, bgtz, dst, b_off); 1149 + emit_instr(ctx, nop); 1150 + break; 1151 + } 1152 + /* 1153 + * only "LT" compare available, so we must use imm + 1 1154 + * to generate "GT" 1155 + */ 1156 + t64s = insn->imm + (cmp_eq ? 0 : 1); 1157 + if (t64s >= S16_MIN && t64s <= S16_MAX) { 1158 + emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); 1159 + src = MIPS_R_AT; 1160 + dst = MIPS_R_ZERO; 1161 + cmp_eq = true; 1162 + goto jeq_common; 1163 + } 1164 + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); 1165 + emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); 1166 + src = MIPS_R_AT; 1167 + dst = MIPS_R_ZERO; 1168 + cmp_eq = true; 1169 + goto jeq_common; 1170 + 1171 + case BPF_JMP | BPF_JGT | BPF_K: 1172 + case BPF_JMP | BPF_JGE | BPF_K: 1173 + cmp_eq = (BPF_OP(insn->code) == BPF_JGE); 1174 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1175 + if (dst < 0) 1176 + return dst; 1177 + /* 1178 + * only "LT" compare available, so we must use imm + 1 1179 + * to generate "GT" 1180 + */ 1181 + t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 
0 : 1); 1182 + if (t64s >= 0 && t64s <= S16_MAX) { 1183 + emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); 1184 + src = MIPS_R_AT; 1185 + dst = MIPS_R_ZERO; 1186 + cmp_eq = true; 1187 + goto jeq_common; 1188 + } 1189 + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); 1190 + emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); 1191 + src = MIPS_R_AT; 1192 + dst = MIPS_R_ZERO; 1193 + cmp_eq = true; 1194 + goto jeq_common; 1195 + 1196 + case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ 1197 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1198 + if (dst < 0) 1199 + return dst; 1200 + 1201 + if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { 1202 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1203 + b_off = b_imm(exit_idx, ctx); 1204 + if (is_bad_offset(b_off)) 1205 + return -E2BIG; 1206 + emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off); 1207 + emit_instr(ctx, nop); 1208 + return 2; /* We consumed the exit. */ 1209 + } 1210 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1211 + if (is_bad_offset(b_off)) 1212 + return -E2BIG; 1213 + emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off); 1214 + emit_instr(ctx, nop); 1215 + break; 1216 + } 1217 + t64 = (u32)insn->imm; 1218 + emit_const_to_reg(ctx, MIPS_R_AT, t64); 1219 + emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT); 1220 + src = MIPS_R_AT; 1221 + dst = MIPS_R_ZERO; 1222 + cmp_eq = false; 1223 + goto jeq_common; 1224 + 1225 + case BPF_JMP | BPF_JA: 1226 + /* 1227 + * Prefer relative branch for easier debugging, but 1228 + * fall back if needed. 
1229 + */ 1230 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1231 + if (is_bad_offset(b_off)) { 1232 + target = j_target(ctx, this_idx + insn->off + 1); 1233 + if (target == (unsigned int)-1) 1234 + return -E2BIG; 1235 + emit_instr(ctx, j, target); 1236 + } else { 1237 + emit_instr(ctx, b, b_off); 1238 + } 1239 + emit_instr(ctx, nop); 1240 + break; 1241 + case BPF_LD | BPF_DW | BPF_IMM: 1242 + if (insn->src_reg != 0) 1243 + return -EINVAL; 1244 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1245 + if (dst < 0) 1246 + return dst; 1247 + t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32); 1248 + emit_const_to_reg(ctx, dst, t64); 1249 + return 2; /* Double slot insn */ 1250 + 1251 + case BPF_JMP | BPF_CALL: 1252 + ctx->flags |= EBPF_SAVE_RA; 1253 + t64s = (s64)insn->imm + (s64)__bpf_call_base; 1254 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); 1255 + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1256 + /* delay slot */ 1257 + emit_instr(ctx, nop); 1258 + break; 1259 + 1260 + case BPF_JMP | BPF_TAIL_CALL: 1261 + if (emit_bpf_tail_call(ctx, this_idx)) 1262 + return -EINVAL; 1263 + break; 1264 + 1265 + case BPF_LD | BPF_B | BPF_ABS: 1266 + case BPF_LD | BPF_H | BPF_ABS: 1267 + case BPF_LD | BPF_W | BPF_ABS: 1268 + case BPF_LD | BPF_DW | BPF_ABS: 1269 + ctx->flags |= EBPF_SAVE_RA; 1270 + 1271 + gen_imm_to_reg(insn, MIPS_R_A1, ctx); 1272 + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); 1273 + 1274 + if (insn->imm < 0) { 1275 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper); 1276 + } else { 1277 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); 1278 + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); 1279 + } 1280 + goto ld_skb_common; 1281 + 1282 + case BPF_LD | BPF_B | BPF_IND: 1283 + case BPF_LD | BPF_H | BPF_IND: 1284 + case BPF_LD | BPF_W | BPF_IND: 1285 + case BPF_LD | BPF_DW | BPF_IND: 1286 + ctx->flags |= EBPF_SAVE_RA; 1287 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 
1288 + if (src < 0) 1289 + return src; 1290 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 1291 + if (ts == REG_32BIT_ZERO_EX) { 1292 + /* sign extend */ 1293 + emit_instr(ctx, sll, MIPS_R_A1, src, 0); 1294 + src = MIPS_R_A1; 1295 + } 1296 + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { 1297 + emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm); 1298 + } else { 1299 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1300 + emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src); 1301 + } 1302 + /* truncate to 32-bit int */ 1303 + emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0); 1304 + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); 1305 + emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO); 1306 + 1307 + emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper); 1308 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); 1309 + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); 1310 + emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT); 1311 + 1312 + ld_skb_common: 1313 + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1314 + /* delay slot move */ 1315 + emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO); 1316 + 1317 + /* Check the error value */ 1318 + b_off = b_imm(exit_idx, ctx); 1319 + if (is_bad_offset(b_off)) { 1320 + target = j_target(ctx, exit_idx); 1321 + if (target == (unsigned int)-1) 1322 + return -E2BIG; 1323 + 1324 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1325 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1326 + ctx->long_b_conversion = 1; 1327 + } 1328 + emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3); 1329 + emit_instr(ctx, nop); 1330 + emit_instr(ctx, j, target); 1331 + emit_instr(ctx, nop); 1332 + } else { 1333 + emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off); 1334 + emit_instr(ctx, nop); 1335 + } 1336 + 1337 + #ifdef __BIG_ENDIAN 1338 + need_swap = false; 1339 + #else 1340 + need_swap = true; 1341 + #endif 1342 + dst = MIPS_R_V0; 1343 + switch 
(BPF_SIZE(insn->code)) { 1344 + case BPF_B: 1345 + emit_instr(ctx, lbu, dst, 0, MIPS_R_V0); 1346 + break; 1347 + case BPF_H: 1348 + emit_instr(ctx, lhu, dst, 0, MIPS_R_V0); 1349 + if (need_swap) 1350 + emit_instr(ctx, wsbh, dst, dst); 1351 + break; 1352 + case BPF_W: 1353 + emit_instr(ctx, lw, dst, 0, MIPS_R_V0); 1354 + if (need_swap) { 1355 + emit_instr(ctx, wsbh, dst, dst); 1356 + emit_instr(ctx, rotr, dst, dst, 16); 1357 + } 1358 + break; 1359 + case BPF_DW: 1360 + emit_instr(ctx, ld, dst, 0, MIPS_R_V0); 1361 + if (need_swap) { 1362 + emit_instr(ctx, dsbh, dst, dst); 1363 + emit_instr(ctx, dshd, dst, dst); 1364 + } 1365 + break; 1366 + } 1367 + 1368 + break; 1369 + case BPF_ALU | BPF_END | BPF_FROM_BE: 1370 + case BPF_ALU | BPF_END | BPF_FROM_LE: 1371 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1372 + if (dst < 0) 1373 + return dst; 1374 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 1375 + if (insn->imm == 64 && td == REG_32BIT) 1376 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 1377 + 1378 + if (insn->imm != 64 && 1379 + (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { 1380 + /* sign extend */ 1381 + emit_instr(ctx, sll, dst, dst, 0); 1382 + } 1383 + 1384 + #ifdef __BIG_ENDIAN 1385 + need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE); 1386 + #else 1387 + need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE); 1388 + #endif 1389 + if (insn->imm == 16) { 1390 + if (need_swap) 1391 + emit_instr(ctx, wsbh, dst, dst); 1392 + emit_instr(ctx, andi, dst, dst, 0xffff); 1393 + } else if (insn->imm == 32) { 1394 + if (need_swap) { 1395 + emit_instr(ctx, wsbh, dst, dst); 1396 + emit_instr(ctx, rotr, dst, dst, 16); 1397 + } 1398 + } else { /* 64-bit*/ 1399 + if (need_swap) { 1400 + emit_instr(ctx, dsbh, dst, dst); 1401 + emit_instr(ctx, dshd, dst, dst); 1402 + } 1403 + } 1404 + break; 1405 + 1406 + case BPF_ST | BPF_B | BPF_MEM: 1407 + case BPF_ST | BPF_H | BPF_MEM: 1408 + case BPF_ST | BPF_W | BPF_MEM: 1409 + case BPF_ST | BPF_DW | BPF_MEM: 1410 + if (insn->dst_reg 
== BPF_REG_10) { 1411 + ctx->flags |= EBPF_SEEN_FP; 1412 + dst = MIPS_R_SP; 1413 + mem_off = insn->off + MAX_BPF_STACK; 1414 + } else { 1415 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1416 + if (dst < 0) 1417 + return dst; 1418 + mem_off = insn->off; 1419 + } 1420 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1421 + switch (BPF_SIZE(insn->code)) { 1422 + case BPF_B: 1423 + emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst); 1424 + break; 1425 + case BPF_H: 1426 + emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); 1427 + break; 1428 + case BPF_W: 1429 + emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst); 1430 + break; 1431 + case BPF_DW: 1432 + emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst); 1433 + break; 1434 + } 1435 + break; 1436 + 1437 + case BPF_LDX | BPF_B | BPF_MEM: 1438 + case BPF_LDX | BPF_H | BPF_MEM: 1439 + case BPF_LDX | BPF_W | BPF_MEM: 1440 + case BPF_LDX | BPF_DW | BPF_MEM: 1441 + if (insn->src_reg == BPF_REG_10) { 1442 + ctx->flags |= EBPF_SEEN_FP; 1443 + src = MIPS_R_SP; 1444 + mem_off = insn->off + MAX_BPF_STACK; 1445 + } else { 1446 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1447 + if (src < 0) 1448 + return src; 1449 + mem_off = insn->off; 1450 + } 1451 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1452 + if (dst < 0) 1453 + return dst; 1454 + switch (BPF_SIZE(insn->code)) { 1455 + case BPF_B: 1456 + emit_instr(ctx, lbu, dst, mem_off, src); 1457 + break; 1458 + case BPF_H: 1459 + emit_instr(ctx, lhu, dst, mem_off, src); 1460 + break; 1461 + case BPF_W: 1462 + emit_instr(ctx, lw, dst, mem_off, src); 1463 + break; 1464 + case BPF_DW: 1465 + emit_instr(ctx, ld, dst, mem_off, src); 1466 + break; 1467 + } 1468 + break; 1469 + 1470 + case BPF_STX | BPF_B | BPF_MEM: 1471 + case BPF_STX | BPF_H | BPF_MEM: 1472 + case BPF_STX | BPF_W | BPF_MEM: 1473 + case BPF_STX | BPF_DW | BPF_MEM: 1474 + case BPF_STX | BPF_W | BPF_XADD: 1475 + case BPF_STX | BPF_DW | BPF_XADD: 1476 + if (insn->dst_reg == BPF_REG_10) { 1477 + ctx->flags |= EBPF_SEEN_FP; 1478 + dst = MIPS_R_SP; 1479 
+ mem_off = insn->off + MAX_BPF_STACK; 1480 + } else { 1481 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1482 + if (dst < 0) 1483 + return dst; 1484 + mem_off = insn->off; 1485 + } 1486 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1487 + if (src < 0) 1488 + return dst; 1489 + if (BPF_MODE(insn->code) == BPF_XADD) { 1490 + switch (BPF_SIZE(insn->code)) { 1491 + case BPF_W: 1492 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1493 + emit_instr(ctx, sll, MIPS_R_AT, src, 0); 1494 + src = MIPS_R_AT; 1495 + } 1496 + emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst); 1497 + emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src); 1498 + emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst); 1499 + /* 1500 + * On failure back up to LL (-4 1501 + * instructions of 4 bytes each 1502 + */ 1503 + emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); 1504 + emit_instr(ctx, nop); 1505 + break; 1506 + case BPF_DW: 1507 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1508 + emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); 1509 + emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); 1510 + src = MIPS_R_AT; 1511 + } 1512 + emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst); 1513 + emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src); 1514 + emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst); 1515 + emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); 1516 + emit_instr(ctx, nop); 1517 + break; 1518 + } 1519 + } else { /* BPF_MEM */ 1520 + switch (BPF_SIZE(insn->code)) { 1521 + case BPF_B: 1522 + emit_instr(ctx, sb, src, mem_off, dst); 1523 + break; 1524 + case BPF_H: 1525 + emit_instr(ctx, sh, src, mem_off, dst); 1526 + break; 1527 + case BPF_W: 1528 + emit_instr(ctx, sw, src, mem_off, dst); 1529 + break; 1530 + case BPF_DW: 1531 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1532 + emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); 1533 + emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); 1534 + src = MIPS_R_AT; 1535 + } 1536 + 
emit_instr(ctx, sd, src, mem_off, dst); 1537 + break; 1538 + } 1539 + } 1540 + break; 1541 + 1542 + default: 1543 + pr_err("NOT HANDLED %d - (%02x)\n", 1544 + this_idx, (unsigned int)insn->code); 1545 + return -EINVAL; 1546 + } 1547 + return 1; 1548 + } 1549 + 1550 + #define RVT_VISITED_MASK 0xc000000000000000ull 1551 + #define RVT_FALL_THROUGH 0x4000000000000000ull 1552 + #define RVT_BRANCH_TAKEN 0x8000000000000000ull 1553 + #define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN) 1554 + 1555 + static int build_int_body(struct jit_ctx *ctx) 1556 + { 1557 + const struct bpf_prog *prog = ctx->skf; 1558 + const struct bpf_insn *insn; 1559 + int i, r; 1560 + 1561 + for (i = 0; i < prog->len; ) { 1562 + insn = prog->insnsi + i; 1563 + if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) { 1564 + /* dead instruction, don't emit it. */ 1565 + i++; 1566 + continue; 1567 + } 1568 + 1569 + if (ctx->target == NULL) 1570 + ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4); 1571 + 1572 + r = build_one_insn(insn, ctx, i, prog->len); 1573 + if (r < 0) 1574 + return r; 1575 + i += r; 1576 + } 1577 + /* epilogue offset */ 1578 + if (ctx->target == NULL) 1579 + ctx->offsets[i] = ctx->idx * 4; 1580 + 1581 + /* 1582 + * All exits have an offset of the epilogue, some offsets may 1583 + * not have been set due to branch-around threading, so set 1584 + * them now.
1585 + */ 1586 + if (ctx->target == NULL) 1587 + for (i = 0; i < prog->len; i++) { 1588 + insn = prog->insnsi + i; 1589 + if (insn->code == (BPF_JMP | BPF_EXIT)) 1590 + ctx->offsets[i] = ctx->idx * 4; 1591 + } 1592 + return 0; 1593 + } 1594 + 1595 + /* return the last idx processed, or negative for error */ 1596 + static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, 1597 + int start_idx, bool follow_taken) 1598 + { 1599 + const struct bpf_prog *prog = ctx->skf; 1600 + const struct bpf_insn *insn; 1601 + u64 exit_rvt = initial_rvt; 1602 + u64 *rvt = ctx->reg_val_types; 1603 + int idx; 1604 + int reg; 1605 + 1606 + for (idx = start_idx; idx < prog->len; idx++) { 1607 + rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt; 1608 + insn = prog->insnsi + idx; 1609 + switch (BPF_CLASS(insn->code)) { 1610 + case BPF_ALU: 1611 + switch (BPF_OP(insn->code)) { 1612 + case BPF_ADD: 1613 + case BPF_SUB: 1614 + case BPF_MUL: 1615 + case BPF_DIV: 1616 + case BPF_OR: 1617 + case BPF_AND: 1618 + case BPF_LSH: 1619 + case BPF_RSH: 1620 + case BPF_NEG: 1621 + case BPF_MOD: 1622 + case BPF_XOR: 1623 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1624 + break; 1625 + case BPF_MOV: 1626 + if (BPF_SRC(insn->code)) { 1627 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1628 + } else { 1629 + /* IMM to REG move*/ 1630 + if (insn->imm >= 0) 1631 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1632 + else 1633 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1634 + } 1635 + break; 1636 + case BPF_END: 1637 + if (insn->imm == 64) 1638 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1639 + else if (insn->imm == 32) 1640 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1641 + else /* insn->imm == 16 */ 1642 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1643 + break; 1644 + } 1645 + rvt[idx] |= RVT_DONE; 1646 + break; 1647 + case BPF_ALU64: 1648 + switch (BPF_OP(insn->code)) { 1649 + case BPF_MOV: 1650 + 
if (BPF_SRC(insn->code)) { 1651 + /* REG to REG move*/ 1652 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1653 + } else { 1654 + /* IMM to REG move*/ 1655 + if (insn->imm >= 0) 1656 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1657 + else 1658 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); 1659 + } 1660 + break; 1661 + default: 1662 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1663 + } 1664 + rvt[idx] |= RVT_DONE; 1665 + break; 1666 + case BPF_LD: 1667 + switch (BPF_SIZE(insn->code)) { 1668 + case BPF_DW: 1669 + if (BPF_MODE(insn->code) == BPF_IMM) { 1670 + s64 val; 1671 + 1672 + val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32)); 1673 + if (val > 0 && val <= S32_MAX) 1674 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1675 + else if (val >= S32_MIN && val <= S32_MAX) 1676 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); 1677 + else 1678 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1679 + rvt[idx] |= RVT_DONE; 1680 + idx++; 1681 + } else { 1682 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1683 + } 1684 + break; 1685 + case BPF_B: 1686 + case BPF_H: 1687 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1688 + break; 1689 + case BPF_W: 1690 + if (BPF_MODE(insn->code) == BPF_IMM) 1691 + set_reg_val_type(&exit_rvt, insn->dst_reg, 1692 + insn->imm >= 0 ? 
REG_32BIT_POS : REG_32BIT); 1693 + else 1694 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1695 + break; 1696 + } 1697 + rvt[idx] |= RVT_DONE; 1698 + break; 1699 + case BPF_LDX: 1700 + switch (BPF_SIZE(insn->code)) { 1701 + case BPF_DW: 1702 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1703 + break; 1704 + case BPF_B: 1705 + case BPF_H: 1706 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1707 + break; 1708 + case BPF_W: 1709 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1710 + break; 1711 + } 1712 + rvt[idx] |= RVT_DONE; 1713 + break; 1714 + case BPF_JMP: 1715 + switch (BPF_OP(insn->code)) { 1716 + case BPF_EXIT: 1717 + rvt[idx] = RVT_DONE | exit_rvt; 1718 + rvt[prog->len] = exit_rvt; 1719 + return idx; 1720 + case BPF_JA: 1721 + rvt[idx] |= RVT_DONE; 1722 + idx += insn->off; 1723 + break; 1724 + case BPF_JEQ: 1725 + case BPF_JGT: 1726 + case BPF_JGE: 1727 + case BPF_JSET: 1728 + case BPF_JNE: 1729 + case BPF_JSGT: 1730 + case BPF_JSGE: 1731 + if (follow_taken) { 1732 + rvt[idx] |= RVT_BRANCH_TAKEN; 1733 + idx += insn->off; 1734 + follow_taken = false; 1735 + } else { 1736 + rvt[idx] |= RVT_FALL_THROUGH; 1737 + } 1738 + break; 1739 + case BPF_CALL: 1740 + set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT); 1741 + /* Upon call return, argument registers are clobbered. */ 1742 + for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++) 1743 + set_reg_val_type(&exit_rvt, reg, REG_64BIT); 1744 + 1745 + rvt[idx] |= RVT_DONE; 1746 + break; 1747 + default: 1748 + WARN(1, "Unhandled BPF_JMP case.\n"); 1749 + rvt[idx] |= RVT_DONE; 1750 + break; 1751 + } 1752 + break; 1753 + default: 1754 + rvt[idx] |= RVT_DONE; 1755 + break; 1756 + } 1757 + } 1758 + return idx; 1759 + } 1760 + 1761 + /* 1762 + * Track the value range (i.e. 32-bit vs. 64-bit) of each register at 1763 + * each eBPF insn. This allows unneeded sign and zero extension 1764 + * operations to be omitted. 
1765 + * 1766 + * Doesn't handle yet confluence of control paths with conflicting 1767 + * ranges, but it is good enough for most sane code. 1768 + */ 1769 + static int reg_val_propagate(struct jit_ctx *ctx) 1770 + { 1771 + const struct bpf_prog *prog = ctx->skf; 1772 + u64 exit_rvt; 1773 + int reg; 1774 + int i; 1775 + 1776 + /* 1777 + * 11 registers * 3 bits/reg leaves top bits free for other 1778 + * uses. Bit-62..63 used to see if we have visited an insn. 1779 + */ 1780 + exit_rvt = 0; 1781 + 1782 + /* Upon entry, argument registers are 64-bit. */ 1783 + for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++) 1784 + set_reg_val_type(&exit_rvt, reg, REG_64BIT); 1785 + 1786 + /* 1787 + * First follow all conditional branches on the fall-through 1788 + * edge of control flow.. 1789 + */ 1790 + reg_val_propagate_range(ctx, exit_rvt, 0, false); 1791 + restart_search: 1792 + /* 1793 + * Then repeatedly find the first conditional branch where 1794 + * both edges of control flow have not been taken, and follow 1795 + * the branch taken edge. We will end up restarting the 1796 + * search once per conditional branch insn. 1797 + */ 1798 + for (i = 0; i < prog->len; i++) { 1799 + u64 rvt = ctx->reg_val_types[i]; 1800 + 1801 + if ((rvt & RVT_VISITED_MASK) == RVT_DONE || 1802 + (rvt & RVT_VISITED_MASK) == 0) 1803 + continue; 1804 + if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) { 1805 + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true); 1806 + } else { /* RVT_BRANCH_TAKEN */ 1807 + WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n"); 1808 + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false); 1809 + } 1810 + goto restart_search; 1811 + } 1812 + /* 1813 + * Eventually all conditional branches have been followed on 1814 + * both branches and we are done. Any insn that has not been 1815 + * visited at this point is dead. 
1816 + */ 1817 + 1818 + return 0; 1819 + } 1820 + 1821 + static void jit_fill_hole(void *area, unsigned int size) 1822 + { 1823 + u32 *p; 1824 + 1825 + /* We are guaranteed to have aligned memory. */ 1826 + for (p = area; size >= sizeof(u32); size -= sizeof(u32)) 1827 + uasm_i_break(&p, BRK_BUG); /* Increments p */ 1828 + } 1829 + 1830 + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1831 + { 1832 + struct bpf_prog *orig_prog = prog; 1833 + bool tmp_blinded = false; 1834 + struct bpf_prog *tmp; 1835 + struct bpf_binary_header *header = NULL; 1836 + struct jit_ctx ctx; 1837 + unsigned int image_size; 1838 + u8 *image_ptr; 1839 + 1840 + if (!bpf_jit_enable || !cpu_has_mips64r2) 1841 + return prog; 1842 + 1843 + tmp = bpf_jit_blind_constants(prog); 1844 + /* If blinding was requested and we failed during blinding, 1845 + * we must fall back to the interpreter. 1846 + */ 1847 + if (IS_ERR(tmp)) 1848 + return orig_prog; 1849 + if (tmp != prog) { 1850 + tmp_blinded = true; 1851 + prog = tmp; 1852 + } 1853 + 1854 + memset(&ctx, 0, sizeof(ctx)); 1855 + 1856 + ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); 1857 + if (ctx.offsets == NULL) 1858 + goto out_err; 1859 + 1860 + ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL); 1861 + if (ctx.reg_val_types == NULL) 1862 + goto out_err; 1863 + 1864 + ctx.skf = prog; 1865 + 1866 + if (reg_val_propagate(&ctx)) 1867 + goto out_err; 1868 + 1869 + /* 1870 + * First pass discovers used resources and instruction offsets 1871 + * assuming short branches are used. 1872 + */ 1873 + if (build_int_body(&ctx)) 1874 + goto out_err; 1875 + 1876 + /* 1877 + * If no calls are made (EBPF_SAVE_RA), then tail call count 1878 + * in $v1, else we must save in n$s4. 
1879 + */ 1880 + if (ctx.flags & EBPF_SEEN_TC) { 1881 + if (ctx.flags & EBPF_SAVE_RA) 1882 + ctx.flags |= EBPF_SAVE_S4; 1883 + else 1884 + ctx.flags |= EBPF_TCC_IN_V1; 1885 + } 1886 + 1887 + /* 1888 + * Second pass generates offsets, if any branches are out of 1889 + * range a jump-around long sequence is generated, and we have 1890 + * to try again from the beginning to generate the new 1891 + * offsets. This is done until no additional conversions are 1892 + * necessary. 1893 + */ 1894 + do { 1895 + ctx.idx = 0; 1896 + ctx.gen_b_offsets = 1; 1897 + ctx.long_b_conversion = 0; 1898 + if (gen_int_prologue(&ctx)) 1899 + goto out_err; 1900 + if (build_int_body(&ctx)) 1901 + goto out_err; 1902 + if (build_int_epilogue(&ctx, MIPS_R_RA)) 1903 + goto out_err; 1904 + } while (ctx.long_b_conversion); 1905 + 1906 + image_size = 4 * ctx.idx; 1907 + 1908 + header = bpf_jit_binary_alloc(image_size, &image_ptr, 1909 + sizeof(u32), jit_fill_hole); 1910 + if (header == NULL) 1911 + goto out_err; 1912 + 1913 + ctx.target = (u32 *)image_ptr; 1914 + 1915 + /* Third pass generates the code */ 1916 + ctx.idx = 0; 1917 + if (gen_int_prologue(&ctx)) 1918 + goto out_err; 1919 + if (build_int_body(&ctx)) 1920 + goto out_err; 1921 + if (build_int_epilogue(&ctx, MIPS_R_RA)) 1922 + goto out_err; 1923 + 1924 + /* Update the icache */ 1925 + flush_icache_range((unsigned long)ctx.target, 1926 + (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); 1927 + 1928 + if (bpf_jit_enable > 1) 1929 + /* Dump JIT code */ 1930 + bpf_jit_dump(prog->len, image_size, 2, ctx.target); 1931 + 1932 + bpf_jit_binary_lock_ro(header); 1933 + prog->bpf_func = (void *)ctx.target; 1934 + prog->jited = 1; 1935 + prog->jited_len = image_size; 1936 + out_normal: 1937 + if (tmp_blinded) 1938 + bpf_jit_prog_release_other(prog, prog == orig_prog ? 
1939 + tmp : orig_prog); 1940 + kfree(ctx.offsets); 1941 + kfree(ctx.reg_val_types); 1942 + 1943 + return prog; 1944 + 1945 + out_err: 1946 + prog = orig_prog; 1947 + if (header) 1948 + bpf_jit_binary_free(header); 1949 + goto out_normal; 1950 + }
+2 -1
arch/s390/net/bpf_jit_comp.c
··· 1253 1253 insn_count = bpf_jit_insn(jit, fp, i); 1254 1254 if (insn_count < 0) 1255 1255 return -1; 1256 - jit->addrs[i + 1] = jit->prg; /* Next instruction address */ 1256 + /* Next instruction address */ 1257 + jit->addrs[i + insn_count] = jit->prg; 1257 1258 } 1258 1259 bpf_jit_epilogue(jit); 1259 1260
+13 -15
drivers/isdn/hysdn/hysdn_proclog.c
··· 44 44 char log_name[15]; /* log filename */ 45 45 struct log_data *log_head, *log_tail; /* head and tail for queue */ 46 46 int if_used; /* open count for interface */ 47 - int volatile del_lock; /* lock for delete operations */ 48 47 unsigned char logtmp[LOG_MAX_LINELEN]; 49 48 wait_queue_head_t rd_queue; 50 49 }; ··· 101 102 { 102 103 struct log_data *ib; 103 104 struct procdata *pd = card->proclog; 104 - int i; 105 105 unsigned long flags; 106 106 107 107 if (!pd) ··· 124 126 else 125 127 pd->log_tail->next = ib; /* follows existing messages */ 126 128 pd->log_tail = ib; /* new tail */ 127 - i = pd->del_lock++; /* get lock state */ 128 - spin_unlock_irqrestore(&card->hysdn_lock, flags); 129 129 130 130 /* delete old entrys */ 131 - if (!i) 132 - while (pd->log_head->next) { 133 - if ((pd->log_head->usage_cnt <= 0) && 134 - (pd->log_head->next->usage_cnt <= 0)) { 135 - ib = pd->log_head; 136 - pd->log_head = pd->log_head->next; 137 - kfree(ib); 138 - } else 139 - break; 140 - } /* pd->log_head->next */ 141 - pd->del_lock--; /* release lock level */ 131 + while (pd->log_head->next) { 132 + if ((pd->log_head->usage_cnt <= 0) && 133 + (pd->log_head->next->usage_cnt <= 0)) { 134 + ib = pd->log_head; 135 + pd->log_head = pd->log_head->next; 136 + kfree(ib); 137 + } else { 138 + break; 139 + } 140 + } /* pd->log_head->next */ 141 + 142 + spin_unlock_irqrestore(&card->hysdn_lock, flags); 143 + 142 144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ 143 145 } /* put_log_buffer */ 144 146
+38
drivers/net/dsa/mt7530.c
··· 625 625 * all finished. 626 626 */ 627 627 mt7623_pad_clk_setup(ds); 628 + } else { 629 + u16 lcl_adv = 0, rmt_adv = 0; 630 + u8 flowctrl; 631 + u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; 632 + 633 + switch (phydev->speed) { 634 + case SPEED_1000: 635 + mcr |= PMCR_FORCE_SPEED_1000; 636 + break; 637 + case SPEED_100: 638 + mcr |= PMCR_FORCE_SPEED_100; 639 + break; 640 + }; 641 + 642 + if (phydev->link) 643 + mcr |= PMCR_FORCE_LNK; 644 + 645 + if (phydev->duplex) { 646 + mcr |= PMCR_FORCE_FDX; 647 + 648 + if (phydev->pause) 649 + rmt_adv = LPA_PAUSE_CAP; 650 + if (phydev->asym_pause) 651 + rmt_adv |= LPA_PAUSE_ASYM; 652 + 653 + if (phydev->advertising & ADVERTISED_Pause) 654 + lcl_adv |= ADVERTISE_PAUSE_CAP; 655 + if (phydev->advertising & ADVERTISED_Asym_Pause) 656 + lcl_adv |= ADVERTISE_PAUSE_ASYM; 657 + 658 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 659 + 660 + if (flowctrl & FLOW_CTRL_TX) 661 + mcr |= PMCR_TX_FC_EN; 662 + if (flowctrl & FLOW_CTRL_RX) 663 + mcr |= PMCR_RX_FC_EN; 664 + } 665 + mt7530_write(priv, MT7530_PMCR_P(port), mcr); 628 666 } 629 667 } 630 668
+1
drivers/net/dsa/mt7530.h
··· 151 151 #define PMCR_TX_FC_EN BIT(5) 152 152 #define PMCR_RX_FC_EN BIT(4) 153 153 #define PMCR_FORCE_SPEED_1000 BIT(3) 154 + #define PMCR_FORCE_SPEED_100 BIT(2) 154 155 #define PMCR_FORCE_FDX BIT(1) 155 156 #define PMCR_FORCE_LNK BIT(0) 156 157 #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+3 -3
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1785 1785 1786 1786 xgene_enet_gpiod_get(pdata); 1787 1787 1788 - if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1789 - pdata->clk = devm_clk_get(&pdev->dev, NULL); 1790 - if (IS_ERR(pdata->clk)) { 1788 + pdata->clk = devm_clk_get(&pdev->dev, NULL); 1789 + if (IS_ERR(pdata->clk)) { 1790 + if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1791 1791 /* Abort if the clock is defined but couldn't be 1792 1792 * retrived. Always abort if the clock is missing on 1793 1793 * DT system as the driver can't cope with this case.
+1
drivers/net/ethernet/broadcom/b44.c
··· 2368 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2369 2369 2370 2370 spin_lock_init(&bp->lock); 2371 + u64_stats_init(&bp->hw_stats.syncp); 2371 2372 2372 2373 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2373 2374 bp->tx_pending = B44_DEF_TX_RING_PENDING;
+14 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 111 111 static void send_request_unmap(struct ibmvnic_adapter *, u8); 112 112 static void send_login(struct ibmvnic_adapter *adapter); 113 113 static void send_cap_queries(struct ibmvnic_adapter *adapter); 114 + static int init_sub_crqs(struct ibmvnic_adapter *); 114 115 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 115 116 static int ibmvnic_init(struct ibmvnic_adapter *); 116 117 static void release_crq_queue(struct ibmvnic_adapter *); ··· 652 651 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 653 652 unsigned long timeout = msecs_to_jiffies(30000); 654 653 struct device *dev = &adapter->vdev->dev; 654 + int rc; 655 655 656 656 do { 657 657 if (adapter->renegotiate) { ··· 664 662 if (!wait_for_completion_timeout(&adapter->init_done, 665 663 timeout)) { 666 664 dev_err(dev, "Capabilities query timeout\n"); 665 + return -1; 666 + } 667 + rc = init_sub_crqs(adapter); 668 + if (rc) { 669 + dev_err(dev, 670 + "Initialization of SCRQ's failed\n"); 671 + return -1; 672 + } 673 + rc = init_sub_crq_irqs(adapter); 674 + if (rc) { 675 + dev_err(dev, 676 + "Initialization of SCRQ's irqs failed\n"); 667 677 return -1; 668 678 } 669 679 } ··· 3018 3004 *req_value, 3019 3005 (long int)be64_to_cpu(crq->request_capability_rsp. 3020 3006 number), name); 3021 - release_sub_crqs(adapter); 3022 3007 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3023 3008 ibmvnic_send_req_caps(adapter, 1); 3024 3009 return;
+2
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1113 1113 if (!tx_ring->tx_bi) 1114 1114 goto err; 1115 1115 1116 + u64_stats_init(&tx_ring->syncp); 1117 + 1116 1118 /* round up to nearest 4K */ 1117 1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1118 1120 /* add u32 for head writeback, align after this takes care of
+4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 2988 2988 if (!tx_ring->tx_buffer_info) 2989 2989 goto err; 2990 2990 2991 + u64_stats_init(&tx_ring->syncp); 2992 + 2991 2993 /* round up to nearest 4K */ 2992 2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2993 2995 tx_ring->size = ALIGN(tx_ring->size, 4096); ··· 3047 3045 rx_ring->rx_buffer_info = vzalloc(size); 3048 3046 if (!rx_ring->rx_buffer_info) 3049 3047 goto err; 3048 + 3049 + u64_stats_init(&rx_ring->syncp); 3050 3050 3051 3051 /* Round up to nearest 4K */ 3052 3052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+8 -7
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 223 223 struct ethtool_wolinfo *wol) 224 224 { 225 225 struct mlx4_en_priv *priv = netdev_priv(netdev); 226 + struct mlx4_caps *caps = &priv->mdev->dev->caps; 226 227 int err = 0; 227 228 u64 config = 0; 228 229 u64 mask; ··· 236 235 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 237 236 MLX4_DEV_CAP_FLAG_WOL_PORT2; 238 237 239 - if (!(priv->mdev->dev->caps.flags & mask)) { 238 + if (!(caps->flags & mask)) { 240 239 wol->supported = 0; 241 240 wol->wolopts = 0; 242 241 return; 243 242 } 243 + 244 + if (caps->wol_port[priv->port]) 245 + wol->supported = WAKE_MAGIC; 246 + else 247 + wol->supported = 0; 244 248 245 249 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 246 250 if (err) { ··· 253 247 return; 254 248 } 255 249 256 - if (config & MLX4_EN_WOL_MAGIC) 257 - wol->supported = WAKE_MAGIC; 258 - else 259 - wol->supported = 0; 260 - 261 - if (config & MLX4_EN_WOL_ENABLED) 250 + if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC)) 262 251 wol->wolopts = WAKE_MAGIC; 263 252 else 264 253 wol->wolopts = 0;
+18 -11
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 574 574 * header, the HW adds it. To address that, we are subtracting the pseudo 575 575 * header checksum from the checksum value provided by the HW. 576 576 */ 577 - static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 578 - struct iphdr *iph) 577 + static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 578 + struct iphdr *iph) 579 579 { 580 580 __u16 length_for_csum = 0; 581 581 __wsum csum_pseudo_header = 0; 582 + __u8 ipproto = iph->protocol; 583 + 584 + if (unlikely(ipproto == IPPROTO_SCTP)) 585 + return -1; 582 586 583 587 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 584 588 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 585 - length_for_csum, iph->protocol, 0); 589 + length_for_csum, ipproto, 0); 586 590 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 591 + return 0; 587 592 } 588 593 589 594 #if IS_ENABLED(CONFIG_IPV6) ··· 599 594 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 600 595 struct ipv6hdr *ipv6h) 601 596 { 597 + __u8 nexthdr = ipv6h->nexthdr; 602 598 __wsum csum_pseudo_hdr = 0; 603 599 604 - if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || 605 - ipv6h->nexthdr == IPPROTO_HOPOPTS)) 600 + if (unlikely(nexthdr == IPPROTO_FRAGMENT || 601 + nexthdr == IPPROTO_HOPOPTS || 602 + nexthdr == IPPROTO_SCTP)) 606 603 return -1; 607 - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 604 + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); 608 605 609 606 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 610 607 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 611 608 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 612 - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); 609 + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, 610 + (__force __wsum)htons(nexthdr)); 613 611 614 612 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 615 613 skb->csum = 
csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); ··· 635 627 } 636 628 637 629 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 638 - get_fixed_ipv4_csum(hw_checksum, skb, hdr); 630 + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); 639 631 #if IS_ENABLED(CONFIG_IPV6) 640 - else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 641 - if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) 642 - return -1; 632 + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 633 + return get_fixed_ipv6_csum(hw_checksum, skb, hdr); 643 634 #endif 644 635 return 0; 645 636 }
+7 -2
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 159 159 [32] = "Loopback source checks support", 160 160 [33] = "RoCEv2 support", 161 161 [34] = "DMFS Sniffer support (UC & MC)", 162 - [35] = "QinQ VST mode support", 163 - [36] = "sl to vl mapping table change event support" 162 + [35] = "Diag counters per port", 163 + [36] = "QinQ VST mode support", 164 + [37] = "sl to vl mapping table change event support", 164 165 }; 165 166 int i; 166 167 ··· 765 764 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 766 765 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 767 766 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 767 + #define QUERY_DEV_CAP_WOL_OFFSET 0x43 768 768 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 769 769 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 770 770 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 ··· 922 920 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 923 921 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 924 922 dev_cap->flags = flags | (u64)ext_flags << 32; 923 + MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); 924 + dev_cap->wol_port[1] = !!(field & 0x20); 925 + dev_cap->wol_port[2] = !!(field & 0x40); 925 926 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 926 927 dev_cap->reserved_uars = field >> 4; 927 928 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
+1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 129 129 u32 dmfs_high_rate_qpn_range; 130 130 struct mlx4_rate_limit_caps rl_caps; 131 131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; 132 + bool wol_port[MLX4_MAX_PORTS + 1]; 132 133 }; 133 134 134 135 struct mlx4_func_cap {
+2
drivers/net/ethernet/mellanox/mlx4/main.c
··· 424 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 425 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 426 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 427 + dev->caps.wol_port[1] = dev_cap->wol_port[1]; 428 + dev->caps.wol_port[2] = dev_cap->wol_port[2]; 427 429 428 430 /* Save uar page shift */ 429 431 if (!mlx4_is_slave(dev)) {
+24 -13
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 626 626 627 627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 628 628 orig_dev); 629 - if (WARN_ON(!bridge_port)) 630 - return -EINVAL; 629 + if (!bridge_port) 630 + return 0; 631 631 632 632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 633 633 MLXSW_SP_FLOOD_TYPE_UC, ··· 711 711 712 712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 713 713 orig_dev); 714 - if (WARN_ON(!bridge_port)) 715 - return -EINVAL; 714 + if (!bridge_port) 715 + return 0; 716 716 717 717 if (!bridge_port->bridge_device->multicast_enabled) 718 718 return 0; ··· 1283 1283 return 0; 1284 1284 1285 1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1286 - if (WARN_ON(!bridge_port)) 1287 - return -EINVAL; 1286 + if (!bridge_port) 1287 + return 0; 1288 1288 1289 1289 bridge_device = bridge_port->bridge_device; 1290 1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1291 1291 bridge_device, 1292 1292 mdb->vid); 1293 - if (WARN_ON(!mlxsw_sp_port_vlan)) 1294 - return -EINVAL; 1293 + if (!mlxsw_sp_port_vlan) 1294 + return 0; 1295 1295 1296 1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1297 1297 ··· 1407 1407 int err = 0; 1408 1408 1409 1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1410 - if (WARN_ON(!bridge_port)) 1411 - return -EINVAL; 1410 + if (!bridge_port) 1411 + return 0; 1412 1412 1413 1413 bridge_device = bridge_port->bridge_device; 1414 1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1415 1415 bridge_device, 1416 1416 mdb->vid); 1417 - if (WARN_ON(!mlxsw_sp_port_vlan)) 1418 - return -EINVAL; 1417 + if (!mlxsw_sp_port_vlan) 1418 + return 0; 1419 1419 1420 1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1421 1421 ··· 1974 1974 1975 1975 } 1976 1976 1977 + static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) 1978 + { 1979 + struct mlxsw_sp_mid *mid, *tmp; 1980 + 1981 + 
list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { 1982 + list_del(&mid->list); 1983 + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); 1984 + kfree(mid); 1985 + } 1986 + } 1987 + 1977 1988 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1978 1989 { 1979 1990 struct mlxsw_sp_bridge *bridge; ··· 2007 1996 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2008 1997 { 2009 1998 mlxsw_sp_fdb_fini(mlxsw_sp); 2010 - WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); 1999 + mlxsw_sp_mids_fini(mlxsw_sp); 2011 2000 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2012 2001 kfree(mlxsw_sp->bridge); 2013 2002 }
+2
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 513 513 tx_ring->idx = idx; 514 514 tx_ring->r_vec = r_vec; 515 515 tx_ring->is_xdp = is_xdp; 516 + u64_stats_init(&tx_ring->r_vec->tx_sync); 516 517 517 518 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 518 519 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); ··· 533 532 534 533 rx_ring->idx = idx; 535 534 rx_ring->r_vec = r_vec; 535 + u64_stats_init(&rx_ring->r_vec->rx_sync); 536 536 537 537 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 538 538 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+1 -1
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 253 253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 254 254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 255 255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 256 - if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 256 + if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) 257 257 goto err; 258 258 259 259 return 0;
+95 -16
drivers/net/ethernet/ti/cpts.c
··· 31 31 32 32 #include "cpts.h" 33 33 34 + #define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ 35 + 36 + struct cpts_skb_cb_data { 37 + unsigned long tmo; 38 + }; 39 + 34 40 #define cpts_read32(c, r) readl_relaxed(&c->reg->r) 35 41 #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) 42 + 43 + static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 44 + u16 ts_seqid, u8 ts_msgtype); 36 45 37 46 static int event_expired(struct cpts_event *event) 38 47 { ··· 86 77 return removed ? 0 : -1; 87 78 } 88 79 80 + static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) 81 + { 82 + struct sk_buff *skb, *tmp; 83 + u16 seqid; 84 + u8 mtype; 85 + bool found = false; 86 + 87 + mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; 88 + seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; 89 + 90 + /* no need to grab txq.lock as access is always done under cpts->lock */ 91 + skb_queue_walk_safe(&cpts->txq, skb, tmp) { 92 + struct skb_shared_hwtstamps ssh; 93 + unsigned int class = ptp_classify_raw(skb); 94 + struct cpts_skb_cb_data *skb_cb = 95 + (struct cpts_skb_cb_data *)skb->cb; 96 + 97 + if (cpts_match(skb, class, seqid, mtype)) { 98 + u64 ns = timecounter_cyc2time(&cpts->tc, event->low); 99 + 100 + memset(&ssh, 0, sizeof(ssh)); 101 + ssh.hwtstamp = ns_to_ktime(ns); 102 + skb_tstamp_tx(skb, &ssh); 103 + found = true; 104 + __skb_unlink(skb, &cpts->txq); 105 + dev_consume_skb_any(skb); 106 + dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", 107 + mtype, seqid); 108 + } else if (time_after(jiffies, skb_cb->tmo)) { 109 + /* timeout any expired skbs over 1s */ 110 + dev_dbg(cpts->dev, 111 + "expiring tx timestamp mtype %u seqid %04x\n", 112 + mtype, seqid); 113 + __skb_unlink(skb, &cpts->txq); 114 + dev_consume_skb_any(skb); 115 + } 116 + } 117 + 118 + return found; 119 + } 120 + 89 121 /* 90 122 * Returns zero if matching event type was found. 
91 123 */ ··· 151 101 event->low = lo; 152 102 type = event_type(event); 153 103 switch (type) { 104 + case CPTS_EV_TX: 105 + if (cpts_match_tx_ts(cpts, event)) { 106 + /* if the new event matches an existing skb, 107 + * then don't queue it 108 + */ 109 + break; 110 + } 154 111 case CPTS_EV_PUSH: 155 112 case CPTS_EV_RX: 156 - case CPTS_EV_TX: 157 113 list_del_init(&event->list); 158 114 list_add_tail(&event->list, &cpts->events); 159 115 break; ··· 280 224 return -EOPNOTSUPP; 281 225 } 282 226 227 + static long cpts_overflow_check(struct ptp_clock_info *ptp) 228 + { 229 + struct cpts *cpts = container_of(ptp, struct cpts, info); 230 + unsigned long delay = cpts->ov_check_period; 231 + struct timespec64 ts; 232 + unsigned long flags; 233 + 234 + spin_lock_irqsave(&cpts->lock, flags); 235 + ts = ns_to_timespec64(timecounter_read(&cpts->tc)); 236 + 237 + if (!skb_queue_empty(&cpts->txq)) 238 + delay = CPTS_SKB_TX_WORK_TIMEOUT; 239 + spin_unlock_irqrestore(&cpts->lock, flags); 240 + 241 + pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); 242 + return (long)delay; 243 + } 244 + 283 245 static struct ptp_clock_info cpts_info = { 284 246 .owner = THIS_MODULE, 285 247 .name = "CTPS timer", ··· 310 236 .gettime64 = cpts_ptp_gettime, 311 237 .settime64 = cpts_ptp_settime, 312 238 .enable = cpts_ptp_enable, 239 + .do_aux_work = cpts_overflow_check, 313 240 }; 314 - 315 - static void cpts_overflow_check(struct work_struct *work) 316 - { 317 - struct timespec64 ts; 318 - struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); 319 - 320 - cpts_ptp_gettime(&cpts->info, &ts); 321 - pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); 322 - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 323 - } 324 241 325 242 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 326 243 u16 ts_seqid, u8 ts_msgtype) ··· 364 299 return 0; 365 300 366 301 spin_lock_irqsave(&cpts->lock, flags); 367 - 
cpts_fifo_read(cpts, CPTS_EV_PUSH); 302 + cpts_fifo_read(cpts, -1); 368 303 list_for_each_safe(this, next, &cpts->events) { 369 304 event = list_entry(this, struct cpts_event, list); 370 305 if (event_expired(event)) { ··· 381 316 list_add(&event->list, &cpts->pool); 382 317 break; 383 318 } 319 + } 320 + 321 + if (ev_type == CPTS_EV_TX && !ns) { 322 + struct cpts_skb_cb_data *skb_cb = 323 + (struct cpts_skb_cb_data *)skb->cb; 324 + /* Not found, add frame to queue for processing later. 325 + * The periodic FIFO check will handle this. 326 + */ 327 + skb_get(skb); 328 + /* get the timestamp for timeouts */ 329 + skb_cb->tmo = jiffies + msecs_to_jiffies(100); 330 + __skb_queue_tail(&cpts->txq, skb); 331 + ptp_schedule_worker(cpts->clock, 0); 384 332 } 385 333 spin_unlock_irqrestore(&cpts->lock, flags); 386 334 ··· 436 358 { 437 359 int err, i; 438 360 361 + skb_queue_head_init(&cpts->txq); 439 362 INIT_LIST_HEAD(&cpts->events); 440 363 INIT_LIST_HEAD(&cpts->pool); 441 364 for (i = 0; i < CPTS_MAX_EVENTS; i++) ··· 457 378 } 458 379 cpts->phc_index = ptp_clock_index(cpts->clock); 459 380 460 - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 381 + ptp_schedule_worker(cpts->clock, cpts->ov_check_period); 461 382 return 0; 462 383 463 384 err_ptp: ··· 471 392 if (WARN_ON(!cpts->clock)) 472 393 return; 473 394 474 - cancel_delayed_work_sync(&cpts->overflow_work); 475 - 476 395 ptp_clock_unregister(cpts->clock); 477 396 cpts->clock = NULL; 478 397 479 398 cpts_write32(cpts, 0, int_enable); 480 399 cpts_write32(cpts, 0, control); 400 + 401 + /* Drop all packet */ 402 + skb_queue_purge(&cpts->txq); 481 403 482 404 clk_disable(cpts->refclk); 483 405 } ··· 556 476 cpts->dev = dev; 557 477 cpts->reg = (struct cpsw_cpts __iomem *)regs; 558 478 spin_lock_init(&cpts->lock); 559 - INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); 560 479 561 480 ret = cpts_of_parse(cpts, node); 562 481 if (ret)
+1 -1
drivers/net/ethernet/ti/cpts.h
··· 119 119 u32 cc_mult; /* for the nominal frequency */ 120 120 struct cyclecounter cc; 121 121 struct timecounter tc; 122 - struct delayed_work overflow_work; 123 122 int phc_index; 124 123 struct clk *refclk; 125 124 struct list_head events; 126 125 struct list_head pool; 127 126 struct cpts_event pool_data[CPTS_MAX_EVENTS]; 128 127 unsigned long ov_check_period; 128 + struct sk_buff_head txq; 129 129 }; 130 130 131 131 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+1 -1
drivers/net/gtp.c
··· 364 364 365 365 gtp->dev = dev; 366 366 367 - dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 367 + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 368 368 if (!dev->tstats) 369 369 return -ENOMEM; 370 370
+2 -1
drivers/net/hyperv/hyperv_net.h
··· 765 765 u32 max_chn; 766 766 u32 num_chn; 767 767 768 - refcount_t sc_offered; 768 + atomic_t open_chn; 769 + wait_queue_head_t subchan_open; 769 770 770 771 struct rndis_device *extension; 771 772
+3
drivers/net/hyperv/netvsc.c
··· 78 78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 79 79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 80 80 init_completion(&net_device->channel_init_wait); 81 + init_waitqueue_head(&net_device->subchan_open); 81 82 82 83 return net_device; 83 84 } ··· 1303 1302 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1304 1303 1305 1304 nvchan->channel = device->channel; 1305 + u64_stats_init(&nvchan->tx_stats.syncp); 1306 + u64_stats_init(&nvchan->rx_stats.syncp); 1306 1307 } 1307 1308 1308 1309 /* Enable NAPI handler before init callbacks */
+8 -6
drivers/net/hyperv/rndis_filter.c
··· 1048 1048 else 1049 1049 netif_napi_del(&nvchan->napi); 1050 1050 1051 - if (refcount_dec_and_test(&nvscdev->sc_offered)) 1052 - complete(&nvscdev->channel_init_wait); 1051 + atomic_inc(&nvscdev->open_chn); 1052 + wake_up(&nvscdev->subchan_open); 1053 1053 } 1054 1054 1055 1055 int rndis_filter_device_add(struct hv_device *dev, ··· 1089 1089 net_device = net_device_ctx->nvdev; 1090 1090 net_device->max_chn = 1; 1091 1091 net_device->num_chn = 1; 1092 - 1093 - refcount_set(&net_device->sc_offered, 0); 1094 1092 1095 1093 net_device->extension = rndis_device; 1096 1094 rndis_device->ndev = net; ··· 1219 1221 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, 1220 1222 net_device->num_chn); 1221 1223 1224 + atomic_set(&net_device->open_chn, 1); 1222 1225 num_rss_qs = net_device->num_chn - 1; 1223 1226 if (num_rss_qs == 0) 1224 1227 return 0; 1225 1228 1226 - refcount_set(&net_device->sc_offered, num_rss_qs); 1227 1229 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); 1228 1230 1229 1231 init_packet = &net_device->channel_init_pkt; ··· 1240 1242 if (ret) 1241 1243 goto out; 1242 1244 1245 + wait_for_completion(&net_device->channel_init_wait); 1243 1246 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1244 1247 ret = -ENODEV; 1245 1248 goto out; 1246 1249 } 1247 - wait_for_completion(&net_device->channel_init_wait); 1248 1250 1249 1251 net_device->num_chn = 1 + 1250 1252 init_packet->msg.v5_msg.subchn_comp.num_subchannels; 1253 + 1254 + /* wait for all sub channels to open */ 1255 + wait_event(net_device->subchan_open, 1256 + atomic_read(&net_device->open_chn) == net_device->num_chn); 1251 1257 1252 1258 /* ignore failues from setting rss parameters, still have channels */ 1253 1259 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+1 -1
drivers/net/ipvlan/ipvlan_main.c
··· 192 192 193 193 netdev_lockdep_set_classes(dev); 194 194 195 - ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); 195 + ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); 196 196 if (!ipvlan->pcpu_stats) 197 197 return -ENOMEM; 198 198
+10 -8
drivers/net/ppp/ppp_generic.c
··· 1915 1915 spin_unlock(&pch->downl); 1916 1916 /* see if there is anything from the attached unit to be sent */ 1917 1917 if (skb_queue_empty(&pch->file.xq)) { 1918 - read_lock(&pch->upl); 1919 1918 ppp = pch->ppp; 1920 1919 if (ppp) 1921 - ppp_xmit_process(ppp); 1922 - read_unlock(&pch->upl); 1920 + __ppp_xmit_process(ppp); 1923 1921 } 1924 1922 } 1925 1923 1926 1924 static void ppp_channel_push(struct channel *pch) 1927 1925 { 1928 - local_bh_disable(); 1929 - 1930 - __ppp_channel_push(pch); 1931 - 1932 - local_bh_enable(); 1926 + read_lock_bh(&pch->upl); 1927 + if (pch->ppp) { 1928 + (*this_cpu_ptr(pch->ppp->xmit_recursion))++; 1929 + __ppp_channel_push(pch); 1930 + (*this_cpu_ptr(pch->ppp->xmit_recursion))--; 1931 + } else { 1932 + __ppp_channel_push(pch); 1933 + } 1934 + read_unlock_bh(&pch->upl); 1933 1935 } 1934 1936 1935 1937 /*
+1
drivers/net/usb/asix.h
··· 209 209 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 210 210 struct asix_rx_fixup_info *rx); 211 211 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); 212 + void asix_rx_fixup_common_free(struct asix_common_private *dp); 212 213 213 214 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 214 215 gfp_t flags);
+43 -10
drivers/net/usb/asix_common.c
··· 75 75 value, index, data, size); 76 76 } 77 77 78 + static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) 79 + { 80 + /* Reset the variables that have a lifetime outside of 81 + * asix_rx_fixup_internal() so that future processing starts from a 82 + * known set of initial conditions. 83 + */ 84 + 85 + if (rx->ax_skb) { 86 + /* Discard any incomplete Ethernet frame in the netdev buffer */ 87 + kfree_skb(rx->ax_skb); 88 + rx->ax_skb = NULL; 89 + } 90 + 91 + /* Assume the Data header 32-bit word is at the start of the current 92 + * or next URB socket buffer so reset all the state variables. 93 + */ 94 + rx->remaining = 0; 95 + rx->split_head = false; 96 + rx->header = 0; 97 + } 98 + 78 99 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 79 100 struct asix_rx_fixup_info *rx) 80 101 { ··· 120 99 if (size != ((~rx->header >> 16) & 0x7ff)) { 121 100 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", 122 101 rx->remaining); 123 - if (rx->ax_skb) { 124 - kfree_skb(rx->ax_skb); 125 - rx->ax_skb = NULL; 126 - /* Discard the incomplete netdev Ethernet frame 127 - * and assume the Data header is at the start of 128 - * the current URB socket buffer. 
129 - */ 130 - } 131 - rx->remaining = 0; 102 + reset_asix_rx_fixup_info(rx); 132 103 } 133 104 } 134 105 ··· 152 139 if (size != ((~rx->header >> 16) & 0x7ff)) { 153 140 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", 154 141 rx->header, offset); 142 + reset_asix_rx_fixup_info(rx); 155 143 return 0; 156 144 } 157 145 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 158 146 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 159 147 size); 148 + reset_asix_rx_fixup_info(rx); 160 149 return 0; 161 150 } 162 151 ··· 183 168 if (rx->ax_skb) { 184 169 skb_put_data(rx->ax_skb, skb->data + offset, 185 170 copy_length); 186 - if (!rx->remaining) 171 + if (!rx->remaining) { 187 172 usbnet_skb_return(dev, rx->ax_skb); 173 + rx->ax_skb = NULL; 174 + } 188 175 } 189 176 190 177 offset += (copy_length + 1) & 0xfffe; ··· 195 178 if (skb->len != offset) { 196 179 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", 197 180 skb->len, offset); 181 + reset_asix_rx_fixup_info(rx); 198 182 return 0; 199 183 } 200 184 ··· 208 190 struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; 209 191 210 192 return asix_rx_fixup_internal(dev, skb, rx); 193 + } 194 + 195 + void asix_rx_fixup_common_free(struct asix_common_private *dp) 196 + { 197 + struct asix_rx_fixup_info *rx; 198 + 199 + if (!dp) 200 + return; 201 + 202 + rx = &dp->rx_fixup_info; 203 + 204 + if (rx->ax_skb) { 205 + kfree_skb(rx->ax_skb); 206 + rx->ax_skb = NULL; 207 + } 211 208 } 212 209 213 210 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+1
drivers/net/usb/asix_devices.c
··· 764 764 765 765 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) 766 766 { 767 + asix_rx_fixup_common_free(dev->driver_priv); 767 768 kfree(dev->driver_priv); 768 769 } 769 770
+9 -9
drivers/net/usb/lan78xx.c
··· 2367 2367 /* Init LTM */ 2368 2368 lan78xx_init_ltm(dev); 2369 2369 2370 - dev->net->hard_header_len += TX_OVERHEAD; 2371 - dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 2372 - 2373 2370 if (dev->udev->speed == USB_SPEED_SUPER) { 2374 2371 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2375 2372 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; ··· 2852 2855 return ret; 2853 2856 } 2854 2857 2858 + dev->net->hard_header_len += TX_OVERHEAD; 2859 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 2860 + 2855 2861 /* Init all registers */ 2856 2862 ret = lan78xx_reset(dev); 2857 2863 2858 - lan78xx_mdio_init(dev); 2864 + ret = lan78xx_mdio_init(dev); 2859 2865 2860 2866 dev->net->flags |= IFF_MULTICAST; 2861 2867 2862 2868 pdata->wol = WAKE_MAGIC; 2863 2869 2864 - return 0; 2870 + return ret; 2865 2871 } 2866 2872 2867 2873 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) ··· 3525 3525 udev = interface_to_usbdev(intf); 3526 3526 udev = usb_get_dev(udev); 3527 3527 3528 - ret = -ENOMEM; 3529 3528 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3530 3529 if (!netdev) { 3531 - dev_err(&intf->dev, "Error: OOM\n"); 3532 - goto out1; 3530 + dev_err(&intf->dev, "Error: OOM\n"); 3531 + ret = -ENOMEM; 3532 + goto out1; 3533 3533 } 3534 3534 3535 3535 /* netdev_printk() needs this */ ··· 3610 3610 ret = register_netdev(netdev); 3611 3611 if (ret != 0) { 3612 3612 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3613 - goto out2; 3613 + goto out3; 3614 3614 } 3615 3615 3616 3616 usb_set_intfdata(intf, dev);
+6 -1
drivers/net/usb/qmi_wwan.c
··· 1175 1175 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 1176 1176 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1177 1177 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1178 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1178 1179 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1179 1180 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1180 1181 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ ··· 1341 1340 static void qmi_wwan_disconnect(struct usb_interface *intf) 1342 1341 { 1343 1342 struct usbnet *dev = usb_get_intfdata(intf); 1344 - struct qmi_wwan_state *info = (void *)&dev->data; 1343 + struct qmi_wwan_state *info; 1345 1344 struct list_head *iter; 1346 1345 struct net_device *ldev; 1347 1346 1347 + /* called twice if separate control and data intf */ 1348 + if (!dev) 1349 + return; 1350 + info = (void *)&dev->data; 1348 1351 if (info->flags & QMI_WWAN_FLAG_MUX) { 1349 1352 if (!rtnl_trylock()) { 1350 1353 restart_syscall();
+1
drivers/net/vxlan.c
··· 623 623 624 624 out: 625 625 skb_gro_remcsum_cleanup(skb, &grc); 626 + skb->remcsum_offload = 0; 626 627 NAPI_GRO_CB(skb)->flush |= flush; 627 628 628 629 return pp;
+42
drivers/ptp/ptp_clock.c
··· 28 28 #include <linux/slab.h> 29 29 #include <linux/syscalls.h> 30 30 #include <linux/uaccess.h> 31 + #include <uapi/linux/sched/types.h> 31 32 32 33 #include "ptp_private.h" 33 34 ··· 185 184 kfree(ptp); 186 185 } 187 186 187 + static void ptp_aux_kworker(struct kthread_work *work) 188 + { 189 + struct ptp_clock *ptp = container_of(work, struct ptp_clock, 190 + aux_work.work); 191 + struct ptp_clock_info *info = ptp->info; 192 + long delay; 193 + 194 + delay = info->do_aux_work(info); 195 + 196 + if (delay >= 0) 197 + kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay); 198 + } 199 + 188 200 /* public interface */ 189 201 190 202 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ··· 230 216 mutex_init(&ptp->tsevq_mux); 231 217 mutex_init(&ptp->pincfg_mux); 232 218 init_waitqueue_head(&ptp->tsev_wq); 219 + 220 + if (ptp->info->do_aux_work) { 221 + char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index); 222 + 223 + kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); 224 + ptp->kworker = kthread_create_worker(0, worker_name ? 225 + worker_name : info->name); 226 + kfree(worker_name); 227 + if (IS_ERR(ptp->kworker)) { 228 + err = PTR_ERR(ptp->kworker); 229 + pr_err("failed to create ptp aux_worker %d\n", err); 230 + goto kworker_err; 231 + } 232 + } 233 233 234 234 err = ptp_populate_pin_groups(ptp); 235 235 if (err) ··· 287 259 no_device: 288 260 ptp_cleanup_pin_groups(ptp); 289 261 no_pin_groups: 262 + if (ptp->kworker) 263 + kthread_destroy_worker(ptp->kworker); 264 + kworker_err: 290 265 mutex_destroy(&ptp->tsevq_mux); 291 266 mutex_destroy(&ptp->pincfg_mux); 292 267 ida_simple_remove(&ptp_clocks_map, index); ··· 304 273 { 305 274 ptp->defunct = 1; 306 275 wake_up_interruptible(&ptp->tsev_wq); 276 + 277 + if (ptp->kworker) { 278 + kthread_cancel_delayed_work_sync(&ptp->aux_work); 279 + kthread_destroy_worker(ptp->kworker); 280 + } 307 281 308 282 /* Release the clock's resources. 
*/ 309 283 if (ptp->pps_source) ··· 374 338 return pin ? i : -1; 375 339 } 376 340 EXPORT_SYMBOL(ptp_find_pin); 341 + 342 + int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) 343 + { 344 + return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay); 345 + } 346 + EXPORT_SYMBOL(ptp_schedule_worker); 377 347 378 348 /* module operations */ 379 349
+3
drivers/ptp/ptp_private.h
··· 22 22 23 23 #include <linux/cdev.h> 24 24 #include <linux/device.h> 25 + #include <linux/kthread.h> 25 26 #include <linux/mutex.h> 26 27 #include <linux/posix-clock.h> 27 28 #include <linux/ptp_clock.h> ··· 57 56 struct attribute_group pin_attr_group; 58 57 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ 59 58 const struct attribute_group *pin_attr_groups[2]; 59 + struct kthread_worker *kworker; 60 + struct kthread_delayed_work aux_work; 60 61 }; 61 62 62 63 /*
+2 -2
drivers/s390/net/qeth_l3_main.c
··· 2512 2512 struct rtable *rt = (struct rtable *) dst; 2513 2513 __be32 *pkey = &ip_hdr(skb)->daddr; 2514 2514 2515 - if (rt->rt_gateway) 2515 + if (rt && rt->rt_gateway) 2516 2516 pkey = &rt->rt_gateway; 2517 2517 2518 2518 /* IPv4 */ ··· 2523 2523 struct rt6_info *rt = (struct rt6_info *) dst; 2524 2524 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; 2525 2525 2526 - if (!ipv6_addr_any(&rt->rt6i_gateway)) 2526 + if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) 2527 2527 pkey = &rt->rt6i_gateway; 2528 2528 2529 2529 /* IPv6 */
+1
include/linux/mlx4/device.h
··· 620 620 u32 dmfs_high_rate_qpn_base; 621 621 u32 dmfs_high_rate_qpn_range; 622 622 u32 vf_caps; 623 + bool wol_port[MLX4_MAX_PORTS + 1]; 623 624 struct mlx4_rate_limit_caps rl_caps; 624 625 }; 625 626
+20
include/linux/ptp_clock_kernel.h
··· 99 99 * parameter func: the desired function to use. 100 100 * parameter chan: the function channel index to use. 101 101 * 102 + * @do_work: Request driver to perform auxiliary (periodic) operations 103 + * Driver should return delay of the next auxiliary work scheduling 104 + * time (>=0) or negative value in case further scheduling 105 + * is not required. 106 + * 102 107 * Drivers should embed their ptp_clock_info within a private 103 108 * structure, obtaining a reference to it using container_of(). 104 109 * ··· 131 126 struct ptp_clock_request *request, int on); 132 127 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, 133 128 enum ptp_pin_function func, unsigned int chan); 129 + long (*do_aux_work)(struct ptp_clock_info *ptp); 134 130 }; 135 131 136 132 struct ptp_clock; ··· 217 211 int ptp_find_pin(struct ptp_clock *ptp, 218 212 enum ptp_pin_function func, unsigned int chan); 219 213 214 + /** 215 + * ptp_schedule_worker() - schedule ptp auxiliary work 216 + * 217 + * @ptp: The clock obtained from ptp_clock_register(). 218 + * @delay: number of jiffies to wait before queuing 219 + * See kthread_queue_delayed_work() for more info. 220 + */ 221 + 222 + int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); 223 + 220 224 #else 221 225 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 222 226 struct device *parent) ··· 241 225 static inline int ptp_find_pin(struct ptp_clock *ptp, 242 226 enum ptp_pin_function func, unsigned int chan) 243 227 { return -1; } 228 + static inline int ptp_schedule_worker(struct ptp_clock *ptp, 229 + unsigned long delay) 230 + { return -EOPNOTSUPP; } 231 + 244 232 #endif 245 233 246 234 #endif
+10
include/net/tcp.h
··· 1916 1916 u64 xmit_time); 1917 1917 extern void tcp_rack_reo_timeout(struct sock *sk); 1918 1918 1919 + /* At how many usecs into the future should the RTO fire? */ 1920 + static inline s64 tcp_rto_delta_us(const struct sock *sk) 1921 + { 1922 + const struct sk_buff *skb = tcp_write_queue_head(sk); 1923 + u32 rto = inet_csk(sk)->icsk_rto; 1924 + u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); 1925 + 1926 + return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 1927 + } 1928 + 1919 1929 /* 1920 1930 * Save and compile IPv4 options, return a pointer to it 1921 1931 */
+51 -9
net/batman-adv/translation-table.c
··· 1549 1549 return found; 1550 1550 } 1551 1551 1552 + /** 1553 + * batadv_tt_global_sync_flags - update TT sync flags 1554 + * @tt_global: the TT global entry to update sync flags in 1555 + * 1556 + * Updates the sync flag bits in the tt_global flag attribute with a logical 1557 + * OR of all sync flags from any of its TT orig entries. 1558 + */ 1559 + static void 1560 + batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global) 1561 + { 1562 + struct batadv_tt_orig_list_entry *orig_entry; 1563 + const struct hlist_head *head; 1564 + u16 flags = BATADV_NO_FLAGS; 1565 + 1566 + rcu_read_lock(); 1567 + head = &tt_global->orig_list; 1568 + hlist_for_each_entry_rcu(orig_entry, head, list) 1569 + flags |= orig_entry->flags; 1570 + rcu_read_unlock(); 1571 + 1572 + flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK); 1573 + tt_global->common.flags = flags; 1574 + } 1575 + 1576 + /** 1577 + * batadv_tt_global_orig_entry_add - add or update a TT orig entry 1578 + * @tt_global: the TT global entry to add an orig entry in 1579 + * @orig_node: the originator to add an orig entry for 1580 + * @ttvn: translation table version number of this changeset 1581 + * @flags: TT sync flags 1582 + */ 1552 1583 static void 1553 1584 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 1554 - struct batadv_orig_node *orig_node, int ttvn) 1585 + struct batadv_orig_node *orig_node, int ttvn, 1586 + u8 flags) 1555 1587 { 1556 1588 struct batadv_tt_orig_list_entry *orig_entry; 1557 1589 ··· 1593 1561 * was added during a "temporary client detection" 1594 1562 */ 1595 1563 orig_entry->ttvn = ttvn; 1596 - goto out; 1564 + orig_entry->flags = flags; 1565 + goto sync_flags; 1597 1566 } 1598 1567 1599 1568 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); ··· 1606 1573 batadv_tt_global_size_inc(orig_node, tt_global->common.vid); 1607 1574 orig_entry->orig_node = orig_node; 1608 1575 orig_entry->ttvn = ttvn; 1576 + orig_entry->flags = flags; 
1609 1577 kref_init(&orig_entry->refcount); 1610 1578 1611 1579 spin_lock_bh(&tt_global->list_lock); ··· 1616 1582 spin_unlock_bh(&tt_global->list_lock); 1617 1583 atomic_inc(&tt_global->orig_list_count); 1618 1584 1585 + sync_flags: 1586 + batadv_tt_global_sync_flags(tt_global); 1619 1587 out: 1620 1588 if (orig_entry) 1621 1589 batadv_tt_orig_list_entry_put(orig_entry); ··· 1739 1703 } 1740 1704 1741 1705 /* the change can carry possible "attribute" flags like the 1742 - * TT_CLIENT_WIFI, therefore they have to be copied in the 1706 + * TT_CLIENT_TEMP, therefore they have to be copied in the 1743 1707 * client entry 1744 1708 */ 1745 - common->flags |= flags; 1709 + common->flags |= flags & (~BATADV_TT_SYNC_MASK); 1746 1710 1747 1711 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 1748 1712 * one originator left in the list and we previously received a ··· 1759 1723 } 1760 1724 add_orig_entry: 1761 1725 /* add the new orig_entry (if needed) or update it */ 1762 - batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); 1726 + batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn, 1727 + flags & BATADV_TT_SYNC_MASK); 1763 1728 1764 1729 batadv_dbg(BATADV_DBG_TT, bat_priv, 1765 1730 "Creating new global tt entry: %pM (vid: %d, via %pM)\n", ··· 1983 1946 struct batadv_tt_orig_list_entry *orig, 1984 1947 bool best) 1985 1948 { 1949 + u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags; 1986 1950 void *hdr; 1987 1951 struct batadv_orig_node_vlan *vlan; 1988 1952 u8 last_ttvn; ··· 2013 1975 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || 2014 1976 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || 2015 1977 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || 2016 - nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) 1978 + nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags)) 2017 1979 goto nla_put_failure; 2018 1980 2019 1981 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) ··· 2627 2589 unsigned short vid) 
2628 2590 { 2629 2591 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 2592 + struct batadv_tt_orig_list_entry *tt_orig; 2630 2593 struct batadv_tt_common_entry *tt_common; 2631 2594 struct batadv_tt_global_entry *tt_global; 2632 2595 struct hlist_head *head; ··· 2666 2627 /* find out if this global entry is announced by this 2667 2628 * originator 2668 2629 */ 2669 - if (!batadv_tt_global_entry_has_orig(tt_global, 2670 - orig_node)) 2630 + tt_orig = batadv_tt_global_orig_entry_find(tt_global, 2631 + orig_node); 2632 + if (!tt_orig) 2671 2633 continue; 2672 2634 2673 2635 /* use network order to read the VID: this ensures that ··· 2680 2640 /* compute the CRC on flags that have to be kept in sync 2681 2641 * among nodes 2682 2642 */ 2683 - flags = tt_common->flags & BATADV_TT_SYNC_MASK; 2643 + flags = tt_orig->flags; 2684 2644 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); 2685 2645 2686 2646 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); 2647 + 2648 + batadv_tt_orig_list_entry_put(tt_orig); 2687 2649 } 2688 2650 rcu_read_unlock(); 2689 2651 }
+2
net/batman-adv/types.h
··· 1260 1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client 1261 1261 * @orig_node: pointer to orig node announcing this non-mesh client 1262 1262 * @ttvn: translation table version number which added the non-mesh client 1263 + * @flags: per orig entry TT sync flags 1263 1264 * @list: list node for batadv_tt_global_entry::orig_list 1264 1265 * @refcount: number of contexts the object is used 1265 1266 * @rcu: struct used for freeing in an RCU-safe manner ··· 1268 1267 struct batadv_tt_orig_list_entry { 1269 1268 struct batadv_orig_node *orig_node; 1270 1269 u8 ttvn; 1270 + u8 flags; 1271 1271 struct hlist_node list; 1272 1272 struct kref refcount; 1273 1273 struct rcu_head rcu;
+1 -1
net/core/dev.c
··· 2739 2739 { 2740 2740 if (tx_path) 2741 2741 return skb->ip_summed != CHECKSUM_PARTIAL && 2742 - skb->ip_summed != CHECKSUM_NONE; 2742 + skb->ip_summed != CHECKSUM_UNNECESSARY; 2743 2743 2744 2744 return skb->ip_summed == CHECKSUM_NONE; 2745 2745 }
+10 -2
net/ipv4/cipso_ipv4.c
··· 1523 1523 int taglen; 1524 1524 1525 1525 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { 1526 - if (optptr[0] == IPOPT_CIPSO) 1526 + switch (optptr[0]) { 1527 + case IPOPT_CIPSO: 1527 1528 return optptr; 1528 - taglen = optptr[1]; 1529 + case IPOPT_END: 1530 + return NULL; 1531 + case IPOPT_NOOP: 1532 + taglen = 1; 1533 + break; 1534 + default: 1535 + taglen = optptr[1]; 1536 + } 1529 1537 optlen -= taglen; 1530 1538 optptr += taglen; 1531 1539 }
+1
net/ipv4/fou.c
··· 450 450 out: 451 451 NAPI_GRO_CB(skb)->flush |= flush; 452 452 skb_gro_remcsum_cleanup(skb, &grc); 453 + skb->remcsum_offload = 0; 453 454 454 455 return pp; 455 456 }
+19 -15
net/ipv4/tcp_input.c
··· 107 107 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ 108 108 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 109 109 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 110 + #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ 110 111 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 111 112 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ 112 113 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ ··· 2521 2520 return; 2522 2521 2523 2522 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2524 - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || 2525 - (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { 2523 + if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && 2524 + (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { 2526 2525 tp->snd_cwnd = tp->snd_ssthresh; 2527 2526 tp->snd_cwnd_stamp = tcp_jiffies32; 2528 2527 } ··· 3005 3004 /* Offset the time elapsed after installing regular RTO */ 3006 3005 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || 3007 3006 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 3008 - struct sk_buff *skb = tcp_write_queue_head(sk); 3009 - u64 rto_time_stamp = skb->skb_mstamp + 3010 - jiffies_to_usecs(rto); 3011 - s64 delta_us = rto_time_stamp - tp->tcp_mstamp; 3007 + s64 delta_us = tcp_rto_delta_us(sk); 3012 3008 /* delta_us may not be positive if the socket is locked 3013 3009 * when the retrans timer fires and is rescheduled. 3014 3010 */ ··· 3015 3017 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 3016 3018 TCP_RTO_MAX); 3017 3019 } 3020 + } 3021 + 3022 + /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. 
*/ 3023 + static void tcp_set_xmit_timer(struct sock *sk) 3024 + { 3025 + if (!tcp_schedule_loss_probe(sk)) 3026 + tcp_rearm_rto(sk); 3018 3027 } 3019 3028 3020 3029 /* If we get here, the whole TSO packet has not been acked. */ ··· 3185 3180 ca_rtt_us, sack->rate); 3186 3181 3187 3182 if (flag & FLAG_ACKED) { 3188 - tcp_rearm_rto(sk); 3183 + flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3189 3184 if (unlikely(icsk->icsk_mtup.probe_size && 3190 3185 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3191 3186 tcp_mtup_probe_success(sk); ··· 3213 3208 * after when the head was last (re)transmitted. Otherwise the 3214 3209 * timeout may continue to extend in loss recovery. 3215 3210 */ 3216 - tcp_rearm_rto(sk); 3211 + flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3217 3212 } 3218 3213 3219 3214 if (icsk->icsk_ca_ops->pkts_acked) { ··· 3585 3580 if (after(ack, tp->snd_nxt)) 3586 3581 goto invalid_ack; 3587 3582 3588 - if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) 3589 - tcp_rearm_rto(sk); 3590 - 3591 3583 if (after(ack, prior_snd_una)) { 3592 3584 flag |= FLAG_SND_UNA_ADVANCED; 3593 3585 icsk->icsk_retransmits = 0; ··· 3649 3647 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, 3650 3648 &sack_state); 3651 3649 3650 + if (tp->tlp_high_seq) 3651 + tcp_process_tlp_ack(sk, ack, flag); 3652 + /* If needed, reset TLP/RTO timer; RACK may later override this. 
*/ 3653 + if (flag & FLAG_SET_XMIT_TIMER) 3654 + tcp_set_xmit_timer(sk); 3655 + 3652 3656 if (tcp_ack_is_dubious(sk, flag)) { 3653 3657 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3654 3658 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); 3655 3659 } 3656 - if (tp->tlp_high_seq) 3657 - tcp_process_tlp_ack(sk, ack, flag); 3658 3660 3659 3661 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3660 3662 sk_dst_confirm(sk); 3661 3663 3662 - if (icsk->icsk_pending == ICSK_TIME_RETRANS) 3663 - tcp_schedule_loss_probe(sk); 3664 3664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3665 3665 lost = tp->lost - lost; /* freshly marked lost */ 3666 3666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+9 -18
net/ipv4/tcp_output.c
··· 2377 2377 { 2378 2378 struct inet_connection_sock *icsk = inet_csk(sk); 2379 2379 struct tcp_sock *tp = tcp_sk(sk); 2380 - u32 timeout, tlp_time_stamp, rto_time_stamp; 2381 2380 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 2381 + u32 timeout, rto_delta_us; 2382 2382 2383 - /* No consecutive loss probes. */ 2384 - if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 2385 - tcp_rearm_rto(sk); 2386 - return false; 2387 - } 2388 2383 /* Don't do any loss probe on a Fast Open connection before 3WHS 2389 2384 * finishes. 2390 2385 */ 2391 2386 if (tp->fastopen_rsk) 2392 - return false; 2393 - 2394 - /* TLP is only scheduled when next timer event is RTO. */ 2395 - if (icsk->icsk_pending != ICSK_TIME_RETRANS) 2396 2387 return false; 2397 2388 2398 2389 /* Schedule a loss probe in 2*RTT for SACK capable connections ··· 2408 2417 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 2409 2418 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 2410 2419 2411 - /* If RTO is shorter, just schedule TLP in its place. */ 2412 - tlp_time_stamp = tcp_jiffies32 + timeout; 2413 - rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 2414 - if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 2415 - s32 delta = rto_time_stamp - tcp_jiffies32; 2416 - if (delta > 0) 2417 - timeout = delta; 2418 - } 2420 + /* If the RTO formula yields an earlier time, then use that time. */ 2421 + rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ 2422 + if (rto_delta_us > 0) 2423 + timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); 2419 2424 2420 2425 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 2421 2426 TCP_RTO_MAX); ··· 3436 3449 int err; 3437 3450 3438 3451 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); 3452 + 3453 + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 3454 + return -EHOSTUNREACH; /* Routing failure or similar. */ 3455 + 3439 3456 tcp_connect_init(sk); 3440 3457 3441 3458 if (unlikely(tp->repair)) {
+2 -1
net/ipv4/tcp_timer.c
··· 652 652 goto death; 653 653 } 654 654 655 - if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 655 + if (!sock_flag(sk, SOCK_KEEPOPEN) || 656 + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) 656 657 goto out; 657 658 658 659 elapsed = keepalive_time_when(tp);
+1 -1
net/ipv4/udp_offload.c
··· 235 235 if (uh->check == 0) 236 236 uh->check = CSUM_MANGLED_0; 237 237 238 - skb->ip_summed = CHECKSUM_NONE; 238 + skb->ip_summed = CHECKSUM_UNNECESSARY; 239 239 240 240 /* If there is no outer header we can fake a checksum offload 241 241 * due to the fact that we have already done the checksum in
+3 -8
net/ipv6/route.c
··· 2351 2351 if (on_link) 2352 2352 nrt->rt6i_flags &= ~RTF_GATEWAY; 2353 2353 2354 + nrt->rt6i_protocol = RTPROT_REDIRECT; 2354 2355 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 2355 2356 2356 2357 if (ip6_ins_rt(nrt)) ··· 2462 2461 .fc_dst_len = prefixlen, 2463 2462 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2464 2463 RTF_UP | RTF_PREF(pref), 2464 + .fc_protocol = RTPROT_RA, 2465 2465 .fc_nlinfo.portid = 0, 2466 2466 .fc_nlinfo.nlh = NULL, 2467 2467 .fc_nlinfo.nl_net = net, ··· 2515 2513 .fc_ifindex = dev->ifindex, 2516 2514 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 2517 2515 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 2516 + .fc_protocol = RTPROT_RA, 2518 2517 .fc_nlinfo.portid = 0, 2519 2518 .fc_nlinfo.nlh = NULL, 2520 2519 .fc_nlinfo.nl_net = dev_net(dev), ··· 3427 3424 rtm->rtm_flags = 0; 3428 3425 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 3429 3426 rtm->rtm_protocol = rt->rt6i_protocol; 3430 - if (rt->rt6i_flags & RTF_DYNAMIC) 3431 - rtm->rtm_protocol = RTPROT_REDIRECT; 3432 - else if (rt->rt6i_flags & RTF_ADDRCONF) { 3433 - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) 3434 - rtm->rtm_protocol = RTPROT_RA; 3435 - else 3436 - rtm->rtm_protocol = RTPROT_KERNEL; 3437 - } 3438 3427 3439 3428 if (rt->rt6i_flags & RTF_CACHE) 3440 3429 rtm->rtm_flags |= RTM_F_CLONED;
+1 -1
net/ipv6/udp_offload.c
··· 72 72 if (uh->check == 0) 73 73 uh->check = CSUM_MANGLED_0; 74 74 75 - skb->ip_summed = CHECKSUM_NONE; 75 + skb->ip_summed = CHECKSUM_UNNECESSARY; 76 76 77 77 /* If there is no outer header we can fake a checksum offload 78 78 * due to the fact that we have already done the checksum in
+4 -1
net/rds/ib_recv.c
··· 1015 1015 if (rds_ib_ring_empty(&ic->i_recv_ring)) 1016 1016 rds_ib_stats_inc(s_ib_rx_ring_empty); 1017 1017 1018 - if (rds_ib_ring_low(&ic->i_recv_ring)) 1018 + if (rds_ib_ring_low(&ic->i_recv_ring)) { 1019 1019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT); 1020 + rds_ib_stats_inc(s_ib_rx_refill_from_cq); 1021 + } 1020 1022 } 1021 1023 1022 1024 int rds_ib_recv_path(struct rds_conn_path *cp) ··· 1031 1029 if (rds_conn_up(conn)) { 1032 1030 rds_ib_attempt_ack(ic); 1033 1031 rds_ib_recv_refill(conn, 0, GFP_KERNEL); 1032 + rds_ib_stats_inc(s_ib_rx_refill_from_thread); 1034 1033 } 1035 1034 1036 1035 return ret;
+10 -10
net/sched/act_ipt.c
··· 36 36 static unsigned int xt_net_id; 37 37 static struct tc_action_ops act_xt_ops; 38 38 39 - static int ipt_init_target(struct xt_entry_target *t, char *table, 40 - unsigned int hook) 39 + static int ipt_init_target(struct net *net, struct xt_entry_target *t, 40 + char *table, unsigned int hook) 41 41 { 42 42 struct xt_tgchk_param par; 43 43 struct xt_target *target; ··· 49 49 return PTR_ERR(target); 50 50 51 51 t->u.kernel.target = target; 52 + par.net = net; 52 53 par.table = table; 53 54 par.entryinfo = NULL; 54 55 par.target = target; ··· 92 91 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, 93 92 }; 94 93 95 - static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, 94 + static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, 96 95 struct nlattr *est, struct tc_action **a, 97 96 const struct tc_action_ops *ops, int ovr, int bind) 98 97 { 98 + struct tc_action_net *tn = net_generic(net, id); 99 99 struct nlattr *tb[TCA_IPT_MAX + 1]; 100 100 struct tcf_ipt *ipt; 101 101 struct xt_entry_target *td, *t; ··· 161 159 if (unlikely(!t)) 162 160 goto err2; 163 161 164 - err = ipt_init_target(t, tname, hook); 162 + err = ipt_init_target(net, t, tname, hook); 165 163 if (err < 0) 166 164 goto err3; 167 165 ··· 195 193 struct nlattr *est, struct tc_action **a, int ovr, 196 194 int bind) 197 195 { 198 - struct tc_action_net *tn = net_generic(net, ipt_net_id); 199 - 200 - return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind); 196 + return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, 197 + bind); 201 198 } 202 199 203 200 static int tcf_xt_init(struct net *net, struct nlattr *nla, 204 201 struct nlattr *est, struct tc_action **a, int ovr, 205 202 int bind) 206 203 { 207 - struct tc_action_net *tn = net_generic(net, xt_net_id); 208 - 209 - return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind); 204 + return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, 205 + bind); 210 
206 } 211 207 212 208 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
+2
tools/build/feature/test-bpf.c
··· 11 11 # define __NR_bpf 280 12 12 # elif defined(__sparc__) 13 13 # define __NR_bpf 349 14 + # elif defined(__s390__) 15 + # define __NR_bpf 351 14 16 # else 15 17 # error __NR_bpf not defined. libbpf does not support your arch. 16 18 # endif
+2
tools/lib/bpf/bpf.c
··· 39 39 # define __NR_bpf 280 40 40 # elif defined(__sparc__) 41 41 # define __NR_bpf 349 42 + # elif defined(__s390__) 43 + # define __NR_bpf 351 42 44 # else 43 45 # error __NR_bpf not defined. libbpf does not support your arch. 44 46 # endif
+11
tools/testing/selftests/bpf/test_pkt_md_access.c
··· 12 12 13 13 int _version SEC("version") = 1; 14 14 15 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 15 16 #define TEST_FIELD(TYPE, FIELD, MASK) \ 16 17 { \ 17 18 TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ 18 19 if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ 19 20 return TC_ACT_SHOT; \ 20 21 } 22 + #else 23 + #define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b)) 24 + #define TEST_FIELD(TYPE, FIELD, MASK) \ 25 + { \ 26 + TYPE tmp = *((volatile TYPE *)&skb->FIELD + \ 27 + TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \ 28 + if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ 29 + return TC_ACT_SHOT; \ 30 + } 31 + #endif 21 32 22 33 SEC("test1") 23 34 int process(struct __sk_buff *skb)
+10 -9
tools/testing/selftests/bpf/test_verifier.c
··· 8 8 * License as published by the Free Software Foundation. 9 9 */ 10 10 11 + #include <endian.h> 11 12 #include <asm/types.h> 12 13 #include <linux/types.h> 13 14 #include <stdint.h> ··· 1099 1098 "check skb->hash byte load permitted", 1100 1099 .insns = { 1101 1100 BPF_MOV64_IMM(BPF_REG_0, 0), 1102 - #ifdef __LITTLE_ENDIAN 1101 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1103 1102 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1104 1103 offsetof(struct __sk_buff, hash)), 1105 1104 #else ··· 1136 1135 "check skb->hash byte load not permitted 3", 1137 1136 .insns = { 1138 1137 BPF_MOV64_IMM(BPF_REG_0, 0), 1139 - #ifdef __LITTLE_ENDIAN 1138 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1140 1139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1141 1140 offsetof(struct __sk_buff, hash) + 3), 1142 1141 #else ··· 1245 1244 "check skb->hash half load permitted", 1246 1245 .insns = { 1247 1246 BPF_MOV64_IMM(BPF_REG_0, 0), 1248 - #ifdef __LITTLE_ENDIAN 1247 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1249 1248 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1250 1249 offsetof(struct __sk_buff, hash)), 1251 1250 #else ··· 1260 1259 "check skb->hash half load not permitted", 1261 1260 .insns = { 1262 1261 BPF_MOV64_IMM(BPF_REG_0, 0), 1263 - #ifdef __LITTLE_ENDIAN 1262 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1264 1263 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1265 1264 offsetof(struct __sk_buff, hash) + 2), 1266 1265 #else ··· 5423 5422 "check bpf_perf_event_data->sample_period byte load permitted", 5424 5423 .insns = { 5425 5424 BPF_MOV64_IMM(BPF_REG_0, 0), 5426 - #ifdef __LITTLE_ENDIAN 5425 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5427 5426 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 5428 5427 offsetof(struct bpf_perf_event_data, sample_period)), 5429 5428 #else ··· 5439 5438 "check bpf_perf_event_data->sample_period half load permitted", 5440 5439 .insns = { 5441 5440 BPF_MOV64_IMM(BPF_REG_0, 0), 5442 - #ifdef __LITTLE_ENDIAN 5441 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5443 5442 BPF_LDX_MEM(BPF_H, BPF_REG_0, 
BPF_REG_1, 5444 5443 offsetof(struct bpf_perf_event_data, sample_period)), 5445 5444 #else ··· 5455 5454 "check bpf_perf_event_data->sample_period word load permitted", 5456 5455 .insns = { 5457 5456 BPF_MOV64_IMM(BPF_REG_0, 0), 5458 - #ifdef __LITTLE_ENDIAN 5457 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5459 5458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 5460 5459 offsetof(struct bpf_perf_event_data, sample_period)), 5461 5460 #else ··· 5482 5481 "check skb->data half load not permitted", 5483 5482 .insns = { 5484 5483 BPF_MOV64_IMM(BPF_REG_0, 0), 5485 - #ifdef __LITTLE_ENDIAN 5484 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5486 5485 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5487 5486 offsetof(struct __sk_buff, data)), 5488 5487 #else ··· 5498 5497 "check skb->tc_classid half load not permitted for lwt prog", 5499 5498 .insns = { 5500 5499 BPF_MOV64_IMM(BPF_REG_0, 0), 5501 - #ifdef __LITTLE_ENDIAN 5500 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5502 5501 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5503 5502 offsetof(struct __sk_buff, tc_classid)), 5504 5503 #else