net: bpf: arm64: address randomize and write protect JIT code

This is the ARM64 variant for 314beb9bcab ("x86: bpf_jit_comp: secure bpf
jit against spraying attacks").

Thanks to commit 11d91a770f1f ("arm64: Add CONFIG_DEBUG_SET_MODULE_RONX
support") which added necessary infrastructure, we can now implement
RO marking of eBPF generated JIT image pages and randomize start offset
for the JIT code, so that it does not reside directly on a page boundary
anymore. Likewise, the holes are filled with illegal instructions: here
we use BRK #0x100 (opcode 0xd4202000) to trigger a fault in the kernel
(unallocated BRKs would trigger a fault through do_debug_exception). This
seems more reliable as we don't have a guaranteed undefined instruction
space on ARM64.

This is basically the ARM64 variant of what we already have in ARM via
commit 55309dd3d4cd ("net: bpf: arm: address randomize and write protect
JIT code"). Moreover, this commit also presents a merge resolution due to
conflicts with commit 60a3b2253c41 ("net: bpf: make eBPF interpreter images
read-only") as we don't use kfree() in bpf_jit_free() anymore to release
the locked bpf_prog structure, but instead bpf_prog_unlock_free() through
a different allocator.

JIT tested on aarch64 with BPF test suite.

Reference: http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Reviewed-by: Zi Shen Lim <zlim.lnx@gmail.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Daniel Borkmann and committed by Catalin Marinas (commit b569c1c6, c0260ba9).

+30 -9
arch/arm64/net/bpf_jit_comp.c
@@ (includes)
 #define pr_fmt(fmt) "bpf_jit: " fmt

 #include <linux/filter.h>
-#include <linux/moduleloader.h>
 #include <linux/printk.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
+
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>

 #include "bpf_jit.h"

@@ (after bpf2a64_offset)
 	int from = ctx->offset[bpf_from + 1] - 1;

 	return to - from;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+	u32 *ptr;
+	/* We are guaranteed to have aligned memory. */
+	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
 }

 static inline int epilogue_offset(const struct jit_ctx *ctx)

@@ bpf_int_jit_compile
 void bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_binary_header *header;
 	struct jit_ctx ctx;
 	int image_size;
+	u8 *image_ptr;

 	if (!bpf_jit_enable)
 		return;
@@
 		goto out;

 	build_prologue(&ctx);
-
 	build_epilogue(&ctx);

 	/* Now we know the actual image size. */
 	image_size = sizeof(u32) * ctx.idx;
-	ctx.image = module_alloc(image_size);
-	if (unlikely(ctx.image == NULL))
+	header = bpf_jit_binary_alloc(image_size, &image_ptr,
+				      sizeof(u32), jit_fill_hole);
+	if (header == NULL)
 		goto out;

 	/* 2. Now, the actual pass. */

+	ctx.image = (u32 *)image_ptr;
 	ctx.idx = 0;
+
 	build_prologue(&ctx);

 	ctx.body_offset = ctx.idx;
 	if (build_body(&ctx)) {
-		module_free(NULL, ctx.image);
+		bpf_jit_binary_free(header);
 		goto out;
 	}
@@
 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

 	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+
+	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-
 out:
 	kfree(ctx.offset);
 }

@@ bpf_jit_free
 void bpf_jit_free(struct bpf_prog *prog)
 {
-	if (prog->jited)
-		module_free(NULL, prog->bpf_func);
+	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;

-	kfree(prog);
+	if (!prog->jited)
+		goto free_filter;
+
+	set_memory_rw(addr, header->pages);
+	bpf_jit_binary_free(header);
+
+free_filter:
+	bpf_prog_unlock_free(prog);
 }