Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Paul Burton:

- A set of memblock initialization improvements thanks to Serge Semin,
tidying up after our conversion from bootmem to memblock back in
v4.20.

- Our eBPF JIT, which previously supported only MIPS64r2 through MIPS64r5,
is improved to also support MIPS64r6. Support for MIPS32 systems is
introduced, with the caveat that it only works for programs that
don't use 64 bit registers or operations - those will bail out & need
to be interpreted.

- Improvements to the allocation & configuration of our exception
vector that should fix issues seen on some platforms using recent
versions of U-Boot.

- Some minor improvements to code generated for jump labels, along with
enabling them by default for generic kernels.

* tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (27 commits)
mips: Manually call fdt_init_reserved_mem() method
mips: Make sure dt memory regions are valid
mips: Perform early low memory test
mips: Dump memblock regions for debugging
mips: Add reserve-nomap memory type support
mips: Use memblock to reserve the __nosave memory range
mips: Discard post-CMA-init foreach loop
mips: Reserve memory for the kernel image resources
MIPS: Remove duplicate EBase configuration
MIPS: Sync icache for whole exception vector
MIPS: Always allocate exception vector for MIPSr2+
MIPS: Use memblock_phys_alloc() for exception vector
mips: Combine memblock init and memory reservation loops
mips: Discard rudiments from bootmem_init
mips: Make sure kernel .bss exists in boot mem pool
mips: vdso: drop unnecessary cc-ldoption
Revert "MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices"
MIPS: generic: Enable CONFIG_JUMP_LABEL
MIPS: jump_label: Use compact branches for >= r6
MIPS: jump_label: Remove redundant nops
...

+341 -1825
+31 -33
arch/mips/Kconfig
··· 44 44 select HAVE_ARCH_SECCOMP_FILTER 45 45 select HAVE_ARCH_TRACEHOOK 46 46 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT 47 - select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS) 48 - select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) 47 + select HAVE_EBPF_JIT if (!CPU_MICROMIPS) 49 48 select HAVE_CONTEXT_TRACKING 50 49 select HAVE_COPY_THREAD_TLS 51 50 select HAVE_C_RECORDMCOUNT ··· 275 276 select BCM47XX_SPROM 276 277 select BCM47XX_SSB if !BCM47XX_BCMA 277 278 help 278 - Support for BCM47XX based boards 279 + Support for BCM47XX based boards 279 280 280 281 config BCM63XX 281 282 bool "Broadcom BCM63XX based boards" ··· 294 295 select MIPS_L1_CACHE_SHIFT_4 295 296 select CLKDEV_LOOKUP 296 297 help 297 - Support for BCM63XX based boards 298 + Support for BCM63XX based boards 298 299 299 300 config MIPS_COBALT 300 301 bool "Cobalt Server" ··· 373 374 select SYS_SUPPORTS_64BIT_KERNEL 374 375 select SYS_SUPPORTS_100HZ 375 376 help 376 - This a family of machines based on the MIPS R4030 chipset which was 377 - used by several vendors to build RISC/os and Windows NT workstations. 378 - Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and 379 - Olivetti M700-10 workstations. 377 + This a family of machines based on the MIPS R4030 chipset which was 378 + used by several vendors to build RISC/os and Windows NT workstations. 379 + Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and 380 + Olivetti M700-10 workstations. 380 381 381 382 config MACH_INGENIC 382 383 bool "Ingenic SoC based machines" ··· 572 573 bool "NXP STB220 board" 573 574 select SOC_PNX833X 574 575 help 575 - Support for NXP Semiconductors STB220 Development Board. 576 + Support for NXP Semiconductors STB220 Development Board. 576 577 577 578 config NXP_STB225 578 579 bool "NXP 225 board" 579 580 select SOC_PNX833X 580 581 select SOC_PNX8335 581 582 help 582 - Support for NXP Semiconductors STB225 Development Board. 
583 + Support for NXP Semiconductors STB225 Development Board. 583 584 584 585 config PMC_MSP 585 586 bool "PMC-Sierra MSP chipsets" ··· 721 722 select SYS_SUPPORTS_64BIT_KERNEL 722 723 select SYS_SUPPORTS_BIG_ENDIAN 723 724 select MIPS_L1_CACHE_SHIFT_7 724 - help 725 - This is the SGI Indigo2 with R10000 processor. To compile a Linux 726 - kernel that runs on these, say Y here. 725 + help 726 + This is the SGI Indigo2 with R10000 processor. To compile a Linux 727 + kernel that runs on these, say Y here. 727 728 728 729 config SGI_IP32 729 730 bool "SGI IP32 (O2)" ··· 1167 1168 config SYS_SUPPORTS_RELOCATABLE 1168 1169 bool 1169 1170 help 1170 - Selected if the platform supports relocating the kernel. 1171 - The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF 1172 - to allow access to command line and entropy sources. 1171 + Selected if the platform supports relocating the kernel. 1172 + The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF 1173 + to allow access to command line and entropy sources. 1173 1174 1174 1175 config MIPS_CBPF_JIT 1175 1176 def_bool y ··· 2112 2113 # Set to y for ptrace access to watch registers. 2113 2114 # 2114 2115 config HARDWARE_WATCHPOINTS 2115 - bool 2116 - default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6 2116 + bool 2117 + default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6 2117 2118 2118 2119 menu "Kernel type" 2119 2120 ··· 2177 2178 bool "4kB" 2178 2179 depends on !CPU_LOONGSON2 && !CPU_LOONGSON3 2179 2180 help 2180 - This option select the standard 4kB Linux page size. On some 2181 - R3000-family processors this is the only available page size. Using 2182 - 4kB page size will minimize memory consumption and is therefore 2183 - recommended for low memory systems. 2181 + This option select the standard 4kB Linux page size. On some 2182 + R3000-family processors this is the only available page size. 
Using 2183 + 4kB page size will minimize memory consumption and is therefore 2184 + recommended for low memory systems. 2184 2185 2185 2186 config PAGE_SIZE_8KB 2186 2187 bool "8kB" ··· 2473 2474 depends on CPU_SB1 && CPU_SB1_PASS_2 2474 2475 default y 2475 2476 2476 - 2477 2477 choice 2478 2478 prompt "SmartMIPS or microMIPS ASE support" 2479 2479 ··· 2680 2682 bool "Randomize the address of the kernel image" 2681 2683 depends on RELOCATABLE 2682 2684 ---help--- 2683 - Randomizes the physical and virtual address at which the 2684 - kernel image is loaded, as a security feature that 2685 - deters exploit attempts relying on knowledge of the location 2686 - of kernel internals. 2685 + Randomizes the physical and virtual address at which the 2686 + kernel image is loaded, as a security feature that 2687 + deters exploit attempts relying on knowledge of the location 2688 + of kernel internals. 2687 2689 2688 - Entropy is generated using any coprocessor 0 registers available. 2690 + Entropy is generated using any coprocessor 0 registers available. 2689 2691 2690 - The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. 2692 + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. 2691 2693 2692 - If unsure, say N. 2694 + If unsure, say N. 2693 2695 2694 2696 config RANDOMIZE_BASE_MAX_OFFSET 2695 2697 hex "Maximum kASLR offset" if EXPERT ··· 2819 2821 prompt "Timer frequency" 2820 2822 default HZ_250 2821 2823 help 2822 - Allows the configuration of the timer frequency. 2824 + Allows the configuration of the timer frequency. 2823 2825 2824 2826 config HZ_24 2825 2827 bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ ··· 3119 3121 default 15 3120 3122 3121 3123 config ARCH_MMAP_RND_COMPAT_BITS_MIN 3122 - default 8 3124 + default 8 3123 3125 3124 3126 config ARCH_MMAP_RND_COMPAT_BITS_MAX 3125 - default 15 3127 + default 15 3126 3128 3127 3129 config I8253 3128 3130 bool
+4 -4
arch/mips/bcm47xx/Kconfig
··· 15 15 select SSB_DRIVER_GPIO 16 16 default y 17 17 help 18 - Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. 18 + Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. 19 19 20 - This will generate an image with support for SSB and MIPS32 R1 instruction set. 20 + This will generate an image with support for SSB and MIPS32 R1 instruction set. 21 21 22 22 config BCM47XX_BCMA 23 23 bool "BCMA Support for Broadcom BCM47XX" ··· 31 31 select BCMA_DRIVER_GPIO 32 32 default y 33 33 help 34 - Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. 34 + Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. 35 35 36 - This will generate an image with support for BCMA and MIPS32 R2 instruction set. 36 + This will generate an image with support for BCMA and MIPS32 R2 instruction set. 37 37 38 38 endif
+1 -1
arch/mips/bcm63xx/boards/Kconfig
··· 5 5 default BOARD_BCM963XX 6 6 7 7 config BOARD_BCM963XX 8 - bool "Generic Broadcom 963xx boards" 8 + bool "Generic Broadcom 963xx boards" 9 9 select SSB 10 10 11 11 endchoice
+1
arch/mips/configs/generic_defconfig
··· 26 26 CONFIG_HIGHMEM=y 27 27 CONFIG_NR_CPUS=16 28 28 CONFIG_MIPS_O32_FP64_SUPPORT=y 29 + CONFIG_JUMP_LABEL=y 29 30 CONFIG_MODULES=y 30 31 CONFIG_MODULE_UNLOAD=y 31 32 CONFIG_TRIM_UNUSED_KSYMS=y
+1
arch/mips/include/asm/bootinfo.h
··· 92 92 #define BOOT_MEM_ROM_DATA 2 93 93 #define BOOT_MEM_RESERVED 3 94 94 #define BOOT_MEM_INIT_RAM 4 95 + #define BOOT_MEM_NOMAP 5 95 96 96 97 /* 97 98 * A memory map that's built upon what was determined
+10 -5
arch/mips/include/asm/jump_label.h
··· 11 11 #ifndef __ASSEMBLY__ 12 12 13 13 #include <linux/types.h> 14 + #include <asm/isa-rev.h> 14 15 15 16 #define JUMP_LABEL_NOP_SIZE 4 16 17 ··· 22 21 #endif 23 22 24 23 #ifdef CONFIG_CPU_MICROMIPS 25 - #define B_INSN "b32" 24 + # define B_INSN "b32" 25 + # define J_INSN "j32" 26 + #elif MIPS_ISA_REV >= 6 27 + # define B_INSN "bc" 28 + # define J_INSN "bc" 26 29 #else 27 - #define B_INSN "b" 30 + # define B_INSN "b" 31 + # define J_INSN "j" 28 32 #endif 29 33 30 34 static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 31 35 { 32 36 asm_volatile_goto("1:\t" B_INSN " 2f\n\t" 33 - "2:\tnop\n\t" 37 + "2:\t.insn\n\t" 34 38 ".pushsection __jump_table, \"aw\"\n\t" 35 39 WORD_INSN " 1b, %l[l_yes], %0\n\t" 36 40 ".popsection\n\t" ··· 48 42 49 43 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 50 44 { 51 - asm_volatile_goto("1:\tj %l[l_yes]\n\t" 52 - "nop\n\t" 45 + asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t" 53 46 ".pushsection __jump_table, \"aw\"\n\t" 54 47 WORD_INSN " 1b, %l[l_yes], %0\n\t" 55 48 ".popsection\n\t"
+8
arch/mips/include/asm/uasm.h
··· 86 86 Ip_u2u1s3(_daddiu); 87 87 Ip_u3u1u2(_daddu); 88 88 Ip_u1u2(_ddivu); 89 + Ip_u3u1u2(_ddivu_r6); 89 90 Ip_u1(_di); 90 91 Ip_u2u1msbu3(_dins); 91 92 Ip_u2u1msbu3(_dinsm); 92 93 Ip_u2u1msbu3(_dinsu); 93 94 Ip_u1u2(_divu); 95 + Ip_u3u1u2(_divu_r6); 94 96 Ip_u1u2u3(_dmfc0); 97 + Ip_u3u1u2(_dmodu); 95 98 Ip_u1u2u3(_dmtc0); 96 99 Ip_u1u2(_dmultu); 100 + Ip_u3u1u2(_dmulu); 97 101 Ip_u2u1u3(_drotr); 98 102 Ip_u2u1u3(_drotr32); 99 103 Ip_u2u1(_dsbh); ··· 135 131 Ip_u1u2u3(_mfhc0); 136 132 Ip_u1(_mfhi); 137 133 Ip_u1(_mflo); 134 + Ip_u3u1u2(_modu); 138 135 Ip_u3u1u2(_movn); 139 136 Ip_u3u1u2(_movz); 140 137 Ip_u1u2u3(_mtc0); ··· 144 139 Ip_u1(_mtlo); 145 140 Ip_u3u1u2(_mul); 146 141 Ip_u1u2(_multu); 142 + Ip_u3u1u2(_mulu); 147 143 Ip_u3u1u2(_nor); 148 144 Ip_u3u1u2(_or); 149 145 Ip_u2u1u3(_ori); ··· 155 149 Ip_u2s3u1(_sc); 156 150 Ip_u2s3u1(_scd); 157 151 Ip_u2s3u1(_sd); 152 + Ip_u3u1u2(_seleqz); 153 + Ip_u3u1u2(_selnez); 158 154 Ip_u2s3u1(_sh); 159 155 Ip_u2u1u3(_sll); 160 156 Ip_u3u2u1(_sllv);
+3 -3
arch/mips/include/uapi/asm/inst.h
··· 55 55 spec3_unused_op, spec4_unused_op, slt_op, sltu_op, 56 56 dadd_op, daddu_op, dsub_op, dsubu_op, 57 57 tge_op, tgeu_op, tlt_op, tltu_op, 58 - teq_op, spec5_unused_op, tne_op, spec6_unused_op, 59 - dsll_op, spec7_unused_op, dsrl_op, dsra_op, 60 - dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op 58 + teq_op, seleqz_op, tne_op, selnez_op, 59 + dsll_op, spec5_unused_op, dsrl_op, dsra_op, 60 + dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op 61 61 }; 62 62 63 63 /*
+2 -3
arch/mips/kernel/entry.S
··· 58 58 local_irq_disable 59 59 lw t0, TI_PRE_COUNT($28) 60 60 bnez t0, restore_all 61 - need_resched: 62 61 LONG_L t0, TI_FLAGS($28) 63 62 andi t1, t0, _TIF_NEED_RESCHED 64 63 beqz t1, restore_all 65 64 LONG_L t0, PT_STATUS(sp) # Interrupts off? 66 65 andi t0, 1 67 66 beqz t0, restore_all 68 - jal preempt_schedule_irq 69 - b need_resched 67 + PTR_LA ra, restore_all 68 + j preempt_schedule_irq 70 69 #endif 71 70 72 71 FEXPORT(ret_from_kernel_thread)
+25 -5
arch/mips/kernel/jump_label.c
··· 40 40 { 41 41 union mips_instruction *insn_p; 42 42 union mips_instruction insn; 43 + long offset; 43 44 44 45 insn_p = (union mips_instruction *)msk_isa16_mode(e->code); 45 - 46 - /* Jump only works within an aligned region its delay slot is in. */ 47 - BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); 48 46 49 47 /* Target must have the right alignment and ISA must be preserved. */ 50 48 BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); 51 49 52 50 if (type == JUMP_LABEL_JMP) { 53 - insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; 54 - insn.j_format.target = e->target >> J_RANGE_SHIFT; 51 + if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) { 52 + offset = e->target - ((unsigned long)insn_p + 4); 53 + offset >>= 2; 54 + 55 + /* 56 + * The branch offset must fit in the instruction's 26 57 + * bit field. 58 + */ 59 + WARN_ON((offset >= BIT(25)) || 60 + (offset < -(long)BIT(25))); 61 + 62 + insn.j_format.opcode = bc6_op; 63 + insn.j_format.target = offset; 64 + } else { 65 + /* 66 + * Jump only works within an aligned region its delay 67 + * slot is in. 68 + */ 69 + WARN_ON((e->target & ~J_RANGE_MASK) != 70 + ((e->code + 4) & ~J_RANGE_MASK)); 71 + 72 + insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; 73 + insn.j_format.target = e->target >> J_RANGE_SHIFT; 74 + } 55 75 } else { 56 76 insn.word = 0; /* nop */ 57 77 }
+16 -2
arch/mips/kernel/prom.c
··· 41 41 #ifdef CONFIG_USE_OF 42 42 void __init early_init_dt_add_memory_arch(u64 base, u64 size) 43 43 { 44 - return add_memory_region(base, size, BOOT_MEM_RAM); 44 + if (base >= PHYS_ADDR_MAX) { 45 + pr_warn("Trying to add an invalid memory region, skipped\n"); 46 + return; 47 + } 48 + 49 + /* Truncate the passed memory region instead of type casting */ 50 + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { 51 + pr_warn("Truncate memory region %llx @ %llx to size %llx\n", 52 + size, base, PHYS_ADDR_MAX - base); 53 + size = PHYS_ADDR_MAX - base; 54 + } 55 + 56 + add_memory_region(base, size, BOOT_MEM_RAM); 45 57 } 46 58 47 59 int __init early_init_dt_reserve_memory_arch(phys_addr_t base, 48 60 phys_addr_t size, bool nomap) 49 61 { 50 - add_memory_region(base, size, BOOT_MEM_RESERVED); 62 + add_memory_region(base, size, 63 + nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED); 64 + 51 65 return 0; 52 66 } 53 67
+35 -94
arch/mips/kernel/setup.c
··· 27 27 #include <linux/dma-contiguous.h> 28 28 #include <linux/decompress/generic.h> 29 29 #include <linux/of_fdt.h> 30 + #include <linux/of_reserved_mem.h> 30 31 31 32 #include <asm/addrspace.h> 32 33 #include <asm/bootinfo.h> ··· 179 178 in_ram = true; 180 179 break; 181 180 case BOOT_MEM_RESERVED: 181 + case BOOT_MEM_NOMAP: 182 182 if ((start >= start_ && start < end_) || 183 183 (start < start_ && start + size >= start_)) 184 184 free = false; ··· 214 212 break; 215 213 case BOOT_MEM_RESERVED: 216 214 printk(KERN_CONT "(reserved)\n"); 215 + break; 216 + case BOOT_MEM_NOMAP: 217 + printk(KERN_CONT "(nomap)\n"); 217 218 break; 218 219 default: 219 220 printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type); ··· 376 371 377 372 static void __init bootmem_init(void) 378 373 { 379 - unsigned long reserved_end; 380 374 phys_addr_t ramstart = PHYS_ADDR_MAX; 381 375 int i; 382 376 ··· 386 382 * will reserve the area used for the initrd. 387 383 */ 388 384 init_initrd(); 389 - reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); 390 385 391 - memblock_reserve(PHYS_OFFSET, 392 - (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); 386 + /* Reserve memory occupied by kernel. */ 387 + memblock_reserve(__pa_symbol(&_text), 388 + __pa_symbol(&_end) - __pa_symbol(&_text)); 393 389 394 390 /* 395 391 * max_low_pfn is not a number of pages. The number of pages ··· 398 394 min_low_pfn = ~0UL; 399 395 max_low_pfn = 0; 400 396 401 - /* 402 - * Find the highest page frame number we have available 403 - * and the lowest used RAM address 404 - */ 397 + /* Find the highest and lowest page frame numbers we have available. 
*/ 405 398 for (i = 0; i < boot_mem_map.nr_map; i++) { 406 399 unsigned long start, end; 407 400 ··· 428 427 max_low_pfn = end; 429 428 if (start < min_low_pfn) 430 429 min_low_pfn = start; 431 - if (end <= reserved_end) 432 - continue; 433 - #ifdef CONFIG_BLK_DEV_INITRD 434 - /* Skip zones before initrd and initrd itself */ 435 - if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) 436 - continue; 437 - #endif 438 430 } 439 431 440 432 if (min_low_pfn >= max_low_pfn) ··· 468 474 max_low_pfn = PFN_DOWN(HIGHMEM_START); 469 475 } 470 476 477 + /* Install all valid RAM ranges to the memblock memory region */ 471 478 for (i = 0; i < boot_mem_map.nr_map; i++) { 472 479 unsigned long start, end; 473 480 ··· 476 481 end = PFN_DOWN(boot_mem_map.map[i].addr 477 482 + boot_mem_map.map[i].size); 478 483 479 - if (start <= min_low_pfn) 484 + if (start < min_low_pfn) 480 485 start = min_low_pfn; 481 - if (start >= end) 482 - continue; 483 - 484 486 #ifndef CONFIG_HIGHMEM 487 + /* Ignore highmem regions if highmem is unsupported */ 485 488 if (end > max_low_pfn) 486 489 end = max_low_pfn; 487 - 488 - /* 489 - * ... finally, is the area going away? 490 - */ 490 + #endif 491 491 if (end <= start) 492 492 continue; 493 - #endif 494 493 495 494 memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); 496 - } 497 495 498 - /* 499 - * Register fully available low RAM pages with the bootmem allocator. 500 - */ 501 - for (i = 0; i < boot_mem_map.nr_map; i++) { 502 - unsigned long start, end, size; 503 - 504 - start = PFN_UP(boot_mem_map.map[i].addr); 505 - end = PFN_DOWN(boot_mem_map.map[i].addr 506 - + boot_mem_map.map[i].size); 507 - 508 - /* 509 - * Reserve usable memory. 510 - */ 496 + /* Reserve any memory except the ordinary RAM ranges. */ 511 497 switch (boot_mem_map.map[i].type) { 512 498 case BOOT_MEM_RAM: 513 499 break; 514 - case BOOT_MEM_INIT_RAM: 515 - memory_present(0, start, end); 500 + case BOOT_MEM_NOMAP: /* Discard the range from the system. 
*/ 501 + memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start)); 516 502 continue; 517 - default: 518 - /* Not usable memory */ 519 - if (start > min_low_pfn && end < max_low_pfn) 520 - memblock_reserve(boot_mem_map.map[i].addr, 521 - boot_mem_map.map[i].size); 522 - 523 - continue; 503 + default: /* Reserve the rest of the memory types at boot time */ 504 + memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start)); 505 + break; 524 506 } 525 507 526 508 /* 527 - * We are rounding up the start address of usable memory 528 - * and at the end of the usable range downwards. 509 + * In any case the added to the memblock memory regions 510 + * (highmem/lowmem, available/reserved, etc) are considered 511 + * as present, so inform sparsemem about them. 529 512 */ 530 - if (start >= max_low_pfn) 531 - continue; 532 - if (start < reserved_end) 533 - start = reserved_end; 534 - if (end > max_low_pfn) 535 - end = max_low_pfn; 536 - 537 - /* 538 - * ... finally, is the area going away? 539 - */ 540 - if (end <= start) 541 - continue; 542 - size = end - start; 543 - 544 - /* Register lowmem ranges */ 545 513 memory_present(0, start, end); 546 514 } 547 - 548 - #ifdef CONFIG_RELOCATABLE 549 - /* 550 - * The kernel reserves all memory below its _end symbol as bootmem, 551 - * but the kernel may now be at a much higher address. The memory 552 - * between the original and new locations may be returned to the system. 553 - */ 554 - if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) { 555 - unsigned long offset; 556 - extern void show_kernel_relocation(const char *level); 557 - 558 - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS); 559 - memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset); 560 - 561 - #if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO) 562 - /* 563 - * This information is necessary when debugging the kernel 564 - * But is a security vulnerability otherwise! 
565 - */ 566 - show_kernel_relocation(KERN_INFO); 567 - #endif 568 - } 569 - #endif 570 515 571 516 /* 572 517 * Reserve initrd memory if needed. ··· 716 781 */ 717 782 static void __init arch_mem_init(char **cmdline_p) 718 783 { 719 - struct memblock_region *reg; 720 784 extern void plat_mem_setup(void); 721 785 722 786 /* ··· 743 809 arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, 744 810 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, 745 811 BOOT_MEM_INIT_RAM); 812 + arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT, 813 + PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT, 814 + BOOT_MEM_RAM); 746 815 747 816 pr_info("Determined physical RAM map:\n"); 748 817 print_memory_map(); ··· 821 884 plat_swiotlb_setup(); 822 885 823 886 dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); 824 - /* Tell bootmem about cma reserved memblock section */ 825 - for_each_memblock(reserved, reg) 826 - if (reg->size != 0) 827 - memblock_reserve(reg->base, reg->size); 828 887 829 - reserve_bootmem_region(__pa_symbol(&__nosave_begin), 830 - __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ 888 + /* Reserve for hibernation. */ 889 + memblock_reserve(__pa_symbol(&__nosave_begin), 890 + __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); 891 + 892 + fdt_init_reserved_mem(); 893 + 894 + memblock_dump_all(); 895 + 896 + early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn)); 831 897 } 832 898 833 899 static void __init resource_init(void) ··· 875 935 res->flags |= IORESOURCE_SYSRAM; 876 936 break; 877 937 case BOOT_MEM_RESERVED: 938 + case BOOT_MEM_NOMAP: 878 939 default: 879 940 res->name = "reserved"; 880 941 }
+22 -41
arch/mips/kernel/traps.c
··· 2151 2151 2152 2152 static void configure_exception_vector(void) 2153 2153 { 2154 - if (cpu_has_veic || cpu_has_vint) { 2154 + if (cpu_has_mips_r2_r6) { 2155 2155 unsigned long sr = set_c0_status(ST0_BEV); 2156 2156 /* If available, use WG to set top bits of EBASE */ 2157 2157 if (cpu_has_ebase_wg) { ··· 2163 2163 } 2164 2164 write_c0_ebase(ebase); 2165 2165 write_c0_status(sr); 2166 + } 2167 + if (cpu_has_veic || cpu_has_vint) { 2166 2168 /* Setting vector spacing enables EI/VI mode */ 2167 2169 change_c0_intctl(0x3e0, VECTORSPACING); 2168 2170 } ··· 2195 2193 * o read IntCtl.IPFDC to determine the fast debug channel interrupt 2196 2194 */ 2197 2195 if (cpu_has_mips_r2_r6) { 2198 - /* 2199 - * We shouldn't trust a secondary core has a sane EBASE register 2200 - * so use the one calculated by the boot CPU. 2201 - */ 2202 - if (!is_boot_cpu) { 2203 - /* If available, use WG to set top bits of EBASE */ 2204 - if (cpu_has_ebase_wg) { 2205 - #ifdef CONFIG_64BIT 2206 - write_c0_ebase_64(ebase | MIPS_EBASE_WG); 2207 - #else 2208 - write_c0_ebase(ebase | MIPS_EBASE_WG); 2209 - #endif 2210 - } 2211 - write_c0_ebase(ebase); 2212 - } 2213 - 2214 2196 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2215 2197 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2216 2198 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; ··· 2270 2284 extern char except_vec3_generic; 2271 2285 extern char except_vec4; 2272 2286 extern char except_vec3_r4000; 2273 - unsigned long i; 2287 + unsigned long i, vec_size; 2288 + phys_addr_t ebase_pa; 2274 2289 2275 2290 check_wait(); 2276 2291 2277 - if (cpu_has_veic || cpu_has_vint) { 2278 - unsigned long size = 0x200 + VECTORSPACING*64; 2279 - phys_addr_t ebase_pa; 2292 + if (!cpu_has_mips_r2_r6) { 2293 + ebase = CAC_BASE; 2294 + ebase_pa = virt_to_phys((void *)ebase); 2295 + vec_size = 0x400; 2280 2296 2281 - ebase = (unsigned long) 2282 - memblock_alloc(size, 1 << fls(size)); 2283 - if (!ebase) 2297 + memblock_reserve(ebase_pa, 
vec_size); 2298 + } else { 2299 + if (cpu_has_veic || cpu_has_vint) 2300 + vec_size = 0x200 + VECTORSPACING*64; 2301 + else 2302 + vec_size = PAGE_SIZE; 2303 + 2304 + ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size)); 2305 + if (!ebase_pa) 2284 2306 panic("%s: Failed to allocate %lu bytes align=0x%x\n", 2285 - __func__, size, 1 << fls(size)); 2307 + __func__, vec_size, 1 << fls(vec_size)); 2286 2308 2287 2309 /* 2288 2310 * Try to ensure ebase resides in KSeg0 if possible. ··· 2303 2309 * EVA is special though as it allows segments to be rearranged 2304 2310 * and to become uncached during cache error handling. 2305 2311 */ 2306 - ebase_pa = __pa(ebase); 2307 2312 if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) 2308 2313 ebase = CKSEG0ADDR(ebase_pa); 2309 - } else { 2310 - ebase = CAC_BASE; 2311 - 2312 - if (cpu_has_mips_r2_r6) { 2313 - if (cpu_has_ebase_wg) { 2314 - #ifdef CONFIG_64BIT 2315 - ebase = (read_c0_ebase_64() & ~0xfff); 2316 - #else 2317 - ebase = (read_c0_ebase() & ~0xfff); 2318 - #endif 2319 - } else { 2320 - ebase += (read_c0_ebase() & 0x3ffff000); 2321 - } 2322 - } 2314 + else 2315 + ebase = (unsigned long)phys_to_virt(ebase_pa); 2323 2316 } 2324 2317 2325 2318 if (cpu_has_mmips) { ··· 2440 2459 else 2441 2460 set_handler(0x080, &except_vec3_generic, 0x80); 2442 2461 2443 - local_flush_icache_range(ebase, ebase + 0x400); 2462 + local_flush_icache_range(ebase, ebase + vec_size); 2444 2463 2445 2464 sort_extable(__start___dbe_table, __stop___dbe_table); 2446 2465
+1 -3
arch/mips/kvm/emulate.c
··· 1141 1141 unsigned long pc = vcpu->arch.pc; 1142 1142 int index; 1143 1143 1144 - get_random_bytes(&index, sizeof(index)); 1145 - index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); 1146 - 1144 + index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); 1147 1145 tlb = &vcpu->arch.guest_tlb[index]; 1148 1146 1149 1147 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+14
arch/mips/mm/uasm-mips.c
··· 76 76 [insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, 77 77 [insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD}, 78 78 [insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT}, 79 + [insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op), 80 + RS | RT | RD}, 79 81 [insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT}, 80 82 [insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE}, 81 83 [insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE}, 82 84 [insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE}, 83 85 [insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT}, 86 + [insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op), 87 + RS | RT | RD}, 84 88 [insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, 89 + [insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op), 90 + RS | RT | RD}, 85 91 [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, 86 92 [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, 93 + [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), 94 + RS | RT | RD}, 87 95 [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, 88 96 [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, 89 97 [insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD}, ··· 140 132 [insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET}, 141 133 [insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD}, 142 134 [insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD}, 135 + [insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op), 136 + RS | RT | RD}, 143 137 [insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD}, 144 138 [insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD}, 145 139 [insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 146 140 [insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET}, 147 141 [insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS}, 148 142 [insn_mtlo] 
= {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, 143 + [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), 144 + RS | RT | RD}, 149 145 #ifndef CONFIG_CPU_MIPSR6 150 146 [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 151 147 #else ··· 175 163 [insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9}, 176 164 #endif 177 165 [insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, 166 + [insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD}, 167 + [insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD}, 178 168 [insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, 179 169 [insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE}, 180 170 [insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
+24 -15
arch/mips/mm/uasm.c
··· 50 50 insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez, 51 51 insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1, 52 52 insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu, 53 - insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0, 54 - insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, 55 - insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, 56 - insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, 57 - insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, 58 - insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, 59 - insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, 60 - insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0, 61 - insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor, 62 - insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, 63 - insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv, 64 - insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav, 65 - insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, 66 - insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, 67 - insn_xor, insn_xori, insn_yield, 53 + insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, 54 + insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu, 55 + insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll, 56 + insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl, 57 + insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins, 58 + insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld, 59 + insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, 60 + insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, 61 + insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, 62 + insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, 
insn_nor, 63 + insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, 64 + insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, 65 + insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, 66 + insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, 67 + insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, 68 + insn_wsbh, insn_xor, insn_xori, insn_yield, 68 69 insn_invalid /* insn_invalid must be last */ 69 70 }; 70 71 ··· 288 287 I_u1u2(_ctc1) 289 288 I_u2u1(_ctcmsa) 290 289 I_u1u2(_ddivu) 290 + I_u3u1u2(_ddivu_r6) 291 291 I_u1u2u3(_dmfc0) 292 + I_u3u1u2(_dmodu) 292 293 I_u1u2u3(_dmtc0) 293 294 I_u1u2(_dmultu) 295 + I_u3u1u2(_dmulu) 294 296 I_u2u1s3(_daddiu) 295 297 I_u3u1u2(_daddu) 296 298 I_u1(_di); 297 299 I_u1u2(_divu) 300 + I_u3u1u2(_divu_r6) 298 301 I_u2u1(_dsbh); 299 302 I_u2u1(_dshd); 300 303 I_u2u1u3(_dsll) ··· 332 327 I_u2s3u1(_lwu) 333 328 I_u1u2u3(_mfc0) 334 329 I_u1u2u3(_mfhc0) 330 + I_u3u1u2(_modu) 335 331 I_u3u1u2(_movn) 336 332 I_u3u1u2(_movz) 337 333 I_u1(_mfhi) ··· 343 337 I_u1(_mtlo) 344 338 I_u3u1u2(_mul) 345 339 I_u1u2(_multu) 340 + I_u3u1u2(_mulu) 346 341 I_u3u1u2(_nor) 347 342 I_u3u1u2(_or) 348 343 I_u2u1u3(_ori) ··· 352 345 I_u2s3u1(_sc) 353 346 I_u2s3u1(_scd) 354 347 I_u2s3u1(_sd) 348 + I_u3u1u2(_seleqz) 349 + I_u3u1u2(_selnez) 355 350 I_u2s3u1(_sh) 356 351 I_u2u1u3(_sll) 357 352 I_u3u2u1(_sllv)
-1
arch/mips/net/Makefile
··· 1 1 # MIPS networking code 2 2 3 - obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o 4 3 obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
-1270
arch/mips/net/bpf_jit.c
··· 1 - /* 2 - * Just-In-Time compiler for BPF filters on MIPS 3 - * 4 - * Copyright (c) 2014 Imagination Technologies Ltd. 5 - * Author: Markos Chandras <markos.chandras@imgtec.com> 6 - * 7 - * This program is free software; you can redistribute it and/or modify it 8 - * under the terms of the GNU General Public License as published by the 9 - * Free Software Foundation; version 2 of the License. 10 - */ 11 - 12 - #include <linux/bitops.h> 13 - #include <linux/compiler.h> 14 - #include <linux/errno.h> 15 - #include <linux/filter.h> 16 - #include <linux/if_vlan.h> 17 - #include <linux/moduleloader.h> 18 - #include <linux/netdevice.h> 19 - #include <linux/string.h> 20 - #include <linux/slab.h> 21 - #include <linux/types.h> 22 - #include <asm/asm.h> 23 - #include <asm/bitops.h> 24 - #include <asm/cacheflush.h> 25 - #include <asm/cpu-features.h> 26 - #include <asm/uasm.h> 27 - 28 - #include "bpf_jit.h" 29 - 30 - /* ABI 31 - * r_skb_hl SKB header length 32 - * r_data SKB data pointer 33 - * r_off Offset 34 - * r_A BPF register A 35 - * r_X BPF register X 36 - * r_skb *skb 37 - * r_M *scratch memory 38 - * r_skb_len SKB length 39 - * 40 - * On entry (*bpf_func)(*skb, *filter) 41 - * a0 = MIPS_R_A0 = skb; 42 - * a1 = MIPS_R_A1 = filter; 43 - * 44 - * Stack 45 - * ... 46 - * M[15] 47 - * M[14] 48 - * M[13] 49 - * ... 50 - * M[0] <-- r_M 51 - * saved reg k-1 52 - * saved reg k-2 53 - * ... 
54 - * saved reg 0 <-- r_sp 55 - * <no argument area> 56 - * 57 - * Packet layout 58 - * 59 - * <--------------------- len ------------------------> 60 - * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------> 61 - * ---------------------------------------------------- 62 - * | skb->data | 63 - * ---------------------------------------------------- 64 - */ 65 - 66 - #define ptr typeof(unsigned long) 67 - 68 - #define SCRATCH_OFF(k) (4 * (k)) 69 - 70 - /* JIT flags */ 71 - #define SEEN_CALL (1 << BPF_MEMWORDS) 72 - #define SEEN_SREG_SFT (BPF_MEMWORDS + 1) 73 - #define SEEN_SREG_BASE (1 << SEEN_SREG_SFT) 74 - #define SEEN_SREG(x) (SEEN_SREG_BASE << (x)) 75 - #define SEEN_OFF SEEN_SREG(2) 76 - #define SEEN_A SEEN_SREG(3) 77 - #define SEEN_X SEEN_SREG(4) 78 - #define SEEN_SKB SEEN_SREG(5) 79 - #define SEEN_MEM SEEN_SREG(6) 80 - /* SEEN_SK_DATA also implies skb_hl an skb_len */ 81 - #define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0)) 82 - 83 - /* Arguments used by JIT */ 84 - #define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ 85 - 86 - #define SBIT(x) (1 << (x)) /* Signed version of BIT() */ 87 - 88 - /** 89 - * struct jit_ctx - JIT context 90 - * @skf: The sk_filter 91 - * @prologue_bytes: Number of bytes for prologue 92 - * @idx: Instruction index 93 - * @flags: JIT flags 94 - * @offsets: Instruction offsets 95 - * @target: Memory location for the compiled filter 96 - */ 97 - struct jit_ctx { 98 - const struct bpf_prog *skf; 99 - unsigned int prologue_bytes; 100 - u32 idx; 101 - u32 flags; 102 - u32 *offsets; 103 - u32 *target; 104 - }; 105 - 106 - 107 - static inline int optimize_div(u32 *k) 108 - { 109 - /* power of 2 divides can be implemented with right shift */ 110 - if (!(*k & (*k-1))) { 111 - *k = ilog2(*k); 112 - return 1; 113 - } 114 - 115 - return 0; 116 - } 117 - 118 - static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx); 119 - 120 - /* Simply emit the instruction if the JIT memory space has been allocated */ 121 
- #define emit_instr(ctx, func, ...) \ 122 - do { \ 123 - if ((ctx)->target != NULL) { \ 124 - u32 *p = &(ctx)->target[ctx->idx]; \ 125 - uasm_i_##func(&p, ##__VA_ARGS__); \ 126 - } \ 127 - (ctx)->idx++; \ 128 - } while (0) 129 - 130 - /* 131 - * Similar to emit_instr but it must be used when we need to emit 132 - * 32-bit or 64-bit instructions 133 - */ 134 - #define emit_long_instr(ctx, func, ...) \ 135 - do { \ 136 - if ((ctx)->target != NULL) { \ 137 - u32 *p = &(ctx)->target[ctx->idx]; \ 138 - UASM_i_##func(&p, ##__VA_ARGS__); \ 139 - } \ 140 - (ctx)->idx++; \ 141 - } while (0) 142 - 143 - /* Determine if immediate is within the 16-bit signed range */ 144 - static inline bool is_range16(s32 imm) 145 - { 146 - return !(imm >= SBIT(15) || imm < -SBIT(15)); 147 - } 148 - 149 - static inline void emit_addu(unsigned int dst, unsigned int src1, 150 - unsigned int src2, struct jit_ctx *ctx) 151 - { 152 - emit_instr(ctx, addu, dst, src1, src2); 153 - } 154 - 155 - static inline void emit_nop(struct jit_ctx *ctx) 156 - { 157 - emit_instr(ctx, nop); 158 - } 159 - 160 - /* Load a u32 immediate to a register */ 161 - static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) 162 - { 163 - if (ctx->target != NULL) { 164 - /* addiu can only handle s16 */ 165 - if (!is_range16(imm)) { 166 - u32 *p = &ctx->target[ctx->idx]; 167 - uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); 168 - p = &ctx->target[ctx->idx + 1]; 169 - uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff); 170 - } else { 171 - u32 *p = &ctx->target[ctx->idx]; 172 - uasm_i_addiu(&p, dst, r_zero, imm); 173 - } 174 - } 175 - ctx->idx++; 176 - 177 - if (!is_range16(imm)) 178 - ctx->idx++; 179 - } 180 - 181 - static inline void emit_or(unsigned int dst, unsigned int src1, 182 - unsigned int src2, struct jit_ctx *ctx) 183 - { 184 - emit_instr(ctx, or, dst, src1, src2); 185 - } 186 - 187 - static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, 188 - struct jit_ctx *ctx) 189 - { 190 - if (imm 
>= BIT(16)) { 191 - emit_load_imm(r_tmp, imm, ctx); 192 - emit_or(dst, src, r_tmp, ctx); 193 - } else { 194 - emit_instr(ctx, ori, dst, src, imm); 195 - } 196 - } 197 - 198 - static inline void emit_daddiu(unsigned int dst, unsigned int src, 199 - int imm, struct jit_ctx *ctx) 200 - { 201 - /* 202 - * Only used for stack, so the imm is relatively small 203 - * and it fits in 15-bits 204 - */ 205 - emit_instr(ctx, daddiu, dst, src, imm); 206 - } 207 - 208 - static inline void emit_addiu(unsigned int dst, unsigned int src, 209 - u32 imm, struct jit_ctx *ctx) 210 - { 211 - if (!is_range16(imm)) { 212 - emit_load_imm(r_tmp, imm, ctx); 213 - emit_addu(dst, r_tmp, src, ctx); 214 - } else { 215 - emit_instr(ctx, addiu, dst, src, imm); 216 - } 217 - } 218 - 219 - static inline void emit_and(unsigned int dst, unsigned int src1, 220 - unsigned int src2, struct jit_ctx *ctx) 221 - { 222 - emit_instr(ctx, and, dst, src1, src2); 223 - } 224 - 225 - static inline void emit_andi(unsigned int dst, unsigned int src, 226 - u32 imm, struct jit_ctx *ctx) 227 - { 228 - /* If imm does not fit in u16 then load it to register */ 229 - if (imm >= BIT(16)) { 230 - emit_load_imm(r_tmp, imm, ctx); 231 - emit_and(dst, src, r_tmp, ctx); 232 - } else { 233 - emit_instr(ctx, andi, dst, src, imm); 234 - } 235 - } 236 - 237 - static inline void emit_xor(unsigned int dst, unsigned int src1, 238 - unsigned int src2, struct jit_ctx *ctx) 239 - { 240 - emit_instr(ctx, xor, dst, src1, src2); 241 - } 242 - 243 - static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) 244 - { 245 - /* If imm does not fit in u16 then load it to register */ 246 - if (imm >= BIT(16)) { 247 - emit_load_imm(r_tmp, imm, ctx); 248 - emit_xor(dst, src, r_tmp, ctx); 249 - } else { 250 - emit_instr(ctx, xori, dst, src, imm); 251 - } 252 - } 253 - 254 - static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) 255 - { 256 - emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); 257 - } 258 - 259 - static 
inline void emit_subu(unsigned int dst, unsigned int src1, 260 - unsigned int src2, struct jit_ctx *ctx) 261 - { 262 - emit_instr(ctx, subu, dst, src1, src2); 263 - } 264 - 265 - static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) 266 - { 267 - emit_subu(reg, r_zero, reg, ctx); 268 - } 269 - 270 - static inline void emit_sllv(unsigned int dst, unsigned int src, 271 - unsigned int sa, struct jit_ctx *ctx) 272 - { 273 - emit_instr(ctx, sllv, dst, src, sa); 274 - } 275 - 276 - static inline void emit_sll(unsigned int dst, unsigned int src, 277 - unsigned int sa, struct jit_ctx *ctx) 278 - { 279 - /* sa is 5-bits long */ 280 - if (sa >= BIT(5)) 281 - /* Shifting >= 32 results in zero */ 282 - emit_jit_reg_move(dst, r_zero, ctx); 283 - else 284 - emit_instr(ctx, sll, dst, src, sa); 285 - } 286 - 287 - static inline void emit_srlv(unsigned int dst, unsigned int src, 288 - unsigned int sa, struct jit_ctx *ctx) 289 - { 290 - emit_instr(ctx, srlv, dst, src, sa); 291 - } 292 - 293 - static inline void emit_srl(unsigned int dst, unsigned int src, 294 - unsigned int sa, struct jit_ctx *ctx) 295 - { 296 - /* sa is 5-bits long */ 297 - if (sa >= BIT(5)) 298 - /* Shifting >= 32 results in zero */ 299 - emit_jit_reg_move(dst, r_zero, ctx); 300 - else 301 - emit_instr(ctx, srl, dst, src, sa); 302 - } 303 - 304 - static inline void emit_slt(unsigned int dst, unsigned int src1, 305 - unsigned int src2, struct jit_ctx *ctx) 306 - { 307 - emit_instr(ctx, slt, dst, src1, src2); 308 - } 309 - 310 - static inline void emit_sltu(unsigned int dst, unsigned int src1, 311 - unsigned int src2, struct jit_ctx *ctx) 312 - { 313 - emit_instr(ctx, sltu, dst, src1, src2); 314 - } 315 - 316 - static inline void emit_sltiu(unsigned dst, unsigned int src, 317 - unsigned int imm, struct jit_ctx *ctx) 318 - { 319 - /* 16 bit immediate */ 320 - if (!is_range16((s32)imm)) { 321 - emit_load_imm(r_tmp, imm, ctx); 322 - emit_sltu(dst, src, r_tmp, ctx); 323 - } else { 324 - emit_instr(ctx, 
sltiu, dst, src, imm); 325 - } 326 - 327 - } 328 - 329 - /* Store register on the stack */ 330 - static inline void emit_store_stack_reg(ptr reg, ptr base, 331 - unsigned int offset, 332 - struct jit_ctx *ctx) 333 - { 334 - emit_long_instr(ctx, SW, reg, offset, base); 335 - } 336 - 337 - static inline void emit_store(ptr reg, ptr base, unsigned int offset, 338 - struct jit_ctx *ctx) 339 - { 340 - emit_instr(ctx, sw, reg, offset, base); 341 - } 342 - 343 - static inline void emit_load_stack_reg(ptr reg, ptr base, 344 - unsigned int offset, 345 - struct jit_ctx *ctx) 346 - { 347 - emit_long_instr(ctx, LW, reg, offset, base); 348 - } 349 - 350 - static inline void emit_load(unsigned int reg, unsigned int base, 351 - unsigned int offset, struct jit_ctx *ctx) 352 - { 353 - emit_instr(ctx, lw, reg, offset, base); 354 - } 355 - 356 - static inline void emit_load_byte(unsigned int reg, unsigned int base, 357 - unsigned int offset, struct jit_ctx *ctx) 358 - { 359 - emit_instr(ctx, lb, reg, offset, base); 360 - } 361 - 362 - static inline void emit_half_load(unsigned int reg, unsigned int base, 363 - unsigned int offset, struct jit_ctx *ctx) 364 - { 365 - emit_instr(ctx, lh, reg, offset, base); 366 - } 367 - 368 - static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base, 369 - unsigned int offset, struct jit_ctx *ctx) 370 - { 371 - emit_instr(ctx, lhu, reg, offset, base); 372 - } 373 - 374 - static inline void emit_mul(unsigned int dst, unsigned int src1, 375 - unsigned int src2, struct jit_ctx *ctx) 376 - { 377 - emit_instr(ctx, mul, dst, src1, src2); 378 - } 379 - 380 - static inline void emit_div(unsigned int dst, unsigned int src, 381 - struct jit_ctx *ctx) 382 - { 383 - if (ctx->target != NULL) { 384 - u32 *p = &ctx->target[ctx->idx]; 385 - uasm_i_divu(&p, dst, src); 386 - p = &ctx->target[ctx->idx + 1]; 387 - uasm_i_mflo(&p, dst); 388 - } 389 - ctx->idx += 2; /* 2 insts */ 390 - } 391 - 392 - static inline void emit_mod(unsigned int dst, unsigned 
int src, 393 - struct jit_ctx *ctx) 394 - { 395 - if (ctx->target != NULL) { 396 - u32 *p = &ctx->target[ctx->idx]; 397 - uasm_i_divu(&p, dst, src); 398 - p = &ctx->target[ctx->idx + 1]; 399 - uasm_i_mfhi(&p, dst); 400 - } 401 - ctx->idx += 2; /* 2 insts */ 402 - } 403 - 404 - static inline void emit_dsll(unsigned int dst, unsigned int src, 405 - unsigned int sa, struct jit_ctx *ctx) 406 - { 407 - emit_instr(ctx, dsll, dst, src, sa); 408 - } 409 - 410 - static inline void emit_dsrl32(unsigned int dst, unsigned int src, 411 - unsigned int sa, struct jit_ctx *ctx) 412 - { 413 - emit_instr(ctx, dsrl32, dst, src, sa); 414 - } 415 - 416 - static inline void emit_wsbh(unsigned int dst, unsigned int src, 417 - struct jit_ctx *ctx) 418 - { 419 - emit_instr(ctx, wsbh, dst, src); 420 - } 421 - 422 - /* load pointer to register */ 423 - static inline void emit_load_ptr(unsigned int dst, unsigned int src, 424 - int imm, struct jit_ctx *ctx) 425 - { 426 - /* src contains the base addr of the 32/64-pointer */ 427 - emit_long_instr(ctx, LW, dst, imm, src); 428 - } 429 - 430 - /* load a function pointer to register */ 431 - static inline void emit_load_func(unsigned int reg, ptr imm, 432 - struct jit_ctx *ctx) 433 - { 434 - if (IS_ENABLED(CONFIG_64BIT)) { 435 - /* At this point imm is always 64-bit */ 436 - emit_load_imm(r_tmp, (u64)imm >> 32, ctx); 437 - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ 438 - emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); 439 - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ 440 - emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); 441 - } else { 442 - emit_load_imm(reg, imm, ctx); 443 - } 444 - } 445 - 446 - /* Move to real MIPS register */ 447 - static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) 448 - { 449 - emit_long_instr(ctx, ADDU, dst, src, r_zero); 450 - } 451 - 452 - /* Move to JIT (32-bit) register */ 453 - static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) 454 - { 
455 - emit_addu(dst, src, r_zero, ctx); 456 - } 457 - 458 - /* Compute the immediate value for PC-relative branches. */ 459 - static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) 460 - { 461 - if (ctx->target == NULL) 462 - return 0; 463 - 464 - /* 465 - * We want a pc-relative branch. We only do forward branches 466 - * so tgt is always after pc. tgt is the instruction offset 467 - * we want to jump to. 468 - 469 - * Branch on MIPS: 470 - * I: target_offset <- sign_extend(offset) 471 - * I+1: PC += target_offset (delay slot) 472 - * 473 - * ctx->idx currently points to the branch instruction 474 - * but the offset is added to the delay slot so we need 475 - * to subtract 4. 476 - */ 477 - return ctx->offsets[tgt] - 478 - (ctx->idx * 4 - ctx->prologue_bytes) - 4; 479 - } 480 - 481 - static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2, 482 - unsigned int imm, struct jit_ctx *ctx) 483 - { 484 - if (ctx->target != NULL) { 485 - u32 *p = &ctx->target[ctx->idx]; 486 - 487 - switch (cond) { 488 - case MIPS_COND_EQ: 489 - uasm_i_beq(&p, reg1, reg2, imm); 490 - break; 491 - case MIPS_COND_NE: 492 - uasm_i_bne(&p, reg1, reg2, imm); 493 - break; 494 - case MIPS_COND_ALL: 495 - uasm_i_b(&p, imm); 496 - break; 497 - default: 498 - pr_warn("%s: Unhandled branch conditional: %d\n", 499 - __func__, cond); 500 - } 501 - } 502 - ctx->idx++; 503 - } 504 - 505 - static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) 506 - { 507 - emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); 508 - } 509 - 510 - static inline void emit_jalr(unsigned int link, unsigned int reg, 511 - struct jit_ctx *ctx) 512 - { 513 - emit_instr(ctx, jalr, link, reg); 514 - } 515 - 516 - static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) 517 - { 518 - emit_instr(ctx, jr, reg); 519 - } 520 - 521 - static inline u16 align_sp(unsigned int num) 522 - { 523 - /* Double word alignment for 32-bit, quadword for 64-bit */ 524 - unsigned int align = 
IS_ENABLED(CONFIG_64BIT) ? 16 : 8; 525 - num = (num + (align - 1)) & -align; 526 - return num; 527 - } 528 - 529 - static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) 530 - { 531 - int i = 0, real_off = 0; 532 - u32 sflags, tmp_flags; 533 - 534 - /* Adjust the stack pointer */ 535 - if (offset) 536 - emit_stack_offset(-align_sp(offset), ctx); 537 - 538 - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; 539 - /* sflags is essentially a bitmap */ 540 - while (tmp_flags) { 541 - if ((sflags >> i) & 0x1) { 542 - emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off, 543 - ctx); 544 - real_off += SZREG; 545 - } 546 - i++; 547 - tmp_flags >>= 1; 548 - } 549 - 550 - /* save return address */ 551 - if (ctx->flags & SEEN_CALL) { 552 - emit_store_stack_reg(r_ra, r_sp, real_off, ctx); 553 - real_off += SZREG; 554 - } 555 - 556 - /* Setup r_M leaving the alignment gap if necessary */ 557 - if (ctx->flags & SEEN_MEM) { 558 - if (real_off % (SZREG * 2)) 559 - real_off += SZREG; 560 - emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); 561 - } 562 - } 563 - 564 - static void restore_bpf_jit_regs(struct jit_ctx *ctx, 565 - unsigned int offset) 566 - { 567 - int i, real_off = 0; 568 - u32 sflags, tmp_flags; 569 - 570 - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; 571 - /* sflags is a bitmap */ 572 - i = 0; 573 - while (tmp_flags) { 574 - if ((sflags >> i) & 0x1) { 575 - emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off, 576 - ctx); 577 - real_off += SZREG; 578 - } 579 - i++; 580 - tmp_flags >>= 1; 581 - } 582 - 583 - /* restore return address */ 584 - if (ctx->flags & SEEN_CALL) 585 - emit_load_stack_reg(r_ra, r_sp, real_off, ctx); 586 - 587 - /* Restore the sp and discard the scrach memory */ 588 - if (offset) 589 - emit_stack_offset(align_sp(offset), ctx); 590 - } 591 - 592 - static unsigned int get_stack_depth(struct jit_ctx *ctx) 593 - { 594 - int sp_off = 0; 595 - 596 - 597 - /* How may s* regs do we need to preserved? 
*/ 598 - sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG; 599 - 600 - if (ctx->flags & SEEN_MEM) 601 - sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */ 602 - 603 - if (ctx->flags & SEEN_CALL) 604 - sp_off += SZREG; /* Space for our ra register */ 605 - 606 - return sp_off; 607 - } 608 - 609 - static void build_prologue(struct jit_ctx *ctx) 610 - { 611 - int sp_off; 612 - 613 - /* Calculate the total offset for the stack pointer */ 614 - sp_off = get_stack_depth(ctx); 615 - save_bpf_jit_regs(ctx, sp_off); 616 - 617 - if (ctx->flags & SEEN_SKB) 618 - emit_reg_move(r_skb, MIPS_R_A0, ctx); 619 - 620 - if (ctx->flags & SEEN_SKB_DATA) { 621 - /* Load packet length */ 622 - emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len), 623 - ctx); 624 - emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len), 625 - ctx); 626 - /* Load the data pointer */ 627 - emit_load_ptr(r_skb_data, r_skb, 628 - offsetof(struct sk_buff, data), ctx); 629 - /* Load the header length */ 630 - emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx); 631 - } 632 - 633 - if (ctx->flags & SEEN_X) 634 - emit_jit_reg_move(r_X, r_zero, ctx); 635 - 636 - /* 637 - * Do not leak kernel data to userspace, we only need to clear 638 - * r_A if it is ever used. In fact if it is never used, we 639 - * will not save/restore it, so clearing it in this case would 640 - * corrupt the state of the caller. 641 - */ 642 - if (bpf_needs_clear_a(&ctx->skf->insns[0]) && 643 - (ctx->flags & SEEN_A)) 644 - emit_jit_reg_move(r_A, r_zero, ctx); 645 - } 646 - 647 - static void build_epilogue(struct jit_ctx *ctx) 648 - { 649 - unsigned int sp_off; 650 - 651 - /* Calculate the total offset for the stack pointer */ 652 - 653 - sp_off = get_stack_depth(ctx); 654 - restore_bpf_jit_regs(ctx, sp_off); 655 - 656 - /* Return */ 657 - emit_jr(r_ra, ctx); 658 - emit_nop(ctx); 659 - } 660 - 661 - #define CHOOSE_LOAD_FUNC(K, func) \ 662 - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative : func) : \ 663 - func##_positive) 664 - 665 - static int build_body(struct jit_ctx *ctx) 666 - { 667 - const struct bpf_prog *prog = ctx->skf; 668 - const struct sock_filter *inst; 669 - unsigned int i, off, condt; 670 - u32 k, b_off __maybe_unused; 671 - u8 (*sk_load_func)(unsigned long *skb, int offset); 672 - 673 - for (i = 0; i < prog->len; i++) { 674 - u16 code; 675 - 676 - inst = &(prog->insns[i]); 677 - pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", 678 - __func__, inst->code, inst->jt, inst->jf, inst->k); 679 - k = inst->k; 680 - code = bpf_anc_helper(inst); 681 - 682 - if (ctx->target == NULL) 683 - ctx->offsets[i] = ctx->idx * 4; 684 - 685 - switch (code) { 686 - case BPF_LD | BPF_IMM: 687 - /* A <- k ==> li r_A, k */ 688 - ctx->flags |= SEEN_A; 689 - emit_load_imm(r_A, k, ctx); 690 - break; 691 - case BPF_LD | BPF_W | BPF_LEN: 692 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 693 - /* A <- len ==> lw r_A, offset(skb) */ 694 - ctx->flags |= SEEN_SKB | SEEN_A; 695 - off = offsetof(struct sk_buff, len); 696 - emit_load(r_A, r_skb, off, ctx); 697 - break; 698 - case BPF_LD | BPF_MEM: 699 - /* A <- M[k] ==> lw r_A, offset(M) */ 700 - ctx->flags |= SEEN_MEM | SEEN_A; 701 - emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); 702 - break; 703 - case BPF_LD | BPF_W | BPF_ABS: 704 - /* A <- P[k:4] */ 705 - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word); 706 - goto load; 707 - case BPF_LD | BPF_H | BPF_ABS: 708 - /* A <- P[k:2] */ 709 - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half); 710 - goto load; 711 - case BPF_LD | BPF_B | BPF_ABS: 712 - /* A <- P[k:1] */ 713 - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte); 714 - load: 715 - emit_load_imm(r_off, k, ctx); 716 - load_common: 717 - ctx->flags |= SEEN_CALL | SEEN_OFF | 718 - SEEN_SKB | SEEN_A | SEEN_SKB_DATA; 719 - 720 - emit_load_func(r_s0, (ptr)sk_load_func, ctx); 721 - emit_reg_move(MIPS_R_A0, r_skb, ctx); 722 - emit_jalr(MIPS_R_RA, r_s0, ctx); 723 - /* Load second argument 
to delay slot */ 724 - emit_reg_move(MIPS_R_A1, r_off, ctx); 725 - /* Check the error value */ 726 - emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx), 727 - ctx); 728 - /* Load return register on DS for failures */ 729 - emit_reg_move(r_ret, r_zero, ctx); 730 - /* Return with error */ 731 - emit_b(b_imm(prog->len, ctx), ctx); 732 - emit_nop(ctx); 733 - break; 734 - case BPF_LD | BPF_W | BPF_IND: 735 - /* A <- P[X + k:4] */ 736 - sk_load_func = sk_load_word; 737 - goto load_ind; 738 - case BPF_LD | BPF_H | BPF_IND: 739 - /* A <- P[X + k:2] */ 740 - sk_load_func = sk_load_half; 741 - goto load_ind; 742 - case BPF_LD | BPF_B | BPF_IND: 743 - /* A <- P[X + k:1] */ 744 - sk_load_func = sk_load_byte; 745 - load_ind: 746 - ctx->flags |= SEEN_OFF | SEEN_X; 747 - emit_addiu(r_off, r_X, k, ctx); 748 - goto load_common; 749 - case BPF_LDX | BPF_IMM: 750 - /* X <- k */ 751 - ctx->flags |= SEEN_X; 752 - emit_load_imm(r_X, k, ctx); 753 - break; 754 - case BPF_LDX | BPF_MEM: 755 - /* X <- M[k] */ 756 - ctx->flags |= SEEN_X | SEEN_MEM; 757 - emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); 758 - break; 759 - case BPF_LDX | BPF_W | BPF_LEN: 760 - /* X <- len */ 761 - ctx->flags |= SEEN_X | SEEN_SKB; 762 - off = offsetof(struct sk_buff, len); 763 - emit_load(r_X, r_skb, off, ctx); 764 - break; 765 - case BPF_LDX | BPF_B | BPF_MSH: 766 - /* X <- 4 * (P[k:1] & 0xf) */ 767 - ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB; 768 - /* Load offset to a1 */ 769 - emit_load_func(r_s0, (ptr)sk_load_byte, ctx); 770 - /* 771 - * This may emit two instructions so it may not fit 772 - * in the delay slot. So use a0 in the delay slot. 
773 - */ 774 - emit_load_imm(MIPS_R_A1, k, ctx); 775 - emit_jalr(MIPS_R_RA, r_s0, ctx); 776 - emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ 777 - /* Check the error value */ 778 - emit_bcond(MIPS_COND_NE, r_ret, 0, 779 - b_imm(prog->len, ctx), ctx); 780 - emit_reg_move(r_ret, r_zero, ctx); 781 - /* We are good */ 782 - /* X <- P[1:K] & 0xf */ 783 - emit_andi(r_X, r_A, 0xf, ctx); 784 - /* X << 2 */ 785 - emit_b(b_imm(i + 1, ctx), ctx); 786 - emit_sll(r_X, r_X, 2, ctx); /* delay slot */ 787 - break; 788 - case BPF_ST: 789 - /* M[k] <- A */ 790 - ctx->flags |= SEEN_MEM | SEEN_A; 791 - emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); 792 - break; 793 - case BPF_STX: 794 - /* M[k] <- X */ 795 - ctx->flags |= SEEN_MEM | SEEN_X; 796 - emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); 797 - break; 798 - case BPF_ALU | BPF_ADD | BPF_K: 799 - /* A += K */ 800 - ctx->flags |= SEEN_A; 801 - emit_addiu(r_A, r_A, k, ctx); 802 - break; 803 - case BPF_ALU | BPF_ADD | BPF_X: 804 - /* A += X */ 805 - ctx->flags |= SEEN_A | SEEN_X; 806 - emit_addu(r_A, r_A, r_X, ctx); 807 - break; 808 - case BPF_ALU | BPF_SUB | BPF_K: 809 - /* A -= K */ 810 - ctx->flags |= SEEN_A; 811 - emit_addiu(r_A, r_A, -k, ctx); 812 - break; 813 - case BPF_ALU | BPF_SUB | BPF_X: 814 - /* A -= X */ 815 - ctx->flags |= SEEN_A | SEEN_X; 816 - emit_subu(r_A, r_A, r_X, ctx); 817 - break; 818 - case BPF_ALU | BPF_MUL | BPF_K: 819 - /* A *= K */ 820 - /* Load K to scratch register before MUL */ 821 - ctx->flags |= SEEN_A; 822 - emit_load_imm(r_s0, k, ctx); 823 - emit_mul(r_A, r_A, r_s0, ctx); 824 - break; 825 - case BPF_ALU | BPF_MUL | BPF_X: 826 - /* A *= X */ 827 - ctx->flags |= SEEN_A | SEEN_X; 828 - emit_mul(r_A, r_A, r_X, ctx); 829 - break; 830 - case BPF_ALU | BPF_DIV | BPF_K: 831 - /* A /= k */ 832 - if (k == 1) 833 - break; 834 - if (optimize_div(&k)) { 835 - ctx->flags |= SEEN_A; 836 - emit_srl(r_A, r_A, k, ctx); 837 - break; 838 - } 839 - ctx->flags |= SEEN_A; 840 - emit_load_imm(r_s0, k, ctx); 841 - 
emit_div(r_A, r_s0, ctx); 842 - break; 843 - case BPF_ALU | BPF_MOD | BPF_K: 844 - /* A %= k */ 845 - if (k == 1) { 846 - ctx->flags |= SEEN_A; 847 - emit_jit_reg_move(r_A, r_zero, ctx); 848 - } else { 849 - ctx->flags |= SEEN_A; 850 - emit_load_imm(r_s0, k, ctx); 851 - emit_mod(r_A, r_s0, ctx); 852 - } 853 - break; 854 - case BPF_ALU | BPF_DIV | BPF_X: 855 - /* A /= X */ 856 - ctx->flags |= SEEN_X | SEEN_A; 857 - /* Check if r_X is zero */ 858 - emit_bcond(MIPS_COND_EQ, r_X, r_zero, 859 - b_imm(prog->len, ctx), ctx); 860 - emit_load_imm(r_ret, 0, ctx); /* delay slot */ 861 - emit_div(r_A, r_X, ctx); 862 - break; 863 - case BPF_ALU | BPF_MOD | BPF_X: 864 - /* A %= X */ 865 - ctx->flags |= SEEN_X | SEEN_A; 866 - /* Check if r_X is zero */ 867 - emit_bcond(MIPS_COND_EQ, r_X, r_zero, 868 - b_imm(prog->len, ctx), ctx); 869 - emit_load_imm(r_ret, 0, ctx); /* delay slot */ 870 - emit_mod(r_A, r_X, ctx); 871 - break; 872 - case BPF_ALU | BPF_OR | BPF_K: 873 - /* A |= K */ 874 - ctx->flags |= SEEN_A; 875 - emit_ori(r_A, r_A, k, ctx); 876 - break; 877 - case BPF_ALU | BPF_OR | BPF_X: 878 - /* A |= X */ 879 - ctx->flags |= SEEN_A; 880 - emit_ori(r_A, r_A, r_X, ctx); 881 - break; 882 - case BPF_ALU | BPF_XOR | BPF_K: 883 - /* A ^= k */ 884 - ctx->flags |= SEEN_A; 885 - emit_xori(r_A, r_A, k, ctx); 886 - break; 887 - case BPF_ANC | SKF_AD_ALU_XOR_X: 888 - case BPF_ALU | BPF_XOR | BPF_X: 889 - /* A ^= X */ 890 - ctx->flags |= SEEN_A; 891 - emit_xor(r_A, r_A, r_X, ctx); 892 - break; 893 - case BPF_ALU | BPF_AND | BPF_K: 894 - /* A &= K */ 895 - ctx->flags |= SEEN_A; 896 - emit_andi(r_A, r_A, k, ctx); 897 - break; 898 - case BPF_ALU | BPF_AND | BPF_X: 899 - /* A &= X */ 900 - ctx->flags |= SEEN_A | SEEN_X; 901 - emit_and(r_A, r_A, r_X, ctx); 902 - break; 903 - case BPF_ALU | BPF_LSH | BPF_K: 904 - /* A <<= K */ 905 - ctx->flags |= SEEN_A; 906 - emit_sll(r_A, r_A, k, ctx); 907 - break; 908 - case BPF_ALU | BPF_LSH | BPF_X: 909 - /* A <<= X */ 910 - ctx->flags |= SEEN_A | SEEN_X; 
911 - emit_sllv(r_A, r_A, r_X, ctx); 912 - break; 913 - case BPF_ALU | BPF_RSH | BPF_K: 914 - /* A >>= K */ 915 - ctx->flags |= SEEN_A; 916 - emit_srl(r_A, r_A, k, ctx); 917 - break; 918 - case BPF_ALU | BPF_RSH | BPF_X: 919 - ctx->flags |= SEEN_A | SEEN_X; 920 - emit_srlv(r_A, r_A, r_X, ctx); 921 - break; 922 - case BPF_ALU | BPF_NEG: 923 - /* A = -A */ 924 - ctx->flags |= SEEN_A; 925 - emit_neg(r_A, ctx); 926 - break; 927 - case BPF_JMP | BPF_JA: 928 - /* pc += K */ 929 - emit_b(b_imm(i + k + 1, ctx), ctx); 930 - emit_nop(ctx); 931 - break; 932 - case BPF_JMP | BPF_JEQ | BPF_K: 933 - /* pc += ( A == K ) ? pc->jt : pc->jf */ 934 - condt = MIPS_COND_EQ | MIPS_COND_K; 935 - goto jmp_cmp; 936 - case BPF_JMP | BPF_JEQ | BPF_X: 937 - ctx->flags |= SEEN_X; 938 - /* pc += ( A == X ) ? pc->jt : pc->jf */ 939 - condt = MIPS_COND_EQ | MIPS_COND_X; 940 - goto jmp_cmp; 941 - case BPF_JMP | BPF_JGE | BPF_K: 942 - /* pc += ( A >= K ) ? pc->jt : pc->jf */ 943 - condt = MIPS_COND_GE | MIPS_COND_K; 944 - goto jmp_cmp; 945 - case BPF_JMP | BPF_JGE | BPF_X: 946 - ctx->flags |= SEEN_X; 947 - /* pc += ( A >= X ) ? pc->jt : pc->jf */ 948 - condt = MIPS_COND_GE | MIPS_COND_X; 949 - goto jmp_cmp; 950 - case BPF_JMP | BPF_JGT | BPF_K: 951 - /* pc += ( A > K ) ? pc->jt : pc->jf */ 952 - condt = MIPS_COND_GT | MIPS_COND_K; 953 - goto jmp_cmp; 954 - case BPF_JMP | BPF_JGT | BPF_X: 955 - ctx->flags |= SEEN_X; 956 - /* pc += ( A > X ) ? pc->jt : pc->jf */ 957 - condt = MIPS_COND_GT | MIPS_COND_X; 958 - jmp_cmp: 959 - /* Greater or Equal */ 960 - if ((condt & MIPS_COND_GE) || 961 - (condt & MIPS_COND_GT)) { 962 - if (condt & MIPS_COND_K) { /* K */ 963 - ctx->flags |= SEEN_A; 964 - emit_sltiu(r_s0, r_A, k, ctx); 965 - } else { /* X */ 966 - ctx->flags |= SEEN_A | 967 - SEEN_X; 968 - emit_sltu(r_s0, r_A, r_X, ctx); 969 - } 970 - /* A < (K|X) ? 
r_scrach = 1 */ 971 - b_off = b_imm(i + inst->jf + 1, ctx); 972 - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, 973 - ctx); 974 - emit_nop(ctx); 975 - /* A > (K|X) ? scratch = 0 */ 976 - if (condt & MIPS_COND_GT) { 977 - /* Checking for equality */ 978 - ctx->flags |= SEEN_A | SEEN_X; 979 - if (condt & MIPS_COND_K) 980 - emit_load_imm(r_s0, k, ctx); 981 - else 982 - emit_jit_reg_move(r_s0, r_X, 983 - ctx); 984 - b_off = b_imm(i + inst->jf + 1, ctx); 985 - emit_bcond(MIPS_COND_EQ, r_A, r_s0, 986 - b_off, ctx); 987 - emit_nop(ctx); 988 - /* Finally, A > K|X */ 989 - b_off = b_imm(i + inst->jt + 1, ctx); 990 - emit_b(b_off, ctx); 991 - emit_nop(ctx); 992 - } else { 993 - /* A >= (K|X) so jump */ 994 - b_off = b_imm(i + inst->jt + 1, ctx); 995 - emit_b(b_off, ctx); 996 - emit_nop(ctx); 997 - } 998 - } else { 999 - /* A == K|X */ 1000 - if (condt & MIPS_COND_K) { /* K */ 1001 - ctx->flags |= SEEN_A; 1002 - emit_load_imm(r_s0, k, ctx); 1003 - /* jump true */ 1004 - b_off = b_imm(i + inst->jt + 1, ctx); 1005 - emit_bcond(MIPS_COND_EQ, r_A, r_s0, 1006 - b_off, ctx); 1007 - emit_nop(ctx); 1008 - /* jump false */ 1009 - b_off = b_imm(i + inst->jf + 1, 1010 - ctx); 1011 - emit_bcond(MIPS_COND_NE, r_A, r_s0, 1012 - b_off, ctx); 1013 - emit_nop(ctx); 1014 - } else { /* X */ 1015 - /* jump true */ 1016 - ctx->flags |= SEEN_A | SEEN_X; 1017 - b_off = b_imm(i + inst->jt + 1, 1018 - ctx); 1019 - emit_bcond(MIPS_COND_EQ, r_A, r_X, 1020 - b_off, ctx); 1021 - emit_nop(ctx); 1022 - /* jump false */ 1023 - b_off = b_imm(i + inst->jf + 1, ctx); 1024 - emit_bcond(MIPS_COND_NE, r_A, r_X, 1025 - b_off, ctx); 1026 - emit_nop(ctx); 1027 - } 1028 - } 1029 - break; 1030 - case BPF_JMP | BPF_JSET | BPF_K: 1031 - ctx->flags |= SEEN_A; 1032 - /* pc += (A & K) ? 
pc -> jt : pc -> jf */ 1033 - emit_load_imm(r_s1, k, ctx); 1034 - emit_and(r_s0, r_A, r_s1, ctx); 1035 - /* jump true */ 1036 - b_off = b_imm(i + inst->jt + 1, ctx); 1037 - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); 1038 - emit_nop(ctx); 1039 - /* jump false */ 1040 - b_off = b_imm(i + inst->jf + 1, ctx); 1041 - emit_b(b_off, ctx); 1042 - emit_nop(ctx); 1043 - break; 1044 - case BPF_JMP | BPF_JSET | BPF_X: 1045 - ctx->flags |= SEEN_X | SEEN_A; 1046 - /* pc += (A & X) ? pc -> jt : pc -> jf */ 1047 - emit_and(r_s0, r_A, r_X, ctx); 1048 - /* jump true */ 1049 - b_off = b_imm(i + inst->jt + 1, ctx); 1050 - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); 1051 - emit_nop(ctx); 1052 - /* jump false */ 1053 - b_off = b_imm(i + inst->jf + 1, ctx); 1054 - emit_b(b_off, ctx); 1055 - emit_nop(ctx); 1056 - break; 1057 - case BPF_RET | BPF_A: 1058 - ctx->flags |= SEEN_A; 1059 - if (i != prog->len - 1) 1060 - /* 1061 - * If this is not the last instruction 1062 - * then jump to the epilogue 1063 - */ 1064 - emit_b(b_imm(prog->len, ctx), ctx); 1065 - emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1066 - break; 1067 - case BPF_RET | BPF_K: 1068 - /* 1069 - * It can emit two instructions so it does not fit on 1070 - * the delay slot. 
1071 - */ 1072 - emit_load_imm(r_ret, k, ctx); 1073 - if (i != prog->len - 1) { 1074 - /* 1075 - * If this is not the last instruction 1076 - * then jump to the epilogue 1077 - */ 1078 - emit_b(b_imm(prog->len, ctx), ctx); 1079 - emit_nop(ctx); 1080 - } 1081 - break; 1082 - case BPF_MISC | BPF_TAX: 1083 - /* X = A */ 1084 - ctx->flags |= SEEN_X | SEEN_A; 1085 - emit_jit_reg_move(r_X, r_A, ctx); 1086 - break; 1087 - case BPF_MISC | BPF_TXA: 1088 - /* A = X */ 1089 - ctx->flags |= SEEN_A | SEEN_X; 1090 - emit_jit_reg_move(r_A, r_X, ctx); 1091 - break; 1092 - /* AUX */ 1093 - case BPF_ANC | SKF_AD_PROTOCOL: 1094 - /* A = ntohs(skb->protocol */ 1095 - ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; 1096 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1097 - protocol) != 2); 1098 - off = offsetof(struct sk_buff, protocol); 1099 - emit_half_load(r_A, r_skb, off, ctx); 1100 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 1101 - /* This needs little endian fixup */ 1102 - if (cpu_has_wsbh) { 1103 - /* R2 and later have the wsbh instruction */ 1104 - emit_wsbh(r_A, r_A, ctx); 1105 - } else { 1106 - /* Get first byte */ 1107 - emit_andi(r_tmp_imm, r_A, 0xff, ctx); 1108 - /* Shift it */ 1109 - emit_sll(r_tmp, r_tmp_imm, 8, ctx); 1110 - /* Get second byte */ 1111 - emit_srl(r_tmp_imm, r_A, 8, ctx); 1112 - emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); 1113 - /* Put everyting together in r_A */ 1114 - emit_or(r_A, r_tmp, r_tmp_imm, ctx); 1115 - } 1116 - #endif 1117 - break; 1118 - case BPF_ANC | SKF_AD_CPU: 1119 - ctx->flags |= SEEN_A | SEEN_OFF; 1120 - /* A = current_thread_info()->cpu */ 1121 - BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, 1122 - cpu) != 4); 1123 - off = offsetof(struct thread_info, cpu); 1124 - /* $28/gp points to the thread_info struct */ 1125 - emit_load(r_A, 28, off, ctx); 1126 - break; 1127 - case BPF_ANC | SKF_AD_IFINDEX: 1128 - /* A = skb->dev->ifindex */ 1129 - case BPF_ANC | SKF_AD_HATYPE: 1130 - /* A = skb->dev->type */ 1131 - ctx->flags |= SEEN_SKB | SEEN_A; 1132 - off = 
offsetof(struct sk_buff, dev); 1133 - /* Load *dev pointer */ 1134 - emit_load_ptr(r_s0, r_skb, off, ctx); 1135 - /* error (0) in the delay slot */ 1136 - emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1137 - b_imm(prog->len, ctx), ctx); 1138 - emit_reg_move(r_ret, r_zero, ctx); 1139 - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { 1140 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); 1141 - off = offsetof(struct net_device, ifindex); 1142 - emit_load(r_A, r_s0, off, ctx); 1143 - } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */ 1144 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); 1145 - off = offsetof(struct net_device, type); 1146 - emit_half_load_unsigned(r_A, r_s0, off, ctx); 1147 - } 1148 - break; 1149 - case BPF_ANC | SKF_AD_MARK: 1150 - ctx->flags |= SEEN_SKB | SEEN_A; 1151 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 1152 - off = offsetof(struct sk_buff, mark); 1153 - emit_load(r_A, r_skb, off, ctx); 1154 - break; 1155 - case BPF_ANC | SKF_AD_RXHASH: 1156 - ctx->flags |= SEEN_SKB | SEEN_A; 1157 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 1158 - off = offsetof(struct sk_buff, hash); 1159 - emit_load(r_A, r_skb, off, ctx); 1160 - break; 1161 - case BPF_ANC | SKF_AD_VLAN_TAG: 1162 - ctx->flags |= SEEN_SKB | SEEN_A; 1163 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1164 - vlan_tci) != 2); 1165 - off = offsetof(struct sk_buff, vlan_tci); 1166 - emit_half_load_unsigned(r_A, r_skb, off, ctx); 1167 - break; 1168 - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: 1169 - ctx->flags |= SEEN_SKB | SEEN_A; 1170 - emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx); 1171 - if (PKT_VLAN_PRESENT_BIT) 1172 - emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx); 1173 - if (PKT_VLAN_PRESENT_BIT < 7) 1174 - emit_andi(r_A, r_A, 1, ctx); 1175 - break; 1176 - case BPF_ANC | SKF_AD_PKTTYPE: 1177 - ctx->flags |= SEEN_SKB; 1178 - 1179 - emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); 1180 - /* Keep only the last 3 bits */ 1181 - emit_andi(r_A, 
r_tmp, PKT_TYPE_MAX, ctx); 1182 - #ifdef __BIG_ENDIAN_BITFIELD 1183 - /* Get the actual packet type to the lower 3 bits */ 1184 - emit_srl(r_A, r_A, 5, ctx); 1185 - #endif 1186 - break; 1187 - case BPF_ANC | SKF_AD_QUEUE: 1188 - ctx->flags |= SEEN_SKB | SEEN_A; 1189 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1190 - queue_mapping) != 2); 1191 - BUILD_BUG_ON(offsetof(struct sk_buff, 1192 - queue_mapping) > 0xff); 1193 - off = offsetof(struct sk_buff, queue_mapping); 1194 - emit_half_load_unsigned(r_A, r_skb, off, ctx); 1195 - break; 1196 - default: 1197 - pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__, 1198 - inst->code); 1199 - return -1; 1200 - } 1201 - } 1202 - 1203 - /* compute offsets only during the first pass */ 1204 - if (ctx->target == NULL) 1205 - ctx->offsets[i] = ctx->idx * 4; 1206 - 1207 - return 0; 1208 - } 1209 - 1210 - void bpf_jit_compile(struct bpf_prog *fp) 1211 - { 1212 - struct jit_ctx ctx; 1213 - unsigned int alloc_size, tmp_idx; 1214 - 1215 - if (!bpf_jit_enable) 1216 - return; 1217 - 1218 - memset(&ctx, 0, sizeof(ctx)); 1219 - 1220 - ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); 1221 - if (ctx.offsets == NULL) 1222 - return; 1223 - 1224 - ctx.skf = fp; 1225 - 1226 - if (build_body(&ctx)) 1227 - goto out; 1228 - 1229 - tmp_idx = ctx.idx; 1230 - build_prologue(&ctx); 1231 - ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; 1232 - /* just to complete the ctx.idx count */ 1233 - build_epilogue(&ctx); 1234 - 1235 - alloc_size = 4 * ctx.idx; 1236 - ctx.target = module_alloc(alloc_size); 1237 - if (ctx.target == NULL) 1238 - goto out; 1239 - 1240 - /* Clean it */ 1241 - memset(ctx.target, 0, alloc_size); 1242 - 1243 - ctx.idx = 0; 1244 - 1245 - /* Generate the actual JIT code */ 1246 - build_prologue(&ctx); 1247 - build_body(&ctx); 1248 - build_epilogue(&ctx); 1249 - 1250 - /* Update the icache */ 1251 - flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); 1252 - 1253 - if (bpf_jit_enable > 1) 1254 - /* Dump JIT 
code */ 1255 - bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); 1256 - 1257 - fp->bpf_func = (void *)ctx.target; 1258 - fp->jited = 1; 1259 - 1260 - out: 1261 - kfree(ctx.offsets); 1262 - } 1263 - 1264 - void bpf_jit_free(struct bpf_prog *fp) 1265 - { 1266 - if (fp->jited) 1267 - module_memfree(fp->bpf_func); 1268 - 1269 - bpf_prog_unlock_free(fp); 1270 - }
-285
arch/mips/net/bpf_jit_asm.S
··· 1 - /* 2 - * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF 3 - * compiler. 4 - * 5 - * Copyright (C) 2015 Imagination Technologies Ltd. 6 - * Author: Markos Chandras <markos.chandras@imgtec.com> 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License as published by the 10 - * Free Software Foundation; version 2 of the License. 11 - */ 12 - 13 - #include <asm/asm.h> 14 - #include <asm/isa-rev.h> 15 - #include <asm/regdef.h> 16 - #include "bpf_jit.h" 17 - 18 - /* ABI 19 - * 20 - * r_skb_hl skb header length 21 - * r_skb_data skb data 22 - * r_off(a1) offset register 23 - * r_A BPF register A 24 - * r_X PF register X 25 - * r_skb(a0) *skb 26 - * r_M *scratch memory 27 - * r_skb_le skb length 28 - * r_s0 Scratch register 0 29 - * r_s1 Scratch register 1 30 - * 31 - * On entry: 32 - * a0: *skb 33 - * a1: offset (imm or imm + X) 34 - * 35 - * All non-BPF-ABI registers are free for use. On return, we only 36 - * care about r_ret. The BPF-ABI registers are assumed to remain 37 - * unmodified during the entire filter operation. 38 - */ 39 - 40 - #define skb a0 41 - #define offset a1 42 - #define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ 43 - 44 - /* We know better :) so prevent assembler reordering etc */ 45 - .set noreorder 46 - 47 - #define is_offset_negative(TYPE) \ 48 - /* If offset is negative we have more work to do */ \ 49 - slti t0, offset, 0; \ 50 - bgtz t0, bpf_slow_path_##TYPE##_neg; \ 51 - /* Be careful what follows in DS. */ 52 - 53 - #define is_offset_in_header(SIZE, TYPE) \ 54 - /* Reading from header? 
*/ \ 55 - addiu $r_s0, $r_skb_hl, -SIZE; \ 56 - slt t0, $r_s0, offset; \ 57 - bgtz t0, bpf_slow_path_##TYPE; \ 58 - 59 - LEAF(sk_load_word) 60 - is_offset_negative(word) 61 - FEXPORT(sk_load_word_positive) 62 - is_offset_in_header(4, word) 63 - /* Offset within header boundaries */ 64 - PTR_ADDU t1, $r_skb_data, offset 65 - .set reorder 66 - lw $r_A, 0(t1) 67 - .set noreorder 68 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 69 - # if MIPS_ISA_REV >= 2 70 - wsbh t0, $r_A 71 - rotr $r_A, t0, 16 72 - # else 73 - sll t0, $r_A, 24 74 - srl t1, $r_A, 24 75 - srl t2, $r_A, 8 76 - or t0, t0, t1 77 - andi t2, t2, 0xff00 78 - andi t1, $r_A, 0xff00 79 - or t0, t0, t2 80 - sll t1, t1, 8 81 - or $r_A, t0, t1 82 - # endif 83 - #endif 84 - jr $r_ra 85 - move $r_ret, zero 86 - END(sk_load_word) 87 - 88 - LEAF(sk_load_half) 89 - is_offset_negative(half) 90 - FEXPORT(sk_load_half_positive) 91 - is_offset_in_header(2, half) 92 - /* Offset within header boundaries */ 93 - PTR_ADDU t1, $r_skb_data, offset 94 - lhu $r_A, 0(t1) 95 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 96 - # if MIPS_ISA_REV >= 2 97 - wsbh $r_A, $r_A 98 - # else 99 - sll t0, $r_A, 8 100 - srl t1, $r_A, 8 101 - andi t0, t0, 0xff00 102 - or $r_A, t0, t1 103 - # endif 104 - #endif 105 - jr $r_ra 106 - move $r_ret, zero 107 - END(sk_load_half) 108 - 109 - LEAF(sk_load_byte) 110 - is_offset_negative(byte) 111 - FEXPORT(sk_load_byte_positive) 112 - is_offset_in_header(1, byte) 113 - /* Offset within header boundaries */ 114 - PTR_ADDU t1, $r_skb_data, offset 115 - lbu $r_A, 0(t1) 116 - jr $r_ra 117 - move $r_ret, zero 118 - END(sk_load_byte) 119 - 120 - /* 121 - * call skb_copy_bits: 122 - * (prototype in linux/skbuff.h) 123 - * 124 - * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) 125 - * 126 - * o32 mandates we leave 4 spaces for argument registers in case 127 - * the callee needs to use them. 
Even though we don't care about 128 - * the argument registers ourselves, we need to allocate that space 129 - * to remain ABI compliant since the callee may want to use that space. 130 - * We also allocate 2 more spaces for $r_ra and our return register (*to). 131 - * 132 - * n64 is a bit different. The *caller* will allocate the space to preserve 133 - * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no 134 - * good reason but it does not matter that much really. 135 - * 136 - * (void *to) is returned in r_s0 137 - * 138 - */ 139 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 140 - #define DS_OFFSET(SIZE) (4 * SZREG) 141 - #else 142 - #define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) 143 - #endif 144 - #define bpf_slow_path_common(SIZE) \ 145 - /* Quick check. Are we within reasonable boundaries? */ \ 146 - LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ 147 - sltu $r_s0, offset, $r_s1; \ 148 - beqz $r_s0, fault; \ 149 - /* Load 4th argument in DS */ \ 150 - LONG_ADDIU a3, zero, SIZE; \ 151 - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ 152 - PTR_LA t0, skb_copy_bits; \ 153 - PTR_S $r_ra, (5 * SZREG)($r_sp); \ 154 - /* Assign low slot to a2 */ \ 155 - PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ 156 - jalr t0; \ 157 - /* Reset our destination slot (DS but it's ok) */ \ 158 - INT_S zero, (4 * SZREG)($r_sp); \ 159 - /* \ 160 - * skb_copy_bits returns 0 on success and -EFAULT \ 161 - * on error. Our data live in a2. Do not bother with \ 162 - * our data if an error has been returned. 
\ 163 - */ \ 164 - /* Restore our frame */ \ 165 - PTR_L $r_ra, (5 * SZREG)($r_sp); \ 166 - INT_L $r_s0, (4 * SZREG)($r_sp); \ 167 - bltz v0, fault; \ 168 - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ 169 - move $r_ret, zero; \ 170 - 171 - NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) 172 - bpf_slow_path_common(4) 173 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 174 - # if MIPS_ISA_REV >= 2 175 - wsbh t0, $r_s0 176 - jr $r_ra 177 - rotr $r_A, t0, 16 178 - # else 179 - sll t0, $r_s0, 24 180 - srl t1, $r_s0, 24 181 - srl t2, $r_s0, 8 182 - or t0, t0, t1 183 - andi t2, t2, 0xff00 184 - andi t1, $r_s0, 0xff00 185 - or t0, t0, t2 186 - sll t1, t1, 8 187 - jr $r_ra 188 - or $r_A, t0, t1 189 - # endif 190 - #else 191 - jr $r_ra 192 - move $r_A, $r_s0 193 - #endif 194 - 195 - END(bpf_slow_path_word) 196 - 197 - NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) 198 - bpf_slow_path_common(2) 199 - #ifdef CONFIG_CPU_LITTLE_ENDIAN 200 - # if MIPS_ISA_REV >= 2 201 - jr $r_ra 202 - wsbh $r_A, $r_s0 203 - # else 204 - sll t0, $r_s0, 8 205 - andi t1, $r_s0, 0xff00 206 - andi t0, t0, 0xff00 207 - srl t1, t1, 8 208 - jr $r_ra 209 - or $r_A, t0, t1 210 - # endif 211 - #else 212 - jr $r_ra 213 - move $r_A, $r_s0 214 - #endif 215 - 216 - END(bpf_slow_path_half) 217 - 218 - NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) 219 - bpf_slow_path_common(1) 220 - jr $r_ra 221 - move $r_A, $r_s0 222 - 223 - END(bpf_slow_path_byte) 224 - 225 - /* 226 - * Negative entry points 227 - */ 228 - .macro bpf_is_end_of_data 229 - li t0, SKF_LL_OFF 230 - /* Reading link layer data? */ 231 - slt t1, offset, t0 232 - bgtz t1, fault 233 - /* Be careful what follows in DS. 
*/ 234 - .endm 235 - /* 236 - * call skb_copy_bits: 237 - * (prototype in linux/filter.h) 238 - * 239 - * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, 240 - * int k, unsigned int size) 241 - * 242 - * see above (bpf_slow_path_common) for ABI restrictions 243 - */ 244 - #define bpf_negative_common(SIZE) \ 245 - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ 246 - PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ 247 - PTR_S $r_ra, (5 * SZREG)($r_sp); \ 248 - jalr t0; \ 249 - li a2, SIZE; \ 250 - PTR_L $r_ra, (5 * SZREG)($r_sp); \ 251 - /* Check return pointer */ \ 252 - beqz v0, fault; \ 253 - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ 254 - /* Preserve our pointer */ \ 255 - move $r_s0, v0; \ 256 - /* Set return value */ \ 257 - move $r_ret, zero; \ 258 - 259 - bpf_slow_path_word_neg: 260 - bpf_is_end_of_data 261 - NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) 262 - bpf_negative_common(4) 263 - jr $r_ra 264 - lw $r_A, 0($r_s0) 265 - END(sk_load_word_negative) 266 - 267 - bpf_slow_path_half_neg: 268 - bpf_is_end_of_data 269 - NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) 270 - bpf_negative_common(2) 271 - jr $r_ra 272 - lhu $r_A, 0($r_s0) 273 - END(sk_load_half_negative) 274 - 275 - bpf_slow_path_byte_neg: 276 - bpf_is_end_of_data 277 - NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) 278 - bpf_negative_common(1) 279 - jr $r_ra 280 - lbu $r_A, 0($r_s0) 281 - END(sk_load_byte_negative) 282 - 283 - fault: 284 - jr $r_ra 285 - addiu $r_ret, zero, 1
+138 -53
arch/mips/net/ebpf_jit.c
··· 22 22 #include <asm/byteorder.h> 23 23 #include <asm/cacheflush.h> 24 24 #include <asm/cpu-features.h> 25 + #include <asm/isa-rev.h> 25 26 #include <asm/uasm.h> 26 27 27 28 /* Registers used by JIT */ ··· 126 125 } 127 126 128 127 /* Simply emit the instruction if the JIT memory space has been allocated */ 129 - #define emit_instr(ctx, func, ...) \ 130 - do { \ 131 - if ((ctx)->target != NULL) { \ 132 - u32 *p = &(ctx)->target[ctx->idx]; \ 133 - uasm_i_##func(&p, ##__VA_ARGS__); \ 134 - } \ 135 - (ctx)->idx++; \ 128 + #define emit_instr_long(ctx, func64, func32, ...) \ 129 + do { \ 130 + if ((ctx)->target != NULL) { \ 131 + u32 *p = &(ctx)->target[ctx->idx]; \ 132 + if (IS_ENABLED(CONFIG_64BIT)) \ 133 + uasm_i_##func64(&p, ##__VA_ARGS__); \ 134 + else \ 135 + uasm_i_##func32(&p, ##__VA_ARGS__); \ 136 + } \ 137 + (ctx)->idx++; \ 136 138 } while (0) 139 + 140 + #define emit_instr(ctx, func, ...) \ 141 + emit_instr_long(ctx, func, func, ##__VA_ARGS__) 137 142 138 143 static unsigned int j_target(struct jit_ctx *ctx, int target_idx) 139 144 { ··· 281 274 * If RA we are doing a function call and may need 282 275 * extra 8-byte tmp area. 283 276 */ 284 - stack_adjust += 16; 277 + stack_adjust += 2 * sizeof(long); 285 278 if (ctx->flags & EBPF_SAVE_S0) 286 - stack_adjust += 8; 279 + stack_adjust += sizeof(long); 287 280 if (ctx->flags & EBPF_SAVE_S1) 288 - stack_adjust += 8; 281 + stack_adjust += sizeof(long); 289 282 if (ctx->flags & EBPF_SAVE_S2) 290 - stack_adjust += 8; 283 + stack_adjust += sizeof(long); 291 284 if (ctx->flags & EBPF_SAVE_S3) 292 - stack_adjust += 8; 285 + stack_adjust += sizeof(long); 293 286 if (ctx->flags & EBPF_SAVE_S4) 294 - stack_adjust += 8; 287 + stack_adjust += sizeof(long); 295 288 296 289 BUILD_BUG_ON(MAX_BPF_STACK & 7); 297 290 locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; ··· 305 298 * On tail call we skip this instruction, and the TCC is 306 299 * passed in $v1 from the caller. 
307 300 */ 308 - emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); 301 + emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); 309 302 if (stack_adjust) 310 - emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); 303 + emit_instr_long(ctx, daddiu, addiu, 304 + MIPS_R_SP, MIPS_R_SP, -stack_adjust); 311 305 else 312 306 return 0; 313 307 314 - store_offset = stack_adjust - 8; 308 + store_offset = stack_adjust - sizeof(long); 315 309 316 310 if (ctx->flags & EBPF_SAVE_RA) { 317 - emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); 318 - store_offset -= 8; 311 + emit_instr_long(ctx, sd, sw, 312 + MIPS_R_RA, store_offset, MIPS_R_SP); 313 + store_offset -= sizeof(long); 319 314 } 320 315 if (ctx->flags & EBPF_SAVE_S0) { 321 - emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); 322 - store_offset -= 8; 316 + emit_instr_long(ctx, sd, sw, 317 + MIPS_R_S0, store_offset, MIPS_R_SP); 318 + store_offset -= sizeof(long); 323 319 } 324 320 if (ctx->flags & EBPF_SAVE_S1) { 325 - emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); 326 - store_offset -= 8; 321 + emit_instr_long(ctx, sd, sw, 322 + MIPS_R_S1, store_offset, MIPS_R_SP); 323 + store_offset -= sizeof(long); 327 324 } 328 325 if (ctx->flags & EBPF_SAVE_S2) { 329 - emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); 330 - store_offset -= 8; 326 + emit_instr_long(ctx, sd, sw, 327 + MIPS_R_S2, store_offset, MIPS_R_SP); 328 + store_offset -= sizeof(long); 331 329 } 332 330 if (ctx->flags & EBPF_SAVE_S3) { 333 - emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); 334 - store_offset -= 8; 331 + emit_instr_long(ctx, sd, sw, 332 + MIPS_R_S3, store_offset, MIPS_R_SP); 333 + store_offset -= sizeof(long); 335 334 } 336 335 if (ctx->flags & EBPF_SAVE_S4) { 337 - emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); 338 - store_offset -= 8; 336 + emit_instr_long(ctx, sd, sw, 337 + MIPS_R_S4, store_offset, MIPS_R_SP); 338 + store_offset -= sizeof(long); 339 339 } 340 340 
341 341 if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) 342 - emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); 342 + emit_instr_long(ctx, daddu, addu, 343 + MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); 343 344 344 345 return 0; 345 346 } ··· 356 341 { 357 342 const struct bpf_prog *prog = ctx->skf; 358 343 int stack_adjust = ctx->stack_size; 359 - int store_offset = stack_adjust - 8; 344 + int store_offset = stack_adjust - sizeof(long); 360 345 enum reg_val_type td; 361 346 int r0 = MIPS_R_V0; 362 347 ··· 368 353 } 369 354 370 355 if (ctx->flags & EBPF_SAVE_RA) { 371 - emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 372 - store_offset -= 8; 356 + emit_instr_long(ctx, ld, lw, 357 + MIPS_R_RA, store_offset, MIPS_R_SP); 358 + store_offset -= sizeof(long); 373 359 } 374 360 if (ctx->flags & EBPF_SAVE_S0) { 375 - emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); 376 - store_offset -= 8; 361 + emit_instr_long(ctx, ld, lw, 362 + MIPS_R_S0, store_offset, MIPS_R_SP); 363 + store_offset -= sizeof(long); 377 364 } 378 365 if (ctx->flags & EBPF_SAVE_S1) { 379 - emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); 380 - store_offset -= 8; 366 + emit_instr_long(ctx, ld, lw, 367 + MIPS_R_S1, store_offset, MIPS_R_SP); 368 + store_offset -= sizeof(long); 381 369 } 382 370 if (ctx->flags & EBPF_SAVE_S2) { 383 - emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); 384 - store_offset -= 8; 371 + emit_instr_long(ctx, ld, lw, 372 + MIPS_R_S2, store_offset, MIPS_R_SP); 373 + store_offset -= sizeof(long); 385 374 } 386 375 if (ctx->flags & EBPF_SAVE_S3) { 387 - emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); 388 - store_offset -= 8; 376 + emit_instr_long(ctx, ld, lw, 377 + MIPS_R_S3, store_offset, MIPS_R_SP); 378 + store_offset -= sizeof(long); 389 379 } 390 380 if (ctx->flags & EBPF_SAVE_S4) { 391 - emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); 392 - store_offset -= 8; 381 + emit_instr_long(ctx, ld, lw, 382 + MIPS_R_S4, 
store_offset, MIPS_R_SP); 383 + store_offset -= sizeof(long); 393 384 } 394 385 emit_instr(ctx, jr, dest_reg); 395 386 396 387 if (stack_adjust) 397 - emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); 388 + emit_instr_long(ctx, daddiu, addiu, 389 + MIPS_R_SP, MIPS_R_SP, stack_adjust); 398 390 else 399 391 emit_instr(ctx, nop); 400 392 ··· 668 646 s64 t64s; 669 647 int bpf_op = BPF_OP(insn->code); 670 648 649 + if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64) 650 + || (bpf_op == BPF_DW))) 651 + return -EINVAL; 652 + 671 653 switch (insn->code) { 672 654 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ 673 655 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ ··· 704 678 if (insn->imm == 1) /* Mult by 1 is a nop */ 705 679 break; 706 680 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 707 - emit_instr(ctx, dmultu, MIPS_R_AT, dst); 708 - emit_instr(ctx, mflo, dst); 681 + if (MIPS_ISA_REV >= 6) { 682 + emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT); 683 + } else { 684 + emit_instr(ctx, dmultu, MIPS_R_AT, dst); 685 + emit_instr(ctx, mflo, dst); 686 + } 709 687 break; 710 688 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ 711 689 dst = ebpf_to_mips_reg(ctx, insn, dst_reg); ··· 731 701 if (insn->imm == 1) /* Mult by 1 is a nop */ 732 702 break; 733 703 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 734 - emit_instr(ctx, multu, dst, MIPS_R_AT); 735 - emit_instr(ctx, mflo, dst); 704 + if (MIPS_ISA_REV >= 6) { 705 + emit_instr(ctx, mulu, dst, dst, MIPS_R_AT); 706 + } else { 707 + emit_instr(ctx, multu, dst, MIPS_R_AT); 708 + emit_instr(ctx, mflo, dst); 709 + } 736 710 break; 737 711 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ 738 712 dst = ebpf_to_mips_reg(ctx, insn, dst_reg); ··· 767 733 break; 768 734 } 769 735 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 736 + if (MIPS_ISA_REV >= 6) { 737 + if (bpf_op == BPF_DIV) 738 + emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT); 739 + else 740 + emit_instr(ctx, modu, dst, dst, MIPS_R_AT); 741 + break; 742 + } 770 743 
emit_instr(ctx, divu, dst, MIPS_R_AT); 771 744 if (bpf_op == BPF_DIV) 772 745 emit_instr(ctx, mflo, dst); ··· 796 755 break; 797 756 } 798 757 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 758 + if (MIPS_ISA_REV >= 6) { 759 + if (bpf_op == BPF_DIV) 760 + emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT); 761 + else 762 + emit_instr(ctx, modu, dst, dst, MIPS_R_AT); 763 + break; 764 + } 799 765 emit_instr(ctx, ddivu, dst, MIPS_R_AT); 800 766 if (bpf_op == BPF_DIV) 801 767 emit_instr(ctx, mflo, dst); ··· 868 820 emit_instr(ctx, and, dst, dst, src); 869 821 break; 870 822 case BPF_MUL: 871 - emit_instr(ctx, dmultu, dst, src); 872 - emit_instr(ctx, mflo, dst); 823 + if (MIPS_ISA_REV >= 6) { 824 + emit_instr(ctx, dmulu, dst, dst, src); 825 + } else { 826 + emit_instr(ctx, dmultu, dst, src); 827 + emit_instr(ctx, mflo, dst); 828 + } 873 829 break; 874 830 case BPF_DIV: 875 831 case BPF_MOD: 832 + if (MIPS_ISA_REV >= 6) { 833 + if (bpf_op == BPF_DIV) 834 + emit_instr(ctx, ddivu_r6, 835 + dst, dst, src); 836 + else 837 + emit_instr(ctx, modu, dst, dst, src); 838 + break; 839 + } 876 840 emit_instr(ctx, ddivu, dst, src); 877 841 if (bpf_op == BPF_DIV) 878 842 emit_instr(ctx, mflo, dst); ··· 964 904 break; 965 905 case BPF_DIV: 966 906 case BPF_MOD: 907 + if (MIPS_ISA_REV >= 6) { 908 + if (bpf_op == BPF_DIV) 909 + emit_instr(ctx, divu_r6, dst, dst, src); 910 + else 911 + emit_instr(ctx, modu, dst, dst, src); 912 + break; 913 + } 967 914 emit_instr(ctx, divu, dst, src); 968 915 if (bpf_op == BPF_DIV) 969 916 emit_instr(ctx, mflo, dst); ··· 1074 1007 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); 1075 1008 emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1076 1009 /* SP known to be non-zero, movz becomes boolean not */ 1077 - emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); 1078 - emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); 1010 + if (MIPS_ISA_REV >= 6) { 1011 + emit_instr(ctx, seleqz, MIPS_R_T9, 1012 + MIPS_R_SP, MIPS_R_T8); 1013 + } else { 1014 + emit_instr(ctx, movz, 
MIPS_R_T9, 1015 + MIPS_R_SP, MIPS_R_T8); 1016 + emit_instr(ctx, movn, MIPS_R_T9, 1017 + MIPS_R_ZERO, MIPS_R_T8); 1018 + } 1079 1019 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); 1080 1020 cmp_eq = bpf_op == BPF_JGT; 1081 1021 dst = MIPS_R_AT; ··· 1309 1235 1310 1236 case BPF_JMP | BPF_CALL: 1311 1237 ctx->flags |= EBPF_SAVE_RA; 1312 - t64s = (s64)insn->imm + (s64)__bpf_call_base; 1238 + t64s = (s64)insn->imm + (long)__bpf_call_base; 1313 1239 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); 1314 1240 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1315 1241 /* delay slot */ ··· 1441 1367 if (src < 0) 1442 1368 return src; 1443 1369 if (BPF_MODE(insn->code) == BPF_XADD) { 1370 + /* 1371 + * If mem_off does not fit within the 9 bit ll/sc 1372 + * instruction immediate field, use a temp reg. 1373 + */ 1374 + if (MIPS_ISA_REV >= 6 && 1375 + (mem_off >= BIT(8) || mem_off < -BIT(8))) { 1376 + emit_instr(ctx, daddiu, MIPS_R_T6, 1377 + dst, mem_off); 1378 + mem_off = 0; 1379 + dst = MIPS_R_T6; 1380 + } 1444 1381 switch (BPF_SIZE(insn->code)) { 1445 1382 case BPF_W: 1446 1383 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { ··· 1806 1721 unsigned int image_size; 1807 1722 u8 *image_ptr; 1808 1723 1809 - if (!prog->jit_requested || !cpu_has_mips64r2) 1724 + if (!prog->jit_requested || MIPS_ISA_REV < 2) 1810 1725 return prog; 1811 1726 1812 1727 tmp = bpf_jit_blind_constants(prog);
+4 -4
arch/mips/pic32/Kconfig
··· 39 39 Select the devicetree. 40 40 41 41 config DTB_PIC32_NONE 42 - bool "None" 42 + bool "None" 43 43 44 44 config DTB_PIC32_MZDA_SK 45 - bool "PIC32MZDA Starter Kit" 46 - depends on PIC32MZDA 47 - select BUILTIN_DTB 45 + bool "PIC32MZDA Starter Kit" 46 + depends on PIC32MZDA 47 + select BUILTIN_DTB 48 48 49 49 endchoice 50 50
+1 -3
arch/mips/vdso/Makefile
··· 46 46 VDSO_LDFLAGS := \ 47 47 -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ 48 48 $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \ 49 - -nostdlib -shared \ 50 - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ 51 - $(call cc-ldoption, -Wl$(comma)--build-id) 49 + -nostdlib -shared -Wl,--hash-style=sysv -Wl,--build-id 52 50 53 51 GCOV_PROFILE := n 54 52 UBSAN_SANITIZE := n