Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Hexagon: add target builtins to kernel

Add the compiler-rt builtins like memcpy to the hexagon kernel.

Signed-off-by: Sid Manning <sidneym@codeaurora.org>
Add SYM_FUNC_START/END, ksyms exports
Signed-off-by: Brian Cain <bcain@codeaurora.org>

Tested-by: Nick Desaulniers <ndesaulniers@google.com>

Authored by Sid Manning, committed by Brian Cain.
f1f99adf aaa44952

+249 -8
-3
arch/hexagon/Makefile
··· 33 33 KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__ 34 34 KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME) 35 35 36 - LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null) 37 - libs-y += $(LIBGCC) 38 - 39 36 head-y := arch/hexagon/kernel/head.o 40 37 41 38 core-y += arch/hexagon/kernel/ \
+4 -4
arch/hexagon/kernel/hexagon_ksyms.c
··· 35 35 DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes); 36 36 37 37 /* Additional functions */ 38 - DECLARE_EXPORT(__divsi3); 39 - DECLARE_EXPORT(__modsi3); 40 - DECLARE_EXPORT(__udivsi3); 41 - DECLARE_EXPORT(__umodsi3); 38 + DECLARE_EXPORT(__hexagon_divsi3); 39 + DECLARE_EXPORT(__hexagon_modsi3); 40 + DECLARE_EXPORT(__hexagon_udivsi3); 41 + DECLARE_EXPORT(__hexagon_umodsi3); 42 42 DECLARE_EXPORT(csum_tcpudp_magic);
+2 -1
arch/hexagon/lib/Makefile
··· 2 2 # 3 3 # Makefile for hexagon-specific library files. 4 4 # 5 - obj-y = checksum.o io.o memcpy.o memset.o 5 + obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \ 6 + divsi3.o modsi3.o udivsi3.o umodsi3.o
+67
arch/hexagon/lib/divsi3.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/linkage.h> 7 + 8 + SYM_FUNC_START(__hexagon_divsi3) 9 + { 10 + p0 = cmp.gt(r0,#-1) 11 + p1 = cmp.gt(r1,#-1) 12 + r3:2 = vabsw(r1:0) 13 + } 14 + { 15 + p3 = xor(p0,p1) 16 + r4 = sub(r2,r3) 17 + r6 = cl0(r2) 18 + p0 = cmp.gtu(r3,r2) 19 + } 20 + { 21 + r0 = mux(p3,#-1,#1) 22 + r7 = cl0(r3) 23 + p1 = cmp.gtu(r3,r4) 24 + } 25 + { 26 + r0 = mux(p0,#0,r0) 27 + p0 = or(p0,p1) 28 + if (p0.new) jumpr:nt r31 29 + r6 = sub(r7,r6) 30 + } 31 + { 32 + r7 = r6 33 + r5:4 = combine(#1,r3) 34 + r6 = add(#1,lsr(r6,#1)) 35 + p0 = cmp.gtu(r6,#4) 36 + } 37 + { 38 + r5:4 = vaslw(r5:4,r7) 39 + if (!p0) r6 = #3 40 + } 41 + { 42 + loop0(1f,r6) 43 + r7:6 = vlsrw(r5:4,#1) 44 + r1:0 = #0 45 + } 46 + .falign 47 + 1: 48 + { 49 + r5:4 = vlsrw(r5:4,#2) 50 + if (!p0.new) r0 = add(r0,r5) 51 + if (!p0.new) r2 = sub(r2,r4) 52 + p0 = cmp.gtu(r4,r2) 53 + } 54 + { 55 + r7:6 = vlsrw(r7:6,#2) 56 + if (!p0.new) r0 = add(r0,r7) 57 + if (!p0.new) r2 = sub(r2,r6) 58 + p0 = cmp.gtu(r6,r2) 59 + }:endloop0 60 + { 61 + if (!p0) r0 = add(r0,r7) 62 + } 63 + { 64 + if (p3) r0 = sub(r1,r0) 65 + jumpr r31 66 + } 67 + SYM_FUNC_END(__hexagon_divsi3)
+56
arch/hexagon/lib/memcpy_likely_aligned.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/linkage.h> 7 + 8 + SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) 9 + { 10 + p0 = bitsclr(r1,#7) 11 + p0 = bitsclr(r0,#7) 12 + if (p0.new) r5:4 = memd(r1) 13 + if (p0.new) r7:6 = memd(r1+#8) 14 + } 15 + { 16 + if (!p0) jump:nt .Lmemcpy_call 17 + if (p0) r9:8 = memd(r1+#16) 18 + if (p0) r11:10 = memd(r1+#24) 19 + p0 = cmp.gtu(r2,#64) 20 + } 21 + { 22 + if (p0) jump:nt .Lmemcpy_call 23 + if (!p0) memd(r0) = r5:4 24 + if (!p0) memd(r0+#8) = r7:6 25 + p0 = cmp.gtu(r2,#32) 26 + } 27 + { 28 + p1 = cmp.gtu(r2,#40) 29 + p2 = cmp.gtu(r2,#48) 30 + if (p0) r13:12 = memd(r1+#32) 31 + if (p1.new) r15:14 = memd(r1+#40) 32 + } 33 + { 34 + memd(r0+#16) = r9:8 35 + memd(r0+#24) = r11:10 36 + } 37 + { 38 + if (p0) memd(r0+#32) = r13:12 39 + if (p1) memd(r0+#40) = r15:14 40 + if (!p2) jumpr:t r31 41 + } 42 + { 43 + p0 = cmp.gtu(r2,#56) 44 + r5:4 = memd(r1+#48) 45 + if (p0.new) r7:6 = memd(r1+#56) 46 + } 47 + { 48 + memd(r0+#48) = r5:4 49 + if (p0) memd(r0+#56) = r7:6 50 + jumpr r31 51 + } 52 + 53 + .Lmemcpy_call: 54 + jump memcpy 55 + 56 + SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes)
+46
arch/hexagon/lib/modsi3.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/linkage.h> 7 + 8 + SYM_FUNC_START(__hexagon_modsi3) 9 + { 10 + p2 = cmp.ge(r0,#0) 11 + r2 = abs(r0) 12 + r1 = abs(r1) 13 + } 14 + { 15 + r3 = cl0(r2) 16 + r4 = cl0(r1) 17 + p0 = cmp.gtu(r1,r2) 18 + } 19 + { 20 + r3 = sub(r4,r3) 21 + if (p0) jumpr r31 22 + } 23 + { 24 + p1 = cmp.eq(r3,#0) 25 + loop0(1f,r3) 26 + r0 = r2 27 + r2 = lsl(r1,r3) 28 + } 29 + .falign 30 + 1: 31 + { 32 + p0 = cmp.gtu(r2,r0) 33 + if (!p0.new) r0 = sub(r0,r2) 34 + r2 = lsr(r2,#1) 35 + if (p1) r1 = #0 36 + }:endloop0 37 + { 38 + p0 = cmp.gtu(r2,r0) 39 + if (!p0.new) r0 = sub(r0,r1) 40 + if (p2) jumpr r31 41 + } 42 + { 43 + r0 = neg(r0) 44 + jumpr r31 45 + } 46 + SYM_FUNC_END(__hexagon_modsi3)
+38
arch/hexagon/lib/udivsi3.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/linkage.h> 7 + 8 + SYM_FUNC_START(__hexagon_udivsi3) 9 + { 10 + r2 = cl0(r0) 11 + r3 = cl0(r1) 12 + r5:4 = combine(#1,#0) 13 + p0 = cmp.gtu(r1,r0) 14 + } 15 + { 16 + r6 = sub(r3,r2) 17 + r4 = r1 18 + r1:0 = combine(r0,r4) 19 + if (p0) jumpr r31 20 + } 21 + { 22 + r3:2 = vlslw(r5:4,r6) 23 + loop0(1f,r6) 24 + } 25 + .falign 26 + 1: 27 + { 28 + p0 = cmp.gtu(r2,r1) 29 + if (!p0.new) r1 = sub(r1,r2) 30 + if (!p0.new) r0 = add(r0,r3) 31 + r3:2 = vlsrw(r3:2,#1) 32 + }:endloop0 33 + { 34 + p0 = cmp.gtu(r2,r1) 35 + if (!p0.new) r0 = add(r0,r3) 36 + jumpr r31 37 + } 38 + SYM_FUNC_END(__hexagon_udivsi3)
+36
arch/hexagon/lib/umodsi3.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/linkage.h> 7 + 8 + SYM_FUNC_START(__hexagon_umodsi3) 9 + { 10 + r2 = cl0(r0) 11 + r3 = cl0(r1) 12 + p0 = cmp.gtu(r1,r0) 13 + } 14 + { 15 + r2 = sub(r3,r2) 16 + if (p0) jumpr r31 17 + } 18 + { 19 + loop0(1f,r2) 20 + p1 = cmp.eq(r2,#0) 21 + r2 = lsl(r1,r2) 22 + } 23 + .falign 24 + 1: 25 + { 26 + p0 = cmp.gtu(r2,r0) 27 + if (!p0.new) r0 = sub(r0,r2) 28 + r2 = lsr(r2,#1) 29 + if (p1) r1 = #0 30 + }:endloop0 31 + { 32 + p0 = cmp.gtu(r2,r0) 33 + if (!p0.new) r0 = sub(r0,r1) 34 + jumpr r31 35 + } 36 + SYM_FUNC_END(__hexagon_umodsi3)