Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'implement-cpuv4-support-for-s390x'

Ilya Leoshkevich says:

====================
Implement cpuv4 support for s390x

v1: https://lore.kernel.org/bpf/20230830011128.1415752-1-iii@linux.ibm.com/
v1 -> v2:
- Redo "Disable zero-extension for BPF_MEMSX" as Puranjay and Alexei
suggested.
- Drop the bpf_ct_insert_entry() patch, it went in via the bpf tree.
- Rebase; don't carry over the A-bs (Acked-by tags) because there were
conflicts that had to be fixed.

Hi,

This series adds the cpuv4 support to the s390x eBPF JIT.
Patches 1-3 are preliminary bugfixes.
Patches 4-8 implement the new instructions.
Patches 9-10 enable the tests.

Best regards,
Ilya
====================

Link: https://lore.kernel.org/r/20230919101336.2223655-1-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+331 -164
+202 -63
arch/s390/net/bpf_jit_comp.c
··· 670 670 static int get_probe_mem_regno(const u8 *insn) 671 671 { 672 672 /* 673 - * insn must point to llgc, llgh, llgf or lg, which have destination 674 - * register at the same position. 673 + * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have 674 + * destination register at the same position. 675 675 */ 676 - if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */ 676 + if (insn[0] != 0xe3) /* common prefix */ 677 677 return -1; 678 678 if (insn[5] != 0x90 && /* llgc */ 679 679 insn[5] != 0x91 && /* llgh */ 680 680 insn[5] != 0x16 && /* llgf */ 681 - insn[5] != 0x04) /* lg */ 681 + insn[5] != 0x04 && /* lg */ 682 + insn[5] != 0x77 && /* lgb */ 683 + insn[5] != 0x15 && /* lgh */ 684 + insn[5] != 0x14) /* lgf */ 682 685 return -1; 683 686 return insn[1] >> 4; 684 687 } ··· 779 776 int i, bool extra_pass, u32 stack_depth) 780 777 { 781 778 struct bpf_insn *insn = &fp->insnsi[i]; 779 + s16 branch_oc_off = insn->off; 782 780 u32 dst_reg = insn->dst_reg; 783 781 u32 src_reg = insn->src_reg; 784 782 int last, insn_count = 1; ··· 792 788 int err; 793 789 794 790 if (BPF_CLASS(insn->code) == BPF_LDX && 795 - BPF_MODE(insn->code) == BPF_PROBE_MEM) 791 + (BPF_MODE(insn->code) == BPF_PROBE_MEM || 792 + BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) 796 793 probe_prg = jit->prg; 797 794 798 795 switch (insn->code) { 799 796 /* 800 797 * BPF_MOV 801 798 */ 802 - case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */ 803 - /* llgfr %dst,%src */ 804 - EMIT4(0xb9160000, dst_reg, src_reg); 805 - if (insn_is_zext(&insn[1])) 806 - insn_count = 2; 799 + case BPF_ALU | BPF_MOV | BPF_X: 800 + switch (insn->off) { 801 + case 0: /* DST = (u32) SRC */ 802 + /* llgfr %dst,%src */ 803 + EMIT4(0xb9160000, dst_reg, src_reg); 804 + if (insn_is_zext(&insn[1])) 805 + insn_count = 2; 806 + break; 807 + case 8: /* DST = (u32)(s8) SRC */ 808 + /* lbr %dst,%src */ 809 + EMIT4(0xb9260000, dst_reg, src_reg); 810 + /* llgfr %dst,%dst */ 811 + EMIT4(0xb9160000, dst_reg, 
dst_reg); 812 + break; 813 + case 16: /* DST = (u32)(s16) SRC */ 814 + /* lhr %dst,%src */ 815 + EMIT4(0xb9270000, dst_reg, src_reg); 816 + /* llgfr %dst,%dst */ 817 + EMIT4(0xb9160000, dst_reg, dst_reg); 818 + break; 819 + } 807 820 break; 808 - case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ 809 - /* lgr %dst,%src */ 810 - EMIT4(0xb9040000, dst_reg, src_reg); 821 + case BPF_ALU64 | BPF_MOV | BPF_X: 822 + switch (insn->off) { 823 + case 0: /* DST = SRC */ 824 + /* lgr %dst,%src */ 825 + EMIT4(0xb9040000, dst_reg, src_reg); 826 + break; 827 + case 8: /* DST = (s8) SRC */ 828 + /* lgbr %dst,%src */ 829 + EMIT4(0xb9060000, dst_reg, src_reg); 830 + break; 831 + case 16: /* DST = (s16) SRC */ 832 + /* lghr %dst,%src */ 833 + EMIT4(0xb9070000, dst_reg, src_reg); 834 + break; 835 + case 32: /* DST = (s32) SRC */ 836 + /* lgfr %dst,%src */ 837 + EMIT4(0xb9140000, dst_reg, src_reg); 838 + break; 839 + } 811 840 break; 812 841 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */ 813 842 /* llilf %dst,imm */ ··· 949 912 /* 950 913 * BPF_DIV / BPF_MOD 951 914 */ 952 - case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */ 953 - case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */ 915 + case BPF_ALU | BPF_DIV | BPF_X: 916 + case BPF_ALU | BPF_MOD | BPF_X: 954 917 { 955 918 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? 
REG_W1 : REG_W0; 956 919 957 - /* lhi %w0,0 */ 958 - EMIT4_IMM(0xa7080000, REG_W0, 0); 959 - /* lr %w1,%dst */ 960 - EMIT2(0x1800, REG_W1, dst_reg); 961 - /* dlr %w0,%src */ 962 - EMIT4(0xb9970000, REG_W0, src_reg); 920 + switch (off) { 921 + case 0: /* dst = (u32) dst {/,%} (u32) src */ 922 + /* xr %w0,%w0 */ 923 + EMIT2(0x1700, REG_W0, REG_W0); 924 + /* lr %w1,%dst */ 925 + EMIT2(0x1800, REG_W1, dst_reg); 926 + /* dlr %w0,%src */ 927 + EMIT4(0xb9970000, REG_W0, src_reg); 928 + break; 929 + case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */ 930 + /* lgfr %r1,%dst */ 931 + EMIT4(0xb9140000, REG_W1, dst_reg); 932 + /* dsgfr %r0,%src */ 933 + EMIT4(0xb91d0000, REG_W0, src_reg); 934 + break; 935 + } 963 936 /* llgfr %dst,%rc */ 964 937 EMIT4(0xb9160000, dst_reg, rc_reg); 965 938 if (insn_is_zext(&insn[1])) 966 939 insn_count = 2; 967 940 break; 968 941 } 969 - case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */ 970 - case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */ 942 + case BPF_ALU64 | BPF_DIV | BPF_X: 943 + case BPF_ALU64 | BPF_MOD | BPF_X: 971 944 { 972 945 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? 
REG_W1 : REG_W0; 973 946 974 - /* lghi %w0,0 */ 975 - EMIT4_IMM(0xa7090000, REG_W0, 0); 976 - /* lgr %w1,%dst */ 977 - EMIT4(0xb9040000, REG_W1, dst_reg); 978 - /* dlgr %w0,%dst */ 979 - EMIT4(0xb9870000, REG_W0, src_reg); 947 + switch (off) { 948 + case 0: /* dst = dst {/,%} src */ 949 + /* lghi %w0,0 */ 950 + EMIT4_IMM(0xa7090000, REG_W0, 0); 951 + /* lgr %w1,%dst */ 952 + EMIT4(0xb9040000, REG_W1, dst_reg); 953 + /* dlgr %w0,%src */ 954 + EMIT4(0xb9870000, REG_W0, src_reg); 955 + break; 956 + case 1: /* dst = (s64) dst {/,%} (s64) src */ 957 + /* lgr %w1,%dst */ 958 + EMIT4(0xb9040000, REG_W1, dst_reg); 959 + /* dsgr %w0,%src */ 960 + EMIT4(0xb90d0000, REG_W0, src_reg); 961 + break; 962 + } 980 963 /* lgr %dst,%rc */ 981 964 EMIT4(0xb9040000, dst_reg, rc_reg); 982 965 break; 983 966 } 984 - case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */ 985 - case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */ 967 + case BPF_ALU | BPF_DIV | BPF_K: 968 + case BPF_ALU | BPF_MOD | BPF_K: 986 969 { 987 970 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? 
REG_W1 : REG_W0; 988 971 989 972 if (imm == 1) { 990 973 if (BPF_OP(insn->code) == BPF_MOD) 991 - /* lhgi %dst,0 */ 974 + /* lghi %dst,0 */ 992 975 EMIT4_IMM(0xa7090000, dst_reg, 0); 993 976 else 994 977 EMIT_ZERO(dst_reg); 995 978 break; 996 979 } 997 - /* lhi %w0,0 */ 998 - EMIT4_IMM(0xa7080000, REG_W0, 0); 999 - /* lr %w1,%dst */ 1000 - EMIT2(0x1800, REG_W1, dst_reg); 1001 980 if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) { 1002 - /* dl %w0,<d(imm)>(%l) */ 1003 - EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L, 1004 - EMIT_CONST_U32(imm)); 981 + switch (off) { 982 + case 0: /* dst = (u32) dst {/,%} (u32) imm */ 983 + /* xr %w0,%w0 */ 984 + EMIT2(0x1700, REG_W0, REG_W0); 985 + /* lr %w1,%dst */ 986 + EMIT2(0x1800, REG_W1, dst_reg); 987 + /* dl %w0,<d(imm)>(%l) */ 988 + EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, 989 + REG_L, EMIT_CONST_U32(imm)); 990 + break; 991 + case 1: /* dst = (s32) dst {/,%} (s32) imm */ 992 + /* lgfr %r1,%dst */ 993 + EMIT4(0xb9140000, REG_W1, dst_reg); 994 + /* dsgf %r0,<d(imm)>(%l) */ 995 + EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0, 996 + REG_L, EMIT_CONST_U32(imm)); 997 + break; 998 + } 1005 999 } else { 1006 - /* lgfrl %dst,imm */ 1007 - EMIT6_PCREL_RILB(0xc40c0000, dst_reg, 1008 - _EMIT_CONST_U32(imm)); 1009 - jit->seen |= SEEN_LITERAL; 1010 - /* dlr %w0,%dst */ 1011 - EMIT4(0xb9970000, REG_W0, dst_reg); 1000 + switch (off) { 1001 + case 0: /* dst = (u32) dst {/,%} (u32) imm */ 1002 + /* xr %w0,%w0 */ 1003 + EMIT2(0x1700, REG_W0, REG_W0); 1004 + /* lr %w1,%dst */ 1005 + EMIT2(0x1800, REG_W1, dst_reg); 1006 + /* lrl %dst,imm */ 1007 + EMIT6_PCREL_RILB(0xc40d0000, dst_reg, 1008 + _EMIT_CONST_U32(imm)); 1009 + jit->seen |= SEEN_LITERAL; 1010 + /* dlr %w0,%dst */ 1011 + EMIT4(0xb9970000, REG_W0, dst_reg); 1012 + break; 1013 + case 1: /* dst = (s32) dst {/,%} (s32) imm */ 1014 + /* lgfr %w1,%dst */ 1015 + EMIT4(0xb9140000, REG_W1, dst_reg); 1016 + /* lgfrl %dst,imm */ 1017 + EMIT6_PCREL_RILB(0xc40c0000, 
dst_reg, 1018 + _EMIT_CONST_U32(imm)); 1019 + jit->seen |= SEEN_LITERAL; 1020 + /* dsgr %w0,%dst */ 1021 + EMIT4(0xb90d0000, REG_W0, dst_reg); 1022 + break; 1023 + } 1012 1024 } 1013 1025 /* llgfr %dst,%rc */ 1014 1026 EMIT4(0xb9160000, dst_reg, rc_reg); ··· 1065 979 insn_count = 2; 1066 980 break; 1067 981 } 1068 - case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */ 1069 - case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */ 982 + case BPF_ALU64 | BPF_DIV | BPF_K: 983 + case BPF_ALU64 | BPF_MOD | BPF_K: 1070 984 { 1071 985 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 1072 986 ··· 1076 990 EMIT4_IMM(0xa7090000, dst_reg, 0); 1077 991 break; 1078 992 } 1079 - /* lghi %w0,0 */ 1080 - EMIT4_IMM(0xa7090000, REG_W0, 0); 1081 - /* lgr %w1,%dst */ 1082 - EMIT4(0xb9040000, REG_W1, dst_reg); 1083 993 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { 1084 - /* dlg %w0,<d(imm)>(%l) */ 1085 - EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, 1086 - EMIT_CONST_U64(imm)); 994 + switch (off) { 995 + case 0: /* dst = dst {/,%} imm */ 996 + /* lghi %w0,0 */ 997 + EMIT4_IMM(0xa7090000, REG_W0, 0); 998 + /* lgr %w1,%dst */ 999 + EMIT4(0xb9040000, REG_W1, dst_reg); 1000 + /* dlg %w0,<d(imm)>(%l) */ 1001 + EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, 1002 + REG_L, EMIT_CONST_U64(imm)); 1003 + break; 1004 + case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1005 + /* lgr %w1,%dst */ 1006 + EMIT4(0xb9040000, REG_W1, dst_reg); 1007 + /* dsg %w0,<d(imm)>(%l) */ 1008 + EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0, 1009 + REG_L, EMIT_CONST_U64(imm)); 1010 + break; 1011 + } 1087 1012 } else { 1088 - /* lgrl %dst,imm */ 1089 - EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1090 - _EMIT_CONST_U64(imm)); 1091 - jit->seen |= SEEN_LITERAL; 1092 - /* dlgr %w0,%dst */ 1093 - EMIT4(0xb9870000, REG_W0, dst_reg); 1013 + switch (off) { 1014 + case 0: /* dst = dst {/,%} imm */ 1015 + /* lghi %w0,0 */ 1016 + EMIT4_IMM(0xa7090000, REG_W0, 0); 1017 + /* lgr %w1,%dst */ 1018 + 
EMIT4(0xb9040000, REG_W1, dst_reg); 1019 + /* lgrl %dst,imm */ 1020 + EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1021 + _EMIT_CONST_U64(imm)); 1022 + jit->seen |= SEEN_LITERAL; 1023 + /* dlgr %w0,%dst */ 1024 + EMIT4(0xb9870000, REG_W0, dst_reg); 1025 + break; 1026 + case 1: /* dst = (s64) dst {/,%} (s64) imm */ 1027 + /* lgr %w1,%dst */ 1028 + EMIT4(0xb9040000, REG_W1, dst_reg); 1029 + /* lgrl %dst,imm */ 1030 + EMIT6_PCREL_RILB(0xc4080000, dst_reg, 1031 + _EMIT_CONST_U64(imm)); 1032 + jit->seen |= SEEN_LITERAL; 1033 + /* dsgr %w0,%dst */ 1034 + EMIT4(0xb90d0000, REG_W0, dst_reg); 1035 + break; 1036 + } 1094 1037 } 1095 1038 /* lgr %dst,%rc */ 1096 1039 EMIT4(0xb9040000, dst_reg, rc_reg); ··· 1332 1217 } 1333 1218 break; 1334 1219 case BPF_ALU | BPF_END | BPF_FROM_LE: 1220 + case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1335 1221 switch (imm) { 1336 1222 case 16: /* dst = (u16) cpu_to_le16(dst) */ 1337 1223 /* lrvr %dst,%dst */ ··· 1490 1374 if (insn_is_zext(&insn[1])) 1491 1375 insn_count = 2; 1492 1376 break; 1377 + case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ 1378 + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1379 + /* lgb %dst,0(off,%src) */ 1380 + EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); 1381 + jit->seen |= SEEN_MEM; 1382 + break; 1493 1383 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ 1494 1384 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1495 1385 /* llgh %dst,0(off,%src) */ ··· 1504 1382 if (insn_is_zext(&insn[1])) 1505 1383 insn_count = 2; 1506 1384 break; 1385 + case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ 1386 + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1387 + /* lgh %dst,0(off,%src) */ 1388 + EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); 1389 + jit->seen |= SEEN_MEM; 1390 + break; 1507 1391 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ 1508 1392 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1509 1393 /* llgf %dst,off(%src) */ ··· 1517 1389 
EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off); 1518 1390 if (insn_is_zext(&insn[1])) 1519 1391 insn_count = 2; 1392 + break; 1393 + case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */ 1394 + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1395 + /* lgf %dst,off(%src) */ 1396 + jit->seen |= SEEN_MEM; 1397 + EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off); 1520 1398 break; 1521 1399 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ 1522 1400 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: ··· 1704 1570 * instruction itself (loop) and for BPF with offset 0 we 1705 1571 * branch to the instruction behind the branch. 1706 1572 */ 1573 + case BPF_JMP32 | BPF_JA: /* if (true) */ 1574 + branch_oc_off = imm; 1575 + fallthrough; 1707 1576 case BPF_JMP | BPF_JA: /* if (true) */ 1708 1577 mask = 0xf000; /* j */ 1709 1578 goto branch_oc; ··· 1875 1738 break; 1876 1739 branch_oc: 1877 1740 if (!is_first_pass(jit) && 1878 - can_use_rel(jit, addrs[i + off + 1])) { 1741 + can_use_rel(jit, addrs[i + branch_oc_off + 1])) { 1879 1742 /* brc mask,off */ 1880 1743 EMIT4_PCREL_RIC(0xa7040000, 1881 - mask >> 12, addrs[i + off + 1]); 1744 + mask >> 12, 1745 + addrs[i + branch_oc_off + 1]); 1882 1746 } else { 1883 1747 /* brcl mask,off */ 1884 1748 EMIT6_PCREL_RILC(0xc0040000, 1885 - mask >> 12, addrs[i + off + 1]); 1749 + mask >> 12, 1750 + addrs[i + branch_oc_off + 1]); 1886 1751 } 1887 1752 break; 1888 1753 }
+1 -1
kernel/bpf/verifier.c
··· 3114 3114 3115 3115 if (class == BPF_LDX) { 3116 3116 if (t != SRC_OP) 3117 - return BPF_SIZE(code) == BPF_DW; 3117 + return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX; 3118 3118 /* LDX source must be ptr. */ 3119 3119 return true; 3120 3120 }
-25
tools/testing/selftests/bpf/DENYLIST.s390x
··· 1 1 # TEMPORARY 2 2 # Alphabetical order 3 - bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?) 4 - bpf_cookie # failed to open_and_load program: -524 (trampoline) 5 - bpf_loop # attaches to __x64_sys_nanosleep 6 - cgrp_local_storage # prog_attach unexpected error: -524 (trampoline) 7 - dynptr/test_dynptr_skb_data 8 - dynptr/test_skb_readonly 9 3 exceptions # JIT does not support calling kfunc bpf_throw (exceptions) 10 - fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline) 11 4 get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) 12 - iters/testmod_seq* # s390x doesn't support kfuncs in modules yet 13 - kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 14 - kprobe_multi_test # relies on fentry 15 - ksyms_btf/weak_ksyms* # test_ksyms_weak__open_and_load unexpected error: -22 (kfunc) 16 - ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?) 17 - ksyms_module_libbpf # JIT does not support calling kernel function (kfunc) 18 - ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?) 19 - module_attach # skel_attach skeleton attach failed: -524 (trampoline) 20 - ringbuf # skel_load skeleton load failed (?) 21 5 stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?) 22 - test_lsm # attach unexpected error: -524 (trampoline) 23 - trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?) 24 - trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?) 25 - unpriv_bpf_disabled # fentry 26 - user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?) 27 - verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?) 
28 - xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline) 29 - xdp_metadata # JIT does not support calling kernel function (kfunc) 30 - test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
+26 -7
tools/testing/selftests/bpf/cgroup_helpers.c
··· 49 49 snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \ 50 50 CGROUP_WORK_DIR) 51 51 52 + static __thread bool cgroup_workdir_mounted; 53 + 54 + static void __cleanup_cgroup_environment(void); 55 + 52 56 static int __enable_controllers(const char *cgroup_path, const char *controllers) 53 57 { 54 58 char path[PATH_MAX + 1]; ··· 213 209 log_err("mount cgroup2"); 214 210 return 1; 215 211 } 212 + cgroup_workdir_mounted = true; 216 213 217 214 /* Cleanup existing failed runs, now that the environment is setup */ 218 - cleanup_cgroup_environment(); 215 + __cleanup_cgroup_environment(); 219 216 220 217 if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) { 221 218 log_err("mkdir cgroup work dir"); ··· 311 306 } 312 307 313 308 /** 309 + * __cleanup_cgroup_environment() - Delete temporary cgroups 310 + * 311 + * This is a helper for cleanup_cgroup_environment() that is responsible for 312 + * deletion of all temporary cgroups that have been created during the test. 313 + */ 314 + static void __cleanup_cgroup_environment(void) 315 + { 316 + char cgroup_workdir[PATH_MAX + 1]; 317 + 318 + format_cgroup_path(cgroup_workdir, ""); 319 + join_cgroup_from_top(CGROUP_MOUNT_PATH); 320 + nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT); 321 + } 322 + 323 + /** 314 324 * cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment 315 325 * 316 326 * This is an idempotent function to delete all temporary cgroups that 317 - * have been created during the test, including the cgroup testing work 327 + * have been created during the test and unmount the cgroup testing work 318 328 * directory. 
319 329 * 320 330 * At call time, it moves the calling process to the root cgroup, and then ··· 340 320 */ 341 321 void cleanup_cgroup_environment(void) 342 322 { 343 - char cgroup_workdir[PATH_MAX + 1]; 344 - 345 - format_cgroup_path(cgroup_workdir, ""); 346 - join_cgroup_from_top(CGROUP_MOUNT_PATH); 347 - nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT); 323 + __cleanup_cgroup_environment(); 324 + if (cgroup_workdir_mounted && umount(CGROUP_MOUNT_PATH)) 325 + log_err("umount cgroup2"); 326 + cgroup_workdir_mounted = false; 348 327 } 349 328 350 329 /**
+7 -2
tools/testing/selftests/bpf/progs/test_ldsx_insn.c
··· 6 6 #include <bpf/bpf_tracing.h> 7 7 8 8 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 9 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 9 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 10 + defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18 10 11 const volatile int skip = 0; 11 12 #else 12 13 const volatile int skip = 1; ··· 105 104 "%[tmp_mark] = r1" 106 105 : [tmp_mark]"=r"(tmp_mark) 107 106 : [ctx]"r"(skb), 108 - [off_mark]"i"(offsetof(struct __sk_buff, mark)) 107 + [off_mark]"i"(offsetof(struct __sk_buff, mark) 108 + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 109 + + sizeof(skb->mark) - 1 110 + #endif 111 + ) 109 112 : "r1"); 110 113 #else 111 114 tmp_mark = (char)skb->mark;
+2 -1
tools/testing/selftests/bpf/progs/verifier_bswap.c
··· 5 5 #include "bpf_misc.h" 6 6 7 7 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 8 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \ 8 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 9 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 9 10 __clang_major__ >= 18 10 11 11 12 SEC("socket")
+2 -1
tools/testing/selftests/bpf/progs/verifier_gotol.c
··· 5 5 #include "bpf_misc.h" 6 6 7 7 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 8 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \ 8 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 9 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 9 10 __clang_major__ >= 18 10 11 11 12 SEC("socket")
+87 -62
tools/testing/selftests/bpf/progs/verifier_ldsx.c
··· 5 5 #include "bpf_misc.h" 6 6 7 7 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 8 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \ 8 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 9 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 9 10 __clang_major__ >= 18 10 11 11 12 SEC("socket") ··· 14 13 __success __success_unpriv __retval(-2) 15 14 __naked void ldsx_s8(void) 16 15 { 17 - asm volatile (" \ 18 - r1 = 0x3fe; \ 19 - *(u64 *)(r10 - 8) = r1; \ 20 - r0 = *(s8 *)(r10 - 8); \ 21 - exit; \ 22 - " ::: __clobber_all); 16 + asm volatile ( 17 + "r1 = 0x3fe;" 18 + "*(u64 *)(r10 - 8) = r1;" 19 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 20 + "r0 = *(s8 *)(r10 - 8);" 21 + #else 22 + "r0 = *(s8 *)(r10 - 1);" 23 + #endif 24 + "exit;" 25 + ::: __clobber_all); 23 26 } 24 27 25 28 SEC("socket") ··· 31 26 __success __success_unpriv __retval(-2) 32 27 __naked void ldsx_s16(void) 33 28 { 34 - asm volatile (" \ 35 - r1 = 0x3fffe; \ 36 - *(u64 *)(r10 - 8) = r1; \ 37 - r0 = *(s16 *)(r10 - 8); \ 38 - exit; \ 39 - " ::: __clobber_all); 29 + asm volatile ( 30 + "r1 = 0x3fffe;" 31 + "*(u64 *)(r10 - 8) = r1;" 32 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 33 + "r0 = *(s16 *)(r10 - 8);" 34 + #else 35 + "r0 = *(s16 *)(r10 - 2);" 36 + #endif 37 + "exit;" 38 + ::: __clobber_all); 40 39 } 41 40 42 41 SEC("socket") ··· 48 39 __success __success_unpriv __retval(-1) 49 40 __naked void ldsx_s32(void) 50 41 { 51 - asm volatile (" \ 52 - r1 = 0xfffffffe; \ 53 - *(u64 *)(r10 - 8) = r1; \ 54 - r0 = *(s32 *)(r10 - 8); \ 55 - r0 >>= 1; \ 56 - exit; \ 57 - " ::: __clobber_all); 42 + asm volatile ( 43 + "r1 = 0xfffffffe;" 44 + "*(u64 *)(r10 - 8) = r1;" 45 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 46 + "r0 = *(s32 *)(r10 - 8);" 47 + #else 48 + "r0 = *(s32 *)(r10 - 4);" 49 + #endif 50 + "r0 >>= 1;" 51 + "exit;" 52 + ::: __clobber_all); 58 53 } 59 54 60 55 SEC("socket") ··· 67 54 
__msg("R1_w=scalar(smin=-128,smax=127)") 68 55 __naked void ldsx_s8_range_priv(void) 69 56 { 70 - asm volatile (" \ 71 - call %[bpf_get_prandom_u32]; \ 72 - *(u64 *)(r10 - 8) = r0; \ 73 - r1 = *(s8 *)(r10 - 8); \ 74 - /* r1 with s8 range */ \ 75 - if r1 s> 0x7f goto l0_%=; \ 76 - if r1 s< -0x80 goto l0_%=; \ 77 - r0 = 1; \ 78 - l1_%=: \ 79 - exit; \ 80 - l0_%=: \ 81 - r0 = 2; \ 82 - goto l1_%=; \ 83 - " : 57 + asm volatile ( 58 + "call %[bpf_get_prandom_u32];" 59 + "*(u64 *)(r10 - 8) = r0;" 60 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 61 + "r1 = *(s8 *)(r10 - 8);" 62 + #else 63 + "r1 = *(s8 *)(r10 - 1);" 64 + #endif 65 + /* r1 with s8 range */ 66 + "if r1 s> 0x7f goto l0_%=;" 67 + "if r1 s< -0x80 goto l0_%=;" 68 + "r0 = 1;" 69 + "l1_%=:" 70 + "exit;" 71 + "l0_%=:" 72 + "r0 = 2;" 73 + "goto l1_%=;" 74 + : 84 75 : __imm(bpf_get_prandom_u32) 85 76 : __clobber_all); 86 77 } ··· 94 77 __success __success_unpriv __retval(1) 95 78 __naked void ldsx_s16_range(void) 96 79 { 97 - asm volatile (" \ 98 - call %[bpf_get_prandom_u32]; \ 99 - *(u64 *)(r10 - 8) = r0; \ 100 - r1 = *(s16 *)(r10 - 8); \ 101 - /* r1 with s16 range */ \ 102 - if r1 s> 0x7fff goto l0_%=; \ 103 - if r1 s< -0x8000 goto l0_%=; \ 104 - r0 = 1; \ 105 - l1_%=: \ 106 - exit; \ 107 - l0_%=: \ 108 - r0 = 2; \ 109 - goto l1_%=; \ 110 - " : 80 + asm volatile ( 81 + "call %[bpf_get_prandom_u32];" 82 + "*(u64 *)(r10 - 8) = r0;" 83 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 84 + "r1 = *(s16 *)(r10 - 8);" 85 + #else 86 + "r1 = *(s16 *)(r10 - 2);" 87 + #endif 88 + /* r1 with s16 range */ 89 + "if r1 s> 0x7fff goto l0_%=;" 90 + "if r1 s< -0x8000 goto l0_%=;" 91 + "r0 = 1;" 92 + "l1_%=:" 93 + "exit;" 94 + "l0_%=:" 95 + "r0 = 2;" 96 + "goto l1_%=;" 97 + : 111 98 : __imm(bpf_get_prandom_u32) 112 99 : __clobber_all); 113 100 } ··· 121 100 __success __success_unpriv __retval(1) 122 101 __naked void ldsx_s32_range(void) 123 102 { 124 - asm volatile (" \ 125 - call %[bpf_get_prandom_u32]; \ 126 - *(u64 *)(r10 - 8) = 
r0; \ 127 - r1 = *(s32 *)(r10 - 8); \ 128 - /* r1 with s16 range */ \ 129 - if r1 s> 0x7fffFFFF goto l0_%=; \ 130 - if r1 s< -0x80000000 goto l0_%=; \ 131 - r0 = 1; \ 132 - l1_%=: \ 133 - exit; \ 134 - l0_%=: \ 135 - r0 = 2; \ 136 - goto l1_%=; \ 137 - " : 103 + asm volatile ( 104 + "call %[bpf_get_prandom_u32];" 105 + "*(u64 *)(r10 - 8) = r0;" 106 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 107 + "r1 = *(s32 *)(r10 - 8);" 108 + #else 109 + "r1 = *(s32 *)(r10 - 4);" 110 + #endif 111 + /* r1 with s16 range */ 112 + "if r1 s> 0x7fffFFFF goto l0_%=;" 113 + "if r1 s< -0x80000000 goto l0_%=;" 114 + "r0 = 1;" 115 + "l1_%=:" 116 + "exit;" 117 + "l0_%=:" 118 + "r0 = 2;" 119 + "goto l1_%=;" 120 + : 138 121 : __imm(bpf_get_prandom_u32) 139 122 : __clobber_all); 140 123 }
+2 -1
tools/testing/selftests/bpf/progs/verifier_movsx.c
··· 5 5 #include "bpf_misc.h" 6 6 7 7 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 8 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \ 8 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 9 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 9 10 __clang_major__ >= 18 10 11 11 12 SEC("socket")
+2 -1
tools/testing/selftests/bpf/progs/verifier_sdiv.c
··· 5 5 #include "bpf_misc.h" 6 6 7 7 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 8 - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \ 8 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 9 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 9 10 __clang_major__ >= 18 10 11 11 12 SEC("socket")