Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
bpf-next 2021-08-10

We've added 31 non-merge commits during the last 8 day(s) which contain
a total of 28 files changed, 3644 insertions(+), 519 deletions(-).

1) Native XDP support for bonding driver & related BPF selftests, from Jussi Maki.

2) Large batch of new BPF JIT tests for test_bpf.ko that came out as a result from
32-bit MIPS JIT development, from Johan Almbladh.

3) Rewrite of netcnt BPF selftest and merge into test_progs, from Stanislav Fomichev.

4) Fix XDP bpf_prog_test_run infra after net to net-next merge, from Andrii Nakryiko.

5) Follow-up fix in unix_bpf_update_proto() to enforce socket type, from Cong Wang.

6) Fix bpf-iter-tcp4 selftest to print the correct dest IP, from Jose Blanquicet.

7) Various misc BPF XDP sample improvements, from Niklas Söderlund, Matthew Cover,
and Muhammad Falak R Wani.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (31 commits)
bpf, tests: Add tail call test suite
bpf, tests: Add tests for BPF_CMPXCHG
bpf, tests: Add tests for atomic operations
bpf, tests: Add test for 32-bit context pointer argument passing
bpf, tests: Add branch conversion JIT test
bpf, tests: Add word-order tests for load/store of double words
bpf, tests: Add tests for ALU operations implemented with function calls
bpf, tests: Add more ALU64 BPF_MUL tests
bpf, tests: Add more BPF_LSH/RSH/ARSH tests for ALU64
bpf, tests: Add more ALU32 tests for BPF_LSH/RSH/ARSH
bpf, tests: Add more tests of ALU32 and ALU64 bitwise operations
bpf, tests: Fix typos in test case descriptions
bpf, tests: Add BPF_MOV tests for zero and sign extension
bpf, tests: Add BPF_JMP32 test cases
  samples, bpf: Add an explicit comment to handle nested vlan tagging.
selftests/bpf: Add tests for XDP bonding
selftests/bpf: Fix xdp_tx.c prog section name
net, core: Allow netdev_lower_get_next_private_rcu in bh context
bpf, devmap: Exclude XDP broadcast to master device
net, bonding: Add XDP support to the bonding driver
...
====================

Link: https://lore.kernel.org/r/20210810130038.16927-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+3442 -317
+400 -60
drivers/net/bonding/bond_main.c
··· 317 317 } 318 318 } 319 319 320 + static bool bond_xdp_check(struct bonding *bond) 321 + { 322 + switch (BOND_MODE(bond)) { 323 + case BOND_MODE_ROUNDROBIN: 324 + case BOND_MODE_ACTIVEBACKUP: 325 + case BOND_MODE_8023AD: 326 + case BOND_MODE_XOR: 327 + return true; 328 + default: 329 + return false; 330 + } 331 + } 332 + 320 333 /*---------------------------------- VLAN -----------------------------------*/ 321 334 322 335 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, ··· 2146 2133 bond_update_slave_arr(bond, NULL); 2147 2134 2148 2135 2136 + if (!slave_dev->netdev_ops->ndo_bpf || 2137 + !slave_dev->netdev_ops->ndo_xdp_xmit) { 2138 + if (bond->xdp_prog) { 2139 + NL_SET_ERR_MSG(extack, "Slave does not support XDP"); 2140 + slave_err(bond_dev, slave_dev, "Slave does not support XDP\n"); 2141 + res = -EOPNOTSUPP; 2142 + goto err_sysfs_del; 2143 + } 2144 + } else { 2145 + struct netdev_bpf xdp = { 2146 + .command = XDP_SETUP_PROG, 2147 + .flags = 0, 2148 + .prog = bond->xdp_prog, 2149 + .extack = extack, 2150 + }; 2151 + 2152 + if (dev_xdp_prog_count(slave_dev) > 0) { 2153 + NL_SET_ERR_MSG(extack, 2154 + "Slave has XDP program loaded, please unload before enslaving"); 2155 + slave_err(bond_dev, slave_dev, 2156 + "Slave has XDP program loaded, please unload before enslaving\n"); 2157 + res = -EOPNOTSUPP; 2158 + goto err_sysfs_del; 2159 + } 2160 + 2161 + res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 2162 + if (res < 0) { 2163 + /* ndo_bpf() sets extack error message */ 2164 + slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res); 2165 + goto err_sysfs_del; 2166 + } 2167 + if (bond->xdp_prog) 2168 + bpf_prog_inc(bond->xdp_prog); 2169 + } 2170 + 2149 2171 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", 2150 2172 bond_is_active_slave(new_slave) ? "an active" : "a backup", 2151 2173 new_slave->link != BOND_LINK_DOWN ? 
"an up" : "a down"); ··· 2299 2251 2300 2252 /* recompute stats just before removing the slave */ 2301 2253 bond_get_stats(bond->dev, &bond->bond_stats); 2254 + 2255 + if (bond->xdp_prog) { 2256 + struct netdev_bpf xdp = { 2257 + .command = XDP_SETUP_PROG, 2258 + .flags = 0, 2259 + .prog = NULL, 2260 + .extack = NULL, 2261 + }; 2262 + if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) 2263 + slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n"); 2264 + } 2302 2265 2303 2266 /* unregister rx_handler early so bond_handle_frame wouldn't be called 2304 2267 * for this slave anymore. ··· 3673 3614 3674 3615 /*---------------------------- Hashing Policies -----------------------------*/ 3675 3616 3676 - /* L2 hash helper */ 3677 - static inline u32 bond_eth_hash(struct sk_buff *skb) 3617 + /* Helper to access data in a packet, with or without a backing skb. 3618 + * If skb is given the data is linearized if necessary via pskb_may_pull. 3619 + */ 3620 + static inline const void *bond_pull_data(struct sk_buff *skb, 3621 + const void *data, int hlen, int n) 3678 3622 { 3679 - struct ethhdr *ep, hdr_tmp; 3623 + if (likely(n <= hlen)) 3624 + return data; 3625 + else if (skb && likely(pskb_may_pull(skb, n))) 3626 + return skb->head; 3680 3627 3681 - ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp); 3682 - if (ep) 3683 - return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto; 3684 - return 0; 3628 + return NULL; 3685 3629 } 3686 3630 3687 - static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, 3688 - int *noff, int *proto, bool l34) 3631 + /* L2 hash helper */ 3632 + static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 3633 + { 3634 + struct ethhdr *ep; 3635 + 3636 + data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 3637 + if (!data) 3638 + return 0; 3639 + 3640 + ep = (struct ethhdr *)(data + mhoff); 3641 + return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); 3642 + } 
3643 + 3644 + static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, 3645 + int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) 3689 3646 { 3690 3647 const struct ipv6hdr *iph6; 3691 3648 const struct iphdr *iph; 3692 3649 3693 - if (skb->protocol == htons(ETH_P_IP)) { 3694 - if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph)))) 3650 + if (l2_proto == htons(ETH_P_IP)) { 3651 + data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph)); 3652 + if (!data) 3695 3653 return false; 3696 - iph = (const struct iphdr *)(skb->data + *noff); 3654 + 3655 + iph = (const struct iphdr *)(data + *nhoff); 3697 3656 iph_to_flow_copy_v4addrs(fk, iph); 3698 - *noff += iph->ihl << 2; 3657 + *nhoff += iph->ihl << 2; 3699 3658 if (!ip_is_fragment(iph)) 3700 - *proto = iph->protocol; 3701 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 3702 - if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6)))) 3659 + *ip_proto = iph->protocol; 3660 + } else if (l2_proto == htons(ETH_P_IPV6)) { 3661 + data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6)); 3662 + if (!data) 3703 3663 return false; 3704 - iph6 = (const struct ipv6hdr *)(skb->data + *noff); 3664 + 3665 + iph6 = (const struct ipv6hdr *)(data + *nhoff); 3705 3666 iph_to_flow_copy_v6addrs(fk, iph6); 3706 - *noff += sizeof(*iph6); 3707 - *proto = iph6->nexthdr; 3667 + *nhoff += sizeof(*iph6); 3668 + *ip_proto = iph6->nexthdr; 3708 3669 } else { 3709 3670 return false; 3710 3671 } 3711 3672 3712 - if (l34 && *proto >= 0) 3713 - fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto); 3673 + if (l34 && *ip_proto >= 0) 3674 + fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); 3714 3675 3715 3676 return true; 3716 3677 } 3717 3678 3718 - static u32 bond_vlan_srcmac_hash(struct sk_buff *skb) 3679 + static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) 3719 3680 { 3720 - struct ethhdr *mac_hdr = (struct ethhdr 
*)skb_mac_header(skb); 3681 + struct ethhdr *mac_hdr; 3721 3682 u32 srcmac_vendor = 0, srcmac_dev = 0; 3722 3683 u16 vlan; 3723 3684 int i; 3685 + 3686 + data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); 3687 + if (!data) 3688 + return 0; 3689 + mac_hdr = (struct ethhdr *)(data + mhoff); 3724 3690 3725 3691 for (i = 0; i < 3; i++) 3726 3692 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; ··· 3762 3678 } 3763 3679 3764 3680 /* Extract the appropriate headers based on bond's xmit policy */ 3765 - static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, 3766 - struct flow_keys *fk) 3681 + static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, 3682 + __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk) 3767 3683 { 3768 3684 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; 3769 - int noff, proto = -1; 3685 + int ip_proto = -1; 3770 3686 3771 3687 switch (bond->params.xmit_policy) { 3772 3688 case BOND_XMIT_POLICY_ENCAP23: 3773 3689 case BOND_XMIT_POLICY_ENCAP34: 3774 3690 memset(fk, 0, sizeof(*fk)); 3775 3691 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding, 3776 - fk, NULL, 0, 0, 0, 0); 3692 + fk, data, l2_proto, nhoff, hlen, 0); 3777 3693 default: 3778 3694 break; 3779 3695 } 3780 3696 3781 3697 fk->ports.ports = 0; 3782 3698 memset(&fk->icmp, 0, sizeof(fk->icmp)); 3783 - noff = skb_network_offset(skb); 3784 - if (!bond_flow_ip(skb, fk, &noff, &proto, l34)) 3699 + if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34)) 3785 3700 return false; 3786 3701 3787 3702 /* ICMP error packets contains at least 8 bytes of the header ··· 3788 3705 * to correlate ICMP error packets within the same flow which 3789 3706 * generated the error. 
3790 3707 */ 3791 - if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) { 3792 - skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data, 3793 - skb_transport_offset(skb), 3794 - skb_headlen(skb)); 3795 - if (proto == IPPROTO_ICMP) { 3708 + if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) { 3709 + skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen); 3710 + if (ip_proto == IPPROTO_ICMP) { 3796 3711 if (!icmp_is_err(fk->icmp.type)) 3797 3712 return true; 3798 3713 3799 - noff += sizeof(struct icmphdr); 3800 - } else if (proto == IPPROTO_ICMPV6) { 3714 + nhoff += sizeof(struct icmphdr); 3715 + } else if (ip_proto == IPPROTO_ICMPV6) { 3801 3716 if (!icmpv6_is_err(fk->icmp.type)) 3802 3717 return true; 3803 3718 3804 - noff += sizeof(struct icmp6hdr); 3719 + nhoff += sizeof(struct icmp6hdr); 3805 3720 } 3806 - return bond_flow_ip(skb, fk, &noff, &proto, l34); 3721 + return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34); 3807 3722 } 3808 3723 3809 3724 return true; ··· 3817 3736 return hash >> 1; 3818 3737 } 3819 3738 3739 + /* Generate hash based on xmit policy. If @skb is given it is used to linearize 3740 + * the data as required, but this function can be used without it if the data is 3741 + * known to be linear (e.g. with xdp_buff). 
3742 + */ 3743 + static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, 3744 + __be16 l2_proto, int mhoff, int nhoff, int hlen) 3745 + { 3746 + struct flow_keys flow; 3747 + u32 hash; 3748 + 3749 + if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) 3750 + return bond_vlan_srcmac_hash(skb, data, mhoff, hlen); 3751 + 3752 + if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 3753 + !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) 3754 + return bond_eth_hash(skb, data, mhoff, hlen); 3755 + 3756 + if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 3757 + bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { 3758 + hash = bond_eth_hash(skb, data, mhoff, hlen); 3759 + } else { 3760 + if (flow.icmp.id) 3761 + memcpy(&hash, &flow.icmp, sizeof(hash)); 3762 + else 3763 + memcpy(&hash, &flow.ports.ports, sizeof(hash)); 3764 + } 3765 + 3766 + return bond_ip_hash(hash, &flow); 3767 + } 3768 + 3820 3769 /** 3821 3770 * bond_xmit_hash - generate a hash value based on the xmit policy 3822 3771 * @bond: bonding device ··· 3857 3746 */ 3858 3747 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) 3859 3748 { 3860 - struct flow_keys flow; 3861 - u32 hash; 3862 - 3863 3749 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && 3864 3750 skb->l4_hash) 3865 3751 return skb->hash; 3866 3752 3867 - if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) 3868 - return bond_vlan_srcmac_hash(skb); 3753 + return __bond_xmit_hash(bond, skb, skb->head, skb->protocol, 3754 + skb->mac_header, skb->network_header, 3755 + skb_headlen(skb)); 3756 + } 3869 3757 3870 - if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || 3871 - !bond_flow_dissect(bond, skb, &flow)) 3872 - return bond_eth_hash(skb); 3758 + /** 3759 + * bond_xmit_hash_xdp - generate a hash value based on the xmit policy 3760 + * @bond: bonding device 3761 + * @xdp: buffer to use for headers 3762 + * 3763 + * The XDP variant of 
bond_xmit_hash. 3764 + */ 3765 + static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) 3766 + { 3767 + struct ethhdr *eth; 3873 3768 3874 - if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || 3875 - bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { 3876 - hash = bond_eth_hash(skb); 3877 - } else { 3878 - if (flow.icmp.id) 3879 - memcpy(&hash, &flow.icmp, sizeof(hash)); 3880 - else 3881 - memcpy(&hash, &flow.ports.ports, sizeof(hash)); 3882 - } 3769 + if (xdp->data + sizeof(struct ethhdr) > xdp->data_end) 3770 + return 0; 3883 3771 3884 - return bond_ip_hash(hash, &flow); 3772 + eth = (struct ethhdr *)xdp->data; 3773 + 3774 + return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, 3775 + sizeof(struct ethhdr), xdp->data_end - xdp->data); 3885 3776 } 3886 3777 3887 3778 /*-------------------------- Device entry points ----------------------------*/ ··· 4534 4421 return NULL; 4535 4422 } 4536 4423 4424 + static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, 4425 + struct xdp_buff *xdp) 4426 + { 4427 + struct slave *slave; 4428 + int slave_cnt; 4429 + u32 slave_id; 4430 + const struct ethhdr *eth; 4431 + void *data = xdp->data; 4432 + 4433 + if (data + sizeof(struct ethhdr) > xdp->data_end) 4434 + goto non_igmp; 4435 + 4436 + eth = (struct ethhdr *)data; 4437 + data += sizeof(struct ethhdr); 4438 + 4439 + /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */ 4440 + if (eth->h_proto == htons(ETH_P_IP)) { 4441 + const struct iphdr *iph; 4442 + 4443 + if (data + sizeof(struct iphdr) > xdp->data_end) 4444 + goto non_igmp; 4445 + 4446 + iph = (struct iphdr *)data; 4447 + 4448 + if (iph->protocol == IPPROTO_IGMP) { 4449 + slave = rcu_dereference(bond->curr_active_slave); 4450 + if (slave) 4451 + return slave; 4452 + return bond_get_slave_by_id(bond, 0); 4453 + } 4454 + } 4455 + 4456 + non_igmp: 4457 + slave_cnt = READ_ONCE(bond->slave_cnt); 4458 + if (likely(slave_cnt)) { 4459 + slave_id = 
bond_rr_gen_slave_id(bond) % slave_cnt; 4460 + return bond_get_slave_by_id(bond, slave_id); 4461 + } 4462 + return NULL; 4463 + } 4464 + 4537 4465 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, 4538 4466 struct net_device *bond_dev) 4539 4467 { ··· 4588 4434 return bond_tx_drop(bond_dev, skb); 4589 4435 } 4590 4436 4591 - static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond, 4592 - struct sk_buff *skb) 4437 + static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) 4593 4438 { 4594 4439 return rcu_dereference(bond->curr_active_slave); 4595 4440 } ··· 4602 4449 struct bonding *bond = netdev_priv(bond_dev); 4603 4450 struct slave *slave; 4604 4451 4605 - slave = bond_xmit_activebackup_slave_get(bond, skb); 4452 + slave = bond_xmit_activebackup_slave_get(bond); 4606 4453 if (slave) 4607 4454 return bond_dev_queue_xmit(bond, skb, slave->dev); 4608 4455 ··· 4790 4637 return slave; 4791 4638 } 4792 4639 4640 + static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, 4641 + struct xdp_buff *xdp) 4642 + { 4643 + struct bond_up_slave *slaves; 4644 + unsigned int count; 4645 + u32 hash; 4646 + 4647 + hash = bond_xmit_hash_xdp(bond, xdp); 4648 + slaves = rcu_dereference(bond->usable_slaves); 4649 + count = slaves ? READ_ONCE(slaves->count) : 0; 4650 + if (unlikely(!count)) 4651 + return NULL; 4652 + 4653 + return slaves->arr[hash % count]; 4654 + } 4655 + 4793 4656 /* Use this Xmit function for 3AD as well as XOR modes. The current 4794 4657 * usable slave array is formed in the control path. The xmit function 4795 4658 * just calculates hash and sends the packet out. 
··· 4916 4747 slave = bond_xmit_roundrobin_slave_get(bond, skb); 4917 4748 break; 4918 4749 case BOND_MODE_ACTIVEBACKUP: 4919 - slave = bond_xmit_activebackup_slave_get(bond, skb); 4750 + slave = bond_xmit_activebackup_slave_get(bond); 4920 4751 break; 4921 4752 case BOND_MODE_8023AD: 4922 4753 case BOND_MODE_XOR: ··· 5090 4921 return ret; 5091 4922 } 5092 4923 4924 + static struct net_device * 4925 + bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) 4926 + { 4927 + struct bonding *bond = netdev_priv(bond_dev); 4928 + struct slave *slave; 4929 + 4930 + /* Caller needs to hold rcu_read_lock() */ 4931 + 4932 + switch (BOND_MODE(bond)) { 4933 + case BOND_MODE_ROUNDROBIN: 4934 + slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); 4935 + break; 4936 + 4937 + case BOND_MODE_ACTIVEBACKUP: 4938 + slave = bond_xmit_activebackup_slave_get(bond); 4939 + break; 4940 + 4941 + case BOND_MODE_8023AD: 4942 + case BOND_MODE_XOR: 4943 + slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); 4944 + break; 4945 + 4946 + default: 4947 + /* Should never happen. 
Mode guarded by bond_xdp_check() */ 4948 + netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); 4949 + WARN_ON_ONCE(1); 4950 + return NULL; 4951 + } 4952 + 4953 + if (slave) 4954 + return slave->dev; 4955 + 4956 + return NULL; 4957 + } 4958 + 4959 + static int bond_xdp_xmit(struct net_device *bond_dev, 4960 + int n, struct xdp_frame **frames, u32 flags) 4961 + { 4962 + int nxmit, err = -ENXIO; 4963 + 4964 + rcu_read_lock(); 4965 + 4966 + for (nxmit = 0; nxmit < n; nxmit++) { 4967 + struct xdp_frame *frame = frames[nxmit]; 4968 + struct xdp_frame *frames1[] = {frame}; 4969 + struct net_device *slave_dev; 4970 + struct xdp_buff xdp; 4971 + 4972 + xdp_convert_frame_to_buff(frame, &xdp); 4973 + 4974 + slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp); 4975 + if (!slave_dev) { 4976 + err = -ENXIO; 4977 + break; 4978 + } 4979 + 4980 + err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags); 4981 + if (err < 1) 4982 + break; 4983 + } 4984 + 4985 + rcu_read_unlock(); 4986 + 4987 + /* If error happened on the first frame then we can pass the error up, otherwise 4988 + * report the number of frames that were xmitted. 4989 + */ 4990 + if (err < 0) 4991 + return (nxmit == 0 ? 
err : nxmit); 4992 + 4993 + return nxmit; 4994 + } 4995 + 4996 + static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, 4997 + struct netlink_ext_ack *extack) 4998 + { 4999 + struct bonding *bond = netdev_priv(dev); 5000 + struct list_head *iter; 5001 + struct slave *slave, *rollback_slave; 5002 + struct bpf_prog *old_prog; 5003 + struct netdev_bpf xdp = { 5004 + .command = XDP_SETUP_PROG, 5005 + .flags = 0, 5006 + .prog = prog, 5007 + .extack = extack, 5008 + }; 5009 + int err; 5010 + 5011 + ASSERT_RTNL(); 5012 + 5013 + if (!bond_xdp_check(bond)) 5014 + return -EOPNOTSUPP; 5015 + 5016 + old_prog = bond->xdp_prog; 5017 + bond->xdp_prog = prog; 5018 + 5019 + bond_for_each_slave(bond, slave, iter) { 5020 + struct net_device *slave_dev = slave->dev; 5021 + 5022 + if (!slave_dev->netdev_ops->ndo_bpf || 5023 + !slave_dev->netdev_ops->ndo_xdp_xmit) { 5024 + NL_SET_ERR_MSG(extack, "Slave device does not support XDP"); 5025 + slave_err(dev, slave_dev, "Slave does not support XDP\n"); 5026 + err = -EOPNOTSUPP; 5027 + goto err; 5028 + } 5029 + 5030 + if (dev_xdp_prog_count(slave_dev) > 0) { 5031 + NL_SET_ERR_MSG(extack, 5032 + "Slave has XDP program loaded, please unload before enslaving"); 5033 + slave_err(dev, slave_dev, 5034 + "Slave has XDP program loaded, please unload before enslaving\n"); 5035 + err = -EOPNOTSUPP; 5036 + goto err; 5037 + } 5038 + 5039 + err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5040 + if (err < 0) { 5041 + /* ndo_bpf() sets extack error message */ 5042 + slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err); 5043 + goto err; 5044 + } 5045 + if (prog) 5046 + bpf_prog_inc(prog); 5047 + } 5048 + 5049 + if (old_prog) 5050 + bpf_prog_put(old_prog); 5051 + 5052 + if (prog) 5053 + static_branch_inc(&bpf_master_redirect_enabled_key); 5054 + else 5055 + static_branch_dec(&bpf_master_redirect_enabled_key); 5056 + 5057 + return 0; 5058 + 5059 + err: 5060 + /* unwind the program changes */ 5061 + bond->xdp_prog = old_prog; 5062 
+ xdp.prog = old_prog; 5063 + xdp.extack = NULL; /* do not overwrite original error */ 5064 + 5065 + bond_for_each_slave(bond, rollback_slave, iter) { 5066 + struct net_device *slave_dev = rollback_slave->dev; 5067 + int err_unwind; 5068 + 5069 + if (slave == rollback_slave) 5070 + break; 5071 + 5072 + err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); 5073 + if (err_unwind < 0) 5074 + slave_err(dev, slave_dev, 5075 + "Error %d when unwinding XDP program change\n", err_unwind); 5076 + else if (xdp.prog) 5077 + bpf_prog_inc(xdp.prog); 5078 + } 5079 + return err; 5080 + } 5081 + 5082 + static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp) 5083 + { 5084 + switch (xdp->command) { 5085 + case XDP_SETUP_PROG: 5086 + return bond_xdp_set(dev, xdp->prog, xdp->extack); 5087 + default: 5088 + return -EINVAL; 5089 + } 5090 + } 5091 + 5093 5092 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) 5094 5093 { 5095 5094 if (speed == 0 || speed == SPEED_UNKNOWN) ··· 5346 5009 .ndo_features_check = passthru_features_check, 5347 5010 .ndo_get_xmit_slave = bond_xmit_get_slave, 5348 5011 .ndo_sk_get_lower_dev = bond_sk_get_lower_dev, 5012 + .ndo_bpf = bond_xdp, 5013 + .ndo_xdp_xmit = bond_xdp_xmit, 5014 + .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave, 5349 5015 }; 5350 5016 5351 5017 static const struct device_type bond_type = {
+12 -1
include/linux/filter.h
··· 776 776 777 777 DECLARE_BPF_DISPATCHER(xdp) 778 778 779 + DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); 780 + 781 + u32 xdp_master_redirect(struct xdp_buff *xdp); 782 + 779 783 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, 780 784 struct xdp_buff *xdp) 781 785 { ··· 787 783 * under local_bh_disable(), which provides the needed RCU protection 788 784 * for accessing map entries. 789 785 */ 790 - return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); 786 + u32 act = __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); 787 + 788 + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { 789 + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) 790 + act = xdp_master_redirect(xdp); 791 + } 792 + 793 + return act; 791 794 } 792 795 793 796 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+6
include/linux/netdevice.h
··· 1318 1318 * that got dropped are freed/returned via xdp_return_frame(). 1319 1319 * Returns negative number, means general error invoking ndo, meaning 1320 1320 * no frames were xmit'ed and core-caller will free all frames. 1321 + * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1322 + * struct xdp_buff *xdp); 1323 + * Get the xmit slave of master device based on the xdp_buff. 1321 1324 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); 1322 1325 * This function is used to wake up the softirq, ksoftirqd or kthread 1323 1326 * responsible for sending and/or receiving packets on a specific ··· 1548 1545 int (*ndo_xdp_xmit)(struct net_device *dev, int n, 1549 1546 struct xdp_frame **xdp, 1550 1547 u32 flags); 1548 + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1549 + struct xdp_buff *xdp); 1551 1550 int (*ndo_xsk_wakeup)(struct net_device *dev, 1552 1551 u32 queue_id, u32 flags); 1553 1552 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); ··· 4081 4076 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 4082 4077 int fd, int expected_fd, u32 flags); 4083 4078 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 4079 + u8 dev_xdp_prog_count(struct net_device *dev); 4084 4080 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 4085 4081 4086 4082 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+1
include/net/bonding.h
··· 259 259 /* protecting ipsec_list */ 260 260 spinlock_t ipsec_lock; 261 261 #endif /* CONFIG_XFRM_OFFLOAD */ 262 + struct bpf_prog *xdp_prog; 262 263 }; 263 264 264 265 #define bond_slave_get_rcu(dev) \
+1 -1
kernel/bpf/core.c
··· 1562 1562 1563 1563 if (unlikely(index >= array->map.max_entries)) 1564 1564 goto out; 1565 - if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) 1565 + if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 1566 1566 goto out; 1567 1567 1568 1568 tail_call_cnt++;
+60 -9
kernel/bpf/devmap.c
··· 534 534 return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog); 535 535 } 536 536 537 - static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp, 538 - int exclude_ifindex) 537 + static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp) 539 538 { 540 - if (!obj || obj->dev->ifindex == exclude_ifindex || 539 + if (!obj || 541 540 !obj->dev->netdev_ops->ndo_xdp_xmit) 542 541 return false; 543 542 ··· 561 562 return 0; 562 563 } 563 564 565 + static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex) 566 + { 567 + while (num_excluded--) { 568 + if (ifindex == excluded[num_excluded]) 569 + return true; 570 + } 571 + return false; 572 + } 573 + 574 + /* Get ifindex of each upper device. 'indexes' must be able to hold at 575 + * least MAX_NEST_DEV elements. 576 + * Returns the number of ifindexes added. 577 + */ 578 + static int get_upper_ifindexes(struct net_device *dev, int *indexes) 579 + { 580 + struct net_device *upper; 581 + struct list_head *iter; 582 + int n = 0; 583 + 584 + netdev_for_each_upper_dev_rcu(dev, upper, iter) { 585 + indexes[n++] = upper->ifindex; 586 + } 587 + return n; 588 + } 589 + 564 590 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, 565 591 struct bpf_map *map, bool exclude_ingress) 566 592 { 567 593 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); 568 - int exclude_ifindex = exclude_ingress ? 
dev_rx->ifindex : 0; 569 594 struct bpf_dtab_netdev *dst, *last_dst = NULL; 595 + int excluded_devices[1+MAX_NEST_DEV]; 570 596 struct hlist_head *head; 571 597 struct xdp_frame *xdpf; 598 + int num_excluded = 0; 572 599 unsigned int i; 573 600 int err; 601 + 602 + if (exclude_ingress) { 603 + num_excluded = get_upper_ifindexes(dev_rx, excluded_devices); 604 + excluded_devices[num_excluded++] = dev_rx->ifindex; 605 + } 574 606 575 607 xdpf = xdp_convert_buff_to_frame(xdp); 576 608 if (unlikely(!xdpf)) ··· 611 581 for (i = 0; i < map->max_entries; i++) { 612 582 dst = rcu_dereference_check(dtab->netdev_map[i], 613 583 rcu_read_lock_bh_held()); 614 - if (!is_valid_dst(dst, xdp, exclude_ifindex)) 584 + if (!is_valid_dst(dst, xdp)) 585 + continue; 586 + 587 + if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) 615 588 continue; 616 589 617 590 /* we only need n-1 clones; last_dst enqueued below */ ··· 634 601 head = dev_map_index_hash(dtab, i); 635 602 hlist_for_each_entry_rcu(dst, head, index_hlist, 636 603 lockdep_is_held(&dtab->index_lock)) { 637 - if (!is_valid_dst(dst, xdp, exclude_ifindex)) 604 + if (!is_valid_dst(dst, xdp)) 605 + continue; 606 + 607 + if (is_ifindex_excluded(excluded_devices, num_excluded, 608 + dst->dev->ifindex)) 638 609 continue; 639 610 640 611 /* we only need n-1 clones; last_dst enqueued below */ ··· 712 675 bool exclude_ingress) 713 676 { 714 677 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); 715 - int exclude_ifindex = exclude_ingress ? 
dev->ifindex : 0; 716 678 struct bpf_dtab_netdev *dst, *last_dst = NULL; 679 + int excluded_devices[1+MAX_NEST_DEV]; 717 680 struct hlist_head *head; 718 681 struct hlist_node *next; 682 + int num_excluded = 0; 719 683 unsigned int i; 720 684 int err; 685 + 686 + if (exclude_ingress) { 687 + num_excluded = get_upper_ifindexes(dev, excluded_devices); 688 + excluded_devices[num_excluded++] = dev->ifindex; 689 + } 721 690 722 691 if (map->map_type == BPF_MAP_TYPE_DEVMAP) { 723 692 for (i = 0; i < map->max_entries; i++) { 724 693 dst = rcu_dereference_check(dtab->netdev_map[i], 725 694 rcu_read_lock_bh_held()); 726 - if (!dst || dst->dev->ifindex == exclude_ifindex) 695 + if (!dst) 696 + continue; 697 + 698 + if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) 727 699 continue; 728 700 729 701 /* we only need n-1 clones; last_dst enqueued below */ ··· 746 700 return err; 747 701 748 702 last_dst = dst; 703 + 749 704 } 750 705 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */ 751 706 for (i = 0; i < dtab->n_buckets; i++) { 752 707 head = dev_map_index_hash(dtab, i); 753 708 hlist_for_each_entry_safe(dst, next, head, index_hlist) { 754 - if (!dst || dst->dev->ifindex == exclude_ifindex) 709 + if (!dst) 710 + continue; 711 + 712 + if (is_ifindex_excluded(excluded_devices, num_excluded, 713 + dst->dev->ifindex)) 755 714 continue; 756 715 757 716 /* we only need n-1 clones; last_dst enqueued below */
+2279 -54
lib/test_bpf.c
··· 461 461 return __bpf_fill_stxdw(self, BPF_DW); 462 462 } 463 463 464 + static int bpf_fill_long_jmp(struct bpf_test *self) 465 + { 466 + unsigned int len = BPF_MAXINSNS; 467 + struct bpf_insn *insn; 468 + int i; 469 + 470 + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); 471 + if (!insn) 472 + return -ENOMEM; 473 + 474 + insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1); 475 + insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1); 476 + 477 + /* 478 + * Fill with a complex 64-bit operation that expands to a lot of 479 + * instructions on 32-bit JITs. The large jump offset can then 480 + * overflow the conditional branch field size, triggering a branch 481 + * conversion mechanism in some JITs. 482 + * 483 + * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch 484 + * conversion on the 32-bit MIPS JIT. For other JITs, the instruction 485 + * count and/or operation may need to be modified to trigger the 486 + * branch conversion. 487 + */ 488 + for (i = 2; i < len - 1; i++) 489 + insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i); 490 + 491 + insn[len - 1] = BPF_EXIT_INSN(); 492 + 493 + self->u.ptr.insns = insn; 494 + self->u.ptr.len = len; 495 + 496 + return 0; 497 + } 498 + 464 499 static struct bpf_test tests[] = { 465 500 { 466 501 "TAX", ··· 1952 1917 { { 0, -1 } } 1953 1918 }, 1954 1919 { 1920 + /* 1921 + * Register (non-)clobbering test, in the case where a 32-bit 1922 + * JIT implements complex ALU64 operations via function calls. 1923 + * If so, the function call must be invisible in the eBPF 1924 + * registers. The JIT must then save and restore relevant 1925 + * registers during the call. The following tests check that 1926 + * the eBPF registers retain their values after such a call. 
1927 + */ 1928 + "INT: Register clobbering, R1 updated", 1929 + .u.insns_int = { 1930 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 1931 + BPF_ALU32_IMM(BPF_MOV, R1, 123456789), 1932 + BPF_ALU32_IMM(BPF_MOV, R2, 2), 1933 + BPF_ALU32_IMM(BPF_MOV, R3, 3), 1934 + BPF_ALU32_IMM(BPF_MOV, R4, 4), 1935 + BPF_ALU32_IMM(BPF_MOV, R5, 5), 1936 + BPF_ALU32_IMM(BPF_MOV, R6, 6), 1937 + BPF_ALU32_IMM(BPF_MOV, R7, 7), 1938 + BPF_ALU32_IMM(BPF_MOV, R8, 8), 1939 + BPF_ALU32_IMM(BPF_MOV, R9, 9), 1940 + BPF_ALU64_IMM(BPF_DIV, R1, 123456789), 1941 + BPF_JMP_IMM(BPF_JNE, R0, 0, 10), 1942 + BPF_JMP_IMM(BPF_JNE, R1, 1, 9), 1943 + BPF_JMP_IMM(BPF_JNE, R2, 2, 8), 1944 + BPF_JMP_IMM(BPF_JNE, R3, 3, 7), 1945 + BPF_JMP_IMM(BPF_JNE, R4, 4, 6), 1946 + BPF_JMP_IMM(BPF_JNE, R5, 5, 5), 1947 + BPF_JMP_IMM(BPF_JNE, R6, 6, 4), 1948 + BPF_JMP_IMM(BPF_JNE, R7, 7, 3), 1949 + BPF_JMP_IMM(BPF_JNE, R8, 8, 2), 1950 + BPF_JMP_IMM(BPF_JNE, R9, 9, 1), 1951 + BPF_ALU32_IMM(BPF_MOV, R0, 1), 1952 + BPF_EXIT_INSN(), 1953 + }, 1954 + INTERNAL, 1955 + { }, 1956 + { { 0, 1 } } 1957 + }, 1958 + { 1959 + "INT: Register clobbering, R2 updated", 1960 + .u.insns_int = { 1961 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 1962 + BPF_ALU32_IMM(BPF_MOV, R1, 1), 1963 + BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789), 1964 + BPF_ALU32_IMM(BPF_MOV, R3, 3), 1965 + BPF_ALU32_IMM(BPF_MOV, R4, 4), 1966 + BPF_ALU32_IMM(BPF_MOV, R5, 5), 1967 + BPF_ALU32_IMM(BPF_MOV, R6, 6), 1968 + BPF_ALU32_IMM(BPF_MOV, R7, 7), 1969 + BPF_ALU32_IMM(BPF_MOV, R8, 8), 1970 + BPF_ALU32_IMM(BPF_MOV, R9, 9), 1971 + BPF_ALU64_IMM(BPF_DIV, R2, 123456789), 1972 + BPF_JMP_IMM(BPF_JNE, R0, 0, 10), 1973 + BPF_JMP_IMM(BPF_JNE, R1, 1, 9), 1974 + BPF_JMP_IMM(BPF_JNE, R2, 2, 8), 1975 + BPF_JMP_IMM(BPF_JNE, R3, 3, 7), 1976 + BPF_JMP_IMM(BPF_JNE, R4, 4, 6), 1977 + BPF_JMP_IMM(BPF_JNE, R5, 5, 5), 1978 + BPF_JMP_IMM(BPF_JNE, R6, 6, 4), 1979 + BPF_JMP_IMM(BPF_JNE, R7, 7, 3), 1980 + BPF_JMP_IMM(BPF_JNE, R8, 8, 2), 1981 + BPF_JMP_IMM(BPF_JNE, R9, 9, 1), 1982 + BPF_ALU32_IMM(BPF_MOV, R0, 1), 1983 + 
BPF_EXIT_INSN(), 1984 + }, 1985 + INTERNAL, 1986 + { }, 1987 + { { 0, 1 } } 1988 + }, 1989 + { 1990 + /* 1991 + * Test 32-bit JITs that implement complex ALU64 operations as 1992 + * function calls R0 = f(R1, R2), and must re-arrange operands. 1993 + */ 1994 + #define NUMER 0xfedcba9876543210ULL 1995 + #define DENOM 0x0123456789abcdefULL 1996 + "ALU64_DIV X: Operand register permutations", 1997 + .u.insns_int = { 1998 + /* R0 / R2 */ 1999 + BPF_LD_IMM64(R0, NUMER), 2000 + BPF_LD_IMM64(R2, DENOM), 2001 + BPF_ALU64_REG(BPF_DIV, R0, R2), 2002 + BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1), 2003 + BPF_EXIT_INSN(), 2004 + /* R1 / R0 */ 2005 + BPF_LD_IMM64(R1, NUMER), 2006 + BPF_LD_IMM64(R0, DENOM), 2007 + BPF_ALU64_REG(BPF_DIV, R1, R0), 2008 + BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1), 2009 + BPF_EXIT_INSN(), 2010 + /* R0 / R1 */ 2011 + BPF_LD_IMM64(R0, NUMER), 2012 + BPF_LD_IMM64(R1, DENOM), 2013 + BPF_ALU64_REG(BPF_DIV, R0, R1), 2014 + BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1), 2015 + BPF_EXIT_INSN(), 2016 + /* R2 / R0 */ 2017 + BPF_LD_IMM64(R2, NUMER), 2018 + BPF_LD_IMM64(R0, DENOM), 2019 + BPF_ALU64_REG(BPF_DIV, R2, R0), 2020 + BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1), 2021 + BPF_EXIT_INSN(), 2022 + /* R2 / R1 */ 2023 + BPF_LD_IMM64(R2, NUMER), 2024 + BPF_LD_IMM64(R1, DENOM), 2025 + BPF_ALU64_REG(BPF_DIV, R2, R1), 2026 + BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1), 2027 + BPF_EXIT_INSN(), 2028 + /* R1 / R2 */ 2029 + BPF_LD_IMM64(R1, NUMER), 2030 + BPF_LD_IMM64(R2, DENOM), 2031 + BPF_ALU64_REG(BPF_DIV, R1, R2), 2032 + BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1), 2033 + BPF_EXIT_INSN(), 2034 + /* R1 / R1 */ 2035 + BPF_LD_IMM64(R1, NUMER), 2036 + BPF_ALU64_REG(BPF_DIV, R1, R1), 2037 + BPF_JMP_IMM(BPF_JEQ, R1, 1, 1), 2038 + BPF_EXIT_INSN(), 2039 + /* R2 / R2 */ 2040 + BPF_LD_IMM64(R2, DENOM), 2041 + BPF_ALU64_REG(BPF_DIV, R2, R2), 2042 + BPF_JMP_IMM(BPF_JEQ, R2, 1, 1), 2043 + BPF_EXIT_INSN(), 2044 + /* R3 / R4 */ 2045 + BPF_LD_IMM64(R3, NUMER), 2046 + BPF_LD_IMM64(R4, 
DENOM), 2047 + BPF_ALU64_REG(BPF_DIV, R3, R4), 2048 + BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1), 2049 + BPF_EXIT_INSN(), 2050 + /* Successful return */ 2051 + BPF_LD_IMM64(R0, 1), 2052 + BPF_EXIT_INSN(), 2053 + }, 2054 + INTERNAL, 2055 + { }, 2056 + { { 0, 1 } }, 2057 + #undef NUMER 2058 + #undef DENOM 2059 + }, 2060 + #ifdef CONFIG_32BIT 2061 + { 2062 + "INT: 32-bit context pointer word order and zero-extension", 2063 + .u.insns_int = { 2064 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 2065 + BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3), 2066 + BPF_ALU64_IMM(BPF_RSH, R1, 32), 2067 + BPF_JMP32_IMM(BPF_JNE, R1, 0, 1), 2068 + BPF_ALU32_IMM(BPF_MOV, R0, 1), 2069 + BPF_EXIT_INSN(), 2070 + }, 2071 + INTERNAL, 2072 + { }, 2073 + { { 0, 1 } } 2074 + }, 2075 + #endif 2076 + { 1955 2077 "check: missing ret", 1956 2078 .u.insns = { 1957 2079 BPF_STMT(BPF_LD | BPF_IMM, 1), ··· 2553 2361 { { 0, 0x1 } }, 2554 2362 }, 2555 2363 { 2364 + "ALU_MOV_K: small negative", 2365 + .u.insns_int = { 2366 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 2367 + BPF_EXIT_INSN(), 2368 + }, 2369 + INTERNAL, 2370 + { }, 2371 + { { 0, -123 } } 2372 + }, 2373 + { 2374 + "ALU_MOV_K: small negative zero extension", 2375 + .u.insns_int = { 2376 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 2377 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 2378 + BPF_EXIT_INSN(), 2379 + }, 2380 + INTERNAL, 2381 + { }, 2382 + { { 0, 0 } } 2383 + }, 2384 + { 2385 + "ALU_MOV_K: large negative", 2386 + .u.insns_int = { 2387 + BPF_ALU32_IMM(BPF_MOV, R0, -123456789), 2388 + BPF_EXIT_INSN(), 2389 + }, 2390 + INTERNAL, 2391 + { }, 2392 + { { 0, -123456789 } } 2393 + }, 2394 + { 2395 + "ALU_MOV_K: large negative zero extension", 2396 + .u.insns_int = { 2397 + BPF_ALU32_IMM(BPF_MOV, R0, -123456789), 2398 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 2399 + BPF_EXIT_INSN(), 2400 + }, 2401 + INTERNAL, 2402 + { }, 2403 + { { 0, 0 } } 2404 + }, 2405 + { 2556 2406 "ALU64_MOV_K: dst = 2", 2557 2407 .u.insns_int = { 2558 2408 BPF_ALU64_IMM(BPF_MOV, R0, 2), ··· 2645 2411 INTERNAL, 2646 2412 { }, 2647 
2413 { { 0, 0x1 } }, 2414 + }, 2415 + { 2416 + "ALU64_MOV_K: small negative", 2417 + .u.insns_int = { 2418 + BPF_ALU64_IMM(BPF_MOV, R0, -123), 2419 + BPF_EXIT_INSN(), 2420 + }, 2421 + INTERNAL, 2422 + { }, 2423 + { { 0, -123 } } 2424 + }, 2425 + { 2426 + "ALU64_MOV_K: small negative sign extension", 2427 + .u.insns_int = { 2428 + BPF_ALU64_IMM(BPF_MOV, R0, -123), 2429 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 2430 + BPF_EXIT_INSN(), 2431 + }, 2432 + INTERNAL, 2433 + { }, 2434 + { { 0, 0xffffffff } } 2435 + }, 2436 + { 2437 + "ALU64_MOV_K: large negative", 2438 + .u.insns_int = { 2439 + BPF_ALU64_IMM(BPF_MOV, R0, -123456789), 2440 + BPF_EXIT_INSN(), 2441 + }, 2442 + INTERNAL, 2443 + { }, 2444 + { { 0, -123456789 } } 2445 + }, 2446 + { 2447 + "ALU64_MOV_K: large negative sign extension", 2448 + .u.insns_int = { 2449 + BPF_ALU64_IMM(BPF_MOV, R0, -123456789), 2450 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 2451 + BPF_EXIT_INSN(), 2452 + }, 2453 + INTERNAL, 2454 + { }, 2455 + { { 0, 0xffffffff } } 2648 2456 }, 2649 2457 /* BPF_ALU | BPF_ADD | BPF_X */ 2650 2458 { ··· 3243 2967 { }, 3244 2968 { { 0, 2147483647 } }, 3245 2969 }, 2970 + { 2971 + "ALU64_MUL_X: 64x64 multiply, low word", 2972 + .u.insns_int = { 2973 + BPF_LD_IMM64(R0, 0x0fedcba987654321LL), 2974 + BPF_LD_IMM64(R1, 0x123456789abcdef0LL), 2975 + BPF_ALU64_REG(BPF_MUL, R0, R1), 2976 + BPF_EXIT_INSN(), 2977 + }, 2978 + INTERNAL, 2979 + { }, 2980 + { { 0, 0xe5618cf0 } } 2981 + }, 2982 + { 2983 + "ALU64_MUL_X: 64x64 multiply, high word", 2984 + .u.insns_int = { 2985 + BPF_LD_IMM64(R0, 0x0fedcba987654321LL), 2986 + BPF_LD_IMM64(R1, 0x123456789abcdef0LL), 2987 + BPF_ALU64_REG(BPF_MUL, R0, R1), 2988 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 2989 + BPF_EXIT_INSN(), 2990 + }, 2991 + INTERNAL, 2992 + { }, 2993 + { { 0, 0x2236d88f } } 2994 + }, 3246 2995 /* BPF_ALU | BPF_MUL | BPF_K */ 3247 2996 { 3248 2997 "ALU_MUL_K: 2 * 3 = 6", ··· 3377 3076 INTERNAL, 3378 3077 { }, 3379 3078 { { 0, 0x1 } }, 3079 + }, 3080 + { 3081 + "ALU64_MUL_K: 64x32 
multiply, low word", 3082 + .u.insns_int = { 3083 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3084 + BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), 3085 + BPF_EXIT_INSN(), 3086 + }, 3087 + INTERNAL, 3088 + { }, 3089 + { { 0, 0xe242d208 } } 3090 + }, 3091 + { 3092 + "ALU64_MUL_K: 64x32 multiply, high word", 3093 + .u.insns_int = { 3094 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3095 + BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), 3096 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3097 + BPF_EXIT_INSN(), 3098 + }, 3099 + INTERNAL, 3100 + { }, 3101 + { { 0, 0xc28f5c28 } } 3380 3102 }, 3381 3103 /* BPF_ALU | BPF_DIV | BPF_X */ 3382 3104 { ··· 3755 3431 { { 0, 0xffffffff } }, 3756 3432 }, 3757 3433 { 3434 + "ALU_AND_K: Small immediate", 3435 + .u.insns_int = { 3436 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), 3437 + BPF_ALU32_IMM(BPF_AND, R0, 15), 3438 + BPF_EXIT_INSN(), 3439 + }, 3440 + INTERNAL, 3441 + { }, 3442 + { { 0, 4 } } 3443 + }, 3444 + { 3445 + "ALU_AND_K: Large immediate", 3446 + .u.insns_int = { 3447 + BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), 3448 + BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf), 3449 + BPF_EXIT_INSN(), 3450 + }, 3451 + INTERNAL, 3452 + { }, 3453 + { { 0, 0xa1b2c3d4 } } 3454 + }, 3455 + { 3456 + "ALU_AND_K: Zero extension", 3457 + .u.insns_int = { 3458 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3459 + BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL), 3460 + BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0), 3461 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3462 + BPF_MOV32_IMM(R0, 2), 3463 + BPF_EXIT_INSN(), 3464 + BPF_MOV32_IMM(R0, 1), 3465 + BPF_EXIT_INSN(), 3466 + }, 3467 + INTERNAL, 3468 + { }, 3469 + { { 0, 1 } } 3470 + }, 3471 + { 3758 3472 "ALU64_AND_K: 3 & 2 = 2", 3759 3473 .u.insns_int = { 3760 3474 BPF_LD_IMM64(R0, 3), ··· 3815 3453 { { 0, 0xffffffff } }, 3816 3454 }, 3817 3455 { 3818 - "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000", 3456 + "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000", 3819 3457 .u.insns_int = { 3820 3458 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), 3821 3459 
BPF_LD_IMM64(R3, 0x0000000000000000LL), ··· 3831 3469 { { 0, 0x1 } }, 3832 3470 }, 3833 3471 { 3834 - "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff", 3472 + "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000", 3835 3473 .u.insns_int = { 3836 3474 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), 3837 3475 BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), ··· 3861 3499 INTERNAL, 3862 3500 { }, 3863 3501 { { 0, 0x1 } }, 3502 + }, 3503 + { 3504 + "ALU64_AND_K: Sign extension 1", 3505 + .u.insns_int = { 3506 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3507 + BPF_LD_IMM64(R1, 0x00000000090b0d0fLL), 3508 + BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f), 3509 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3510 + BPF_MOV32_IMM(R0, 2), 3511 + BPF_EXIT_INSN(), 3512 + BPF_MOV32_IMM(R0, 1), 3513 + BPF_EXIT_INSN(), 3514 + }, 3515 + INTERNAL, 3516 + { }, 3517 + { { 0, 1 } } 3518 + }, 3519 + { 3520 + "ALU64_AND_K: Sign extension 2", 3521 + .u.insns_int = { 3522 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3523 + BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL), 3524 + BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0), 3525 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3526 + BPF_MOV32_IMM(R0, 2), 3527 + BPF_EXIT_INSN(), 3528 + BPF_MOV32_IMM(R0, 1), 3529 + BPF_EXIT_INSN(), 3530 + }, 3531 + INTERNAL, 3532 + { }, 3533 + { { 0, 1 } } 3864 3534 }, 3865 3535 /* BPF_ALU | BPF_OR | BPF_X */ 3866 3536 { ··· 3967 3573 { { 0, 0xffffffff } }, 3968 3574 }, 3969 3575 { 3576 + "ALU_OR_K: Small immediate", 3577 + .u.insns_int = { 3578 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), 3579 + BPF_ALU32_IMM(BPF_OR, R0, 1), 3580 + BPF_EXIT_INSN(), 3581 + }, 3582 + INTERNAL, 3583 + { }, 3584 + { { 0, 0x01020305 } } 3585 + }, 3586 + { 3587 + "ALU_OR_K: Large immediate", 3588 + .u.insns_int = { 3589 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), 3590 + BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0), 3591 + BPF_EXIT_INSN(), 3592 + }, 3593 + INTERNAL, 3594 + { }, 3595 + { { 0, 0xa1b2c3d4 } } 3596 + }, 3597 + { 3598 + "ALU_OR_K: Zero extension", 3599 + .u.insns_int = { 3600 + 
BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3601 + BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL), 3602 + BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0), 3603 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3604 + BPF_MOV32_IMM(R0, 2), 3605 + BPF_EXIT_INSN(), 3606 + BPF_MOV32_IMM(R0, 1), 3607 + BPF_EXIT_INSN(), 3608 + }, 3609 + INTERNAL, 3610 + { }, 3611 + { { 0, 1 } } 3612 + }, 3613 + { 3970 3614 "ALU64_OR_K: 1 | 2 = 3", 3971 3615 .u.insns_int = { 3972 3616 BPF_LD_IMM64(R0, 1), ··· 4027 3595 { { 0, 0xffffffff } }, 4028 3596 }, 4029 3597 { 4030 - "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000", 3598 + "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000", 4031 3599 .u.insns_int = { 4032 3600 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), 4033 3601 BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), ··· 4073 3641 INTERNAL, 4074 3642 { }, 4075 3643 { { 0, 0x1 } }, 3644 + }, 3645 + { 3646 + "ALU64_OR_K: Sign extension 1", 3647 + .u.insns_int = { 3648 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3649 + BPF_LD_IMM64(R1, 0x012345678fafcfefLL), 3650 + BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f), 3651 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3652 + BPF_MOV32_IMM(R0, 2), 3653 + BPF_EXIT_INSN(), 3654 + BPF_MOV32_IMM(R0, 1), 3655 + BPF_EXIT_INSN(), 3656 + }, 3657 + INTERNAL, 3658 + { }, 3659 + { { 0, 1 } } 3660 + }, 3661 + { 3662 + "ALU64_OR_K: Sign extension 2", 3663 + .u.insns_int = { 3664 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3665 + BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL), 3666 + BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0), 3667 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3668 + BPF_MOV32_IMM(R0, 2), 3669 + BPF_EXIT_INSN(), 3670 + BPF_MOV32_IMM(R0, 1), 3671 + BPF_EXIT_INSN(), 3672 + }, 3673 + INTERNAL, 3674 + { }, 3675 + { { 0, 1 } } 4076 3676 }, 4077 3677 /* BPF_ALU | BPF_XOR | BPF_X */ 4078 3678 { ··· 4179 3715 { { 0, 0xfffffffe } }, 4180 3716 }, 4181 3717 { 3718 + "ALU_XOR_K: Small immediate", 3719 + .u.insns_int = { 3720 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), 3721 + BPF_ALU32_IMM(BPF_XOR, R0, 15), 3722 + BPF_EXIT_INSN(), 3723 
+ }, 3724 + INTERNAL, 3725 + { }, 3726 + { { 0, 0x0102030b } } 3727 + }, 3728 + { 3729 + "ALU_XOR_K: Large immediate", 3730 + .u.insns_int = { 3731 + BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), 3732 + BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf), 3733 + BPF_EXIT_INSN(), 3734 + }, 3735 + INTERNAL, 3736 + { }, 3737 + { { 0, 0x5e4d3c2b } } 3738 + }, 3739 + { 3740 + "ALU_XOR_K: Zero extension", 3741 + .u.insns_int = { 3742 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3743 + BPF_LD_IMM64(R1, 0x00000000795b3d1fLL), 3744 + BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0), 3745 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3746 + BPF_MOV32_IMM(R0, 2), 3747 + BPF_EXIT_INSN(), 3748 + BPF_MOV32_IMM(R0, 1), 3749 + BPF_EXIT_INSN(), 3750 + }, 3751 + INTERNAL, 3752 + { }, 3753 + { { 0, 1 } } 3754 + }, 3755 + { 4182 3756 "ALU64_XOR_K: 5 ^ 6 = 3", 4183 3757 .u.insns_int = { 4184 3758 BPF_LD_IMM64(R0, 5), ··· 4228 3726 { { 0, 3 } }, 4229 3727 }, 4230 3728 { 4231 - "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe", 3729 + "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", 4232 3730 .u.insns_int = { 4233 3731 BPF_LD_IMM64(R0, 1), 4234 3732 BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff), ··· 4286 3784 { }, 4287 3785 { { 0, 0x1 } }, 4288 3786 }, 3787 + { 3788 + "ALU64_XOR_K: Sign extension 1", 3789 + .u.insns_int = { 3790 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3791 + BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL), 3792 + BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f), 3793 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3794 + BPF_MOV32_IMM(R0, 2), 3795 + BPF_EXIT_INSN(), 3796 + BPF_MOV32_IMM(R0, 1), 3797 + BPF_EXIT_INSN(), 3798 + }, 3799 + INTERNAL, 3800 + { }, 3801 + { { 0, 1 } } 3802 + }, 3803 + { 3804 + "ALU64_XOR_K: Sign extension 2", 3805 + .u.insns_int = { 3806 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3807 + BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL), 3808 + BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0), 3809 + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), 3810 + BPF_MOV32_IMM(R0, 2), 3811 + BPF_EXIT_INSN(), 3812 + BPF_MOV32_IMM(R0, 1), 3813 + BPF_EXIT_INSN(), 3814 + }, 3815 + 
INTERNAL, 3816 + { }, 3817 + { { 0, 1 } } 3818 + }, 4289 3819 /* BPF_ALU | BPF_LSH | BPF_X */ 4290 3820 { 4291 3821 "ALU_LSH_X: 1 << 1 = 2", ··· 4344 3810 { { 0, 0x80000000 } }, 4345 3811 }, 4346 3812 { 3813 + "ALU_LSH_X: 0x12345678 << 12 = 0x45678000", 3814 + .u.insns_int = { 3815 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3816 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3817 + BPF_ALU32_REG(BPF_LSH, R0, R1), 3818 + BPF_EXIT_INSN(), 3819 + }, 3820 + INTERNAL, 3821 + { }, 3822 + { { 0, 0x45678000 } } 3823 + }, 3824 + { 4347 3825 "ALU64_LSH_X: 1 << 1 = 2", 4348 3826 .u.insns_int = { 4349 3827 BPF_LD_IMM64(R0, 1), ··· 4378 3832 INTERNAL, 4379 3833 { }, 4380 3834 { { 0, 0x80000000 } }, 3835 + }, 3836 + { 3837 + "ALU64_LSH_X: Shift < 32, low word", 3838 + .u.insns_int = { 3839 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3840 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3841 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3842 + BPF_EXIT_INSN(), 3843 + }, 3844 + INTERNAL, 3845 + { }, 3846 + { { 0, 0xbcdef000 } } 3847 + }, 3848 + { 3849 + "ALU64_LSH_X: Shift < 32, high word", 3850 + .u.insns_int = { 3851 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3852 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3853 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3854 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3855 + BPF_EXIT_INSN(), 3856 + }, 3857 + INTERNAL, 3858 + { }, 3859 + { { 0, 0x3456789a } } 3860 + }, 3861 + { 3862 + "ALU64_LSH_X: Shift > 32, low word", 3863 + .u.insns_int = { 3864 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3865 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 3866 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3867 + BPF_EXIT_INSN(), 3868 + }, 3869 + INTERNAL, 3870 + { }, 3871 + { { 0, 0 } } 3872 + }, 3873 + { 3874 + "ALU64_LSH_X: Shift > 32, high word", 3875 + .u.insns_int = { 3876 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3877 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 3878 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3879 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3880 + BPF_EXIT_INSN(), 3881 + }, 3882 + INTERNAL, 3883 + { }, 3884 + { { 0, 0x9abcdef0 } } 3885 + }, 3886 + { 3887 + 
"ALU64_LSH_X: Shift == 32, low word", 3888 + .u.insns_int = { 3889 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3890 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 3891 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3892 + BPF_EXIT_INSN(), 3893 + }, 3894 + INTERNAL, 3895 + { }, 3896 + { { 0, 0 } } 3897 + }, 3898 + { 3899 + "ALU64_LSH_X: Shift == 32, high word", 3900 + .u.insns_int = { 3901 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3902 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 3903 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3904 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3905 + BPF_EXIT_INSN(), 3906 + }, 3907 + INTERNAL, 3908 + { }, 3909 + { { 0, 0x89abcdef } } 3910 + }, 3911 + { 3912 + "ALU64_LSH_X: Zero shift, low word", 3913 + .u.insns_int = { 3914 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3915 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 3916 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3917 + BPF_EXIT_INSN(), 3918 + }, 3919 + INTERNAL, 3920 + { }, 3921 + { { 0, 0x89abcdef } } 3922 + }, 3923 + { 3924 + "ALU64_LSH_X: Zero shift, high word", 3925 + .u.insns_int = { 3926 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3927 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 3928 + BPF_ALU64_REG(BPF_LSH, R0, R1), 3929 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3930 + BPF_EXIT_INSN(), 3931 + }, 3932 + INTERNAL, 3933 + { }, 3934 + { { 0, 0x01234567 } } 4381 3935 }, 4382 3936 /* BPF_ALU | BPF_LSH | BPF_K */ 4383 3937 { ··· 4503 3857 { { 0, 0x80000000 } }, 4504 3858 }, 4505 3859 { 3860 + "ALU_LSH_K: 0x12345678 << 12 = 0x45678000", 3861 + .u.insns_int = { 3862 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3863 + BPF_ALU32_IMM(BPF_LSH, R0, 12), 3864 + BPF_EXIT_INSN(), 3865 + }, 3866 + INTERNAL, 3867 + { }, 3868 + { { 0, 0x45678000 } } 3869 + }, 3870 + { 3871 + "ALU_LSH_K: 0x12345678 << 0 = 0x12345678", 3872 + .u.insns_int = { 3873 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3874 + BPF_ALU32_IMM(BPF_LSH, R0, 0), 3875 + BPF_EXIT_INSN(), 3876 + }, 3877 + INTERNAL, 3878 + { }, 3879 + { { 0, 0x12345678 } } 3880 + }, 3881 + { 4506 3882 "ALU64_LSH_K: 1 << 1 = 2", 4507 3883 .u.insns_int = { 
4508 3884 BPF_LD_IMM64(R0, 1), ··· 4545 3877 INTERNAL, 4546 3878 { }, 4547 3879 { { 0, 0x80000000 } }, 3880 + }, 3881 + { 3882 + "ALU64_LSH_K: Shift < 32, low word", 3883 + .u.insns_int = { 3884 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3885 + BPF_ALU64_IMM(BPF_LSH, R0, 12), 3886 + BPF_EXIT_INSN(), 3887 + }, 3888 + INTERNAL, 3889 + { }, 3890 + { { 0, 0xbcdef000 } } 3891 + }, 3892 + { 3893 + "ALU64_LSH_K: Shift < 32, high word", 3894 + .u.insns_int = { 3895 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3896 + BPF_ALU64_IMM(BPF_LSH, R0, 12), 3897 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3898 + BPF_EXIT_INSN(), 3899 + }, 3900 + INTERNAL, 3901 + { }, 3902 + { { 0, 0x3456789a } } 3903 + }, 3904 + { 3905 + "ALU64_LSH_K: Shift > 32, low word", 3906 + .u.insns_int = { 3907 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3908 + BPF_ALU64_IMM(BPF_LSH, R0, 36), 3909 + BPF_EXIT_INSN(), 3910 + }, 3911 + INTERNAL, 3912 + { }, 3913 + { { 0, 0 } } 3914 + }, 3915 + { 3916 + "ALU64_LSH_K: Shift > 32, high word", 3917 + .u.insns_int = { 3918 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3919 + BPF_ALU64_IMM(BPF_LSH, R0, 36), 3920 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3921 + BPF_EXIT_INSN(), 3922 + }, 3923 + INTERNAL, 3924 + { }, 3925 + { { 0, 0x9abcdef0 } } 3926 + }, 3927 + { 3928 + "ALU64_LSH_K: Shift == 32, low word", 3929 + .u.insns_int = { 3930 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3931 + BPF_ALU64_IMM(BPF_LSH, R0, 32), 3932 + BPF_EXIT_INSN(), 3933 + }, 3934 + INTERNAL, 3935 + { }, 3936 + { { 0, 0 } } 3937 + }, 3938 + { 3939 + "ALU64_LSH_K: Shift == 32, high word", 3940 + .u.insns_int = { 3941 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3942 + BPF_ALU64_IMM(BPF_LSH, R0, 32), 3943 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3944 + BPF_EXIT_INSN(), 3945 + }, 3946 + INTERNAL, 3947 + { }, 3948 + { { 0, 0x89abcdef } } 3949 + }, 3950 + { 3951 + "ALU64_LSH_K: Zero shift", 3952 + .u.insns_int = { 3953 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 3954 + BPF_ALU64_IMM(BPF_LSH, R0, 0), 3955 + BPF_EXIT_INSN(), 3956 + }, 3957 + 
INTERNAL, 3958 + { }, 3959 + { { 0, 0x89abcdef } } 4548 3960 }, 4549 3961 /* BPF_ALU | BPF_RSH | BPF_X */ 4550 3962 { ··· 4652 3904 { { 0, 1 } }, 4653 3905 }, 4654 3906 { 3907 + "ALU_RSH_X: 0x12345678 >> 20 = 0x123", 3908 + .u.insns_int = { 3909 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3910 + BPF_ALU32_IMM(BPF_MOV, R1, 20), 3911 + BPF_ALU32_REG(BPF_RSH, R0, R1), 3912 + BPF_EXIT_INSN(), 3913 + }, 3914 + INTERNAL, 3915 + { }, 3916 + { { 0, 0x123 } } 3917 + }, 3918 + { 4655 3919 "ALU64_RSH_X: 2 >> 1 = 1", 4656 3920 .u.insns_int = { 4657 3921 BPF_LD_IMM64(R0, 2), ··· 4686 3926 INTERNAL, 4687 3927 { }, 4688 3928 { { 0, 1 } }, 3929 + }, 3930 + { 3931 + "ALU64_RSH_X: Shift < 32, low word", 3932 + .u.insns_int = { 3933 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3934 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3935 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3936 + BPF_EXIT_INSN(), 3937 + }, 3938 + INTERNAL, 3939 + { }, 3940 + { { 0, 0x56789abc } } 3941 + }, 3942 + { 3943 + "ALU64_RSH_X: Shift < 32, high word", 3944 + .u.insns_int = { 3945 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3946 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3947 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3948 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3949 + BPF_EXIT_INSN(), 3950 + }, 3951 + INTERNAL, 3952 + { }, 3953 + { { 0, 0x00081234 } } 3954 + }, 3955 + { 3956 + "ALU64_RSH_X: Shift > 32, low word", 3957 + .u.insns_int = { 3958 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3959 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 3960 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3961 + BPF_EXIT_INSN(), 3962 + }, 3963 + INTERNAL, 3964 + { }, 3965 + { { 0, 0x08123456 } } 3966 + }, 3967 + { 3968 + "ALU64_RSH_X: Shift > 32, high word", 3969 + .u.insns_int = { 3970 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3971 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 3972 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3973 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3974 + BPF_EXIT_INSN(), 3975 + }, 3976 + INTERNAL, 3977 + { }, 3978 + { { 0, 0 } } 3979 + }, 3980 + { 3981 + "ALU64_RSH_X: Shift == 32, low word", 3982 + 
.u.insns_int = { 3983 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3984 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 3985 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3986 + BPF_EXIT_INSN(), 3987 + }, 3988 + INTERNAL, 3989 + { }, 3990 + { { 0, 0x81234567 } } 3991 + }, 3992 + { 3993 + "ALU64_RSH_X: Shift == 32, high word", 3994 + .u.insns_int = { 3995 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3996 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 3997 + BPF_ALU64_REG(BPF_RSH, R0, R1), 3998 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3999 + BPF_EXIT_INSN(), 4000 + }, 4001 + INTERNAL, 4002 + { }, 4003 + { { 0, 0 } } 4004 + }, 4005 + { 4006 + "ALU64_RSH_X: Zero shift, low word", 4007 + .u.insns_int = { 4008 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4009 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 4010 + BPF_ALU64_REG(BPF_RSH, R0, R1), 4011 + BPF_EXIT_INSN(), 4012 + }, 4013 + INTERNAL, 4014 + { }, 4015 + { { 0, 0x89abcdef } } 4016 + }, 4017 + { 4018 + "ALU64_RSH_X: Zero shift, high word", 4019 + .u.insns_int = { 4020 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4021 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 4022 + BPF_ALU64_REG(BPF_RSH, R0, R1), 4023 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4024 + BPF_EXIT_INSN(), 4025 + }, 4026 + INTERNAL, 4027 + { }, 4028 + { { 0, 0x81234567 } } 4689 4029 }, 4690 4030 /* BPF_ALU | BPF_RSH | BPF_K */ 4691 4031 { ··· 4811 3951 { { 0, 1 } }, 4812 3952 }, 4813 3953 { 3954 + "ALU_RSH_K: 0x12345678 >> 20 = 0x123", 3955 + .u.insns_int = { 3956 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3957 + BPF_ALU32_IMM(BPF_RSH, R0, 20), 3958 + BPF_EXIT_INSN(), 3959 + }, 3960 + INTERNAL, 3961 + { }, 3962 + { { 0, 0x123 } } 3963 + }, 3964 + { 3965 + "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678", 3966 + .u.insns_int = { 3967 + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), 3968 + BPF_ALU32_IMM(BPF_RSH, R0, 0), 3969 + BPF_EXIT_INSN(), 3970 + }, 3971 + INTERNAL, 3972 + { }, 3973 + { { 0, 0x12345678 } } 3974 + }, 3975 + { 4814 3976 "ALU64_RSH_K: 2 >> 1 = 1", 4815 3977 .u.insns_int = { 4816 3978 BPF_LD_IMM64(R0, 2), ··· 4854 3972 { }, 4855 3973 { { 0, 
1 } }, 4856 3974 }, 3975 + { 3976 + "ALU64_RSH_K: Shift < 32, low word", 3977 + .u.insns_int = { 3978 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3979 + BPF_ALU64_IMM(BPF_RSH, R0, 12), 3980 + BPF_EXIT_INSN(), 3981 + }, 3982 + INTERNAL, 3983 + { }, 3984 + { { 0, 0x56789abc } } 3985 + }, 3986 + { 3987 + "ALU64_RSH_K: Shift < 32, high word", 3988 + .u.insns_int = { 3989 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3990 + BPF_ALU64_IMM(BPF_RSH, R0, 12), 3991 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 3992 + BPF_EXIT_INSN(), 3993 + }, 3994 + INTERNAL, 3995 + { }, 3996 + { { 0, 0x00081234 } } 3997 + }, 3998 + { 3999 + "ALU64_RSH_K: Shift > 32, low word", 4000 + .u.insns_int = { 4001 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4002 + BPF_ALU64_IMM(BPF_RSH, R0, 36), 4003 + BPF_EXIT_INSN(), 4004 + }, 4005 + INTERNAL, 4006 + { }, 4007 + { { 0, 0x08123456 } } 4008 + }, 4009 + { 4010 + "ALU64_RSH_K: Shift > 32, high word", 4011 + .u.insns_int = { 4012 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4013 + BPF_ALU64_IMM(BPF_RSH, R0, 36), 4014 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4015 + BPF_EXIT_INSN(), 4016 + }, 4017 + INTERNAL, 4018 + { }, 4019 + { { 0, 0 } } 4020 + }, 4021 + { 4022 + "ALU64_RSH_K: Shift == 32, low word", 4023 + .u.insns_int = { 4024 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4025 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4026 + BPF_EXIT_INSN(), 4027 + }, 4028 + INTERNAL, 4029 + { }, 4030 + { { 0, 0x81234567 } } 4031 + }, 4032 + { 4033 + "ALU64_RSH_K: Shift == 32, high word", 4034 + .u.insns_int = { 4035 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4036 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4037 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4038 + BPF_EXIT_INSN(), 4039 + }, 4040 + INTERNAL, 4041 + { }, 4042 + { { 0, 0 } } 4043 + }, 4044 + { 4045 + "ALU64_RSH_K: Zero shift", 4046 + .u.insns_int = { 4047 + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), 4048 + BPF_ALU64_IMM(BPF_RSH, R0, 0), 4049 + BPF_EXIT_INSN(), 4050 + }, 4051 + INTERNAL, 4052 + { }, 4053 + { { 0, 0x89abcdef } } 4054 + }, 4857 4055 /* BPF_ALU | BPF_ARSH 
| BPF_X */ 4858 4056 { 4859 - "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", 4057 + "ALU32_ARSH_X: -1234 >> 7 = -10", 4058 + .u.insns_int = { 4059 + BPF_ALU32_IMM(BPF_MOV, R0, -1234), 4060 + BPF_ALU32_IMM(BPF_MOV, R1, 7), 4061 + BPF_ALU32_REG(BPF_ARSH, R0, R1), 4062 + BPF_EXIT_INSN(), 4063 + }, 4064 + INTERNAL, 4065 + { }, 4066 + { { 0, -10 } } 4067 + }, 4068 + { 4069 + "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", 4860 4070 .u.insns_int = { 4861 4071 BPF_LD_IMM64(R0, 0xff00ff0000000000LL), 4862 4072 BPF_ALU32_IMM(BPF_MOV, R1, 40), ··· 4959 3985 { }, 4960 3986 { { 0, 0xffff00ff } }, 4961 3987 }, 3988 + { 3989 + "ALU64_ARSH_X: Shift < 32, low word", 3990 + .u.insns_int = { 3991 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 3992 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 3993 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 3994 + BPF_EXIT_INSN(), 3995 + }, 3996 + INTERNAL, 3997 + { }, 3998 + { { 0, 0x56789abc } } 3999 + }, 4000 + { 4001 + "ALU64_ARSH_X: Shift < 32, high word", 4002 + .u.insns_int = { 4003 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4004 + BPF_ALU32_IMM(BPF_MOV, R1, 12), 4005 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4006 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4007 + BPF_EXIT_INSN(), 4008 + }, 4009 + INTERNAL, 4010 + { }, 4011 + { { 0, 0xfff81234 } } 4012 + }, 4013 + { 4014 + "ALU64_ARSH_X: Shift > 32, low word", 4015 + .u.insns_int = { 4016 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4017 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 4018 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4019 + BPF_EXIT_INSN(), 4020 + }, 4021 + INTERNAL, 4022 + { }, 4023 + { { 0, 0xf8123456 } } 4024 + }, 4025 + { 4026 + "ALU64_ARSH_X: Shift > 32, high word", 4027 + .u.insns_int = { 4028 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4029 + BPF_ALU32_IMM(BPF_MOV, R1, 36), 4030 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4031 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4032 + BPF_EXIT_INSN(), 4033 + }, 4034 + INTERNAL, 4035 + { }, 4036 + { { 0, -1 } } 4037 + }, 4038 + { 4039 + "ALU64_ARSH_X: Shift == 32, low word", 4040 + 
.u.insns_int = { 4041 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4042 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 4043 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4044 + BPF_EXIT_INSN(), 4045 + }, 4046 + INTERNAL, 4047 + { }, 4048 + { { 0, 0x81234567 } } 4049 + }, 4050 + { 4051 + "ALU64_ARSH_X: Shift == 32, high word", 4052 + .u.insns_int = { 4053 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4054 + BPF_ALU32_IMM(BPF_MOV, R1, 32), 4055 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4056 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4057 + BPF_EXIT_INSN(), 4058 + }, 4059 + INTERNAL, 4060 + { }, 4061 + { { 0, -1 } } 4062 + }, 4063 + { 4064 + "ALU64_ARSH_X: Zero shift, low word", 4065 + .u.insns_int = { 4066 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4067 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 4068 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4069 + BPF_EXIT_INSN(), 4070 + }, 4071 + INTERNAL, 4072 + { }, 4073 + { { 0, 0x89abcdef } } 4074 + }, 4075 + { 4076 + "ALU64_ARSH_X: Zero shift, high word", 4077 + .u.insns_int = { 4078 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4079 + BPF_ALU32_IMM(BPF_MOV, R1, 0), 4080 + BPF_ALU64_REG(BPF_ARSH, R0, R1), 4081 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4082 + BPF_EXIT_INSN(), 4083 + }, 4084 + INTERNAL, 4085 + { }, 4086 + { { 0, 0x81234567 } } 4087 + }, 4962 4088 /* BPF_ALU | BPF_ARSH | BPF_K */ 4963 4089 { 4964 - "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", 4090 + "ALU32_ARSH_K: -1234 >> 7 = -10", 4091 + .u.insns_int = { 4092 + BPF_ALU32_IMM(BPF_MOV, R0, -1234), 4093 + BPF_ALU32_IMM(BPF_ARSH, R0, 7), 4094 + BPF_EXIT_INSN(), 4095 + }, 4096 + INTERNAL, 4097 + { }, 4098 + { { 0, -10 } } 4099 + }, 4100 + { 4101 + "ALU32_ARSH_K: -1234 >> 0 = -1234", 4102 + .u.insns_int = { 4103 + BPF_ALU32_IMM(BPF_MOV, R0, -1234), 4104 + BPF_ALU32_IMM(BPF_ARSH, R0, 0), 4105 + BPF_EXIT_INSN(), 4106 + }, 4107 + INTERNAL, 4108 + { }, 4109 + { { 0, -1234 } } 4110 + }, 4111 + { 4112 + "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", 4965 4113 .u.insns_int = { 4966 4114 BPF_LD_IMM64(R0, 
0xff00ff0000000000LL), 4967 4115 BPF_ALU64_IMM(BPF_ARSH, R0, 40), ··· 5092 3996 INTERNAL, 5093 3997 { }, 5094 3998 { { 0, 0xffff00ff } }, 3999 + }, 4000 + { 4001 + "ALU64_ARSH_K: Shift < 32, low word", 4002 + .u.insns_int = { 4003 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4004 + BPF_ALU64_IMM(BPF_RSH, R0, 12), 4005 + BPF_EXIT_INSN(), 4006 + }, 4007 + INTERNAL, 4008 + { }, 4009 + { { 0, 0x56789abc } } 4010 + }, 4011 + { 4012 + "ALU64_ARSH_K: Shift < 32, high word", 4013 + .u.insns_int = { 4014 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4015 + BPF_ALU64_IMM(BPF_ARSH, R0, 12), 4016 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4017 + BPF_EXIT_INSN(), 4018 + }, 4019 + INTERNAL, 4020 + { }, 4021 + { { 0, 0xfff81234 } } 4022 + }, 4023 + { 4024 + "ALU64_ARSH_K: Shift > 32, low word", 4025 + .u.insns_int = { 4026 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4027 + BPF_ALU64_IMM(BPF_ARSH, R0, 36), 4028 + BPF_EXIT_INSN(), 4029 + }, 4030 + INTERNAL, 4031 + { }, 4032 + { { 0, 0xf8123456 } } 4033 + }, 4034 + { 4035 + "ALU64_ARSH_K: Shift > 32, high word", 4036 + .u.insns_int = { 4037 + BPF_LD_IMM64(R0, 0xf123456789abcdefLL), 4038 + BPF_ALU64_IMM(BPF_ARSH, R0, 36), 4039 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4040 + BPF_EXIT_INSN(), 4041 + }, 4042 + INTERNAL, 4043 + { }, 4044 + { { 0, -1 } } 4045 + }, 4046 + { 4047 + "ALU64_ARSH_K: Shift == 32, low word", 4048 + .u.insns_int = { 4049 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4050 + BPF_ALU64_IMM(BPF_ARSH, R0, 32), 4051 + BPF_EXIT_INSN(), 4052 + }, 4053 + INTERNAL, 4054 + { }, 4055 + { { 0, 0x81234567 } } 4056 + }, 4057 + { 4058 + "ALU64_ARSH_K: Shift == 32, high word", 4059 + .u.insns_int = { 4060 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4061 + BPF_ALU64_IMM(BPF_ARSH, R0, 32), 4062 + BPF_ALU64_IMM(BPF_RSH, R0, 32), 4063 + BPF_EXIT_INSN(), 4064 + }, 4065 + INTERNAL, 4066 + { }, 4067 + { { 0, -1 } } 4068 + }, 4069 + { 4070 + "ALU64_ARSH_K: Zero shoft", 4071 + .u.insns_int = { 4072 + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), 4073 + 
BPF_ALU64_IMM(BPF_ARSH, R0, 0), 4074 + BPF_EXIT_INSN(), 4075 + }, 4076 + INTERNAL, 4077 + { }, 4078 + { { 0, 0x89abcdef } } 5095 4079 }, 5096 4080 /* BPF_ALU | BPF_NEG */ 5097 4081 { ··· 5471 4295 { { 0, 0xffffffff } }, 5472 4296 .stack_depth = 40, 5473 4297 }, 5474 - /* BPF_STX | BPF_ATOMIC | BPF_W/DW */ 5475 4298 { 5476 - "STX_XADD_W: Test: 0x12 + 0x10 = 0x22", 4299 + "STX_MEM_DW: Store double word: first word in memory", 5477 4300 .u.insns_int = { 5478 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5479 - BPF_ST_MEM(BPF_W, R10, -40, 0x10), 5480 - BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40), 4301 + BPF_LD_IMM64(R0, 0), 4302 + BPF_LD_IMM64(R1, 0x0123456789abcdefLL), 4303 + BPF_STX_MEM(BPF_DW, R10, R1, -40), 5481 4304 BPF_LDX_MEM(BPF_W, R0, R10, -40), 5482 4305 BPF_EXIT_INSN(), 5483 4306 }, 5484 4307 INTERNAL, 5485 4308 { }, 5486 - { { 0, 0x22 } }, 4309 + #ifdef __BIG_ENDIAN 4310 + { { 0, 0x01234567 } }, 4311 + #else 4312 + { { 0, 0x89abcdef } }, 4313 + #endif 5487 4314 .stack_depth = 40, 5488 4315 }, 5489 4316 { 5490 - "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22", 4317 + "STX_MEM_DW: Store double word: second word in memory", 5491 4318 .u.insns_int = { 5492 - BPF_ALU64_REG(BPF_MOV, R1, R10), 5493 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5494 - BPF_ST_MEM(BPF_W, R10, -40, 0x10), 5495 - BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40), 5496 - BPF_ALU64_REG(BPF_MOV, R0, R10), 5497 - BPF_ALU64_REG(BPF_SUB, R0, R1), 4319 + BPF_LD_IMM64(R0, 0), 4320 + BPF_LD_IMM64(R1, 0x0123456789abcdefLL), 4321 + BPF_STX_MEM(BPF_DW, R10, R1, -40), 4322 + BPF_LDX_MEM(BPF_W, R0, R10, -36), 5498 4323 BPF_EXIT_INSN(), 5499 4324 }, 5500 4325 INTERNAL, 5501 4326 { }, 5502 - { { 0, 0 } }, 4327 + #ifdef __BIG_ENDIAN 4328 + { { 0, 0x89abcdef } }, 4329 + #else 4330 + { { 0, 0x01234567 } }, 4331 + #endif 5503 4332 .stack_depth = 40, 5504 4333 }, 5505 - { 5506 - "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", 5507 - .u.insns_int = { 5508 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5509 - BPF_ST_MEM(BPF_W, 
R10, -40, 0x10), 5510 - BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40), 5511 - BPF_EXIT_INSN(), 5512 - }, 5513 - INTERNAL, 5514 - { }, 5515 - { { 0, 0x12 } }, 5516 - .stack_depth = 40, 5517 - }, 4334 + /* BPF_STX | BPF_ATOMIC | BPF_W/DW */ 5518 4335 { 5519 4336 "STX_XADD_W: X + 1 + 1 + 1 + ...", 5520 4337 { }, ··· 5517 4348 .fill_helper = bpf_fill_stxw, 5518 4349 }, 5519 4350 { 5520 - "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", 4351 + "STX_XADD_DW: X + 1 + 1 + 1 + ...", 4352 + { }, 4353 + INTERNAL, 4354 + { }, 4355 + { { 0, 4134 } }, 4356 + .fill_helper = bpf_fill_stxdw, 4357 + }, 4358 + /* 4359 + * Exhaustive tests of atomic operation variants. 4360 + * Individual tests are expanded from template macros for all 4361 + * combinations of ALU operation, word size and fetching. 4362 + */ 4363 + #define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \ 4364 + { \ 4365 + "BPF_ATOMIC | " #width ", " #op ": Test: " \ 4366 + #old " " #logic " " #update " = " #result, \ 4367 + .u.insns_int = { \ 4368 + BPF_ALU32_IMM(BPF_MOV, R5, update), \ 4369 + BPF_ST_MEM(width, R10, -40, old), \ 4370 + BPF_ATOMIC_OP(width, op, R10, R5, -40), \ 4371 + BPF_LDX_MEM(width, R0, R10, -40), \ 4372 + BPF_EXIT_INSN(), \ 4373 + }, \ 4374 + INTERNAL, \ 4375 + { }, \ 4376 + { { 0, result } }, \ 4377 + .stack_depth = 40, \ 4378 + } 4379 + #define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \ 4380 + { \ 4381 + "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \ 4382 + #old " " #logic " " #update " = " #result, \ 4383 + .u.insns_int = { \ 4384 + BPF_ALU64_REG(BPF_MOV, R1, R10), \ 4385 + BPF_ALU32_IMM(BPF_MOV, R0, update), \ 4386 + BPF_ST_MEM(BPF_W, R10, -40, old), \ 4387 + BPF_ATOMIC_OP(width, op, R10, R0, -40), \ 4388 + BPF_ALU64_REG(BPF_MOV, R0, R10), \ 4389 + BPF_ALU64_REG(BPF_SUB, R0, R1), \ 4390 + BPF_EXIT_INSN(), \ 4391 + }, \ 4392 + INTERNAL, \ 4393 + { }, \ 4394 + { { 0, 0 } }, \ 4395 + .stack_depth = 40, \ 4396 + } 4397 + #define BPF_ATOMIC_OP_TEST3(width, op, 
logic, old, update, result) \ 4398 + { \ 4399 + "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \ 4400 + #old " " #logic " " #update " = " #result, \ 4401 + .u.insns_int = { \ 4402 + BPF_ALU64_REG(BPF_MOV, R0, R10), \ 4403 + BPF_ALU32_IMM(BPF_MOV, R1, update), \ 4404 + BPF_ST_MEM(width, R10, -40, old), \ 4405 + BPF_ATOMIC_OP(width, op, R10, R1, -40), \ 4406 + BPF_ALU64_REG(BPF_SUB, R0, R10), \ 4407 + BPF_EXIT_INSN(), \ 4408 + }, \ 4409 + INTERNAL, \ 4410 + { }, \ 4411 + { { 0, 0 } }, \ 4412 + .stack_depth = 40, \ 4413 + } 4414 + #define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \ 4415 + { \ 4416 + "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \ 4417 + #old " " #logic " " #update " = " #result, \ 4418 + .u.insns_int = { \ 4419 + BPF_ALU32_IMM(BPF_MOV, R3, update), \ 4420 + BPF_ST_MEM(width, R10, -40, old), \ 4421 + BPF_ATOMIC_OP(width, op, R10, R3, -40), \ 4422 + BPF_ALU64_REG(BPF_MOV, R0, R3), \ 4423 + BPF_EXIT_INSN(), \ 4424 + }, \ 4425 + INTERNAL, \ 4426 + { }, \ 4427 + { { 0, (op) & BPF_FETCH ? 
old : update } }, \ 4428 + .stack_depth = 40, \ 4429 + } 4430 + /* BPF_ATOMIC | BPF_W: BPF_ADD */ 4431 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), 4432 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), 4433 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), 4434 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), 4435 + /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */ 4436 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4437 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4438 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4439 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4440 + /* BPF_ATOMIC | BPF_DW: BPF_ADD */ 4441 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), 4442 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), 4443 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), 4444 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), 4445 + /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */ 4446 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4447 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4448 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4449 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), 4450 + /* BPF_ATOMIC | BPF_W: BPF_AND */ 4451 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), 4452 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), 4453 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), 4454 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), 4455 + /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */ 4456 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4457 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4458 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4459 + 
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4460 + /* BPF_ATOMIC | BPF_DW: BPF_AND */ 4461 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), 4462 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), 4463 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), 4464 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), 4465 + /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */ 4466 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4467 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4468 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4469 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), 4470 + /* BPF_ATOMIC | BPF_W: BPF_OR */ 4471 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), 4472 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), 4473 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), 4474 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), 4475 + /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */ 4476 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4477 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4478 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4479 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4480 + /* BPF_ATOMIC | BPF_DW: BPF_OR */ 4481 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), 4482 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), 4483 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), 4484 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), 4485 + /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */ 4486 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4487 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4488 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4489 + 
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), 4490 + /* BPF_ATOMIC | BPF_W: BPF_XOR */ 4491 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4492 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4493 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4494 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4495 + /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */ 4496 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4497 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4498 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4499 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4500 + /* BPF_ATOMIC | BPF_DW: BPF_XOR */ 4501 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4502 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4503 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4504 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), 4505 + /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */ 4506 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4507 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4508 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4509 + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), 4510 + /* BPF_ATOMIC | BPF_W: BPF_XCHG */ 4511 + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4512 + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4513 + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4514 + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4515 + /* BPF_ATOMIC | BPF_DW: BPF_XCHG */ 4516 + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4517 + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4518 + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4519 + 
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), 4520 + #undef BPF_ATOMIC_OP_TEST1 4521 + #undef BPF_ATOMIC_OP_TEST2 4522 + #undef BPF_ATOMIC_OP_TEST3 4523 + #undef BPF_ATOMIC_OP_TEST4 4524 + /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */ 4525 + { 4526 + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return", 5521 4527 .u.insns_int = { 5522 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5523 - BPF_ST_MEM(BPF_DW, R10, -40, 0x10), 5524 - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40), 5525 - BPF_LDX_MEM(BPF_DW, R0, R10, -40), 4528 + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), 4529 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), 4530 + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), 4531 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 5526 4532 BPF_EXIT_INSN(), 5527 4533 }, 5528 4534 INTERNAL, 5529 4535 { }, 5530 - { { 0, 0x22 } }, 4536 + { { 0, 0x01234567 } }, 5531 4537 .stack_depth = 40, 5532 4538 }, 5533 4539 { 5534 - "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", 4540 + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store", 5535 4541 .u.insns_int = { 5536 - BPF_ALU64_REG(BPF_MOV, R1, R10), 5537 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5538 - BPF_ST_MEM(BPF_DW, R10, -40, 0x10), 5539 - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40), 5540 - BPF_ALU64_REG(BPF_MOV, R0, R10), 4542 + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), 4543 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), 4544 + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), 4545 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 4546 + BPF_LDX_MEM(BPF_W, R0, R10, -40), 4547 + BPF_EXIT_INSN(), 4548 + }, 4549 + INTERNAL, 4550 + { }, 4551 + { { 0, 0x89abcdef } }, 4552 + .stack_depth = 40, 4553 + }, 4554 + { 4555 + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return", 4556 + .u.insns_int = { 4557 + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), 4558 + BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), 4559 + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), 4560 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 4561 + BPF_EXIT_INSN(), 4562 + }, 4563 + INTERNAL, 4564 
+ { }, 4565 + { { 0, 0x01234567 } }, 4566 + .stack_depth = 40, 4567 + }, 4568 + { 4569 + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store", 4570 + .u.insns_int = { 4571 + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), 4572 + BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), 4573 + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), 4574 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 4575 + BPF_LDX_MEM(BPF_W, R0, R10, -40), 4576 + BPF_EXIT_INSN(), 4577 + }, 4578 + INTERNAL, 4579 + { }, 4580 + { { 0, 0x01234567 } }, 4581 + .stack_depth = 40, 4582 + }, 4583 + { 4584 + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects", 4585 + .u.insns_int = { 4586 + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), 4587 + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), 4588 + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), 4589 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 4590 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), 4591 + BPF_ALU32_REG(BPF_MOV, R0, R3), 4592 + BPF_EXIT_INSN(), 4593 + }, 4594 + INTERNAL, 4595 + { }, 4596 + { { 0, 0x89abcdef } }, 4597 + .stack_depth = 40, 4598 + }, 4599 + /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */ 4600 + { 4601 + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return", 4602 + .u.insns_int = { 4603 + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), 4604 + BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), 4605 + BPF_ALU64_REG(BPF_MOV, R0, R1), 4606 + BPF_STX_MEM(BPF_DW, R10, R1, -40), 4607 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), 4608 + BPF_JMP_REG(BPF_JNE, R0, R1, 1), 5541 4609 BPF_ALU64_REG(BPF_SUB, R0, R1), 5542 4610 BPF_EXIT_INSN(), 5543 4611 }, ··· 5784 4378 .stack_depth = 40, 5785 4379 }, 5786 4380 { 5787 - "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", 4381 + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store", 5788 4382 .u.insns_int = { 5789 - BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 5790 - BPF_ST_MEM(BPF_DW, R10, -40, 0x10), 5791 - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40), 4383 + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), 4384 + BPF_LD_IMM64(R2, 
0xfecdba9876543210ULL), 4385 + BPF_ALU64_REG(BPF_MOV, R0, R1), 4386 + BPF_STX_MEM(BPF_DW, R10, R0, -40), 4387 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), 4388 + BPF_LDX_MEM(BPF_DW, R0, R10, -40), 4389 + BPF_JMP_REG(BPF_JNE, R0, R2, 1), 4390 + BPF_ALU64_REG(BPF_SUB, R0, R2), 5792 4391 BPF_EXIT_INSN(), 5793 4392 }, 5794 4393 INTERNAL, 5795 4394 { }, 5796 - { { 0, 0x12 } }, 4395 + { { 0, 0 } }, 5797 4396 .stack_depth = 40, 5798 4397 }, 5799 4398 { 5800 - "STX_XADD_DW: X + 1 + 1 + 1 + ...", 5801 - { }, 4399 + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return", 4400 + .u.insns_int = { 4401 + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), 4402 + BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), 4403 + BPF_ALU64_REG(BPF_MOV, R0, R1), 4404 + BPF_ALU64_IMM(BPF_ADD, R0, 1), 4405 + BPF_STX_MEM(BPF_DW, R10, R1, -40), 4406 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), 4407 + BPF_JMP_REG(BPF_JNE, R0, R1, 1), 4408 + BPF_ALU64_REG(BPF_SUB, R0, R1), 4409 + BPF_EXIT_INSN(), 4410 + }, 5802 4411 INTERNAL, 5803 4412 { }, 5804 - { { 0, 4134 } }, 5805 - .fill_helper = bpf_fill_stxdw, 4413 + { { 0, 0 } }, 4414 + .stack_depth = 40, 4415 + }, 4416 + { 4417 + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store", 4418 + .u.insns_int = { 4419 + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), 4420 + BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), 4421 + BPF_ALU64_REG(BPF_MOV, R0, R1), 4422 + BPF_ALU64_IMM(BPF_ADD, R0, 1), 4423 + BPF_STX_MEM(BPF_DW, R10, R1, -40), 4424 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), 4425 + BPF_LDX_MEM(BPF_DW, R0, R10, -40), 4426 + BPF_JMP_REG(BPF_JNE, R0, R1, 1), 4427 + BPF_ALU64_REG(BPF_SUB, R0, R1), 4428 + BPF_EXIT_INSN(), 4429 + }, 4430 + INTERNAL, 4431 + { }, 4432 + { { 0, 0 } }, 4433 + .stack_depth = 40, 4434 + }, 4435 + { 4436 + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects", 4437 + .u.insns_int = { 4438 + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), 4439 + BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), 4440 + BPF_ALU64_REG(BPF_MOV, R0, R1), 4441 + 
BPF_STX_MEM(BPF_DW, R10, R1, -40), 4442 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), 4443 + BPF_LD_IMM64(R0, 0xfecdba9876543210ULL), 4444 + BPF_JMP_REG(BPF_JNE, R0, R2, 1), 4445 + BPF_ALU64_REG(BPF_SUB, R0, R2), 4446 + BPF_EXIT_INSN(), 4447 + }, 4448 + INTERNAL, 4449 + { }, 4450 + { { 0, 0 } }, 4451 + .stack_depth = 40, 4452 + }, 4453 + /* BPF_JMP32 | BPF_JEQ | BPF_K */ 4454 + { 4455 + "JMP32_JEQ_K: Small immediate", 4456 + .u.insns_int = { 4457 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 4458 + BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1), 4459 + BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), 4460 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4461 + BPF_EXIT_INSN(), 4462 + }, 4463 + INTERNAL, 4464 + { }, 4465 + { { 0, 123 } } 4466 + }, 4467 + { 4468 + "JMP32_JEQ_K: Large immediate", 4469 + .u.insns_int = { 4470 + BPF_ALU32_IMM(BPF_MOV, R0, 12345678), 4471 + BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1), 4472 + BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1), 4473 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4474 + BPF_EXIT_INSN(), 4475 + }, 4476 + INTERNAL, 4477 + { }, 4478 + { { 0, 12345678 } } 4479 + }, 4480 + { 4481 + "JMP32_JEQ_K: negative immediate", 4482 + .u.insns_int = { 4483 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4484 + BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), 4485 + BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1), 4486 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4487 + BPF_EXIT_INSN(), 4488 + }, 4489 + INTERNAL, 4490 + { }, 4491 + { { 0, -123 } } 4492 + }, 4493 + /* BPF_JMP32 | BPF_JEQ | BPF_X */ 4494 + { 4495 + "JMP32_JEQ_X", 4496 + .u.insns_int = { 4497 + BPF_ALU32_IMM(BPF_MOV, R0, 1234), 4498 + BPF_ALU32_IMM(BPF_MOV, R1, 4321), 4499 + BPF_JMP32_REG(BPF_JEQ, R0, R1, 2), 4500 + BPF_ALU32_IMM(BPF_MOV, R1, 1234), 4501 + BPF_JMP32_REG(BPF_JEQ, R0, R1, 1), 4502 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4503 + BPF_EXIT_INSN(), 4504 + }, 4505 + INTERNAL, 4506 + { }, 4507 + { { 0, 1234 } } 4508 + }, 4509 + /* BPF_JMP32 | BPF_JNE | BPF_K */ 4510 + { 4511 + "JMP32_JNE_K: Small immediate", 4512 + .u.insns_int = { 4513 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 
4514 + BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), 4515 + BPF_JMP32_IMM(BPF_JNE, R0, 321, 1), 4516 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4517 + BPF_EXIT_INSN(), 4518 + }, 4519 + INTERNAL, 4520 + { }, 4521 + { { 0, 123 } } 4522 + }, 4523 + { 4524 + "JMP32_JNE_K: Large immediate", 4525 + .u.insns_int = { 4526 + BPF_ALU32_IMM(BPF_MOV, R0, 12345678), 4527 + BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1), 4528 + BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1), 4529 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4530 + BPF_EXIT_INSN(), 4531 + }, 4532 + INTERNAL, 4533 + { }, 4534 + { { 0, 12345678 } } 4535 + }, 4536 + { 4537 + "JMP32_JNE_K: negative immediate", 4538 + .u.insns_int = { 4539 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4540 + BPF_JMP32_IMM(BPF_JNE, R0, -123, 1), 4541 + BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), 4542 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4543 + BPF_EXIT_INSN(), 4544 + }, 4545 + INTERNAL, 4546 + { }, 4547 + { { 0, -123 } } 4548 + }, 4549 + /* BPF_JMP32 | BPF_JNE | BPF_X */ 4550 + { 4551 + "JMP32_JNE_X", 4552 + .u.insns_int = { 4553 + BPF_ALU32_IMM(BPF_MOV, R0, 1234), 4554 + BPF_ALU32_IMM(BPF_MOV, R1, 1234), 4555 + BPF_JMP32_REG(BPF_JNE, R0, R1, 2), 4556 + BPF_ALU32_IMM(BPF_MOV, R1, 4321), 4557 + BPF_JMP32_REG(BPF_JNE, R0, R1, 1), 4558 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4559 + BPF_EXIT_INSN(), 4560 + }, 4561 + INTERNAL, 4562 + { }, 4563 + { { 0, 1234 } } 4564 + }, 4565 + /* BPF_JMP32 | BPF_JSET | BPF_K */ 4566 + { 4567 + "JMP32_JSET_K: Small immediate", 4568 + .u.insns_int = { 4569 + BPF_ALU32_IMM(BPF_MOV, R0, 1), 4570 + BPF_JMP32_IMM(BPF_JSET, R0, 2, 1), 4571 + BPF_JMP32_IMM(BPF_JSET, R0, 3, 1), 4572 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4573 + BPF_EXIT_INSN(), 4574 + }, 4575 + INTERNAL, 4576 + { }, 4577 + { { 0, 1 } } 4578 + }, 4579 + { 4580 + "JMP32_JSET_K: Large immediate", 4581 + .u.insns_int = { 4582 + BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000), 4583 + BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1), 4584 + BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1), 4585 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4586 + 
BPF_EXIT_INSN(), 4587 + }, 4588 + INTERNAL, 4589 + { }, 4590 + { { 0, 0x40000000 } } 4591 + }, 4592 + { 4593 + "JMP32_JSET_K: negative immediate", 4594 + .u.insns_int = { 4595 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4596 + BPF_JMP32_IMM(BPF_JSET, R0, -1, 1), 4597 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4598 + BPF_EXIT_INSN(), 4599 + }, 4600 + INTERNAL, 4601 + { }, 4602 + { { 0, -123 } } 4603 + }, 4604 + /* BPF_JMP32 | BPF_JSET | BPF_X */ 4605 + { 4606 + "JMP32_JSET_X", 4607 + .u.insns_int = { 4608 + BPF_ALU32_IMM(BPF_MOV, R0, 8), 4609 + BPF_ALU32_IMM(BPF_MOV, R1, 7), 4610 + BPF_JMP32_REG(BPF_JSET, R0, R1, 2), 4611 + BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2), 4612 + BPF_JMP32_REG(BPF_JNE, R0, R1, 1), 4613 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4614 + BPF_EXIT_INSN(), 4615 + }, 4616 + INTERNAL, 4617 + { }, 4618 + { { 0, 8 } } 4619 + }, 4620 + /* BPF_JMP32 | BPF_JGT | BPF_K */ 4621 + { 4622 + "JMP32_JGT_K: Small immediate", 4623 + .u.insns_int = { 4624 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 4625 + BPF_JMP32_IMM(BPF_JGT, R0, 123, 1), 4626 + BPF_JMP32_IMM(BPF_JGT, R0, 122, 1), 4627 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4628 + BPF_EXIT_INSN(), 4629 + }, 4630 + INTERNAL, 4631 + { }, 4632 + { { 0, 123 } } 4633 + }, 4634 + { 4635 + "JMP32_JGT_K: Large immediate", 4636 + .u.insns_int = { 4637 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4638 + BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1), 4639 + BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1), 4640 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4641 + BPF_EXIT_INSN(), 4642 + }, 4643 + INTERNAL, 4644 + { }, 4645 + { { 0, 0xfffffffe } } 4646 + }, 4647 + /* BPF_JMP32 | BPF_JGT | BPF_X */ 4648 + { 4649 + "JMP32_JGT_X", 4650 + .u.insns_int = { 4651 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4652 + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), 4653 + BPF_JMP32_REG(BPF_JGT, R0, R1, 2), 4654 + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), 4655 + BPF_JMP32_REG(BPF_JGT, R0, R1, 1), 4656 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4657 + BPF_EXIT_INSN(), 4658 + }, 4659 + INTERNAL, 4660 + { }, 4661 + { { 0, 0xfffffffe 
} } 4662 + }, 4663 + /* BPF_JMP32 | BPF_JGE | BPF_K */ 4664 + { 4665 + "JMP32_JGE_K: Small immediate", 4666 + .u.insns_int = { 4667 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 4668 + BPF_JMP32_IMM(BPF_JGE, R0, 124, 1), 4669 + BPF_JMP32_IMM(BPF_JGE, R0, 123, 1), 4670 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4671 + BPF_EXIT_INSN(), 4672 + }, 4673 + INTERNAL, 4674 + { }, 4675 + { { 0, 123 } } 4676 + }, 4677 + { 4678 + "JMP32_JGE_K: Large immediate", 4679 + .u.insns_int = { 4680 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4681 + BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1), 4682 + BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1), 4683 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4684 + BPF_EXIT_INSN(), 4685 + }, 4686 + INTERNAL, 4687 + { }, 4688 + { { 0, 0xfffffffe } } 4689 + }, 4690 + /* BPF_JMP32 | BPF_JGE | BPF_X */ 4691 + { 4692 + "JMP32_JGE_X", 4693 + .u.insns_int = { 4694 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4695 + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), 4696 + BPF_JMP32_REG(BPF_JGE, R0, R1, 2), 4697 + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), 4698 + BPF_JMP32_REG(BPF_JGE, R0, R1, 1), 4699 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4700 + BPF_EXIT_INSN(), 4701 + }, 4702 + INTERNAL, 4703 + { }, 4704 + { { 0, 0xfffffffe } } 4705 + }, 4706 + /* BPF_JMP32 | BPF_JLT | BPF_K */ 4707 + { 4708 + "JMP32_JLT_K: Small immediate", 4709 + .u.insns_int = { 4710 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 4711 + BPF_JMP32_IMM(BPF_JLT, R0, 123, 1), 4712 + BPF_JMP32_IMM(BPF_JLT, R0, 124, 1), 4713 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4714 + BPF_EXIT_INSN(), 4715 + }, 4716 + INTERNAL, 4717 + { }, 4718 + { { 0, 123 } } 4719 + }, 4720 + { 4721 + "JMP32_JLT_K: Large immediate", 4722 + .u.insns_int = { 4723 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4724 + BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1), 4725 + BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1), 4726 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4727 + BPF_EXIT_INSN(), 4728 + }, 4729 + INTERNAL, 4730 + { }, 4731 + { { 0, 0xfffffffe } } 4732 + }, 4733 + /* BPF_JMP32 | BPF_JLT | BPF_X */ 4734 + { 4735 + 
"JMP32_JLT_X", 4736 + .u.insns_int = { 4737 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4738 + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), 4739 + BPF_JMP32_REG(BPF_JLT, R0, R1, 2), 4740 + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), 4741 + BPF_JMP32_REG(BPF_JLT, R0, R1, 1), 4742 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4743 + BPF_EXIT_INSN(), 4744 + }, 4745 + INTERNAL, 4746 + { }, 4747 + { { 0, 0xfffffffe } } 4748 + }, 4749 + /* BPF_JMP32 | BPF_JLE | BPF_K */ 4750 + { 4751 + "JMP32_JLE_K: Small immediate", 4752 + .u.insns_int = { 4753 + BPF_ALU32_IMM(BPF_MOV, R0, 123), 4754 + BPF_JMP32_IMM(BPF_JLE, R0, 122, 1), 4755 + BPF_JMP32_IMM(BPF_JLE, R0, 123, 1), 4756 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4757 + BPF_EXIT_INSN(), 4758 + }, 4759 + INTERNAL, 4760 + { }, 4761 + { { 0, 123 } } 4762 + }, 4763 + { 4764 + "JMP32_JLE_K: Large immediate", 4765 + .u.insns_int = { 4766 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4767 + BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1), 4768 + BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1), 4769 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4770 + BPF_EXIT_INSN(), 4771 + }, 4772 + INTERNAL, 4773 + { }, 4774 + { { 0, 0xfffffffe } } 4775 + }, 4776 + /* BPF_JMP32 | BPF_JLE | BPF_X */ 4777 + { 4778 + "JMP32_JLE_X", 4779 + .u.insns_int = { 4780 + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), 4781 + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), 4782 + BPF_JMP32_REG(BPF_JLE, R0, R1, 2), 4783 + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), 4784 + BPF_JMP32_REG(BPF_JLE, R0, R1, 1), 4785 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4786 + BPF_EXIT_INSN(), 4787 + }, 4788 + INTERNAL, 4789 + { }, 4790 + { { 0, 0xfffffffe } } 4791 + }, 4792 + /* BPF_JMP32 | BPF_JSGT | BPF_K */ 4793 + { 4794 + "JMP32_JSGT_K: Small immediate", 4795 + .u.insns_int = { 4796 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4797 + BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1), 4798 + BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1), 4799 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4800 + BPF_EXIT_INSN(), 4801 + }, 4802 + INTERNAL, 4803 + { }, 4804 + { { 0, -123 } } 4805 + }, 4806 + { 4807 + 
"JMP32_JSGT_K: Large immediate", 4808 + .u.insns_int = { 4809 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4810 + BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1), 4811 + BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1), 4812 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4813 + BPF_EXIT_INSN(), 4814 + }, 4815 + INTERNAL, 4816 + { }, 4817 + { { 0, -12345678 } } 4818 + }, 4819 + /* BPF_JMP32 | BPF_JSGT | BPF_X */ 4820 + { 4821 + "JMP32_JSGT_X", 4822 + .u.insns_int = { 4823 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4824 + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), 4825 + BPF_JMP32_REG(BPF_JSGT, R0, R1, 2), 4826 + BPF_ALU32_IMM(BPF_MOV, R1, -12345679), 4827 + BPF_JMP32_REG(BPF_JSGT, R0, R1, 1), 4828 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4829 + BPF_EXIT_INSN(), 4830 + }, 4831 + INTERNAL, 4832 + { }, 4833 + { { 0, -12345678 } } 4834 + }, 4835 + /* BPF_JMP32 | BPF_JSGE | BPF_K */ 4836 + { 4837 + "JMP32_JSGE_K: Small immediate", 4838 + .u.insns_int = { 4839 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4840 + BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1), 4841 + BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1), 4842 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4843 + BPF_EXIT_INSN(), 4844 + }, 4845 + INTERNAL, 4846 + { }, 4847 + { { 0, -123 } } 4848 + }, 4849 + { 4850 + "JMP32_JSGE_K: Large immediate", 4851 + .u.insns_int = { 4852 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4853 + BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1), 4854 + BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1), 4855 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4856 + BPF_EXIT_INSN(), 4857 + }, 4858 + INTERNAL, 4859 + { }, 4860 + { { 0, -12345678 } } 4861 + }, 4862 + /* BPF_JMP32 | BPF_JSGE | BPF_X */ 4863 + { 4864 + "JMP32_JSGE_X", 4865 + .u.insns_int = { 4866 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4867 + BPF_ALU32_IMM(BPF_MOV, R1, -12345677), 4868 + BPF_JMP32_REG(BPF_JSGE, R0, R1, 2), 4869 + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), 4870 + BPF_JMP32_REG(BPF_JSGE, R0, R1, 1), 4871 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4872 + BPF_EXIT_INSN(), 4873 + }, 4874 + INTERNAL, 4875 + { }, 4876 + { { 0, -12345678 } } 4877 + 
}, 4878 + /* BPF_JMP32 | BPF_JSLT | BPF_K */ 4879 + { 4880 + "JMP32_JSLT_K: Small immediate", 4881 + .u.insns_int = { 4882 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4883 + BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1), 4884 + BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1), 4885 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4886 + BPF_EXIT_INSN(), 4887 + }, 4888 + INTERNAL, 4889 + { }, 4890 + { { 0, -123 } } 4891 + }, 4892 + { 4893 + "JMP32_JSLT_K: Large immediate", 4894 + .u.insns_int = { 4895 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4896 + BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1), 4897 + BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1), 4898 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4899 + BPF_EXIT_INSN(), 4900 + }, 4901 + INTERNAL, 4902 + { }, 4903 + { { 0, -12345678 } } 4904 + }, 4905 + /* BPF_JMP32 | BPF_JSLT | BPF_X */ 4906 + { 4907 + "JMP32_JSLT_X", 4908 + .u.insns_int = { 4909 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4910 + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), 4911 + BPF_JMP32_REG(BPF_JSLT, R0, R1, 2), 4912 + BPF_ALU32_IMM(BPF_MOV, R1, -12345677), 4913 + BPF_JMP32_REG(BPF_JSLT, R0, R1, 1), 4914 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4915 + BPF_EXIT_INSN(), 4916 + }, 4917 + INTERNAL, 4918 + { }, 4919 + { { 0, -12345678 } } 4920 + }, 4921 + /* BPF_JMP32 | BPF_JSLE | BPF_K */ 4922 + { 4923 + "JMP32_JSLE_K: Small immediate", 4924 + .u.insns_int = { 4925 + BPF_ALU32_IMM(BPF_MOV, R0, -123), 4926 + BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1), 4927 + BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1), 4928 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4929 + BPF_EXIT_INSN(), 4930 + }, 4931 + INTERNAL, 4932 + { }, 4933 + { { 0, -123 } } 4934 + }, 4935 + { 4936 + "JMP32_JSLE_K: Large immediate", 4937 + .u.insns_int = { 4938 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4939 + BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1), 4940 + BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1), 4941 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4942 + BPF_EXIT_INSN(), 4943 + }, 4944 + INTERNAL, 4945 + { }, 4946 + { { 0, -12345678 } } 4947 + }, 4948 + /* BPF_JMP32 | BPF_JSLE | BPF_K */ 4949 + { 4950 + 
"JMP32_JSLE_X", 4951 + .u.insns_int = { 4952 + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), 4953 + BPF_ALU32_IMM(BPF_MOV, R1, -12345679), 4954 + BPF_JMP32_REG(BPF_JSLE, R0, R1, 2), 4955 + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), 4956 + BPF_JMP32_REG(BPF_JSLE, R0, R1, 1), 4957 + BPF_ALU32_IMM(BPF_MOV, R0, 0), 4958 + BPF_EXIT_INSN(), 4959 + }, 4960 + INTERNAL, 4961 + { }, 4962 + { { 0, -12345678 } } 5806 4963 }, 5807 4964 /* BPF_JMP | BPF_EXIT */ 5808 4965 { ··· 7191 5222 INTERNAL, 7192 5223 { }, 7193 5224 { { 0, 1 } }, 5225 + }, 5226 + { /* Mainly checking JIT here. */ 5227 + "BPF_MAXINSNS: Very long conditional jump", 5228 + { }, 5229 + INTERNAL | FLAG_NO_DATA, 5230 + { }, 5231 + { { 0, 1 } }, 5232 + .fill_helper = bpf_fill_long_jmp, 7194 5233 }, 7195 5234 { 7196 5235 "JMP_JA: Jump, gap, jump, ...", ··· 8989 7012 return err_cnt ? -EINVAL : 0; 8990 7013 } 8991 7014 7015 + struct tail_call_test { 7016 + const char *descr; 7017 + struct bpf_insn insns[MAX_INSNS]; 7018 + int result; 7019 + int stack_depth; 7020 + }; 7021 + 7022 + /* 7023 + * Magic marker used in test snippets for tail calls below. 7024 + * BPF_LD/MOV to R2 and R2 with this immediate value is replaced 7025 + * with the proper values by the test runner. 7026 + */ 7027 + #define TAIL_CALL_MARKER 0x7a11ca11 7028 + 7029 + /* Special offset to indicate a NULL call target */ 7030 + #define TAIL_CALL_NULL 0x7fff 7031 + 7032 + /* Special offset to indicate an out-of-range index */ 7033 + #define TAIL_CALL_INVALID 0x7ffe 7034 + 7035 + #define TAIL_CALL(offset) \ 7036 + BPF_LD_IMM64(R2, TAIL_CALL_MARKER), \ 7037 + BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \ 7038 + offset, TAIL_CALL_MARKER), \ 7039 + BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0) 7040 + 7041 + /* 7042 + * Tail call tests. Each test case may call any other test in the table, 7043 + * including itself, specified as a relative index offset from the calling 7044 + * test. 
The index TAIL_CALL_NULL can be used to specify a NULL target 7045 + * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID 7046 + * results in a target index that is out of range. 7047 + */ 7048 + static struct tail_call_test tail_call_tests[] = { 7049 + { 7050 + "Tail call leaf", 7051 + .insns = { 7052 + BPF_ALU64_REG(BPF_MOV, R0, R1), 7053 + BPF_ALU64_IMM(BPF_ADD, R0, 1), 7054 + BPF_EXIT_INSN(), 7055 + }, 7056 + .result = 1, 7057 + }, 7058 + { 7059 + "Tail call 2", 7060 + .insns = { 7061 + BPF_ALU64_IMM(BPF_ADD, R1, 2), 7062 + TAIL_CALL(-1), 7063 + BPF_ALU64_IMM(BPF_MOV, R0, -1), 7064 + BPF_EXIT_INSN(), 7065 + }, 7066 + .result = 3, 7067 + }, 7068 + { 7069 + "Tail call 3", 7070 + .insns = { 7071 + BPF_ALU64_IMM(BPF_ADD, R1, 3), 7072 + TAIL_CALL(-1), 7073 + BPF_ALU64_IMM(BPF_MOV, R0, -1), 7074 + BPF_EXIT_INSN(), 7075 + }, 7076 + .result = 6, 7077 + }, 7078 + { 7079 + "Tail call 4", 7080 + .insns = { 7081 + BPF_ALU64_IMM(BPF_ADD, R1, 4), 7082 + TAIL_CALL(-1), 7083 + BPF_ALU64_IMM(BPF_MOV, R0, -1), 7084 + BPF_EXIT_INSN(), 7085 + }, 7086 + .result = 10, 7087 + }, 7088 + { 7089 + "Tail call error path, max count reached", 7090 + .insns = { 7091 + BPF_ALU64_IMM(BPF_ADD, R1, 1), 7092 + BPF_ALU64_REG(BPF_MOV, R0, R1), 7093 + TAIL_CALL(0), 7094 + BPF_EXIT_INSN(), 7095 + }, 7096 + .result = MAX_TAIL_CALL_CNT + 1, 7097 + }, 7098 + { 7099 + "Tail call error path, NULL target", 7100 + .insns = { 7101 + BPF_ALU64_IMM(BPF_MOV, R0, -1), 7102 + TAIL_CALL(TAIL_CALL_NULL), 7103 + BPF_ALU64_IMM(BPF_MOV, R0, 1), 7104 + BPF_EXIT_INSN(), 7105 + }, 7106 + .result = 1, 7107 + }, 7108 + { 7109 + "Tail call error path, index out of range", 7110 + .insns = { 7111 + BPF_ALU64_IMM(BPF_MOV, R0, -1), 7112 + TAIL_CALL(TAIL_CALL_INVALID), 7113 + BPF_ALU64_IMM(BPF_MOV, R0, 1), 7114 + BPF_EXIT_INSN(), 7115 + }, 7116 + .result = 1, 7117 + }, 7118 + }; 7119 + 7120 + static void __init destroy_tail_call_tests(struct bpf_array *progs) 7121 + { 7122 + int i; 7123 + 7124 + for (i 
= 0; i < ARRAY_SIZE(tail_call_tests); i++) 7125 + if (progs->ptrs[i]) 7126 + bpf_prog_free(progs->ptrs[i]); 7127 + kfree(progs); 7128 + } 7129 + 7130 + static __init int prepare_tail_call_tests(struct bpf_array **pprogs) 7131 + { 7132 + int ntests = ARRAY_SIZE(tail_call_tests); 7133 + struct bpf_array *progs; 7134 + int which, err; 7135 + 7136 + /* Allocate the table of programs to be used for tall calls */ 7137 + progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]), 7138 + GFP_KERNEL); 7139 + if (!progs) 7140 + goto out_nomem; 7141 + 7142 + /* Create all eBPF programs and populate the table */ 7143 + for (which = 0; which < ntests; which++) { 7144 + struct tail_call_test *test = &tail_call_tests[which]; 7145 + struct bpf_prog *fp; 7146 + int len, i; 7147 + 7148 + /* Compute the number of program instructions */ 7149 + for (len = 0; len < MAX_INSNS; len++) { 7150 + struct bpf_insn *insn = &test->insns[len]; 7151 + 7152 + if (len < MAX_INSNS - 1 && 7153 + insn->code == (BPF_LD | BPF_DW | BPF_IMM)) 7154 + len++; 7155 + if (insn->code == 0) 7156 + break; 7157 + } 7158 + 7159 + /* Allocate and initialize the program */ 7160 + fp = bpf_prog_alloc(bpf_prog_size(len), 0); 7161 + if (!fp) 7162 + goto out_nomem; 7163 + 7164 + fp->len = len; 7165 + fp->type = BPF_PROG_TYPE_SOCKET_FILTER; 7166 + fp->aux->stack_depth = test->stack_depth; 7167 + memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn)); 7168 + 7169 + /* Relocate runtime tail call offsets and addresses */ 7170 + for (i = 0; i < len; i++) { 7171 + struct bpf_insn *insn = &fp->insnsi[i]; 7172 + 7173 + if (insn->imm != TAIL_CALL_MARKER) 7174 + continue; 7175 + 7176 + switch (insn->code) { 7177 + case BPF_LD | BPF_DW | BPF_IMM: 7178 + insn[0].imm = (u32)(long)progs; 7179 + insn[1].imm = ((u64)(long)progs) >> 32; 7180 + break; 7181 + 7182 + case BPF_ALU | BPF_MOV | BPF_K: 7183 + if (insn->off == TAIL_CALL_NULL) 7184 + insn->imm = ntests; 7185 + else if (insn->off == TAIL_CALL_INVALID) 7186 + 
insn->imm = ntests + 1; 7187 + else 7188 + insn->imm = which + insn->off; 7189 + insn->off = 0; 7190 + } 7191 + } 7192 + 7193 + fp = bpf_prog_select_runtime(fp, &err); 7194 + if (err) 7195 + goto out_err; 7196 + 7197 + progs->ptrs[which] = fp; 7198 + } 7199 + 7200 + /* The last entry contains a NULL program pointer */ 7201 + progs->map.max_entries = ntests + 1; 7202 + *pprogs = progs; 7203 + return 0; 7204 + 7205 + out_nomem: 7206 + err = -ENOMEM; 7207 + 7208 + out_err: 7209 + if (progs) 7210 + destroy_tail_call_tests(progs); 7211 + return err; 7212 + } 7213 + 7214 + static __init int test_tail_calls(struct bpf_array *progs) 7215 + { 7216 + int i, err_cnt = 0, pass_cnt = 0; 7217 + int jit_cnt = 0, run_cnt = 0; 7218 + 7219 + for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) { 7220 + struct tail_call_test *test = &tail_call_tests[i]; 7221 + struct bpf_prog *fp = progs->ptrs[i]; 7222 + u64 duration; 7223 + int ret; 7224 + 7225 + cond_resched(); 7226 + 7227 + pr_info("#%d %s ", i, test->descr); 7228 + if (!fp) { 7229 + err_cnt++; 7230 + continue; 7231 + } 7232 + pr_cont("jited:%u ", fp->jited); 7233 + 7234 + run_cnt++; 7235 + if (fp->jited) 7236 + jit_cnt++; 7237 + 7238 + ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration); 7239 + if (ret == test->result) { 7240 + pr_cont("%lld PASS", duration); 7241 + pass_cnt++; 7242 + } else { 7243 + pr_cont("ret %d != %d FAIL", ret, test->result); 7244 + err_cnt++; 7245 + } 7246 + } 7247 + 7248 + pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n", 7249 + __func__, pass_cnt, err_cnt, jit_cnt, run_cnt); 7250 + 7251 + return err_cnt ? 
-EINVAL : 0; 7252 + } 7253 + 8992 7254 static int __init test_bpf_init(void) 8993 7255 { 7256 + struct bpf_array *progs = NULL; 8994 7257 int ret; 8995 7258 8996 7259 ret = prepare_bpf_tests(); ··· 9239 7022 9240 7023 ret = test_bpf(); 9241 7024 destroy_bpf_tests(); 7025 + if (ret) 7026 + return ret; 7027 + 7028 + ret = prepare_tail_call_tests(&progs); 7029 + if (ret) 7030 + return ret; 7031 + ret = test_tail_calls(progs); 7032 + destroy_tail_call_tests(progs); 9242 7033 if (ret) 9243 7034 return ret; 9244 7035
+1 -2
net/bpf/test_run.c
··· 763 763 if (prog->expected_attach_type == BPF_XDP_DEVMAP || 764 764 prog->expected_attach_type == BPF_XDP_CPUMAP) 765 765 return -EINVAL; 766 - if (kattr->test.ctx_in || kattr->test.ctx_out) 767 - return -EINVAL; 766 + 768 767 ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md)); 769 768 if (IS_ERR(ctx)) 770 769 return PTR_ERR(ctx);
+13 -2
net/core/dev.c
··· 7532 7532 { 7533 7533 struct netdev_adjacent *lower; 7534 7534 7535 - WARN_ON_ONCE(!rcu_read_lock_held()); 7535 + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7536 7536 7537 7537 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7538 7538 ··· 9297 9297 return dev->xdp_state[mode].prog; 9298 9298 } 9299 9299 9300 - static u8 dev_xdp_prog_count(struct net_device *dev) 9300 + u8 dev_xdp_prog_count(struct net_device *dev) 9301 9301 { 9302 9302 u8 count = 0; 9303 9303 int i; ··· 9307 9307 count++; 9308 9308 return count; 9309 9309 } 9310 + EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9310 9311 9311 9312 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9312 9313 { ··· 9401 9400 { 9402 9401 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9403 9402 struct bpf_prog *cur_prog; 9403 + struct net_device *upper; 9404 + struct list_head *iter; 9404 9405 enum bpf_xdp_mode mode; 9405 9406 bpf_op_t bpf_op; 9406 9407 int err; ··· 9439 9436 if (dev_xdp_link(dev, mode)) { 9440 9437 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9441 9438 return -EBUSY; 9439 + } 9440 + 9441 + /* don't allow if an upper device already has a program */ 9442 + netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9443 + if (dev_xdp_prog_count(upper) > 0) { 9444 + NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9445 + return -EEXIST; 9446 + } 9442 9447 } 9443 9448 9444 9449 cur_prog = dev_xdp_prog(dev, mode);
+25
net/core/filter.c
··· 3933 3933 } 3934 3934 } 3935 3935 3936 + DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); 3937 + EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key); 3938 + 3939 + u32 xdp_master_redirect(struct xdp_buff *xdp) 3940 + { 3941 + struct net_device *master, *slave; 3942 + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 3943 + 3944 + master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev); 3945 + slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp); 3946 + if (slave && slave != xdp->rxq->dev) { 3947 + /* The target device is different from the receiving device, so 3948 + * redirect it to the new device. 3949 + * Using XDP_REDIRECT gets the correct behaviour from XDP enabled 3950 + * drivers to unmap the packet from their rx ring. 3951 + */ 3952 + ri->tgt_index = slave->ifindex; 3953 + ri->map_id = INT_MAX; 3954 + ri->map_type = BPF_MAP_TYPE_UNSPEC; 3955 + return XDP_REDIRECT; 3956 + } 3957 + return XDP_TX; 3958 + } 3959 + EXPORT_SYMBOL_GPL(xdp_master_redirect); 3960 + 3936 3961 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, 3937 3962 struct bpf_prog *xdp_prog) 3938 3963 {
+3
net/unix/unix_bpf.c
··· 105 105 106 106 int unix_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) 107 107 { 108 + if (sk->sk_type != SOCK_DGRAM) 109 + return -EOPNOTSUPP; 110 + 108 111 if (restore) { 109 112 sk->sk_write_space = psock->saved_write_space; 110 113 WRITE_ONCE(sk->sk_prot, psock->sk_proto);
+2
samples/bpf/xdp1_kern.c
··· 57 57 58 58 h_proto = eth->h_proto; 59 59 60 + /* Handle VLAN tagged packet */ 60 61 if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { 61 62 struct vlan_hdr *vhdr; 62 63 ··· 67 66 return rc; 68 67 h_proto = vhdr->h_vlan_encapsulated_proto; 69 68 } 69 + /* Handle double VLAN tagged packet */ 70 70 if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { 71 71 struct vlan_hdr *vhdr; 72 72
+2
samples/bpf/xdp2_kern.c
··· 73 73 74 74 h_proto = eth->h_proto; 75 75 76 + /* Handle VLAN tagged packet */ 76 77 if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { 77 78 struct vlan_hdr *vhdr; 78 79 ··· 83 82 return rc; 84 83 h_proto = vhdr->h_vlan_encapsulated_proto; 85 84 } 85 + /* Handle double VLAN tagged packet */ 86 86 if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { 87 87 struct vlan_hdr *vhdr; 88 88
+1 -1
samples/bpf/xdp_redirect_cpu_user.c
··· 841 841 memset(cpu, 0, n_cpus * sizeof(int)); 842 842 843 843 /* Parse commands line args */ 844 - while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:", 844 + while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:n", 845 845 long_options, &longindex)) != -1) { 846 846 switch (opt) { 847 847 case 'd':
+8 -12
samples/bpf/xdpsock_user.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Copyright(c) 2017 - 2018 Intel Corporation. */ 3 3 4 - #include <asm/barrier.h> 5 4 #include <errno.h> 6 5 #include <getopt.h> 7 6 #include <libgen.h> 8 7 #include <linux/bpf.h> 9 - #include <linux/compiler.h> 10 8 #include <linux/if_link.h> 11 9 #include <linux/if_xdp.h> 12 10 #include <linux/if_ether.h> ··· 651 653 return result; 652 654 } 653 655 654 - __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 655 - 656 656 /* 657 657 * This is a version of ip_compute_csum() optimized for IP headers, 658 658 * which always checksum on 4 octet boundaries. 659 659 * This function code has been taken from 660 660 * Linux kernel lib/checksum.c 661 661 */ 662 - __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 662 + static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 663 663 { 664 - return (__force __sum16)~do_csum(iph, ihl * 4); 664 + return (__sum16)~do_csum(iph, ihl * 4); 665 665 } 666 666 667 667 /* ··· 669 673 */ 670 674 static inline __sum16 csum_fold(__wsum csum) 671 675 { 672 - u32 sum = (__force u32)csum; 676 + u32 sum = (u32)csum; 673 677 674 678 sum = (sum & 0xffff) + (sum >> 16); 675 679 sum = (sum & 0xffff) + (sum >> 16); 676 - return (__force __sum16)~sum; 680 + return (__sum16)~sum; 677 681 } 678 682 679 683 /* ··· 699 703 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 700 704 __u32 len, __u8 proto, __wsum sum) 701 705 { 702 - unsigned long long s = (__force u32)sum; 706 + unsigned long long s = (u32)sum; 703 707 704 - s += (__force u32)saddr; 705 - s += (__force u32)daddr; 708 + s += (u32)saddr; 709 + s += (u32)daddr; 706 710 #ifdef __BIG_ENDIAN__ 707 711 s += proto + len; 708 712 #else 709 713 s += (proto + len) << 8; 710 714 #endif 711 - return (__force __wsum)from64to32(s); 715 + return (__wsum)from64to32(s); 712 716 } 713 717 714 718 /*
-1
tools/testing/selftests/bpf/.gitignore
··· 23 23 test_cgroup_storage 24 24 test_flow_dissector 25 25 flow_dissector_load 26 - test_netcnt 27 26 test_tcpnotify_user 28 27 test_libbpf 29 28 test_tcp_check_syncookie_user
+1 -2
tools/testing/selftests/bpf/Makefile
··· 38 38 test_verifier_log test_dev_cgroup \ 39 39 test_sock test_sockmap get_cgroup_id_user \ 40 40 test_cgroup_storage \ 41 - test_netcnt test_tcpnotify_user test_sysctl \ 41 + test_tcpnotify_user test_sysctl \ 42 42 test_progs-no_alu32 43 43 44 44 # Also test bpf-gcc, if present ··· 197 197 $(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c 198 198 $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c 199 199 $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c 200 - $(OUTPUT)/test_netcnt: cgroup_helpers.c 201 200 $(OUTPUT)/test_sock_fields: cgroup_helpers.c 202 201 $(OUTPUT)/test_sysctl: cgroup_helpers.c 203 202
+12
tools/testing/selftests/bpf/network_helpers.c
··· 310 310 } 311 311 return -1; 312 312 } 313 + 314 + char *ping_command(int family) 315 + { 316 + if (family == AF_INET6) { 317 + /* On some systems 'ping' doesn't support IPv6, so use ping6 if it is present. */ 318 + if (!system("which ping6 >/dev/null 2>&1")) 319 + return "ping6"; 320 + else 321 + return "ping -6"; 322 + } 323 + return "ping"; 324 + }
+1
tools/testing/selftests/bpf/network_helpers.h
··· 46 46 int timeout_ms); 47 47 int make_sockaddr(int family, const char *addr_str, __u16 port, 48 48 struct sockaddr_storage *addr, socklen_t *len); 49 + char *ping_command(int family); 49 50 50 51 #endif
+82
tools/testing/selftests/bpf/prog_tests/netcnt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <sys/sysinfo.h> 4 + #include <test_progs.h> 5 + #include "network_helpers.h" 6 + #include "netcnt_prog.skel.h" 7 + #include "netcnt_common.h" 8 + 9 + #define CG_NAME "/netcnt" 10 + 11 + void test_netcnt(void) 12 + { 13 + union percpu_net_cnt *percpu_netcnt = NULL; 14 + struct bpf_cgroup_storage_key key; 15 + int map_fd, percpu_map_fd; 16 + struct netcnt_prog *skel; 17 + unsigned long packets; 18 + union net_cnt netcnt; 19 + unsigned long bytes; 20 + int cpu, nproc; 21 + int cg_fd = -1; 22 + char cmd[128]; 23 + 24 + skel = netcnt_prog__open_and_load(); 25 + if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load")) 26 + return; 27 + 28 + nproc = get_nprocs_conf(); 29 + percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); 30 + if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)")) 31 + goto err; 32 + 33 + cg_fd = test__join_cgroup(CG_NAME); 34 + if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup")) 35 + goto err; 36 + 37 + skel->links.bpf_nextcnt = bpf_program__attach_cgroup(skel->progs.bpf_nextcnt, cg_fd); 38 + if (!ASSERT_OK_PTR(skel->links.bpf_nextcnt, 39 + "attach_cgroup(bpf_nextcnt)")) 40 + goto err; 41 + 42 + snprintf(cmd, sizeof(cmd), "%s ::1 -A -c 10000 -q > /dev/null", ping_command(AF_INET6)); 43 + ASSERT_OK(system(cmd), cmd); 44 + 45 + map_fd = bpf_map__fd(skel->maps.netcnt); 46 + if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, &key), "bpf_map_get_next_key")) 47 + goto err; 48 + 49 + if (!ASSERT_OK(bpf_map_lookup_elem(map_fd, &key, &netcnt), "bpf_map_lookup_elem(netcnt)")) 50 + goto err; 51 + 52 + percpu_map_fd = bpf_map__fd(skel->maps.percpu_netcnt); 53 + if (!ASSERT_OK(bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0]), 54 + "bpf_map_lookup_elem(percpu_netcnt)")) 55 + goto err; 56 + 57 + /* Some packets can be still in per-cpu cache, but not more than 58 + * MAX_PERCPU_PACKETS. 
59 + */ 60 + packets = netcnt.packets; 61 + bytes = netcnt.bytes; 62 + for (cpu = 0; cpu < nproc; cpu++) { 63 + ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS"); 64 + 65 + packets += percpu_netcnt[cpu].packets; 66 + bytes += percpu_netcnt[cpu].bytes; 67 + } 68 + 69 + /* No packets should be lost */ 70 + ASSERT_EQ(packets, 10000, "packets"); 71 + 72 + /* Let's check that bytes counter matches the number of packets 73 + * multiplied by the size of ipv6 ICMP packet. 74 + */ 75 + ASSERT_EQ(bytes, packets * 104, "bytes"); 76 + 77 + err: 78 + if (cg_fd != -1) 79 + close(cg_fd); 80 + free(percpu_netcnt); 81 + netcnt_prog__destroy(skel); 82 + }
+2 -2
tools/testing/selftests/bpf/prog_tests/reference_tracking.c
··· 34 34 if (!test__start_subtest(title)) 35 35 continue; 36 36 37 - /* Expect verifier failure if test name has 'fail' */ 38 - if (strstr(title, "fail") != NULL) { 37 + /* Expect verifier failure if test name has 'err' */ 38 + if (strstr(title, "err_") != NULL) { 39 39 libbpf_print_fn_t old_print_fn; 40 40 41 41 old_print_fn = libbpf_set_print(NULL);
-12
tools/testing/selftests/bpf/prog_tests/tc_redirect.c
··· 390 390 close(client_fd); 391 391 } 392 392 393 - static char *ping_command(int family) 394 - { 395 - if (family == AF_INET6) { 396 - /* On some systems 'ping' doesn't support IPv6, so use ping6 if it is present. */ 397 - if (!system("which ping6 >/dev/null 2>&1")) 398 - return "ping6"; 399 - else 400 - return "ping -6"; 401 - } 402 - return "ping"; 403 - } 404 - 405 393 static int test_ping(int family, const char *addr) 406 394 { 407 395 SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr);
+520
tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + /** 4 + * Test XDP bonding support 5 + * 6 + * Sets up two bonded veth pairs between two fresh namespaces 7 + * and verifies that XDP_TX program loaded on a bond device 8 + * are correctly loaded onto the slave devices and XDP_TX'd 9 + * packets are balanced using bonding. 10 + */ 11 + 12 + #define _GNU_SOURCE 13 + #include <sched.h> 14 + #include <net/if.h> 15 + #include <linux/if_link.h> 16 + #include "test_progs.h" 17 + #include "network_helpers.h" 18 + #include <linux/if_bonding.h> 19 + #include <linux/limits.h> 20 + #include <linux/udp.h> 21 + 22 + #include "xdp_dummy.skel.h" 23 + #include "xdp_redirect_multi_kern.skel.h" 24 + #include "xdp_tx.skel.h" 25 + 26 + #define BOND1_MAC {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} 27 + #define BOND1_MAC_STR "00:11:22:33:44:55" 28 + #define BOND2_MAC {0x00, 0x22, 0x33, 0x44, 0x55, 0x66} 29 + #define BOND2_MAC_STR "00:22:33:44:55:66" 30 + #define NPACKETS 100 31 + 32 + static int root_netns_fd = -1; 33 + 34 + static void restore_root_netns(void) 35 + { 36 + ASSERT_OK(setns(root_netns_fd, CLONE_NEWNET), "restore_root_netns"); 37 + } 38 + 39 + static int setns_by_name(char *name) 40 + { 41 + int nsfd, err; 42 + char nspath[PATH_MAX]; 43 + 44 + snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); 45 + nsfd = open(nspath, O_RDONLY | O_CLOEXEC); 46 + if (nsfd < 0) 47 + return -1; 48 + 49 + err = setns(nsfd, CLONE_NEWNET); 50 + close(nsfd); 51 + return err; 52 + } 53 + 54 + static int get_rx_packets(const char *iface) 55 + { 56 + FILE *f; 57 + char line[512]; 58 + int iface_len = strlen(iface); 59 + 60 + f = fopen("/proc/net/dev", "r"); 61 + if (!f) 62 + return -1; 63 + 64 + while (fgets(line, sizeof(line), f)) { 65 + char *p = line; 66 + 67 + while (*p == ' ') 68 + p++; /* skip whitespace */ 69 + if (!strncmp(p, iface, iface_len)) { 70 + p += iface_len; 71 + if (*p++ != ':') 72 + continue; 73 + while (*p == ' ') 74 + p++; /* skip whitespace */ 75 + while (*p && *p != 
' ') 76 + p++; /* skip rx bytes */ 77 + while (*p == ' ') 78 + p++; /* skip whitespace */ 79 + fclose(f); 80 + return atoi(p); 81 + } 82 + } 83 + fclose(f); 84 + return -1; 85 + } 86 + 87 + #define MAX_BPF_LINKS 8 88 + 89 + struct skeletons { 90 + struct xdp_dummy *xdp_dummy; 91 + struct xdp_tx *xdp_tx; 92 + struct xdp_redirect_multi_kern *xdp_redirect_multi_kern; 93 + 94 + int nlinks; 95 + struct bpf_link *links[MAX_BPF_LINKS]; 96 + }; 97 + 98 + static int xdp_attach(struct skeletons *skeletons, struct bpf_program *prog, char *iface) 99 + { 100 + struct bpf_link *link; 101 + int ifindex; 102 + 103 + ifindex = if_nametoindex(iface); 104 + if (!ASSERT_GT(ifindex, 0, "get ifindex")) 105 + return -1; 106 + 107 + if (!ASSERT_LE(skeletons->nlinks+1, MAX_BPF_LINKS, "too many XDP programs attached")) 108 + return -1; 109 + 110 + link = bpf_program__attach_xdp(prog, ifindex); 111 + if (!ASSERT_OK_PTR(link, "attach xdp program")) 112 + return -1; 113 + 114 + skeletons->links[skeletons->nlinks++] = link; 115 + return 0; 116 + } 117 + 118 + enum { 119 + BOND_ONE_NO_ATTACH = 0, 120 + BOND_BOTH_AND_ATTACH, 121 + }; 122 + 123 + static const char * const mode_names[] = { 124 + [BOND_MODE_ROUNDROBIN] = "balance-rr", 125 + [BOND_MODE_ACTIVEBACKUP] = "active-backup", 126 + [BOND_MODE_XOR] = "balance-xor", 127 + [BOND_MODE_BROADCAST] = "broadcast", 128 + [BOND_MODE_8023AD] = "802.3ad", 129 + [BOND_MODE_TLB] = "balance-tlb", 130 + [BOND_MODE_ALB] = "balance-alb", 131 + }; 132 + 133 + static const char * const xmit_policy_names[] = { 134 + [BOND_XMIT_POLICY_LAYER2] = "layer2", 135 + [BOND_XMIT_POLICY_LAYER34] = "layer3+4", 136 + [BOND_XMIT_POLICY_LAYER23] = "layer2+3", 137 + [BOND_XMIT_POLICY_ENCAP23] = "encap2+3", 138 + [BOND_XMIT_POLICY_ENCAP34] = "encap3+4", 139 + }; 140 + 141 + static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy, 142 + int bond_both_attach) 143 + { 144 + #define SYS(fmt, ...) 
\ 145 + ({ \ 146 + char cmd[1024]; \ 147 + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ 148 + if (!ASSERT_OK(system(cmd), cmd)) \ 149 + return -1; \ 150 + }) 151 + 152 + SYS("ip netns add ns_dst"); 153 + SYS("ip link add veth1_1 type veth peer name veth2_1 netns ns_dst"); 154 + SYS("ip link add veth1_2 type veth peer name veth2_2 netns ns_dst"); 155 + 156 + SYS("ip link add bond1 type bond mode %s xmit_hash_policy %s", 157 + mode_names[mode], xmit_policy_names[xmit_policy]); 158 + SYS("ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none"); 159 + SYS("ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s", 160 + mode_names[mode], xmit_policy_names[xmit_policy]); 161 + SYS("ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none"); 162 + 163 + SYS("ip link set veth1_1 master bond1"); 164 + if (bond_both_attach == BOND_BOTH_AND_ATTACH) { 165 + SYS("ip link set veth1_2 master bond1"); 166 + } else { 167 + SYS("ip link set veth1_2 up addrgenmode none"); 168 + 169 + if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2")) 170 + return -1; 171 + } 172 + 173 + SYS("ip -netns ns_dst link set veth2_1 master bond2"); 174 + 175 + if (bond_both_attach == BOND_BOTH_AND_ATTACH) 176 + SYS("ip -netns ns_dst link set veth2_2 master bond2"); 177 + else 178 + SYS("ip -netns ns_dst link set veth2_2 up addrgenmode none"); 179 + 180 + /* Load a dummy program on sending side as with veth peer needs to have a 181 + * XDP program loaded as well. 
182 + */ 183 + if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "bond1")) 184 + return -1; 185 + 186 + if (bond_both_attach == BOND_BOTH_AND_ATTACH) { 187 + if (!ASSERT_OK(setns_by_name("ns_dst"), "set netns to ns_dst")) 188 + return -1; 189 + 190 + if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2")) 191 + return -1; 192 + 193 + restore_root_netns(); 194 + } 195 + 196 + return 0; 197 + 198 + #undef SYS 199 + } 200 + 201 + static void bonding_cleanup(struct skeletons *skeletons) 202 + { 203 + restore_root_netns(); 204 + while (skeletons->nlinks) { 205 + skeletons->nlinks--; 206 + bpf_link__destroy(skeletons->links[skeletons->nlinks]); 207 + } 208 + ASSERT_OK(system("ip link delete bond1"), "delete bond1"); 209 + ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1"); 210 + ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2"); 211 + ASSERT_OK(system("ip netns delete ns_dst"), "delete ns_dst"); 212 + } 213 + 214 + static int send_udp_packets(int vary_dst_ip) 215 + { 216 + struct ethhdr eh = { 217 + .h_source = BOND1_MAC, 218 + .h_dest = BOND2_MAC, 219 + .h_proto = htons(ETH_P_IP), 220 + }; 221 + uint8_t buf[128] = {}; 222 + struct iphdr *iph = (struct iphdr *)(buf + sizeof(eh)); 223 + struct udphdr *uh = (struct udphdr *)(buf + sizeof(eh) + sizeof(*iph)); 224 + int i, s = -1; 225 + int ifindex; 226 + 227 + s = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); 228 + if (!ASSERT_GE(s, 0, "socket")) 229 + goto err; 230 + 231 + ifindex = if_nametoindex("bond1"); 232 + if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex")) 233 + goto err; 234 + 235 + memcpy(buf, &eh, sizeof(eh)); 236 + iph->ihl = 5; 237 + iph->version = 4; 238 + iph->tos = 16; 239 + iph->id = 1; 240 + iph->ttl = 64; 241 + iph->protocol = IPPROTO_UDP; 242 + iph->saddr = 1; 243 + iph->daddr = 2; 244 + iph->tot_len = htons(sizeof(buf) - ETH_HLEN); 245 + iph->check = 0; 246 + 247 + for (i = 1; i <= NPACKETS; i++) { 248 + int n; 249 + struct sockaddr_ll saddr_ll = { 250 + 
.sll_ifindex = ifindex, 251 + .sll_halen = ETH_ALEN, 252 + .sll_addr = BOND2_MAC, 253 + }; 254 + 255 + /* vary the UDP destination port for even distribution with roundrobin/xor modes */ 256 + uh->dest++; 257 + 258 + if (vary_dst_ip) 259 + iph->daddr++; 260 + 261 + n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll)); 262 + if (!ASSERT_EQ(n, sizeof(buf), "sendto")) 263 + goto err; 264 + } 265 + 266 + return 0; 267 + 268 + err: 269 + if (s >= 0) 270 + close(s); 271 + return -1; 272 + } 273 + 274 + static void test_xdp_bonding_with_mode(struct skeletons *skeletons, int mode, int xmit_policy) 275 + { 276 + int bond1_rx; 277 + 278 + if (bonding_setup(skeletons, mode, xmit_policy, BOND_BOTH_AND_ATTACH)) 279 + goto out; 280 + 281 + if (send_udp_packets(xmit_policy != BOND_XMIT_POLICY_LAYER34)) 282 + goto out; 283 + 284 + bond1_rx = get_rx_packets("bond1"); 285 + ASSERT_EQ(bond1_rx, NPACKETS, "expected more received packets"); 286 + 287 + switch (mode) { 288 + case BOND_MODE_ROUNDROBIN: 289 + case BOND_MODE_XOR: { 290 + int veth1_rx = get_rx_packets("veth1_1"); 291 + int veth2_rx = get_rx_packets("veth1_2"); 292 + int diff = abs(veth1_rx - veth2_rx); 293 + 294 + ASSERT_GE(veth1_rx + veth2_rx, NPACKETS, "expected more packets"); 295 + 296 + switch (xmit_policy) { 297 + case BOND_XMIT_POLICY_LAYER2: 298 + ASSERT_GE(diff, NPACKETS, 299 + "expected packets on only one of the interfaces"); 300 + break; 301 + case BOND_XMIT_POLICY_LAYER23: 302 + case BOND_XMIT_POLICY_LAYER34: 303 + ASSERT_LT(diff, NPACKETS/2, 304 + "expected even distribution of packets"); 305 + break; 306 + default: 307 + PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy); 308 + break; 309 + } 310 + break; 311 + } 312 + case BOND_MODE_ACTIVEBACKUP: { 313 + int veth1_rx = get_rx_packets("veth1_1"); 314 + int veth2_rx = get_rx_packets("veth1_2"); 315 + int diff = abs(veth1_rx - veth2_rx); 316 + 317 + ASSERT_GE(diff, NPACKETS, 318 + "expected packets on only one of the 
interfaces"); 319 + break; 320 + } 321 + default: 322 + PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy); 323 + break; 324 + } 325 + 326 + out: 327 + bonding_cleanup(skeletons); 328 + } 329 + 330 + /* Test the broadcast redirection using xdp_redirect_map_multi_prog and adding 331 + * all the interfaces to it and checking that broadcasting won't send the packet 332 + * to neither the ingress bond device (bond2) or its slave (veth2_1). 333 + */ 334 + static void test_xdp_bonding_redirect_multi(struct skeletons *skeletons) 335 + { 336 + static const char * const ifaces[] = {"bond2", "veth2_1", "veth2_2"}; 337 + int veth1_1_rx, veth1_2_rx; 338 + int err; 339 + 340 + if (bonding_setup(skeletons, BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, 341 + BOND_ONE_NO_ATTACH)) 342 + goto out; 343 + 344 + 345 + if (!ASSERT_OK(setns_by_name("ns_dst"), "could not set netns to ns_dst")) 346 + goto out; 347 + 348 + /* populate the devmap with the relevant interfaces */ 349 + for (int i = 0; i < ARRAY_SIZE(ifaces); i++) { 350 + int ifindex = if_nametoindex(ifaces[i]); 351 + int map_fd = bpf_map__fd(skeletons->xdp_redirect_multi_kern->maps.map_all); 352 + 353 + if (!ASSERT_GT(ifindex, 0, "could not get interface index")) 354 + goto out; 355 + 356 + err = bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0); 357 + if (!ASSERT_OK(err, "add interface to map_all")) 358 + goto out; 359 + } 360 + 361 + if (xdp_attach(skeletons, 362 + skeletons->xdp_redirect_multi_kern->progs.xdp_redirect_map_multi_prog, 363 + "bond2")) 364 + goto out; 365 + 366 + restore_root_netns(); 367 + 368 + if (send_udp_packets(BOND_MODE_ROUNDROBIN)) 369 + goto out; 370 + 371 + veth1_1_rx = get_rx_packets("veth1_1"); 372 + veth1_2_rx = get_rx_packets("veth1_2"); 373 + 374 + ASSERT_EQ(veth1_1_rx, 0, "expected no packets on veth1_1"); 375 + ASSERT_GE(veth1_2_rx, NPACKETS, "expected packets on veth1_2"); 376 + 377 + out: 378 + restore_root_netns(); 379 + bonding_cleanup(skeletons); 380 + } 381 + 382 + /* Test that 
XDP programs cannot be attached to both the bond master and slaves simultaneously */ 383 + static void test_xdp_bonding_attach(struct skeletons *skeletons) 384 + { 385 + struct bpf_link *link = NULL; 386 + struct bpf_link *link2 = NULL; 387 + int veth, bond; 388 + int err; 389 + 390 + if (!ASSERT_OK(system("ip link add veth type veth"), "add veth")) 391 + goto out; 392 + if (!ASSERT_OK(system("ip link add bond type bond"), "add bond")) 393 + goto out; 394 + 395 + veth = if_nametoindex("veth"); 396 + if (!ASSERT_GE(veth, 0, "if_nametoindex veth")) 397 + goto out; 398 + bond = if_nametoindex("bond"); 399 + if (!ASSERT_GE(bond, 0, "if_nametoindex bond")) 400 + goto out; 401 + 402 + /* enslaving with a XDP program loaded fails */ 403 + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth); 404 + if (!ASSERT_OK_PTR(link, "attach program to veth")) 405 + goto out; 406 + 407 + err = system("ip link set veth master bond"); 408 + if (!ASSERT_NEQ(err, 0, "attaching slave with xdp program expected to fail")) 409 + goto out; 410 + 411 + bpf_link__destroy(link); 412 + link = NULL; 413 + 414 + err = system("ip link set veth master bond"); 415 + if (!ASSERT_OK(err, "set veth master")) 416 + goto out; 417 + 418 + /* attaching to slave when master has no program is allowed */ 419 + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth); 420 + if (!ASSERT_OK_PTR(link, "attach program to slave when enslaved")) 421 + goto out; 422 + 423 + /* attaching to master not allowed when slave has program loaded */ 424 + link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); 425 + if (!ASSERT_ERR_PTR(link2, "attach program to master when slave has program")) 426 + goto out; 427 + 428 + bpf_link__destroy(link); 429 + link = NULL; 430 + 431 + /* attaching XDP program to master allowed when slave has no program */ 432 + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); 433 + if 
(!ASSERT_OK_PTR(link, "attach program to master")) 434 + goto out; 435 + 436 + /* attaching to slave not allowed when master has program loaded */ 437 + link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); 438 + ASSERT_ERR_PTR(link2, "attach program to slave when master has program"); 439 + 440 + out: 441 + bpf_link__destroy(link); 442 + bpf_link__destroy(link2); 443 + 444 + system("ip link del veth"); 445 + system("ip link del bond"); 446 + } 447 + 448 + static int libbpf_debug_print(enum libbpf_print_level level, 449 + const char *format, va_list args) 450 + { 451 + if (level != LIBBPF_WARN) 452 + vprintf(format, args); 453 + return 0; 454 + } 455 + 456 + struct bond_test_case { 457 + char *name; 458 + int mode; 459 + int xmit_policy; 460 + }; 461 + 462 + static struct bond_test_case bond_test_cases[] = { 463 + { "xdp_bonding_roundrobin", BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, }, 464 + { "xdp_bonding_activebackup", BOND_MODE_ACTIVEBACKUP, BOND_XMIT_POLICY_LAYER23 }, 465 + 466 + { "xdp_bonding_xor_layer2", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER2, }, 467 + { "xdp_bonding_xor_layer23", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER23, }, 468 + { "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, }, 469 + }; 470 + 471 + void test_xdp_bonding(void) 472 + { 473 + libbpf_print_fn_t old_print_fn; 474 + struct skeletons skeletons = {}; 475 + int i; 476 + 477 + old_print_fn = libbpf_set_print(libbpf_debug_print); 478 + 479 + root_netns_fd = open("/proc/self/ns/net", O_RDONLY); 480 + if (!ASSERT_GE(root_netns_fd, 0, "open /proc/self/ns/net")) 481 + goto out; 482 + 483 + skeletons.xdp_dummy = xdp_dummy__open_and_load(); 484 + if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load")) 485 + goto out; 486 + 487 + skeletons.xdp_tx = xdp_tx__open_and_load(); 488 + if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load")) 489 + goto out; 490 + 491 + skeletons.xdp_redirect_multi_kern = 
xdp_redirect_multi_kern__open_and_load(); 492 + if (!ASSERT_OK_PTR(skeletons.xdp_redirect_multi_kern, 493 + "xdp_redirect_multi_kern__open_and_load")) 494 + goto out; 495 + 496 + if (!test__start_subtest("xdp_bonding_attach")) 497 + test_xdp_bonding_attach(&skeletons); 498 + 499 + for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) { 500 + struct bond_test_case *test_case = &bond_test_cases[i]; 501 + 502 + if (!test__start_subtest(test_case->name)) 503 + test_xdp_bonding_with_mode( 504 + &skeletons, 505 + test_case->mode, 506 + test_case->xmit_policy); 507 + } 508 + 509 + if (!test__start_subtest("xdp_bonding_redirect_multi")) 510 + test_xdp_bonding_redirect_multi(&skeletons); 511 + 512 + out: 513 + xdp_dummy__destroy(skeletons.xdp_dummy); 514 + xdp_tx__destroy(skeletons.xdp_tx); 515 + xdp_redirect_multi_kern__destroy(skeletons.xdp_redirect_multi_kern); 516 + 517 + libbpf_set_print(old_print_fn); 518 + if (root_netns_fd >= 0) 519 + close(root_netns_fd); 520 + }
+1 -1
tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
··· 121 121 } 122 122 123 123 BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", 124 - seq_num, src, srcp, destp, destp); 124 + seq_num, src, srcp, dest, destp); 125 125 BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", 126 126 state, 127 127 tp->write_seq - tp->snd_una, rx_queue,
+7 -7
tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
··· 91 91 return 0; 92 92 } 93 93 94 - SEC("classifier/fail_use_after_free") 94 + SEC("classifier/err_use_after_free") 95 95 int bpf_sk_lookup_uaf(struct __sk_buff *skb) 96 96 { 97 97 struct bpf_sock_tuple tuple = {}; ··· 106 106 return family; 107 107 } 108 108 109 - SEC("classifier/fail_modify_sk_pointer") 109 + SEC("classifier/err_modify_sk_pointer") 110 110 int bpf_sk_lookup_modptr(struct __sk_buff *skb) 111 111 { 112 112 struct bpf_sock_tuple tuple = {}; ··· 121 121 return 0; 122 122 } 123 123 124 - SEC("classifier/fail_modify_sk_or_null_pointer") 124 + SEC("classifier/err_modify_sk_or_null_pointer") 125 125 int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) 126 126 { 127 127 struct bpf_sock_tuple tuple = {}; ··· 135 135 return 0; 136 136 } 137 137 138 - SEC("classifier/fail_no_release") 138 + SEC("classifier/err_no_release") 139 139 int bpf_sk_lookup_test2(struct __sk_buff *skb) 140 140 { 141 141 struct bpf_sock_tuple tuple = {}; ··· 144 144 return 0; 145 145 } 146 146 147 - SEC("classifier/fail_release_twice") 147 + SEC("classifier/err_release_twice") 148 148 int bpf_sk_lookup_test3(struct __sk_buff *skb) 149 149 { 150 150 struct bpf_sock_tuple tuple = {}; ··· 156 156 return 0; 157 157 } 158 158 159 - SEC("classifier/fail_release_unchecked") 159 + SEC("classifier/err_release_unchecked") 160 160 int bpf_sk_lookup_test4(struct __sk_buff *skb) 161 161 { 162 162 struct bpf_sock_tuple tuple = {}; ··· 173 173 bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); 174 174 } 175 175 176 - SEC("classifier/fail_no_release_subcall") 176 + SEC("classifier/err_no_release_subcall") 177 177 int bpf_sk_lookup_test5(struct __sk_buff *skb) 178 178 { 179 179 lookup_no_release(skb);
+1 -1
tools/testing/selftests/bpf/progs/xdp_tx.c
··· 3 3 #include <linux/bpf.h> 4 4 #include <bpf/bpf_helpers.h> 5 5 6 - SEC("tx") 6 + SEC("xdp") 7 7 int xdp_tx(struct xdp_md *xdp) 8 8 { 9 9 return XDP_TX;
-148
tools/testing/selftests/bpf/test_netcnt.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include <stdio.h> 3 - #include <stdlib.h> 4 - #include <string.h> 5 - #include <errno.h> 6 - #include <assert.h> 7 - #include <sys/sysinfo.h> 8 - #include <sys/time.h> 9 - 10 - #include <linux/bpf.h> 11 - #include <bpf/bpf.h> 12 - #include <bpf/libbpf.h> 13 - 14 - #include "cgroup_helpers.h" 15 - #include "bpf_rlimit.h" 16 - #include "netcnt_common.h" 17 - 18 - #define BPF_PROG "./netcnt_prog.o" 19 - #define TEST_CGROUP "/test-network-counters/" 20 - 21 - static int bpf_find_map(const char *test, struct bpf_object *obj, 22 - const char *name) 23 - { 24 - struct bpf_map *map; 25 - 26 - map = bpf_object__find_map_by_name(obj, name); 27 - if (!map) { 28 - printf("%s:FAIL:map '%s' not found\n", test, name); 29 - return -1; 30 - } 31 - return bpf_map__fd(map); 32 - } 33 - 34 - int main(int argc, char **argv) 35 - { 36 - union percpu_net_cnt *percpu_netcnt; 37 - struct bpf_cgroup_storage_key key; 38 - int map_fd, percpu_map_fd; 39 - int error = EXIT_FAILURE; 40 - struct bpf_object *obj; 41 - int prog_fd, cgroup_fd; 42 - unsigned long packets; 43 - union net_cnt netcnt; 44 - unsigned long bytes; 45 - int cpu, nproc; 46 - __u32 prog_cnt; 47 - 48 - nproc = get_nprocs_conf(); 49 - percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); 50 - if (!percpu_netcnt) { 51 - printf("Not enough memory for per-cpu area (%d cpus)\n", nproc); 52 - goto err; 53 - } 54 - 55 - if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB, 56 - &obj, &prog_fd)) { 57 - printf("Failed to load bpf program\n"); 58 - goto out; 59 - } 60 - 61 - cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); 62 - if (cgroup_fd < 0) 63 - goto err; 64 - 65 - /* Attach bpf program */ 66 - if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { 67 - printf("Failed to attach bpf program"); 68 - goto err; 69 - } 70 - 71 - if (system("which ping6 &>/dev/null") == 0) 72 - assert(!system("ping6 ::1 -c 10000 -f -q > /dev/null")); 73 - else 74 - assert(!system("ping -6 
::1 -c 10000 -f -q > /dev/null")); 75 - 76 - if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL, 77 - &prog_cnt)) { 78 - printf("Failed to query attached programs"); 79 - goto err; 80 - } 81 - 82 - map_fd = bpf_find_map(__func__, obj, "netcnt"); 83 - if (map_fd < 0) { 84 - printf("Failed to find bpf map with net counters"); 85 - goto err; 86 - } 87 - 88 - percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt"); 89 - if (percpu_map_fd < 0) { 90 - printf("Failed to find bpf map with percpu net counters"); 91 - goto err; 92 - } 93 - 94 - if (bpf_map_get_next_key(map_fd, NULL, &key)) { 95 - printf("Failed to get key in cgroup storage\n"); 96 - goto err; 97 - } 98 - 99 - if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) { 100 - printf("Failed to lookup cgroup storage\n"); 101 - goto err; 102 - } 103 - 104 - if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) { 105 - printf("Failed to lookup percpu cgroup storage\n"); 106 - goto err; 107 - } 108 - 109 - /* Some packets can be still in per-cpu cache, but not more than 110 - * MAX_PERCPU_PACKETS. 111 - */ 112 - packets = netcnt.packets; 113 - bytes = netcnt.bytes; 114 - for (cpu = 0; cpu < nproc; cpu++) { 115 - if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) { 116 - printf("Unexpected percpu value: %llu\n", 117 - percpu_netcnt[cpu].packets); 118 - goto err; 119 - } 120 - 121 - packets += percpu_netcnt[cpu].packets; 122 - bytes += percpu_netcnt[cpu].bytes; 123 - } 124 - 125 - /* No packets should be lost */ 126 - if (packets != 10000) { 127 - printf("Unexpected packet count: %lu\n", packets); 128 - goto err; 129 - } 130 - 131 - /* Let's check that bytes counter matches the number of packets 132 - * multiplied by the size of ipv6 ICMP packet. 
133 - */ 134 - if (bytes != packets * 104) { 135 - printf("Unexpected bytes count: %lu\n", bytes); 136 - goto err; 137 - } 138 - 139 - error = 0; 140 - printf("test_netcnt:PASS\n"); 141 - 142 - err: 143 - cleanup_cgroup_environment(); 144 - free(percpu_netcnt); 145 - 146 - out: 147 - return error; 148 - }
+1 -1
tools/testing/selftests/bpf/test_xdp_veth.sh
··· 108 108 ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2 109 109 110 110 ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy 111 - ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx 111 + ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp 112 112 ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy 113 113 114 114 trap cleanup EXIT