Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"Including fixes from netfilter and bpf.

Current release - regressions:

- sched: fix SKB_NOT_DROPPED_YET splat under debug config

Current release - new code bugs:

- tcp:
- fix usec timestamps with TCP fastopen
- fix possible out-of-bounds reads in tcp_hash_fail()
- fix SYN option room calculation for TCP-AO

- tcp_sigpool: fix some off by one bugs

- bpf: fix compilation error without CGROUPS

- ptp:
- ptp_read() should not release queue
- fix tsevqs corruption

Previous releases - regressions:

- llc: verify mac len before reading mac header

Previous releases - always broken:

- bpf:
- fix check_stack_write_fixed_off() to correctly spill imm
- fix precision tracking for BPF_ALU | BPF_TO_BE | BPF_END
- check map->usercnt after timer->timer is assigned

- dsa: lan9303: consequently nested-lock physical MDIO

- dccp/tcp: call security_inet_conn_request() after setting IP addr

- tg3: fix the TX ring stall due to incorrect full ring handling

- phylink: initialize carrier state at creation

- ice: fix direction of VF rules in switchdev mode

Misc:

- fill in a bunch of missing MODULE_DESCRIPTION()s, more to come"

* tag 'net-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
net: ti: icss-iep: fix setting counter value
ptp: fix corrupted list in ptp_open
ptp: ptp_read should not release queue
net_sched: sch_fq: better validate TCA_FQ_WEIGHTS and TCA_FQ_PRIOMAP
net: kcm: fill in MODULE_DESCRIPTION()
net/sched: act_ct: Always fill offloading tuple iifidx
netfilter: nat: fix ipv6 nat redirect with mapped and scoped addresses
netfilter: xt_recent: fix (increase) ipv6 literal buffer length
ipvs: add missing module descriptions
netfilter: nf_tables: remove catchall element in GC sync path
netfilter: add missing module descriptions
drivers/net/ppp: use standard array-copy-function
net: enetc: shorten enetc_setup_xdp_prog() error message to fit NETLINK_MAX_FMTMSG_LEN
virtio/vsock: Fix uninit-value in virtio_transport_recv_pkt()
r8169: respect userspace disabling IFF_MULTICAST
selftests/bpf: get trusted cgrp from bpf_iter__cgroup directly
bpf: Let verifier consider {task,cgroup} is trusted in bpf_iter_reg
net: phylink: initialize carrier state at creation
test/vsock: add double bind connect test
test/vsock: refactor vsock_accept
...

+1242 -434
+2 -4
Documentation/bpf/kfuncs.rst
··· 37 37 An example is given below:: 38 38 39 39 /* Disables missing prototype warnings */ 40 - __diag_push(); 41 - __diag_ignore_all("-Wmissing-prototypes", 42 - "Global kfuncs as their definitions will be in BTF"); 40 + __bpf_kfunc_start_defs(); 43 41 44 42 __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr) 45 43 { 46 44 return find_get_task_by_vpid(nr); 47 45 } 48 46 49 - __diag_pop(); 47 + __bpf_kfunc_end_defs(); 50 48 51 49 A wrapper kfunc is often needed when we need to annotate parameters of the 52 50 kfunc. Otherwise one may directly make the kfunc visible to the BPF program by
+4
Documentation/netlink/specs/devlink.yaml
··· 71 71 name: roce-bit 72 72 - 73 73 name: migratable-bit 74 + - 75 + name: ipsec-crypto-bit 76 + - 77 + name: ipsec-packet-bit 74 78 - 75 79 type: enum 76 80 name: sb-threshold-type
+2 -4
Documentation/networking/smc-sysctl.rst
··· 44 44 45 45 wmem - INTEGER 46 46 Initial size of send buffer used by SMC sockets. 47 - The default value inherits from net.ipv4.tcp_wmem[1]. 48 47 49 48 The minimum value is 16KiB and there is no hard limit for max value, but 50 49 only allowed 512KiB for SMC-R and 1MiB for SMC-D. 51 50 52 - Default: 16K 51 + Default: 64KiB 53 52 54 53 rmem - INTEGER 55 54 Initial size of receive buffer (RMB) used by SMC sockets. 56 - The default value inherits from net.ipv4.tcp_rmem[1]. 57 55 58 56 The minimum value is 16KiB and there is no hard limit for max value, but 59 57 only allowed 512KiB for SMC-R and 1MiB for SMC-D. 60 58 61 - Default: 128K 59 + Default: 64KiB
+2 -2
drivers/net/dsa/lan9303_mdio.c
··· 32 32 struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; 33 33 34 34 reg <<= 2; /* reg num to offset */ 35 - mutex_lock(&sw_dev->device->bus->mdio_lock); 35 + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); 36 36 lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); 37 37 lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); 38 38 mutex_unlock(&sw_dev->device->bus->mdio_lock); ··· 50 50 struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; 51 51 52 52 reg <<= 2; /* reg num to offset */ 53 - mutex_lock(&sw_dev->device->bus->mdio_lock); 53 + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); 54 54 *val = lan9303_mdio_real_read(sw_dev->device, reg); 55 55 *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); 56 56 mutex_unlock(&sw_dev->device->bus->mdio_lock);
+44 -12
drivers/net/ethernet/broadcom/tg3.c
··· 6647 6647 6648 6648 tnapi->tx_cons = sw_idx; 6649 6649 6650 - /* Need to make the tx_cons update visible to tg3_start_xmit() 6650 + /* Need to make the tx_cons update visible to __tg3_start_xmit() 6651 6651 * before checking for netif_queue_stopped(). Without the 6652 - * memory barrier, there is a small possibility that tg3_start_xmit() 6652 + * memory barrier, there is a small possibility that __tg3_start_xmit() 6653 6653 * will miss it and cause the queue to be stopped forever. 6654 6654 */ 6655 6655 smp_mb(); ··· 7889 7889 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; 7890 7890 } 7891 7891 7892 - static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7892 + static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *); 7893 7893 7894 7894 /* Use GSO to workaround all TSO packets that meet HW bug conditions 7895 7895 * indicated in tg3_tx_frag_set() ··· 7923 7923 7924 7924 skb_list_walk_safe(segs, seg, next) { 7925 7925 skb_mark_not_on_list(seg); 7926 - tg3_start_xmit(seg, tp->dev); 7926 + __tg3_start_xmit(seg, tp->dev); 7927 7927 } 7928 7928 7929 7929 tg3_tso_bug_end: ··· 7933 7933 } 7934 7934 7935 7935 /* hard_start_xmit for all devices */ 7936 - static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7936 + static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7937 7937 { 7938 7938 struct tg3 *tp = netdev_priv(dev); 7939 7939 u32 len, entry, base_flags, mss, vlan = 0; ··· 8182 8182 netif_tx_wake_queue(txq); 8183 8183 } 8184 8184 8185 - if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8186 - /* Packets are ready, update Tx producer idx on card. 
*/ 8187 - tw32_tx_mbox(tnapi->prodmbox, entry); 8188 - } 8189 - 8190 8185 return NETDEV_TX_OK; 8191 8186 8192 8187 dma_error: ··· 8192 8197 drop_nofree: 8193 8198 tp->tx_dropped++; 8194 8199 return NETDEV_TX_OK; 8200 + } 8201 + 8202 + static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 8203 + { 8204 + struct netdev_queue *txq; 8205 + u16 skb_queue_mapping; 8206 + netdev_tx_t ret; 8207 + 8208 + skb_queue_mapping = skb_get_queue_mapping(skb); 8209 + txq = netdev_get_tx_queue(dev, skb_queue_mapping); 8210 + 8211 + ret = __tg3_start_xmit(skb, dev); 8212 + 8213 + /* Notify the hardware that packets are ready by updating the TX ring 8214 + * tail pointer. We respect netdev_xmit_more() thus avoiding poking 8215 + * the hardware for every packet. To guarantee forward progress the TX 8216 + * ring must be drained when it is full as indicated by 8217 + * netif_xmit_stopped(). This needs to happen even when the current 8218 + * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets 8219 + * queued by previous __tg3_start_xmit() calls might get stuck in 8220 + * the queue forever. 8221 + */ 8222 + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8223 + struct tg3_napi *tnapi; 8224 + struct tg3 *tp; 8225 + 8226 + tp = netdev_priv(dev); 8227 + tnapi = &tp->napi[skb_queue_mapping]; 8228 + 8229 + if (tg3_flag(tp, ENABLE_TSS)) 8230 + tnapi++; 8231 + 8232 + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 8233 + } 8234 + 8235 + return ret; 8195 8236 } 8196 8237 8197 8238 static void tg3_mac_loopback(struct tg3 *tp, bool enable) ··· 17760 17729 * device behind the EPB cannot support DMA addresses > 40-bit. 17761 17730 * On 64-bit systems with IOMMU, use 40-bit dma_mask. 17762 17731 * On 64-bit systems without IOMMU, use 64-bit dma_mask and 17763 - * do DMA address check in tg3_start_xmit(). 17732 + * do DMA address check in __tg3_start_xmit(). 
17764 17733 */ 17765 17734 if (tg3_flag(tp, IS_5788)) 17766 17735 persist_dma_mask = dma_mask = DMA_BIT_MASK(32); ··· 18158 18127 if (netif_running(dev)) 18159 18128 dev_close(dev); 18160 18129 18161 - tg3_power_down(tp); 18130 + if (system_state == SYSTEM_POWER_OFF) 18131 + tg3_power_down(tp); 18162 18132 18163 18133 rtnl_unlock(); 18164 18134
+1 -1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2769 2769 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > 2770 2770 priv->num_tx_rings) { 2771 2771 NL_SET_ERR_MSG_FMT_MOD(extack, 2772 - "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)", 2772 + "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", 2773 2773 num_xdp_tx_queues, 2774 2774 priv->min_num_stack_tx_queues, 2775 2775 priv->num_tx_rings);
-1
drivers/net/ethernet/intel/i40e/i40e_devlink.c
··· 231 231 **/ 232 232 void i40e_devlink_destroy_port(struct i40e_pf *pf) 233 233 { 234 - devlink_port_type_clear(&pf->devlink_port); 235 234 devlink_port_unregister(&pf->devlink_port); 236 235 }
+6 -4
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 14213 14213 } 14214 14214 set_bit(__I40E_VSI_RELEASING, vsi->state); 14215 14215 uplink_seid = vsi->uplink_seid; 14216 - if (vsi->type == I40E_VSI_MAIN) 14217 - i40e_devlink_destroy_port(pf); 14216 + 14218 14217 if (vsi->type != I40E_VSI_SRIOV) { 14219 14218 if (vsi->netdev_registered) { 14220 14219 vsi->netdev_registered = false; ··· 14226 14227 } 14227 14228 i40e_vsi_disable_irq(vsi); 14228 14229 } 14230 + 14231 + if (vsi->type == I40E_VSI_MAIN) 14232 + i40e_devlink_destroy_port(pf); 14229 14233 14230 14234 spin_lock_bh(&vsi->mac_filter_hash_lock); 14231 14235 ··· 14404 14402 14405 14403 err_rings: 14406 14404 i40e_vsi_free_q_vectors(vsi); 14407 - if (vsi->type == I40E_VSI_MAIN) 14408 - i40e_devlink_destroy_port(pf); 14409 14405 if (vsi->netdev_registered) { 14410 14406 vsi->netdev_registered = false; 14411 14407 unregister_netdev(vsi->netdev); 14412 14408 free_netdev(vsi->netdev); 14413 14409 vsi->netdev = NULL; 14414 14410 } 14411 + if (vsi->type == I40E_VSI_MAIN) 14412 + i40e_devlink_destroy_port(pf); 14415 14413 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 14416 14414 err_vsi: 14417 14415 i40e_vsi_clear(vsi);
+6 -12
drivers/net/ethernet/intel/ice/ice_lag.c
··· 628 628 INIT_LIST_HEAD(&ndlist.node); 629 629 rcu_read_lock(); 630 630 for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { 631 - nl = kzalloc(sizeof(*nl), GFP_KERNEL); 631 + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); 632 632 if (!nl) 633 633 break; 634 634 ··· 1555 1555 */ 1556 1556 static void ice_lag_disable_sriov_bond(struct ice_lag *lag) 1557 1557 { 1558 - struct ice_lag_netdev_list *entry; 1559 1558 struct ice_netdev_priv *np; 1560 - struct net_device *netdev; 1561 1559 struct ice_pf *pf; 1562 1560 1563 - list_for_each_entry(entry, lag->netdev_head, node) { 1564 - netdev = entry->netdev; 1565 - np = netdev_priv(netdev); 1566 - pf = np->vsi->back; 1567 - 1568 - ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); 1569 - } 1561 + np = netdev_priv(lag->netdev); 1562 + pf = np->vsi->back; 1563 + ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); 1570 1564 } 1571 1565 1572 1566 /** ··· 1692 1698 1693 1699 rcu_read_lock(); 1694 1700 for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) { 1695 - nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL); 1701 + nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC); 1696 1702 if (!nd_list) 1697 1703 break; 1698 1704 ··· 2069 2075 INIT_LIST_HEAD(&ndlist.node); 2070 2076 rcu_read_lock(); 2071 2077 for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { 2072 - nl = kzalloc(sizeof(*nl), GFP_KERNEL); 2078 + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); 2073 2079 if (!nl) 2074 2080 break; 2075 2081
+85 -29
drivers/net/ethernet/intel/ice/ice_tc_lib.c
··· 630 630 return ice_tc_tun_get_type(dev) != TNL_LAST; 631 631 } 632 632 633 - static int 634 - ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, 635 - struct flow_action_entry *act) 633 + static bool ice_tc_is_dev_uplink(struct net_device *dev) 634 + { 635 + return netif_is_ice(dev) || ice_is_tunnel_supported(dev); 636 + } 637 + 638 + static int ice_tc_setup_redirect_action(struct net_device *filter_dev, 639 + struct ice_tc_flower_fltr *fltr, 640 + struct net_device *target_dev) 636 641 { 637 642 struct ice_repr *repr; 638 643 644 + fltr->action.fltr_act = ICE_FWD_TO_VSI; 645 + 646 + if (ice_is_port_repr_netdev(filter_dev) && 647 + ice_is_port_repr_netdev(target_dev)) { 648 + repr = ice_netdev_to_repr(target_dev); 649 + 650 + fltr->dest_vsi = repr->src_vsi; 651 + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 652 + } else if (ice_is_port_repr_netdev(filter_dev) && 653 + ice_tc_is_dev_uplink(target_dev)) { 654 + repr = ice_netdev_to_repr(filter_dev); 655 + 656 + fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi; 657 + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 658 + } else if (ice_tc_is_dev_uplink(filter_dev) && 659 + ice_is_port_repr_netdev(target_dev)) { 660 + repr = ice_netdev_to_repr(target_dev); 661 + 662 + fltr->dest_vsi = repr->src_vsi; 663 + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; 664 + } else { 665 + NL_SET_ERR_MSG_MOD(fltr->extack, 666 + "Unsupported netdevice in switchdev mode"); 667 + return -EINVAL; 668 + } 669 + 670 + return 0; 671 + } 672 + 673 + static int 674 + ice_tc_setup_drop_action(struct net_device *filter_dev, 675 + struct ice_tc_flower_fltr *fltr) 676 + { 677 + fltr->action.fltr_act = ICE_DROP_PACKET; 678 + 679 + if (ice_is_port_repr_netdev(filter_dev)) { 680 + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 681 + } else if (ice_tc_is_dev_uplink(filter_dev)) { 682 + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; 683 + } else { 684 + NL_SET_ERR_MSG_MOD(fltr->extack, 685 + "Unsupported netdevice in switchdev mode"); 686 + return 
-EINVAL; 687 + } 688 + 689 + return 0; 690 + } 691 + 692 + static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, 693 + struct ice_tc_flower_fltr *fltr, 694 + struct flow_action_entry *act) 695 + { 696 + int err; 697 + 639 698 switch (act->id) { 640 699 case FLOW_ACTION_DROP: 641 - fltr->action.fltr_act = ICE_DROP_PACKET; 700 + err = ice_tc_setup_drop_action(filter_dev, fltr); 701 + if (err) 702 + return err; 703 + 642 704 break; 643 705 644 706 case FLOW_ACTION_REDIRECT: 645 - fltr->action.fltr_act = ICE_FWD_TO_VSI; 646 - 647 - if (ice_is_port_repr_netdev(act->dev)) { 648 - repr = ice_netdev_to_repr(act->dev); 649 - 650 - fltr->dest_vsi = repr->src_vsi; 651 - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; 652 - } else if (netif_is_ice(act->dev) || 653 - ice_is_tunnel_supported(act->dev)) { 654 - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 655 - } else { 656 - NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); 657 - return -EINVAL; 658 - } 707 + err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev); 708 + if (err) 709 + return err; 659 710 660 711 break; 661 712 ··· 747 696 goto exit; 748 697 } 749 698 750 - /* egress traffic is always redirect to uplink */ 751 - if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) 752 - fltr->dest_vsi = vsi->back->switchdev.uplink_vsi; 753 - 754 699 rule_info.sw_act.fltr_act = fltr->action.fltr_act; 755 700 if (fltr->action.fltr_act != ICE_DROP_PACKET) 756 701 rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; ··· 760 713 rule_info.flags_info.act_valid = true; 761 714 762 715 if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { 716 + /* Uplink to VF */ 763 717 rule_info.sw_act.flag |= ICE_FLTR_RX; 764 718 rule_info.sw_act.src = hw->pf_id; 765 719 rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; 766 - } else { 720 + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && 721 + fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { 722 + /* VF to Uplink */ 767 723 rule_info.sw_act.flag |= 
ICE_FLTR_TX; 768 724 rule_info.sw_act.src = vsi->idx; 769 725 rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; 726 + } else { 727 + /* VF to VF */ 728 + rule_info.sw_act.flag |= ICE_FLTR_TX; 729 + rule_info.sw_act.src = vsi->idx; 730 + rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; 770 731 } 771 732 772 733 /* specify the cookie as filter_rule_id */ ··· 1800 1745 1801 1746 /** 1802 1747 * ice_parse_tc_flower_actions - Parse the actions for a TC filter 1748 + * @filter_dev: Pointer to device on which filter is being added 1803 1749 * @vsi: Pointer to VSI 1804 1750 * @cls_flower: Pointer to TC flower offload structure 1805 1751 * @fltr: Pointer to TC flower filter structure 1806 1752 * 1807 1753 * Parse the actions for a TC filter 1808 1754 */ 1809 - static int 1810 - ice_parse_tc_flower_actions(struct ice_vsi *vsi, 1811 - struct flow_cls_offload *cls_flower, 1812 - struct ice_tc_flower_fltr *fltr) 1755 + static int ice_parse_tc_flower_actions(struct net_device *filter_dev, 1756 + struct ice_vsi *vsi, 1757 + struct flow_cls_offload *cls_flower, 1758 + struct ice_tc_flower_fltr *fltr) 1813 1759 { 1814 1760 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1815 1761 struct flow_action *flow_action = &rule->action; ··· 1825 1769 1826 1770 flow_action_for_each(i, act, flow_action) { 1827 1771 if (ice_is_eswitch_mode_switchdev(vsi->back)) 1828 - err = ice_eswitch_tc_parse_action(fltr, act); 1772 + err = ice_eswitch_tc_parse_action(filter_dev, fltr, act); 1829 1773 else 1830 1774 err = ice_tc_parse_action(vsi, fltr, act); 1831 1775 if (err) ··· 1912 1856 if (err < 0) 1913 1857 goto err; 1914 1858 1915 - err = ice_parse_tc_flower_actions(vsi, f, fltr); 1859 + err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr); 1916 1860 if (err < 0) 1917 1861 goto err; 1918 1862
+4 -2
drivers/net/ethernet/intel/idpf/idpf_txrx.c
··· 2365 2365 */ 2366 2366 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off) 2367 2367 { 2368 - const struct skb_shared_info *shinfo = skb_shinfo(skb); 2368 + const struct skb_shared_info *shinfo; 2369 2369 union { 2370 2370 struct iphdr *v4; 2371 2371 struct ipv6hdr *v6; ··· 2379 2379 u32 paylen, l4_start; 2380 2380 int err; 2381 2381 2382 - if (!shinfo->gso_size) 2382 + if (!skb_is_gso(skb)) 2383 2383 return 0; 2384 2384 2385 2385 err = skb_cow_head(skb, 0); 2386 2386 if (err < 0) 2387 2387 return err; 2388 + 2389 + shinfo = skb_shinfo(skb); 2388 2390 2389 2391 ip.hdr = skb_network_header(skb); 2390 2392 l4.hdr = skb_transport_header(skb);
+5 -10
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 818 818 int qidx, sqe_tail, sqe_head; 819 819 struct otx2_snd_queue *sq; 820 820 u64 incr, *ptr, val; 821 - int timeout = 1000; 822 821 823 822 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); 824 823 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { ··· 826 827 continue; 827 828 828 829 incr = (u64)qidx << 32; 829 - while (timeout) { 830 - val = otx2_atomic64_add(incr, ptr); 831 - sqe_head = (val >> 20) & 0x3F; 832 - sqe_tail = (val >> 28) & 0x3F; 833 - if (sqe_head == sqe_tail) 834 - break; 835 - usleep_range(1, 3); 836 - timeout--; 837 - } 830 + val = otx2_atomic64_add(incr, ptr); 831 + sqe_head = (val >> 20) & 0x3F; 832 + sqe_tail = (val >> 28) & 0x3F; 833 + if (sqe_head != sqe_tail) 834 + usleep_range(50, 60); 838 835 } 839 836 } 840 837
+1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 977 977 int otx2_txsch_alloc(struct otx2_nic *pfvf); 978 978 void otx2_txschq_stop(struct otx2_nic *pfvf); 979 979 void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq); 980 + void otx2_free_pending_sqe(struct otx2_nic *pfvf); 980 981 void otx2_sqb_flush(struct otx2_nic *pfvf); 981 982 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, 982 983 dma_addr_t *dma);
+47 -34
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1193 1193 }; 1194 1194 1195 1195 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { 1196 - "NIX_SND_STATUS_GOOD", 1197 - "NIX_SND_STATUS_SQ_CTX_FAULT", 1198 - "NIX_SND_STATUS_SQ_CTX_POISON", 1199 - "NIX_SND_STATUS_SQB_FAULT", 1200 - "NIX_SND_STATUS_SQB_POISON", 1201 - "NIX_SND_STATUS_HDR_ERR", 1202 - "NIX_SND_STATUS_EXT_ERR", 1203 - "NIX_SND_STATUS_JUMP_FAULT", 1204 - "NIX_SND_STATUS_JUMP_POISON", 1205 - "NIX_SND_STATUS_CRC_ERR", 1206 - "NIX_SND_STATUS_IMM_ERR", 1207 - "NIX_SND_STATUS_SG_ERR", 1208 - "NIX_SND_STATUS_MEM_ERR", 1209 - "NIX_SND_STATUS_INVALID_SUBDC", 1210 - "NIX_SND_STATUS_SUBDC_ORDER_ERR", 1211 - "NIX_SND_STATUS_DATA_FAULT", 1212 - "NIX_SND_STATUS_DATA_POISON", 1213 - "NIX_SND_STATUS_NPC_DROP_ACTION", 1214 - "NIX_SND_STATUS_LOCK_VIOL", 1215 - "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", 1216 - "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", 1217 - "NIX_SND_STATUS_NPC_MCAST_ABORT", 1218 - "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", 1219 - "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", 1220 - "NIX_SND_STATUS_SEND_STATS_ERR", 1196 + [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD", 1197 + [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT", 1198 + [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON", 1199 + [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT", 1200 + [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON", 1201 + [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR", 1202 + [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR", 1203 + [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT", 1204 + [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON", 1205 + [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR", 1206 + [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR", 1207 + [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR", 1208 + [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR", 1209 + [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC", 1210 + [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR", 1211 + 
[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT", 1212 + [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON", 1213 + [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION", 1214 + [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL", 1215 + [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR", 1216 + [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR", 1217 + [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT", 1218 + [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", 1219 + [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", 1220 + [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT", 1221 + [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR", 1221 1222 }; 1222 1223 1223 1224 static irqreturn_t otx2_q_intr_handler(int irq, void *data) ··· 1239 1238 continue; 1240 1239 1241 1240 if (val & BIT_ULL(42)) { 1242 - netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1241 + netdev_err(pf->netdev, 1242 + "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1243 1243 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); 1244 1244 } else { 1245 1245 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) 1246 1246 netdev_err(pf->netdev, "CQ%lld: Doorbell error", 1247 1247 qidx); 1248 1248 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) 1249 - netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", 1249 + netdev_err(pf->netdev, 1250 + "CQ%lld: Memory fault on CQE write to LLC/DRAM", 1250 1251 qidx); 1251 1252 } 1252 1253 ··· 1275 1272 (val & NIX_SQINT_BITS)); 1276 1273 1277 1274 if (val & BIT_ULL(42)) { 1278 - netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1275 + netdev_err(pf->netdev, 1276 + "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1279 1277 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); 1280 1278 goto done; 1281 1279 } ··· 
1286 1282 goto chk_mnq_err_dbg; 1287 1283 1288 1284 sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); 1289 - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", 1290 - qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); 1285 + netdev_err(pf->netdev, 1286 + "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n", 1287 + qidx, sq_op_err_dbg, 1288 + nix_sqoperr_e_str[sq_op_err_code], 1289 + sq_op_err_code); 1291 1290 1292 1291 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); 1293 1292 ··· 1307 1300 goto chk_snd_err_dbg; 1308 1301 1309 1302 mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); 1310 - netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", 1311 - qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); 1303 + netdev_err(pf->netdev, 1304 + "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n", 1305 + qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code], 1306 + mnq_err_code); 1312 1307 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); 1313 1308 1314 1309 chk_snd_err_dbg: 1315 1310 snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); 1316 1311 if (snd_err_dbg & BIT(44)) { 1317 1312 snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); 1318 - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", 1319 - qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); 1313 + netdev_err(pf->netdev, 1314 + "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n", 1315 + qidx, snd_err_dbg, 1316 + nix_snd_status_e_str[snd_err_code], 1317 + snd_err_code); 1320 1318 otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); 1321 1319 } 1322 1320 ··· 1601 1589 else 1602 1590 otx2_cleanup_tx_cqes(pf, cq); 1603 1591 } 1592 + otx2_free_pending_sqe(pf); 1604 1593 1605 1594 otx2_free_sq_res(pf); 1606 1595
+17 -17
drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
··· 318 318 NIX_SND_STATUS_EXT_ERR = 0x6, 319 319 NIX_SND_STATUS_JUMP_FAULT = 0x7, 320 320 NIX_SND_STATUS_JUMP_POISON = 0x8, 321 - NIX_SND_STATUS_CRC_ERR = 0x9, 322 - NIX_SND_STATUS_IMM_ERR = 0x10, 323 - NIX_SND_STATUS_SG_ERR = 0x11, 324 - NIX_SND_STATUS_MEM_ERR = 0x12, 325 - NIX_SND_STATUS_INVALID_SUBDC = 0x13, 326 - NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, 327 - NIX_SND_STATUS_DATA_FAULT = 0x15, 328 - NIX_SND_STATUS_DATA_POISON = 0x16, 329 - NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, 330 - NIX_SND_STATUS_LOCK_VIOL = 0x18, 331 - NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, 332 - NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, 333 - NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, 334 - NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, 335 - NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, 336 - NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, 337 - NIX_SND_STATUS_SEND_STATS_ERR = 0x25, 321 + NIX_SND_STATUS_CRC_ERR = 0x10, 322 + NIX_SND_STATUS_IMM_ERR = 0x11, 323 + NIX_SND_STATUS_SG_ERR = 0x12, 324 + NIX_SND_STATUS_MEM_ERR = 0x13, 325 + NIX_SND_STATUS_INVALID_SUBDC = 0x14, 326 + NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15, 327 + NIX_SND_STATUS_DATA_FAULT = 0x16, 328 + NIX_SND_STATUS_DATA_POISON = 0x17, 329 + NIX_SND_STATUS_NPC_DROP_ACTION = 0x20, 330 + NIX_SND_STATUS_LOCK_VIOL = 0x21, 331 + NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22, 332 + NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23, 333 + NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24, 334 + NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25, 335 + NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26, 336 + NIX_SND_STATUS_SEND_MEM_FAULT = 0x27, 337 + NIX_SND_STATUS_SEND_STATS_ERR = 0x28, 338 338 NIX_SND_STATUS_MAX, 339 339 }; 340 340
+42
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
··· 1247 1247 1248 1248 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) 1249 1249 { 1250 + int tx_pkts = 0, tx_bytes = 0; 1250 1251 struct sk_buff *skb = NULL; 1251 1252 struct otx2_snd_queue *sq; 1252 1253 struct nix_cqe_tx_s *cqe; 1254 + struct netdev_queue *txq; 1253 1255 int processed_cqe = 0; 1254 1256 struct sg_list *sg; 1255 1257 int qidx; ··· 1272 1270 sg = &sq->sg[cqe->comp.sqe_id]; 1273 1271 skb = (struct sk_buff *)sg->skb; 1274 1272 if (skb) { 1273 + tx_bytes += skb->len; 1274 + tx_pkts++; 1275 1275 otx2_dma_unmap_skb_frags(pfvf, sg); 1276 1276 dev_kfree_skb_any(skb); 1277 1277 sg->skb = (u64)NULL; 1278 1278 } 1279 1279 } 1280 1280 1281 + if (likely(tx_pkts)) { 1282 + if (qidx >= pfvf->hw.tx_queues) 1283 + qidx -= pfvf->hw.xdp_queues; 1284 + txq = netdev_get_tx_queue(pfvf->netdev, qidx); 1285 + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); 1286 + } 1281 1287 /* Free CQEs to HW */ 1282 1288 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, 1283 1289 ((u64)cq->cq_idx << 32) | processed_cqe); ··· 1310 1300 err = otx2_sync_mbox_msg(&pfvf->mbox); 1311 1301 mutex_unlock(&pfvf->mbox.lock); 1312 1302 return err; 1303 + } 1304 + 1305 + void otx2_free_pending_sqe(struct otx2_nic *pfvf) 1306 + { 1307 + int tx_pkts = 0, tx_bytes = 0; 1308 + struct sk_buff *skb = NULL; 1309 + struct otx2_snd_queue *sq; 1310 + struct netdev_queue *txq; 1311 + struct sg_list *sg; 1312 + int sq_idx, sqe; 1313 + 1314 + for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { 1315 + sq = &pfvf->qset.sq[sq_idx]; 1316 + for (sqe = 0; sqe < sq->sqe_cnt; sqe++) { 1317 + sg = &sq->sg[sqe]; 1318 + skb = (struct sk_buff *)sg->skb; 1319 + if (skb) { 1320 + tx_bytes += skb->len; 1321 + tx_pkts++; 1322 + otx2_dma_unmap_skb_frags(pfvf, sg); 1323 + dev_kfree_skb_any(skb); 1324 + sg->skb = (u64)NULL; 1325 + } 1326 + } 1327 + 1328 + if (!tx_pkts) 1329 + continue; 1330 + txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); 1331 + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); 1332 + 
tx_pkts = 0; 1333 + tx_bytes = 0; 1334 + } 1313 1335 } 1314 1336 1315 1337 static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+5 -1
drivers/net/ethernet/realtek/r8169_main.c
··· 2582 2582 2583 2583 if (dev->flags & IFF_PROMISC) { 2584 2584 rx_mode |= AcceptAllPhys; 2585 + } else if (!(dev->flags & IFF_MULTICAST)) { 2586 + rx_mode &= ~AcceptMulticast; 2585 2587 } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || 2586 2588 dev->flags & IFF_ALLMULTI || 2587 - tp->mac_version == RTL_GIGA_MAC_VER_35) { 2589 + tp->mac_version == RTL_GIGA_MAC_VER_35 || 2590 + tp->mac_version == RTL_GIGA_MAC_VER_46 || 2591 + tp->mac_version == RTL_GIGA_MAC_VER_48) { 2588 2592 /* accept all multicasts */ 2589 2593 } else if (netdev_mc_empty(dev)) { 2590 2594 rx_mode &= ~AcceptMulticast;
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
··· 259 259 ((val) << XGMAC_PPS_MINIDX(x)) 260 260 #define XGMAC_PPSCMD_START 0x2 261 261 #define XGMAC_PPSCMD_STOP 0x5 262 - #define XGMAC_PPSEN0 BIT(4) 262 + #define XGMAC_PPSENx(x) BIT(4 + (x) * 8) 263 263 #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) 264 264 #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) 265 265 #define XGMAC_TRGTBUSY0 BIT(31)
+13 -1
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
··· 1178 1178 1179 1179 val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START); 1180 1180 val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START); 1181 - val |= XGMAC_PPSEN0; 1181 + 1182 + /* XGMAC Core has 4 PPS outputs at most. 1183 + * 1184 + * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for 1185 + * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default, 1186 + * and can not be switched to Fixed mode, since PPSEN{1,2,3} are 1187 + * read-only reserved to 0. 1188 + * But we always set PPSEN{1,2,3} do not make things worse ;-) 1189 + * 1190 + * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must 1191 + * be set, or the PPS outputs stay in Fixed PPS mode by default. 1192 + */ 1193 + val |= XGMAC_PPSENx(index); 1182 1194 1183 1195 writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); 1184 1196
+2 -2
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1588 1588 1589 1589 /* rx_pause/tx_pause */ 1590 1590 if (rx_pause) 1591 - mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 1591 + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 1592 1592 1593 1593 if (tx_pause) 1594 - mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 1594 + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 1595 1595 1596 1596 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); 1597 1597
+1 -1
drivers/net/ethernet/ti/icssg/icss_iep.c
··· 177 177 if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) 178 178 writel(upper_32_bits(ns), iep->base + 179 179 iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]); 180 - writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); 180 + writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); 181 181 } 182 182 183 183 static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+1 -2
drivers/net/ethernet/xscale/ixp4xx_eth.c
··· 163 163 164 164 /* Information about built-in Ethernet MAC interfaces */ 165 165 struct eth_plat_info { 166 - u8 phy; /* MII PHY ID, 0 - 31 */ 167 166 u8 rxq; /* configurable, currently 0 - 31 only */ 168 167 u8 txreadyq; 169 168 u8 hwaddr[ETH_ALEN]; ··· 1582 1583 if ((err = register_netdev(ndev))) 1583 1584 goto err_phy_dis; 1584 1585 1585 - netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy, 1586 + netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev), 1586 1587 npe_name(port->npe)); 1587 1588 1588 1589 return 0;
+1
drivers/net/mdio/acpi_mdio.c
··· 16 16 17 17 MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); 18 18 MODULE_LICENSE("GPL"); 19 + MODULE_DESCRIPTION("ACPI MDIO bus (Ethernet PHY) accessors"); 19 20 20 21 /** 21 22 * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+1
drivers/net/mdio/fwnode_mdio.c
··· 14 14 15 15 MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); 16 16 MODULE_LICENSE("GPL"); 17 + MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors"); 17 18 18 19 static struct pse_control * 19 20 fwnode_find_pse_control(struct fwnode_handle *fwnode)
+1
drivers/net/mdio/mdio-aspeed.c
··· 205 205 206 206 MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); 207 207 MODULE_LICENSE("GPL"); 208 + MODULE_DESCRIPTION("ASPEED MDIO bus controller");
+1
drivers/net/mdio/mdio-bitbang.c
··· 263 263 EXPORT_SYMBOL(free_mdio_bitbang); 264 264 265 265 MODULE_LICENSE("GPL v2"); 266 + MODULE_DESCRIPTION("Bitbanged MDIO buses");
+1
drivers/net/mdio/of_mdio.c
··· 25 25 26 26 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); 27 27 MODULE_LICENSE("GPL"); 28 + MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors"); 28 29 29 30 /* Extract the clause 22 phy ID from the compatible string of the form 30 31 * ethernet-phy-idAAAA.BBBB */
+1
drivers/net/phy/bcm-phy-ptp.c
··· 942 942 EXPORT_SYMBOL_GPL(bcm_ptp_probe); 943 943 944 944 MODULE_LICENSE("GPL"); 945 + MODULE_DESCRIPTION("Broadcom PHY PTP driver");
+1
drivers/net/phy/bcm87xx.c
··· 223 223 module_phy_driver(bcm87xx_driver); 224 224 225 225 MODULE_LICENSE("GPL v2"); 226 + MODULE_DESCRIPTION("Broadcom BCM87xx PHY driver");
+2
drivers/net/phy/phylink.c
··· 1616 1616 pl->config = config; 1617 1617 if (config->type == PHYLINK_NETDEV) { 1618 1618 pl->netdev = to_net_dev(config->dev); 1619 + netif_carrier_off(pl->netdev); 1619 1620 } else if (config->type == PHYLINK_DEV) { 1620 1621 pl->dev = config->dev; 1621 1622 } else { ··· 3727 3726 module_init(phylink_init); 3728 3727 3729 3728 MODULE_LICENSE("GPL v2"); 3729 + MODULE_DESCRIPTION("phylink models the MAC to optional PHY connection");
+1
drivers/net/phy/sfp.c
··· 3153 3153 MODULE_ALIAS("platform:sfp"); 3154 3154 MODULE_AUTHOR("Russell King"); 3155 3155 MODULE_LICENSE("GPL v2"); 3156 + MODULE_DESCRIPTION("SFP cage support");
+2 -2
drivers/net/ppp/ppp_generic.c
··· 570 570 571 571 /* uprog->len is unsigned short, so no overflow here */ 572 572 fprog.len = uprog->len; 573 - fprog.filter = memdup_user(uprog->filter, 574 - uprog->len * sizeof(struct sock_filter)); 573 + fprog.filter = memdup_array_user(uprog->filter, 574 + uprog->len, sizeof(struct sock_filter)); 575 575 if (IS_ERR(fprog.filter)) 576 576 return ERR_CAST(fprog.filter); 577 577
+12 -11
drivers/ptp/ptp_chardev.c
··· 108 108 container_of(pccontext->clk, struct ptp_clock, clock); 109 109 struct timestamp_event_queue *queue; 110 110 char debugfsname[32]; 111 + unsigned long flags; 111 112 112 113 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 113 114 if (!queue) ··· 120 119 } 121 120 bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS); 122 121 spin_lock_init(&queue->lock); 122 + spin_lock_irqsave(&ptp->tsevqs_lock, flags); 123 123 list_add_tail(&queue->qlist, &ptp->tsevqs); 124 + spin_unlock_irqrestore(&ptp->tsevqs_lock, flags); 124 125 pccontext->private_clkdata = queue; 125 126 126 127 /* Debugfs contents */ ··· 142 139 { 143 140 struct timestamp_event_queue *queue = pccontext->private_clkdata; 144 141 unsigned long flags; 142 + struct ptp_clock *ptp = 143 + container_of(pccontext->clk, struct ptp_clock, clock); 145 144 146 - if (queue) { 147 - debugfs_remove(queue->debugfs_instance); 148 - pccontext->private_clkdata = NULL; 149 - spin_lock_irqsave(&queue->lock, flags); 150 - list_del(&queue->qlist); 151 - spin_unlock_irqrestore(&queue->lock, flags); 152 - bitmap_free(queue->mask); 153 - kfree(queue); 154 - } 145 + debugfs_remove(queue->debugfs_instance); 146 + pccontext->private_clkdata = NULL; 147 + spin_lock_irqsave(&ptp->tsevqs_lock, flags); 148 + list_del(&queue->qlist); 149 + spin_unlock_irqrestore(&ptp->tsevqs_lock, flags); 150 + bitmap_free(queue->mask); 151 + kfree(queue); 155 152 return 0; 156 153 } 157 154 ··· 588 585 free_event: 589 586 kfree(event); 590 587 exit: 591 - if (result < 0) 592 - ptp_release(pccontext); 593 588 return result; 594 589 }
+6 -2
drivers/ptp/ptp_clock.c
··· 179 179 mutex_destroy(&ptp->pincfg_mux); 180 180 mutex_destroy(&ptp->n_vclocks_mux); 181 181 /* Delete first entry */ 182 + spin_lock_irqsave(&ptp->tsevqs_lock, flags); 182 183 tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue, 183 184 qlist); 184 - spin_lock_irqsave(&tsevq->lock, flags); 185 185 list_del(&tsevq->qlist); 186 - spin_unlock_irqrestore(&tsevq->lock, flags); 186 + spin_unlock_irqrestore(&ptp->tsevqs_lock, flags); 187 187 bitmap_free(tsevq->mask); 188 188 kfree(tsevq); 189 189 debugfs_remove(ptp->debugfs_root); ··· 247 247 if (!queue) 248 248 goto no_memory_queue; 249 249 list_add_tail(&queue->qlist, &ptp->tsevqs); 250 + spin_lock_init(&ptp->tsevqs_lock); 250 251 queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL); 251 252 if (!queue->mask) 252 253 goto no_memory_bitmap; ··· 408 407 { 409 408 struct timestamp_event_queue *tsevq; 410 409 struct pps_event_time evt; 410 + unsigned long flags; 411 411 412 412 switch (event->type) { 413 413 ··· 417 415 418 416 case PTP_CLOCK_EXTTS: 419 417 /* Enqueue timestamp on selected queues */ 418 + spin_lock_irqsave(&ptp->tsevqs_lock, flags); 420 419 list_for_each_entry(tsevq, &ptp->tsevqs, qlist) { 421 420 if (test_bit((unsigned int)event->index, tsevq->mask)) 422 421 enqueue_external_timestamp(tsevq, event); 423 422 } 423 + spin_unlock_irqrestore(&ptp->tsevqs_lock, flags); 424 424 wake_up_interruptible(&ptp->tsev_wq); 425 425 break; 426 426
+1
drivers/ptp/ptp_private.h
··· 44 44 struct pps_device *pps_source; 45 45 long dialed_frequency; /* remembers the frequency adjustment */ 46 46 struct list_head tsevqs; /* timestamp fifo list */ 47 + spinlock_t tsevqs_lock; /* protects tsevqs from concurrent access */ 47 48 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */ 48 49 wait_queue_head_t tsev_wq; 49 50 int defunct; /* tells readers to go away when clock is being removed */
+1 -1
drivers/s390/net/qeth_core_main.c
··· 3675 3675 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3676 3676 { 3677 3677 /* 3678 - * check if weed have to switch to non-packing mode or if 3678 + * check if we have to switch to non-packing mode or if 3679 3679 * we have to get a pci flag out on the queue 3680 3680 */ 3681 3681 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+11
include/linux/btf.h
··· 84 84 */ 85 85 #define __bpf_kfunc __used noinline 86 86 87 + #define __bpf_kfunc_start_defs() \ 88 + __diag_push(); \ 89 + __diag_ignore_all("-Wmissing-declarations", \ 90 + "Global kfuncs as their definitions will be in BTF");\ 91 + __diag_ignore_all("-Wmissing-prototypes", \ 92 + "Global kfuncs as their definitions will be in BTF") 93 + 94 + #define __bpf_kfunc_end_defs() __diag_pop() 95 + #define __bpf_hook_start() __bpf_kfunc_start_defs() 96 + #define __bpf_hook_end() __bpf_kfunc_end_defs() 97 + 87 98 /* 88 99 * Return the name of the passed struct, if exists, or halt the build if for 89 100 * example the structure gets renamed. In this way, developers have to revisit
+2 -2
include/linux/ethtool.h
··· 1045 1045 1046 1046 /** 1047 1047 * ethtool_sprintf - Write formatted string to ethtool string data 1048 - * @data: Pointer to start of string to update 1048 + * @data: Pointer to a pointer to the start of string to update 1049 1049 * @fmt: Format of string to write 1050 1050 * 1051 - * Write formatted string to data. Update data to point at start of 1051 + * Write formatted string to *data. Update *data to point at start of 1052 1052 * next string. 1053 1053 */ 1054 1054 extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+4 -2
include/linux/idr.h
··· 200 200 */ 201 201 #define idr_for_each_entry_ul(idr, entry, tmp, id) \ 202 202 for (tmp = 0, id = 0; \ 203 - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ 203 + ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ 204 204 tmp = id, ++id) 205 205 206 206 /** ··· 224 224 * @id: Entry ID. 225 225 * 226 226 * Continue to iterate over entries, continuing after the current position. 227 + * After normal termination @entry is left with the value NULL. This 228 + * is convenient for a "not found" value. 227 229 */ 228 230 #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ 229 231 for (tmp = id; \ 230 - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ 232 + ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ 231 233 tmp = id, ++id) 232 234 233 235 /*
+1 -1
include/linux/tcp.h
··· 152 152 u64 snt_synack; /* first SYNACK sent time */ 153 153 bool tfo_listener; 154 154 bool is_mptcp; 155 - s8 req_usec_ts; 155 + bool req_usec_ts; 156 156 #if IS_ENABLED(CONFIG_MPTCP) 157 157 bool drop_req; 158 158 #endif
+1 -1
include/net/flow.h
··· 40 40 #define FLOWI_FLAG_KNOWN_NH 0x02 41 41 __u32 flowic_secid; 42 42 kuid_t flowic_uid; 43 - struct flowi_tunnel flowic_tun_key; 44 43 __u32 flowic_multipath_hash; 44 + struct flowi_tunnel flowic_tun_key; 45 45 }; 46 46 47 47 union flowi_uli {
+19 -15
include/net/netfilter/nf_conntrack_act_ct.h
··· 20 20 #endif 21 21 } 22 22 23 - static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct) 24 - { 25 - #if IS_ENABLED(CONFIG_NET_ACT_CT) 26 - struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT); 27 - 28 - if (act_ct) 29 - return act_ct; 30 - 31 - act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC); 32 - return act_ct; 33 - #else 34 - return NULL; 35 - #endif 36 - } 37 - 38 23 static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct, 39 24 enum ip_conntrack_info ctinfo) 40 25 { ··· 29 44 act_ct_ext = nf_conn_act_ct_ext_find(ct); 30 45 if (dev_net(skb->dev) == &init_net && act_ct_ext) 31 46 act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex; 47 + #endif 48 + } 49 + 50 + static inline struct 51 + nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb, 52 + struct nf_conn *ct, 53 + enum ip_conntrack_info ctinfo) 54 + { 55 + #if IS_ENABLED(CONFIG_NET_ACT_CT) 56 + struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT); 57 + 58 + if (act_ct) 59 + return act_ct; 60 + 61 + act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC); 62 + nf_conn_act_ct_ext_fill(skb, ct, ctinfo); 63 + return act_ct; 64 + #else 65 + return NULL; 32 66 #endif 33 67 } 34 68
+7 -6
include/net/tcp_ao.h
··· 124 124 #define tcp_hash_fail(msg, family, skb, fmt, ...) \ 125 125 do { \ 126 126 const struct tcphdr *th = tcp_hdr(skb); \ 127 - char hdr_flags[5] = {}; \ 127 + char hdr_flags[6]; \ 128 128 char *f = hdr_flags; \ 129 129 \ 130 130 if (th->fin) \ ··· 133 133 *f++ = 'S'; \ 134 134 if (th->rst) \ 135 135 *f++ = 'R'; \ 136 + if (th->psh) \ 137 + *f++ = 'P'; \ 136 138 if (th->ack) \ 137 - *f++ = 'A'; \ 138 - if (f != hdr_flags) \ 139 - *f = ' '; \ 139 + *f++ = '.'; \ 140 + *f = 0; \ 140 141 if ((family) == AF_INET) { \ 141 - net_info_ratelimited("%s for (%pI4, %d)->(%pI4, %d) %s" fmt "\n", \ 142 + net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \ 142 143 msg, &ip_hdr(skb)->saddr, ntohs(th->source), \ 143 144 &ip_hdr(skb)->daddr, ntohs(th->dest), \ 144 145 hdr_flags, ##__VA_ARGS__); \ 145 146 } else { \ 146 - net_info_ratelimited("%s for [%pI6c]:%u->[%pI6c]:%u %s" fmt "\n", \ 147 + net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \ 147 148 msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \ 148 149 &ipv6_hdr(skb)->daddr, ntohs(th->dest), \ 149 150 hdr_flags, ##__VA_ARGS__); \
+3 -3
include/uapi/linux/nfsd_netlink.h
··· 3 3 /* Documentation/netlink/specs/nfsd.yaml */ 4 4 /* YNL-GEN uapi header */ 5 5 6 - #ifndef _UAPI_LINUX_NFSD_H 7 - #define _UAPI_LINUX_NFSD_H 6 + #ifndef _UAPI_LINUX_NFSD_NETLINK_H 7 + #define _UAPI_LINUX_NFSD_NETLINK_H 8 8 9 9 #define NFSD_FAMILY_NAME "nfsd" 10 10 #define NFSD_FAMILY_VERSION 1 ··· 36 36 NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1) 37 37 }; 38 38 39 - #endif /* _UAPI_LINUX_NFSD_H */ 39 + #endif /* _UAPI_LINUX_NFSD_NETLINK_H */
+2 -4
kernel/bpf/bpf_iter.c
··· 782 782 int end; /* final value, exclusive */ 783 783 } __aligned(8); 784 784 785 - __diag_push(); 786 - __diag_ignore_all("-Wmissing-prototypes", 787 - "Global functions as their definitions will be in vmlinux BTF"); 785 + __bpf_kfunc_start_defs(); 788 786 789 787 __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) 790 788 { ··· 841 843 s->cur = s->end = 0; 842 844 } 843 845 844 - __diag_pop(); 846 + __bpf_kfunc_end_defs();
+3 -5
kernel/bpf/cgroup_iter.c
··· 282 282 .ctx_arg_info_size = 1, 283 283 .ctx_arg_info = { 284 284 { offsetof(struct bpf_iter__cgroup, cgroup), 285 - PTR_TO_BTF_ID_OR_NULL }, 285 + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, 286 286 }, 287 287 .seq_info = &cgroup_iter_seq_info, 288 288 }; ··· 305 305 unsigned int flags; 306 306 } __attribute__((aligned(8))); 307 307 308 - __diag_push(); 309 - __diag_ignore_all("-Wmissing-prototypes", 310 - "Global functions as their definitions will be in vmlinux BTF"); 308 + __bpf_kfunc_start_defs(); 311 309 312 310 __bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it, 313 311 struct cgroup_subsys_state *start, unsigned int flags) ··· 356 358 { 357 359 } 358 360 359 - __diag_pop(); 361 + __bpf_kfunc_end_defs();
+2 -4
kernel/bpf/cpumask.c
··· 34 34 return cpu < nr_cpu_ids; 35 35 } 36 36 37 - __diag_push(); 38 - __diag_ignore_all("-Wmissing-prototypes", 39 - "Global kfuncs as their definitions will be in BTF"); 37 + __bpf_kfunc_start_defs(); 40 38 41 39 /** 42 40 * bpf_cpumask_create() - Create a mutable BPF cpumask. ··· 405 407 return cpumask_any_and_distribute(src1, src2); 406 408 } 407 409 408 - __diag_pop(); 410 + __bpf_kfunc_end_defs(); 409 411 410 412 BTF_SET8_START(cpumask_kfunc_btf_ids) 411 413 BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
+23 -16
kernel/bpf/helpers.c
··· 1177 1177 ret = -EBUSY; 1178 1178 goto out; 1179 1179 } 1180 - if (!atomic64_read(&map->usercnt)) { 1181 - /* maps with timers must be either held by user space 1182 - * or pinned in bpffs. 1183 - */ 1184 - ret = -EPERM; 1185 - goto out; 1186 - } 1187 1180 /* allocate hrtimer via map_kmalloc to use memcg accounting */ 1188 1181 t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); 1189 1182 if (!t) { ··· 1189 1196 rcu_assign_pointer(t->callback_fn, NULL); 1190 1197 hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); 1191 1198 t->timer.function = bpf_timer_cb; 1192 - timer->timer = t; 1199 + WRITE_ONCE(timer->timer, t); 1200 + /* Guarantee the order between timer->timer and map->usercnt. So 1201 + * when there are concurrent uref release and bpf timer init, either 1202 + * bpf_timer_cancel_and_free() called by uref release reads a no-NULL 1203 + * timer or atomic64_read() below returns a zero usercnt. 1204 + */ 1205 + smp_mb(); 1206 + if (!atomic64_read(&map->usercnt)) { 1207 + /* maps with timers must be either held by user space 1208 + * or pinned in bpffs. 1209 + */ 1210 + WRITE_ONCE(timer->timer, NULL); 1211 + kfree(t); 1212 + ret = -EPERM; 1213 + } 1193 1214 out: 1194 1215 __bpf_spin_unlock_irqrestore(&timer->lock); 1195 1216 return ret; ··· 1381 1374 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use 1382 1375 * this timer, since it won't be initialized. 
1383 1376 */ 1384 - timer->timer = NULL; 1377 + WRITE_ONCE(timer->timer, NULL); 1385 1378 out: 1386 1379 __bpf_spin_unlock_irqrestore(&timer->lock); 1387 1380 if (!t) ··· 1893 1886 } 1894 1887 } 1895 1888 1896 - __diag_push(); 1897 - __diag_ignore_all("-Wmissing-prototypes", 1898 - "Global functions as their definitions will be in vmlinux BTF"); 1889 + __bpf_kfunc_start_defs(); 1899 1890 1900 1891 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 1901 1892 { ··· 2510 2505 WARN(1, "A call to BPF exception callback should never return\n"); 2511 2506 } 2512 2507 2513 - __diag_pop(); 2508 + __bpf_kfunc_end_defs(); 2514 2509 2515 2510 BTF_SET8_START(generic_btf_ids) 2516 2511 #ifdef CONFIG_KEXEC_CORE ··· 2569 2564 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) 2570 2565 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) 2571 2566 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) 2567 + #ifdef CONFIG_CGROUPS 2572 2568 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) 2573 2569 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) 2574 2570 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) 2575 - BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 2576 - BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 2577 - BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 2578 2571 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 2579 2572 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) 2580 2573 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) 2574 + #endif 2575 + BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 2576 + BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 2577 + BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 2581 2578 BTF_ID_FLAGS(func, bpf_dynptr_adjust) 
2582 2579 BTF_ID_FLAGS(func, bpf_dynptr_is_null) 2583 2580 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
+2 -4
kernel/bpf/map_iter.c
··· 193 193 194 194 late_initcall(bpf_map_iter_init); 195 195 196 - __diag_push(); 197 - __diag_ignore_all("-Wmissing-prototypes", 198 - "Global functions as their definitions will be in vmlinux BTF"); 196 + __bpf_kfunc_start_defs(); 199 197 200 198 __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) 201 199 { ··· 211 213 return ret; 212 214 } 213 215 214 - __diag_pop(); 216 + __bpf_kfunc_end_defs(); 215 217 216 218 BTF_SET8_START(bpf_map_iter_kfunc_ids) 217 219 BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
+11 -13
kernel/bpf/task_iter.c
··· 704 704 .ctx_arg_info_size = 1, 705 705 .ctx_arg_info = { 706 706 { offsetof(struct bpf_iter__task, task), 707 - PTR_TO_BTF_ID_OR_NULL }, 707 + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, 708 708 }, 709 709 .seq_info = &task_seq_info, 710 710 .fill_link_info = bpf_iter_fill_link_info, ··· 822 822 struct bpf_iter_task_vma_kern_data *data; 823 823 } __attribute__((aligned(8))); 824 824 825 - __diag_push(); 826 - __diag_ignore_all("-Wmissing-prototypes", 827 - "Global functions as their definitions will be in vmlinux BTF"); 825 + __bpf_kfunc_start_defs(); 828 826 829 827 __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, 830 828 struct task_struct *task, u64 addr) ··· 888 890 } 889 891 } 890 892 891 - __diag_pop(); 893 + __bpf_kfunc_end_defs(); 894 + 895 + #ifdef CONFIG_CGROUPS 892 896 893 897 struct bpf_iter_css_task { 894 898 __u64 __opaque[1]; ··· 900 900 struct css_task_iter *css_it; 901 901 } __attribute__((aligned(8))); 902 902 903 - __diag_push(); 904 - __diag_ignore_all("-Wmissing-prototypes", 905 - "Global functions as their definitions will be in vmlinux BTF"); 903 + __bpf_kfunc_start_defs(); 906 904 907 905 __bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it, 908 906 struct cgroup_subsys_state *css, unsigned int flags) ··· 946 948 bpf_mem_free(&bpf_global_ma, kit->css_it); 947 949 } 948 950 949 - __diag_pop(); 951 + __bpf_kfunc_end_defs(); 952 + 953 + #endif /* CONFIG_CGROUPS */ 950 954 951 955 struct bpf_iter_task { 952 956 __u64 __opaque[3]; ··· 969 969 BPF_TASK_ITER_PROC_THREADS 970 970 }; 971 971 972 - __diag_push(); 973 - __diag_ignore_all("-Wmissing-prototypes", 974 - "Global functions as their definitions will be in vmlinux BTF"); 972 + __bpf_kfunc_start_defs(); 975 973 976 974 __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it, 977 975 struct task_struct *task__nullable, unsigned int flags) ··· 1039 1041 { 1040 1042 } 1041 1043 1042 - __diag_pop(); 1044 + __bpf_kfunc_end_defs(); 1043 1045 1044 1046 
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); 1045 1047
+27 -6
kernel/bpf/verifier.c
··· 3742 3742 if (class == BPF_ALU || class == BPF_ALU64) { 3743 3743 if (!bt_is_reg_set(bt, dreg)) 3744 3744 return 0; 3745 - if (opcode == BPF_MOV) { 3745 + if (opcode == BPF_END || opcode == BPF_NEG) { 3746 + /* sreg is reserved and unused 3747 + * dreg still need precision before this insn 3748 + */ 3749 + return 0; 3750 + } else if (opcode == BPF_MOV) { 3746 3751 if (BPF_SRC(insn->code) == BPF_X) { 3747 3752 /* dreg = sreg or dreg = (s8, s16, s32)sreg 3748 3753 * dreg needs precision after this insn ··· 4679 4674 insn->imm != 0 && env->bpf_capable) { 4680 4675 struct bpf_reg_state fake_reg = {}; 4681 4676 4682 - __mark_reg_known(&fake_reg, (u32)insn->imm); 4677 + __mark_reg_known(&fake_reg, insn->imm); 4683 4678 fake_reg.type = SCALAR_VALUE; 4684 4679 save_register_state(state, spi, &fake_reg, size); 4685 4680 } else if (reg && is_spillable_regtype(reg->type)) { ··· 5393 5388 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ 5394 5389 BTF_SET_START(rcu_protected_types) 5395 5390 BTF_ID(struct, prog_test_ref_kfunc) 5391 + #ifdef CONFIG_CGROUPS 5396 5392 BTF_ID(struct, cgroup) 5393 + #endif 5397 5394 BTF_ID(struct, bpf_cpumask) 5398 5395 BTF_ID(struct, task_struct) 5399 5396 BTF_SET_END(rcu_protected_types) ··· 10842 10835 BTF_ID(func, bpf_percpu_obj_new_impl) 10843 10836 BTF_ID(func, bpf_percpu_obj_drop_impl) 10844 10837 BTF_ID(func, bpf_throw) 10838 + #ifdef CONFIG_CGROUPS 10845 10839 BTF_ID(func, bpf_iter_css_task_new) 10840 + #endif 10846 10841 BTF_SET_END(special_kfunc_set) 10847 10842 10848 10843 BTF_ID_LIST(special_kfunc_list) ··· 10870 10861 BTF_ID(func, bpf_percpu_obj_new_impl) 10871 10862 BTF_ID(func, bpf_percpu_obj_drop_impl) 10872 10863 BTF_ID(func, bpf_throw) 10864 + #ifdef CONFIG_CGROUPS 10873 10865 BTF_ID(func, bpf_iter_css_task_new) 10866 + #else 10867 + BTF_ID_UNUSED 10868 + #endif 10874 10869 10875 10870 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) 10876 10871 { ··· 11407 11394 
&meta->arg_rbtree_root.field); 11408 11395 } 11409 11396 11397 + /* 11398 + * css_task iter allowlist is needed to avoid dead locking on css_set_lock. 11399 + * LSM hooks and iters (both sleepable and non-sleepable) are safe. 11400 + * Any sleepable progs are also safe since bpf_check_attach_target() enforce 11401 + * them can only be attached to some specific hook points. 11402 + */ 11410 11403 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) 11411 11404 { 11412 11405 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); ··· 11420 11401 switch (prog_type) { 11421 11402 case BPF_PROG_TYPE_LSM: 11422 11403 return true; 11423 - case BPF_TRACE_ITER: 11424 - return env->prog->aux->sleepable; 11404 + case BPF_PROG_TYPE_TRACING: 11405 + if (env->prog->expected_attach_type == BPF_TRACE_ITER) 11406 + return true; 11407 + fallthrough; 11425 11408 default: 11426 - return false; 11409 + return env->prog->aux->sleepable; 11427 11410 } 11428 11411 } 11429 11412 ··· 11684 11663 case KF_ARG_PTR_TO_ITER: 11685 11664 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { 11686 11665 if (!check_css_task_iter_allowlist(env)) { 11687 - verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n"); 11666 + verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n"); 11688 11667 return -EINVAL; 11689 11668 } 11690 11669 }
+3 -6
kernel/cgroup/rstat.c
··· 156 156 * optimize away the callsite. Therefore, __weak is needed to ensure that the 157 157 * call is still emitted, by telling the compiler that we don't know what the 158 158 * function might eventually be. 159 - * 160 - * __diag_* below are needed to dismiss the missing prototype warning. 161 159 */ 162 - __diag_push(); 163 - __diag_ignore_all("-Wmissing-prototypes", 164 - "kfuncs which will be used in BPF programs"); 160 + 161 + __bpf_hook_start(); 165 162 166 163 __weak noinline void bpf_rstat_flush(struct cgroup *cgrp, 167 164 struct cgroup *parent, int cpu) 168 165 { 169 166 } 170 167 171 - __diag_pop(); 168 + __bpf_hook_end(); 172 169 173 170 /* see cgroup_rstat_flush() */ 174 171 static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+2 -4
kernel/trace/bpf_trace.c
··· 1252 1252 }; 1253 1253 1254 1254 #ifdef CONFIG_KEYS 1255 - __diag_push(); 1256 - __diag_ignore_all("-Wmissing-prototypes", 1257 - "kfuncs which will be used in BPF programs"); 1255 + __bpf_kfunc_start_defs(); 1258 1256 1259 1257 /** 1260 1258 * bpf_lookup_user_key - lookup a key by its serial ··· 1402 1404 } 1403 1405 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ 1404 1406 1405 - __diag_pop(); 1407 + __bpf_kfunc_end_defs(); 1406 1408 1407 1409 BTF_SET8_START(key_sig_kfunc_set) 1408 1410 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+3 -4
net/bpf/test_run.c
··· 503 503 * architecture dependent calling conventions. 7+ can be supported in the 504 504 * future. 505 505 */ 506 - __diag_push(); 507 - __diag_ignore_all("-Wmissing-prototypes", 508 - "Global functions as their definitions will be in vmlinux BTF"); 506 + __bpf_kfunc_start_defs(); 507 + 509 508 __bpf_kfunc int bpf_fentry_test1(int a) 510 509 { 511 510 return a + 1; ··· 604 605 { 605 606 } 606 607 607 - __diag_pop(); 608 + __bpf_kfunc_end_defs(); 608 609 609 610 BTF_SET8_START(bpf_test_modify_return_ids) 610 611 BTF_ID_FLAGS(func, bpf_modify_return_test)
+1
net/bridge/netfilter/ebtable_broute.c
··· 135 135 module_init(ebtable_broute_init); 136 136 module_exit(ebtable_broute_fini); 137 137 MODULE_LICENSE("GPL"); 138 + MODULE_DESCRIPTION("Force packets to be routed instead of bridged");
+1
net/bridge/netfilter/ebtable_filter.c
··· 116 116 module_init(ebtable_filter_init); 117 117 module_exit(ebtable_filter_fini); 118 118 MODULE_LICENSE("GPL"); 119 + MODULE_DESCRIPTION("ebtables legacy filter table");
+1
net/bridge/netfilter/ebtable_nat.c
··· 116 116 module_init(ebtable_nat_init); 117 117 module_exit(ebtable_nat_fini); 118 118 MODULE_LICENSE("GPL"); 119 + MODULE_DESCRIPTION("ebtables legacy stateless nat table");
+1
net/bridge/netfilter/ebtables.c
··· 2595 2595 module_init(ebtables_init); 2596 2596 module_exit(ebtables_fini); 2597 2597 MODULE_LICENSE("GPL"); 2598 + MODULE_DESCRIPTION("ebtables legacy core");
+1
net/bridge/netfilter/nf_conntrack_bridge.c
··· 416 416 417 417 MODULE_ALIAS("nf_conntrack-" __stringify(AF_BRIDGE)); 418 418 MODULE_LICENSE("GPL"); 419 + MODULE_DESCRIPTION("Bridge IPv4 and IPv6 connection tracking");
+4 -9
net/core/filter.c
··· 11767 11767 return func; 11768 11768 } 11769 11769 11770 - __diag_push(); 11771 - __diag_ignore_all("-Wmissing-prototypes", 11772 - "Global functions as their definitions will be in vmlinux BTF"); 11770 + __bpf_kfunc_start_defs(); 11773 11771 __bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, 11774 11772 struct bpf_dynptr_kern *ptr__uninit) 11775 11773 { ··· 11814 11816 11815 11817 return 0; 11816 11818 } 11817 - __diag_pop(); 11819 + __bpf_kfunc_end_defs(); 11818 11820 11819 11821 int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, 11820 11822 struct bpf_dynptr_kern *ptr__uninit) ··· 11877 11879 } 11878 11880 late_initcall(bpf_kfunc_init); 11879 11881 11880 - /* Disables missing prototype warnings */ 11881 - __diag_push(); 11882 - __diag_ignore_all("-Wmissing-prototypes", 11883 - "Global functions as their definitions will be in vmlinux BTF"); 11882 + __bpf_kfunc_start_defs(); 11884 11883 11885 11884 /* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code. 11886 11885 * ··· 11911 11916 return sk->sk_prot->diag_destroy(sk, ECONNABORTED); 11912 11917 } 11913 11918 11914 - __diag_pop() 11919 + __bpf_kfunc_end_defs(); 11915 11920 11916 11921 BTF_SET8_START(bpf_sk_iter_kfunc_ids) 11917 11922 BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
+5 -1
net/core/page_pool.c
··· 217 217 return -ENOMEM; 218 218 #endif 219 219 220 - if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) 220 + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) { 221 + #ifdef CONFIG_PAGE_POOL_STATS 222 + free_percpu(pool->recycle_stats); 223 + #endif 221 224 return -ENOMEM; 225 + } 222 226 223 227 atomic_set(&pool->pages_state_release_cnt, 0); 224 228
+2 -4
net/core/xdp.c
··· 696 696 return nxdpf; 697 697 } 698 698 699 - __diag_push(); 700 - __diag_ignore_all("-Wmissing-prototypes", 701 - "Global functions as their definitions will be in vmlinux BTF"); 699 + __bpf_kfunc_start_defs(); 702 700 703 701 /** 704 702 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp. ··· 736 738 return -EOPNOTSUPP; 737 739 } 738 740 739 - __diag_pop(); 741 + __bpf_kfunc_end_defs(); 740 742 741 743 BTF_SET8_START(xdp_metadata_kfunc_ids) 742 744 #define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
+3 -3
net/dccp/ipv4.c
··· 629 629 if (dccp_parse_options(sk, dreq, skb)) 630 630 goto drop_and_free; 631 631 632 - if (security_inet_conn_request(sk, skb, req)) 633 - goto drop_and_free; 634 - 635 632 ireq = inet_rsk(req); 636 633 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 637 634 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 638 635 ireq->ir_mark = inet_request_mark(sk, skb); 639 636 ireq->ireq_family = AF_INET; 640 637 ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if); 638 + 639 + if (security_inet_conn_request(sk, skb, req)) 640 + goto drop_and_free; 641 641 642 642 /* 643 643 * Step 3: Process LISTEN state
+3 -3
net/dccp/ipv6.c
··· 360 360 if (dccp_parse_options(sk, dreq, skb)) 361 361 goto drop_and_free; 362 362 363 - if (security_inet_conn_request(sk, skb, req)) 364 - goto drop_and_free; 365 - 366 363 ireq = inet_rsk(req); 367 364 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 368 365 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 369 366 ireq->ireq_family = AF_INET6; 370 367 ireq->ir_mark = inet_request_mark(sk, skb); 368 + 369 + if (security_inet_conn_request(sk, skb, req)) 370 + goto drop_and_free; 371 371 372 372 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || 373 373 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+1 -3
net/hsr/hsr_forward.c
··· 342 342 skb = skb_copy_expand(frame->skb_std, 0, 343 343 skb_tailroom(frame->skb_std) + HSR_HLEN, 344 344 GFP_ATOMIC); 345 - prp_fill_rct(skb, frame, port); 346 - 347 - return skb; 345 + return prp_fill_rct(skb, frame, port); 348 346 } 349 347 350 348 static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+2 -4
net/ipv4/fou_bpf.c
··· 22 22 FOU_BPF_ENCAP_GUE, 23 23 }; 24 24 25 - __diag_push(); 26 - __diag_ignore_all("-Wmissing-prototypes", 27 - "Global functions as their definitions will be in BTF"); 25 + __bpf_kfunc_start_defs(); 28 26 29 27 /* bpf_skb_set_fou_encap - Set FOU encap parameters 30 28 * ··· 98 100 return 0; 99 101 } 100 102 101 - __diag_pop() 103 + __bpf_kfunc_end_defs(); 102 104 103 105 BTF_SET8_START(fou_kfunc_set) 104 106 BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
+1
net/ipv4/netfilter/iptable_nat.c
··· 170 170 module_exit(iptable_nat_exit); 171 171 172 172 MODULE_LICENSE("GPL"); 173 + MODULE_DESCRIPTION("iptables legacy nat table");
+1
net/ipv4/netfilter/iptable_raw.c
··· 108 108 module_init(iptable_raw_init); 109 109 module_exit(iptable_raw_fini); 110 110 MODULE_LICENSE("GPL"); 111 + MODULE_DESCRIPTION("iptables legacy raw table");
+1
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 186 186 module_exit(nf_defrag_fini); 187 187 188 188 MODULE_LICENSE("GPL"); 189 + MODULE_DESCRIPTION("IPv4 defragmentation support");
+1
net/ipv4/netfilter/nf_reject_ipv4.c
··· 336 336 EXPORT_SYMBOL_GPL(nf_send_unreach); 337 337 338 338 MODULE_LICENSE("GPL"); 339 + MODULE_DESCRIPTION("IPv4 packet rejection core");
+1 -1
net/ipv4/syncookies.c
··· 306 306 treq->af_specific = af_ops; 307 307 308 308 treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield; 309 - treq->req_usec_ts = -1; 309 + treq->req_usec_ts = false; 310 310 311 311 #if IS_ENABLED(CONFIG_MPTCP) 312 312 treq->is_mptcp = sk_is_mptcp(sk);
+3 -2
net/ipv4/tcp_ao.c
··· 1315 1315 key->maclen = cmd->maclen ?: 12; /* 12 is the default in RFC5925 */ 1316 1316 1317 1317 /* Check: maclen + tcp-ao header <= (MAX_TCP_OPTION_SPACE - mss 1318 - * - tstamp - wscale - sackperm), 1318 + * - tstamp (including sackperm) 1319 + * - wscale), 1319 1320 * see tcp_syn_options(), tcp_synack_options(), commit 33ad798c924b. 1320 1321 * 1321 1322 * In order to allow D-SACK with TCP-AO, the header size should be: ··· 1343 1342 * large to leave sufficient option space. 1344 1343 */ 1345 1344 syn_tcp_option_space = MAX_TCP_OPTION_SPACE; 1345 + syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED; 1346 1346 syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED; 1347 1347 syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED; 1348 - syn_tcp_option_space -= TCPOLEN_SACKPERM_ALIGNED; 1349 1348 if (tcp_ao_len(key) > syn_tcp_option_space) { 1350 1349 err = -EMSGSIZE; 1351 1350 goto err_kfree;
+4 -3
net/ipv4/tcp_input.c
··· 7115 7115 req->syncookie = want_cookie; 7116 7116 tcp_rsk(req)->af_specific = af_ops; 7117 7117 tcp_rsk(req)->ts_off = 0; 7118 - tcp_rsk(req)->req_usec_ts = -1; 7118 + tcp_rsk(req)->req_usec_ts = false; 7119 7119 #if IS_ENABLED(CONFIG_MPTCP) 7120 7120 tcp_rsk(req)->is_mptcp = 0; 7121 7121 #endif ··· 7143 7143 if (!dst) 7144 7144 goto drop_and_free; 7145 7145 7146 - if (tmp_opt.tstamp_ok) 7146 + if (tmp_opt.tstamp_ok) { 7147 + tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst); 7147 7148 tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb); 7148 - 7149 + } 7149 7150 if (!want_cookie && !isn) { 7150 7151 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog); 7151 7152
+39 -33
net/ipv4/tcp_output.c
··· 601 601 } 602 602 #endif 603 603 604 + static __be32 *process_tcp_ao_options(struct tcp_sock *tp, 605 + const struct tcp_request_sock *tcprsk, 606 + struct tcp_out_options *opts, 607 + struct tcp_key *key, __be32 *ptr) 608 + { 609 + #ifdef CONFIG_TCP_AO 610 + u8 maclen = tcp_ao_maclen(key->ao_key); 611 + 612 + if (tcprsk) { 613 + u8 aolen = maclen + sizeof(struct tcp_ao_hdr); 614 + 615 + *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) | 616 + (tcprsk->ao_keyid << 8) | 617 + (tcprsk->ao_rcv_next)); 618 + } else { 619 + struct tcp_ao_key *rnext_key; 620 + struct tcp_ao_info *ao_info; 621 + 622 + ao_info = rcu_dereference_check(tp->ao_info, 623 + lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); 624 + rnext_key = READ_ONCE(ao_info->rnext_key); 625 + if (WARN_ON_ONCE(!rnext_key)) 626 + return ptr; 627 + *ptr++ = htonl((TCPOPT_AO << 24) | 628 + (tcp_ao_len(key->ao_key) << 16) | 629 + (key->ao_key->sndid << 8) | 630 + (rnext_key->rcvid)); 631 + } 632 + opts->hash_location = (__u8 *)ptr; 633 + ptr += maclen / sizeof(*ptr); 634 + if (unlikely(maclen % sizeof(*ptr))) { 635 + memset(ptr, TCPOPT_NOP, sizeof(*ptr)); 636 + ptr++; 637 + } 638 + #endif 639 + return ptr; 640 + } 641 + 604 642 /* Write previously computed TCP options to the packet. 
605 643 * 606 644 * Beware: Something in the Internet is very sensitive to the ordering of ··· 667 629 opts->hash_location = (__u8 *)ptr; 668 630 ptr += 4; 669 631 } else if (tcp_key_is_ao(key)) { 670 - #ifdef CONFIG_TCP_AO 671 - u8 maclen = tcp_ao_maclen(key->ao_key); 672 - 673 - if (tcprsk) { 674 - u8 aolen = maclen + sizeof(struct tcp_ao_hdr); 675 - 676 - *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) | 677 - (tcprsk->ao_keyid << 8) | 678 - (tcprsk->ao_rcv_next)); 679 - } else { 680 - struct tcp_ao_key *rnext_key; 681 - struct tcp_ao_info *ao_info; 682 - 683 - ao_info = rcu_dereference_check(tp->ao_info, 684 - lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); 685 - rnext_key = READ_ONCE(ao_info->rnext_key); 686 - if (WARN_ON_ONCE(!rnext_key)) 687 - goto out_ao; 688 - *ptr++ = htonl((TCPOPT_AO << 24) | 689 - (tcp_ao_len(key->ao_key) << 16) | 690 - (key->ao_key->sndid << 8) | 691 - (rnext_key->rcvid)); 692 - } 693 - opts->hash_location = (__u8 *)ptr; 694 - ptr += maclen / sizeof(*ptr); 695 - if (unlikely(maclen % sizeof(*ptr))) { 696 - memset(ptr, TCPOPT_NOP, sizeof(*ptr)); 697 - ptr++; 698 - } 699 - out_ao: 700 - #endif 632 + ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr); 701 633 } 702 634 if (unlikely(opts->mss)) { 703 635 *ptr++ = htonl((TCPOPT_MSS << 24) | ··· 3701 3693 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); 3702 3694 3703 3695 memset(&opts, 0, sizeof(opts)); 3704 - if (tcp_rsk(req)->req_usec_ts < 0) 3705 - tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst); 3706 3696 now = tcp_clock_ns(); 3707 3697 #ifdef CONFIG_SYN_COOKIES 3708 3698 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
+4 -4
net/ipv4/tcp_sigpool.c
··· 231 231 */ 232 232 void tcp_sigpool_release(unsigned int id) 233 233 { 234 - if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) 234 + if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) 235 235 return; 236 236 237 237 /* slow-path */ ··· 245 245 */ 246 246 void tcp_sigpool_get(unsigned int id) 247 247 { 248 - if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) 248 + if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) 249 249 return; 250 250 kref_get(&cpool[id].kref); 251 251 } ··· 256 256 struct crypto_ahash *hash; 257 257 258 258 rcu_read_lock_bh(); 259 - if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) { 259 + if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) { 260 260 rcu_read_unlock_bh(); 261 261 return -EINVAL; 262 262 } ··· 301 301 */ 302 302 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len) 303 303 { 304 - if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) 304 + if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) 305 305 return -EINVAL; 306 306 307 307 return strscpy(buf, cpool[id].alg, buf_len);
+1
net/ipv6/netfilter/ip6table_nat.c
··· 170 170 module_exit(ip6table_nat_exit); 171 171 172 172 MODULE_LICENSE("GPL"); 173 + MODULE_DESCRIPTION("Ip6tables legacy nat table");
+1
net/ipv6/netfilter/ip6table_raw.c
··· 106 106 module_init(ip6table_raw_init); 107 107 module_exit(ip6table_raw_fini); 108 108 MODULE_LICENSE("GPL"); 109 + MODULE_DESCRIPTION("Ip6tables legacy raw table");
+1
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
··· 182 182 module_exit(nf_defrag_fini); 183 183 184 184 MODULE_LICENSE("GPL"); 185 + MODULE_DESCRIPTION("IPv6 defragmentation support");
+1
net/ipv6/netfilter/nf_reject_ipv6.c
··· 413 413 EXPORT_SYMBOL_GPL(nf_send_unreach6); 414 414 415 415 MODULE_LICENSE("GPL"); 416 + MODULE_DESCRIPTION("IPv6 packet rejection core");
+4 -3
net/ipv6/syncookies.c
··· 181 181 treq = tcp_rsk(req); 182 182 treq->tfo_listener = false; 183 183 184 - if (security_inet_conn_request(sk, skb, req)) 185 - goto out_free; 186 - 187 184 req->mss = mss; 188 185 ireq->ir_rmt_port = th->source; 189 186 ireq->ir_num = ntohs(th->dest); 190 187 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 191 188 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 189 + 190 + if (security_inet_conn_request(sk, skb, req)) 191 + goto out_free; 192 + 192 193 if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || 193 194 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 194 195 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+1
net/kcm/kcmsock.c
··· 1946 1946 module_exit(kcm_exit); 1947 1947 1948 1948 MODULE_LICENSE("GPL"); 1949 + MODULE_DESCRIPTION("KCM (Kernel Connection Multiplexor) sockets"); 1949 1950 MODULE_ALIAS_NETPROTO(PF_KCM);
+8 -2
net/llc/llc_input.c
··· 127 127 skb->transport_header += llc_len; 128 128 skb_pull(skb, llc_len); 129 129 if (skb->protocol == htons(ETH_P_802_2)) { 130 - __be16 pdulen = eth_hdr(skb)->h_proto; 131 - s32 data_size = ntohs(pdulen) - llc_len; 130 + __be16 pdulen; 131 + s32 data_size; 132 + 133 + if (skb->mac_len < ETH_HLEN) 134 + return 0; 135 + 136 + pdulen = eth_hdr(skb)->h_proto; 137 + data_size = ntohs(pdulen) - llc_len; 132 138 133 139 if (data_size < 0 || 134 140 !pskb_may_pull(skb, data_size))
+3
net/llc/llc_s_ac.c
··· 153 153 int rc = 1; 154 154 u32 data_size; 155 155 156 + if (skb->mac_len < ETH_HLEN) 157 + return 1; 158 + 156 159 llc_pdu_decode_sa(skb, mac_da); 157 160 llc_pdu_decode_da(skb, mac_sa); 158 161 llc_pdu_decode_ssap(skb, &dsap);
+3
net/llc/llc_station.c
··· 76 76 u32 data_size; 77 77 struct sk_buff *nskb; 78 78 79 + if (skb->mac_len < ETH_HLEN) 80 + goto out; 81 + 79 82 /* The test request command is type U (llc_len = 3) */ 80 83 data_size = ntohs(eth_hdr(skb)->h_proto) - 3; 81 84 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
+1
net/netfilter/ipvs/ip_vs_core.c
··· 2450 2450 module_init(ip_vs_init); 2451 2451 module_exit(ip_vs_cleanup); 2452 2452 MODULE_LICENSE("GPL"); 2453 + MODULE_DESCRIPTION("IP Virtual Server");
+1
net/netfilter/ipvs/ip_vs_dh.c
··· 270 270 module_init(ip_vs_dh_init); 271 271 module_exit(ip_vs_dh_cleanup); 272 272 MODULE_LICENSE("GPL"); 273 + MODULE_DESCRIPTION("ipvs destination hashing scheduler");
+1
net/netfilter/ipvs/ip_vs_fo.c
··· 72 72 module_init(ip_vs_fo_init); 73 73 module_exit(ip_vs_fo_cleanup); 74 74 MODULE_LICENSE("GPL"); 75 + MODULE_DESCRIPTION("ipvs weighted failover scheduler");
+1
net/netfilter/ipvs/ip_vs_ftp.c
··· 635 635 module_init(ip_vs_ftp_init); 636 636 module_exit(ip_vs_ftp_exit); 637 637 MODULE_LICENSE("GPL"); 638 + MODULE_DESCRIPTION("ipvs ftp helper");
+1
net/netfilter/ipvs/ip_vs_lblc.c
··· 632 632 module_init(ip_vs_lblc_init); 633 633 module_exit(ip_vs_lblc_cleanup); 634 634 MODULE_LICENSE("GPL"); 635 + MODULE_DESCRIPTION("ipvs locality-based least-connection scheduler");
+1
net/netfilter/ipvs/ip_vs_lblcr.c
··· 817 817 module_init(ip_vs_lblcr_init); 818 818 module_exit(ip_vs_lblcr_cleanup); 819 819 MODULE_LICENSE("GPL"); 820 + MODULE_DESCRIPTION("ipvs locality-based least-connection with replication scheduler");
+1
net/netfilter/ipvs/ip_vs_lc.c
··· 86 86 module_init(ip_vs_lc_init); 87 87 module_exit(ip_vs_lc_cleanup); 88 88 MODULE_LICENSE("GPL"); 89 + MODULE_DESCRIPTION("ipvs least connection scheduler");
+1
net/netfilter/ipvs/ip_vs_nq.c
··· 136 136 module_init(ip_vs_nq_init); 137 137 module_exit(ip_vs_nq_cleanup); 138 138 MODULE_LICENSE("GPL"); 139 + MODULE_DESCRIPTION("ipvs never queue scheduler");
+1
net/netfilter/ipvs/ip_vs_ovf.c
··· 79 79 module_init(ip_vs_ovf_init); 80 80 module_exit(ip_vs_ovf_cleanup); 81 81 MODULE_LICENSE("GPL"); 82 + MODULE_DESCRIPTION("ipvs overflow connection scheduler");
+1
net/netfilter/ipvs/ip_vs_pe_sip.c
··· 185 185 module_init(ip_vs_sip_init); 186 186 module_exit(ip_vs_sip_cleanup); 187 187 MODULE_LICENSE("GPL"); 188 + MODULE_DESCRIPTION("ipvs sip helper");
+1
net/netfilter/ipvs/ip_vs_rr.c
··· 122 122 123 123 module_init(ip_vs_rr_init); 124 124 module_exit(ip_vs_rr_cleanup); 125 + MODULE_DESCRIPTION("ipvs round-robin scheduler"); 125 126 MODULE_LICENSE("GPL");
+1
net/netfilter/ipvs/ip_vs_sed.c
··· 137 137 module_init(ip_vs_sed_init); 138 138 module_exit(ip_vs_sed_cleanup); 139 139 MODULE_LICENSE("GPL"); 140 + MODULE_DESCRIPTION("ipvs shortest expected delay scheduler");
+1
net/netfilter/ipvs/ip_vs_sh.c
··· 376 376 module_init(ip_vs_sh_init); 377 377 module_exit(ip_vs_sh_cleanup); 378 378 MODULE_LICENSE("GPL"); 379 + MODULE_DESCRIPTION("ipvs source hashing scheduler");
+1
net/netfilter/ipvs/ip_vs_twos.c
··· 137 137 module_init(ip_vs_twos_init); 138 138 module_exit(ip_vs_twos_cleanup); 139 139 MODULE_LICENSE("GPL"); 140 + MODULE_DESCRIPTION("ipvs power of twos choice scheduler");
+1
net/netfilter/ipvs/ip_vs_wlc.c
··· 109 109 module_init(ip_vs_wlc_init); 110 110 module_exit(ip_vs_wlc_cleanup); 111 111 MODULE_LICENSE("GPL"); 112 + MODULE_DESCRIPTION("ipvs weighted least connection scheduler");
+1
net/netfilter/ipvs/ip_vs_wrr.c
··· 263 263 module_init(ip_vs_wrr_init); 264 264 module_exit(ip_vs_wrr_cleanup); 265 265 MODULE_LICENSE("GPL"); 266 + MODULE_DESCRIPTION("ipvs weighted round-robin scheduler");
+2 -4
net/netfilter/nf_conntrack_bpf.c
··· 230 230 return 0; 231 231 } 232 232 233 - __diag_push(); 234 - __diag_ignore_all("-Wmissing-prototypes", 235 - "Global functions as their definitions will be in nf_conntrack BTF"); 233 + __bpf_kfunc_start_defs(); 236 234 237 235 /* bpf_xdp_ct_alloc - Allocate a new CT entry 238 236 * ··· 465 467 return nf_ct_change_status_common(nfct, status); 466 468 } 467 469 468 - __diag_pop() 470 + __bpf_kfunc_end_defs(); 469 471 470 472 BTF_SET8_START(nf_ct_kfunc_set) 471 473 BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
+1
net/netfilter/nf_conntrack_broadcast.c
··· 82 82 EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help); 83 83 84 84 MODULE_LICENSE("GPL"); 85 + MODULE_DESCRIPTION("Broadcast connection tracking helper");
+1
net/netfilter/nf_conntrack_netlink.c
··· 57 57 #include "nf_internals.h" 58 58 59 59 MODULE_LICENSE("GPL"); 60 + MODULE_DESCRIPTION("List and change connection tracking table"); 60 61 61 62 struct ctnetlink_list_dump_ctx { 62 63 struct nf_conn *last;
+1
net/netfilter/nf_conntrack_proto.c
··· 699 699 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); 700 700 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); 701 701 MODULE_LICENSE("GPL"); 702 + MODULE_DESCRIPTION("IPv4 and IPv6 connection tracking");
+2 -4
net/netfilter/nf_nat_bpf.c
··· 12 12 #include <net/netfilter/nf_conntrack_core.h> 13 13 #include <net/netfilter/nf_nat.h> 14 14 15 - __diag_push(); 16 - __diag_ignore_all("-Wmissing-prototypes", 17 - "Global functions as their definitions will be in nf_nat BTF"); 15 + __bpf_kfunc_start_defs(); 18 16 19 17 /* bpf_ct_set_nat_info - Set source or destination nat address 20 18 * ··· 52 54 return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0; 53 55 } 54 56 55 - __diag_pop() 57 + __bpf_kfunc_end_defs(); 56 58 57 59 BTF_SET8_START(nf_nat_kfunc_set) 58 60 BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
+1
net/netfilter/nf_nat_core.c
··· 1263 1263 } 1264 1264 1265 1265 MODULE_LICENSE("GPL"); 1266 + MODULE_DESCRIPTION("Network address translation core"); 1266 1267 1267 1268 module_init(nf_nat_init); 1268 1269 module_exit(nf_nat_cleanup);
+26 -1
net/netfilter/nf_nat_redirect.c
··· 80 80 81 81 static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; 82 82 83 + static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope) 84 + { 85 + unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr); 86 + 87 + if (ifa_addr_type & IPV6_ADDR_MAPPED) 88 + return false; 89 + 90 + if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC))) 91 + return false; 92 + 93 + if (scope) { 94 + unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK; 95 + 96 + if (!(scope & ifa_scope)) 97 + return false; 98 + } 99 + 100 + return true; 101 + } 102 + 83 103 unsigned int 84 104 nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 85 105 unsigned int hooknum) ··· 109 89 if (hooknum == NF_INET_LOCAL_OUT) { 110 90 newdst.in6 = loopback_addr; 111 91 } else { 92 + unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr); 112 93 struct inet6_dev *idev; 113 - struct inet6_ifaddr *ifa; 114 94 bool addr = false; 115 95 116 96 idev = __in6_dev_get(skb->dev); 117 97 if (idev != NULL) { 98 + const struct inet6_ifaddr *ifa; 99 + 118 100 read_lock_bh(&idev->lock); 119 101 list_for_each_entry(ifa, &idev->addr_list, if_list) { 102 + if (!nf_nat_redirect_ipv6_usable(ifa, scope)) 103 + continue; 104 + 120 105 newdst.in6 = ifa->addr; 121 106 addr = true; 122 107 break;
+18 -5
net/netfilter/nf_tables_api.c
··· 6520 6520 return ret; 6521 6521 } 6522 6522 6523 + static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall) 6524 + { 6525 + list_del_rcu(&catchall->list); 6526 + kfree_rcu(catchall, rcu); 6527 + } 6528 + 6523 6529 static void nft_setelem_catchall_remove(const struct net *net, 6524 6530 const struct nft_set *set, 6525 6531 struct nft_elem_priv *elem_priv) ··· 6534 6528 6535 6529 list_for_each_entry_safe(catchall, next, &set->catchall_list, list) { 6536 6530 if (catchall->elem == elem_priv) { 6537 - list_del_rcu(&catchall->list); 6538 - kfree_rcu(catchall, rcu); 6531 + nft_setelem_catchall_destroy(catchall); 6539 6532 break; 6540 6533 } 6541 6534 } ··· 9683 9678 unsigned int gc_seq, 9684 9679 bool sync) 9685 9680 { 9686 - struct nft_set_elem_catchall *catchall; 9681 + struct nft_set_elem_catchall *catchall, *next; 9687 9682 const struct nft_set *set = gc->set; 9683 + struct nft_elem_priv *elem_priv; 9688 9684 struct nft_set_ext *ext; 9689 9685 9690 - list_for_each_entry_rcu(catchall, &set->catchall_list, list) { 9686 + list_for_each_entry_safe(catchall, next, &set->catchall_list, list) { 9691 9687 ext = nft_set_elem_ext(set, catchall->elem); 9692 9688 9693 9689 if (!nft_set_elem_expired(ext)) ··· 9706 9700 if (!gc) 9707 9701 return NULL; 9708 9702 9709 - nft_trans_gc_elem_add(gc, catchall->elem); 9703 + elem_priv = catchall->elem; 9704 + if (sync) { 9705 + nft_setelem_data_deactivate(gc->net, gc->set, elem_priv); 9706 + nft_setelem_catchall_destroy(catchall); 9707 + } 9708 + 9709 + nft_trans_gc_elem_add(gc, elem_priv); 9710 9710 } 9711 9711 9712 9712 return gc; ··· 11398 11386 11399 11387 MODULE_LICENSE("GPL"); 11400 11388 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 11389 + MODULE_DESCRIPTION("Framework for packet filtering and classification"); 11401 11390 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
+1
net/netfilter/nft_chain_nat.c
··· 137 137 module_exit(nft_chain_nat_exit); 138 138 139 139 MODULE_LICENSE("GPL"); 140 + MODULE_DESCRIPTION("nftables network address translation support"); 140 141 #ifdef CONFIG_NF_TABLES_IPV4 141 142 MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat"); 142 143 #endif
+1
net/netfilter/nft_fib.c
··· 204 204 EXPORT_SYMBOL_GPL(nft_fib_reduce); 205 205 206 206 MODULE_LICENSE("GPL"); 207 + MODULE_DESCRIPTION("Query routing table from nftables"); 207 208 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+1
net/netfilter/nft_fwd_netdev.c
··· 270 270 271 271 MODULE_LICENSE("GPL"); 272 272 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 273 + MODULE_DESCRIPTION("nftables netdev packet forwarding support"); 273 274 MODULE_ALIAS_NFT_AF_EXPR(5, "fwd");
+1 -1
net/netfilter/xt_recent.c
··· 561 561 { 562 562 struct recent_table *t = pde_data(file_inode(file)); 563 563 struct recent_entry *e; 564 - char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; 564 + char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")]; 565 565 const char *c = buf; 566 566 union nf_inet_addr addr = {}; 567 567 u_int16_t family;
+1
net/netlink/diag.c
··· 257 257 258 258 module_init(netlink_diag_init); 259 259 module_exit(netlink_diag_exit); 260 + MODULE_DESCRIPTION("Netlink-based socket monitoring/diagnostic interface (sock_diag)"); 260 261 MODULE_LICENSE("GPL"); 261 262 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);
+1 -1
net/openvswitch/conntrack.c
··· 985 985 if (err) 986 986 return err; 987 987 988 - nf_conn_act_ct_ext_add(ct); 988 + nf_conn_act_ct_ext_add(skb, ct, ctinfo); 989 989 } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 990 990 labels_nonzero(&info->labels.mask)) { 991 991 err = ovs_ct_set_labels(ct, key, &info->labels.value,
+1 -1
net/rxrpc/conn_object.c
··· 212 212 conn->idle_timestamp = jiffies; 213 213 if (atomic_dec_and_test(&conn->active)) 214 214 rxrpc_set_service_reap_timer(conn->rxnet, 215 - jiffies + rxrpc_connection_expiry); 215 + jiffies + rxrpc_connection_expiry * HZ); 216 216 } 217 217 218 218 rxrpc_put_call(call, rxrpc_call_put_io_thread);
+1 -1
net/rxrpc/local_object.c
··· 87 87 struct rxrpc_local *local = 88 88 container_of(timer, struct rxrpc_local, client_conn_reap_timer); 89 89 90 - if (local->kill_all_client_conns && 90 + if (!local->kill_all_client_conns && 91 91 test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags)) 92 92 rxrpc_wake_up_io_thread(local); 93 93 }
+1 -1
net/sched/act_api.c
··· 1098 1098 } 1099 1099 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { 1100 1100 if (unlikely(!rcu_access_pointer(a->goto_chain))) { 1101 - net_warn_ratelimited("can't go to NULL chain!\n"); 1101 + tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR); 1102 1102 return TC_ACT_SHOT; 1103 1103 } 1104 1104 tcf_action_goto_chain_exec(a, res);
+14 -1
net/sched/act_ct.c
··· 376 376 entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir]; 377 377 } 378 378 379 + static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry) 380 + { 381 + struct nf_conn_act_ct_ext *act_ct_ext; 382 + 383 + act_ct_ext = nf_conn_act_ct_ext_find(entry->ct); 384 + if (act_ct_ext) { 385 + tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL); 386 + tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY); 387 + } 388 + } 389 + 379 390 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft, 380 391 struct nf_conn *ct, 381 392 bool tcp, bool bidirectional) ··· 682 671 else 683 672 ctinfo = IP_CT_ESTABLISHED_REPLY; 684 673 674 + nf_conn_act_ct_ext_fill(skb, ct, ctinfo); 675 + tcf_ct_flow_ct_ext_ifidx_update(flow); 685 676 flow_offload_refresh(nf_ft, flow, force_refresh); 686 677 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) { 687 678 /* Process this flow in SW to allow promoting to ASSURED */ ··· 1047 1034 tcf_ct_act_set_labels(ct, p->labels, p->labels_mask); 1048 1035 1049 1036 if (!nf_ct_is_confirmed(ct)) 1050 - nf_conn_act_ct_ext_add(ct); 1037 + nf_conn_act_ct_ext_add(skb, ct, ctinfo); 1051 1038 1052 1039 /* This will take care of sending queued events 1053 1040 * even if the connection is already confirmed.
+1
net/sched/act_gate.c
··· 677 677 678 678 module_init(gate_init_module); 679 679 module_exit(gate_cleanup_module); 680 + MODULE_DESCRIPTION("TC gate action"); 680 681 MODULE_LICENSE("GPL v2");
+8 -1
net/sched/cls_api.c
··· 1658 1658 int act_index, 1659 1659 u32 *last_executed_chain) 1660 1660 { 1661 + u32 orig_reason = res->drop_reason; 1661 1662 #ifdef CONFIG_NET_CLS_ACT 1662 1663 const int max_reclassify_loop = 16; 1663 1664 const struct tcf_proto *first_tp; ··· 1713 1712 goto reset; 1714 1713 } 1715 1714 #endif 1716 - if (err >= 0) 1715 + if (err >= 0) { 1716 + /* Policy drop or drop reason is over-written by 1717 + * classifiers with a bogus value(0) */ 1718 + if (err == TC_ACT_SHOT && 1719 + res->drop_reason == SKB_NOT_DROPPED_YET) 1720 + tcf_set_drop_reason(res, orig_reason); 1717 1721 return err; 1722 + } 1718 1723 } 1719 1724 1720 1725 if (unlikely(n)) {
+1
net/sched/cls_basic.c
··· 341 341 342 342 module_init(init_basic) 343 343 module_exit(exit_basic) 344 + MODULE_DESCRIPTION("TC basic classifier"); 344 345 MODULE_LICENSE("GPL");
+1
net/sched/cls_cgroup.c
··· 222 222 223 223 module_init(init_cgroup_cls); 224 224 module_exit(exit_cgroup_cls); 225 + MODULE_DESCRIPTION("TC cgroup classifier"); 225 226 MODULE_LICENSE("GPL");
+1
net/sched/cls_fw.c
··· 446 446 447 447 module_init(init_fw) 448 448 module_exit(exit_fw) 449 + MODULE_DESCRIPTION("SKB mark based TC classifier"); 449 450 MODULE_LICENSE("GPL");
+1
net/sched/cls_route.c
··· 684 684 685 685 module_init(init_route4) 686 686 module_exit(exit_route4) 687 + MODULE_DESCRIPTION("Routing table realm based TC classifier"); 687 688 MODULE_LICENSE("GPL");
+1
net/sched/cls_u32.c
··· 1489 1489 1490 1490 module_init(init_u32) 1491 1491 module_exit(exit_u32) 1492 + MODULE_DESCRIPTION("Universal 32bit based TC Classifier"); 1492 1493 MODULE_LICENSE("GPL");
+1
net/sched/sch_cbs.c
··· 574 574 module_init(cbs_module_init) 575 575 module_exit(cbs_module_exit) 576 576 MODULE_LICENSE("GPL"); 577 + MODULE_DESCRIPTION("Credit Based shaper");
+1
net/sched/sch_choke.c
··· 513 513 module_exit(choke_module_exit) 514 514 515 515 MODULE_LICENSE("GPL"); 516 + MODULE_DESCRIPTION("Choose and keep responsive flows scheduler");
+1
net/sched/sch_drr.c
··· 495 495 module_init(drr_init); 496 496 module_exit(drr_exit); 497 497 MODULE_LICENSE("GPL"); 498 + MODULE_DESCRIPTION("Deficit Round Robin scheduler");
+1
net/sched/sch_etf.c
··· 513 513 module_init(etf_module_init) 514 514 module_exit(etf_module_exit) 515 515 MODULE_LICENSE("GPL"); 516 + MODULE_DESCRIPTION("Earliest TxTime First (ETF) qdisc");
+1
net/sched/sch_ets.c
··· 826 826 module_init(ets_init); 827 827 module_exit(ets_exit); 828 828 MODULE_LICENSE("GPL"); 829 + MODULE_DESCRIPTION("Enhanced Transmission Selection(ETS) scheduler");
+1
net/sched/sch_fifo.c
··· 269 269 return q ? : ERR_PTR(err); 270 270 } 271 271 EXPORT_SYMBOL(fifo_create_dflt); 272 + MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");
+2 -8
net/sched/sch_fq.c
··· 919 919 [TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 }, 920 920 [TCA_FQ_HORIZON] = { .type = NLA_U32 }, 921 921 [TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 }, 922 - [TCA_FQ_PRIOMAP] = { 923 - .type = NLA_BINARY, 924 - .len = sizeof(struct tc_prio_qopt), 925 - }, 926 - [TCA_FQ_WEIGHTS] = { 927 - .type = NLA_BINARY, 928 - .len = FQ_BANDS * sizeof(s32), 929 - }, 922 + [TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)), 923 + [TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)), 930 924 }; 931 925 932 926 /* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
+1
net/sched/sch_gred.c
··· 945 945 module_exit(gred_module_exit) 946 946 947 947 MODULE_LICENSE("GPL"); 948 + MODULE_DESCRIPTION("Generic Random Early Detection qdisc");
+1
net/sched/sch_hfsc.c
··· 1693 1693 } 1694 1694 1695 1695 MODULE_LICENSE("GPL"); 1696 + MODULE_DESCRIPTION("Hierarchical Fair Service Curve scheduler"); 1696 1697 module_init(hfsc_init); 1697 1698 module_exit(hfsc_cleanup);
+1
net/sched/sch_htb.c
··· 2179 2179 module_init(htb_module_init) 2180 2180 module_exit(htb_module_exit) 2181 2181 MODULE_LICENSE("GPL"); 2182 + MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");
+1
net/sched/sch_ingress.c
··· 370 370 371 371 MODULE_ALIAS("sch_clsact"); 372 372 MODULE_LICENSE("GPL"); 373 + MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
+1
net/sched/sch_mqprio.c
··· 789 789 module_exit(mqprio_module_exit); 790 790 791 791 MODULE_LICENSE("GPL"); 792 + MODULE_DESCRIPTION("Classful multiqueue prio qdisc");
+1
net/sched/sch_mqprio_lib.c
··· 129 129 EXPORT_SYMBOL_GPL(mqprio_fp_to_offload); 130 130 131 131 MODULE_LICENSE("GPL"); 132 + MODULE_DESCRIPTION("Shared mqprio qdisc code currently between taprio and mqprio");
+1
net/sched/sch_multiq.c
··· 410 410 module_exit(multiq_module_exit) 411 411 412 412 MODULE_LICENSE("GPL"); 413 + MODULE_DESCRIPTION("Multi queue to hardware queue mapping qdisc");
+1
net/sched/sch_netem.c
··· 1307 1307 module_init(netem_module_init) 1308 1308 module_exit(netem_module_exit) 1309 1309 MODULE_LICENSE("GPL"); 1310 + MODULE_DESCRIPTION("Network characteristics emulator qdisc");
+1
net/sched/sch_plug.c
··· 226 226 module_init(plug_module_init) 227 227 module_exit(plug_module_exit) 228 228 MODULE_LICENSE("GPL"); 229 + MODULE_DESCRIPTION("Qdisc to plug and unplug traffic via netlink control");
+1
net/sched/sch_prio.c
··· 433 433 module_exit(prio_module_exit) 434 434 435 435 MODULE_LICENSE("GPL"); 436 + MODULE_DESCRIPTION("Simple 3-band priority qdisc");
+1
net/sched/sch_qfq.c
··· 1535 1535 module_init(qfq_init); 1536 1536 module_exit(qfq_exit); 1537 1537 MODULE_LICENSE("GPL"); 1538 + MODULE_DESCRIPTION("Quick Fair Queueing Plus qdisc");
+1
net/sched/sch_red.c
··· 563 563 module_exit(red_module_exit) 564 564 565 565 MODULE_LICENSE("GPL"); 566 + MODULE_DESCRIPTION("Random Early Detection qdisc");
+1
net/sched/sch_sfq.c
··· 937 937 module_init(sfq_module_init) 938 938 module_exit(sfq_module_exit) 939 939 MODULE_LICENSE("GPL"); 940 + MODULE_DESCRIPTION("Stochastic Fairness qdisc");
+1
net/sched/sch_skbprio.c
··· 307 307 module_exit(skbprio_module_exit) 308 308 309 309 MODULE_LICENSE("GPL"); 310 + MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
+1
net/sched/sch_taprio.c
··· 2572 2572 module_init(taprio_module_init); 2573 2573 module_exit(taprio_module_exit); 2574 2574 MODULE_LICENSE("GPL"); 2575 + MODULE_DESCRIPTION("Time Aware Priority qdisc");
+1
net/sched/sch_tbf.c
··· 621 621 module_init(tbf_module_init) 622 622 module_exit(tbf_module_exit) 623 623 MODULE_LICENSE("GPL"); 624 + MODULE_DESCRIPTION("Token Bucket Filter qdisc");
+1
net/sched/sch_teql.c
··· 523 523 module_exit(teql_exit); 524 524 525 525 MODULE_LICENSE("GPL"); 526 + MODULE_DESCRIPTION("True (or trivial) link equalizer qdisc");
+2 -2
net/smc/af_smc.c
··· 275 275 276 276 if (!smc->use_fallback) { 277 277 rc = smc_close_active(smc); 278 - sock_set_flag(sk, SOCK_DEAD); 278 + smc_sock_set_flag(sk, SOCK_DEAD); 279 279 sk->sk_shutdown |= SHUTDOWN_MASK; 280 280 } else { 281 281 if (sk->sk_state != SMC_CLOSED) { ··· 1743 1743 if (new_clcsock) 1744 1744 sock_release(new_clcsock); 1745 1745 new_sk->sk_state = SMC_CLOSED; 1746 - sock_set_flag(new_sk, SOCK_DEAD); 1746 + smc_sock_set_flag(new_sk, SOCK_DEAD); 1747 1747 sock_put(new_sk); /* final */ 1748 1748 *new_smc = NULL; 1749 1749 goto out;
+5
net/smc/smc.h
··· 377 377 int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info); 378 378 int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info); 379 379 380 + static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag) 381 + { 382 + set_bit(flag, &sk->sk_flags); 383 + } 384 + 380 385 #endif /* __SMC_H */
+5 -6
net/smc/smc_cdc.c
··· 28 28 { 29 29 struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd; 30 30 struct smc_connection *conn = cdcpend->conn; 31 + struct smc_buf_desc *sndbuf_desc; 31 32 struct smc_sock *smc; 32 33 int diff; 33 34 35 + sndbuf_desc = conn->sndbuf_desc; 34 36 smc = container_of(conn, struct smc_sock, conn); 35 37 bh_lock_sock(&smc->sk); 36 - if (!wc_status) { 37 - diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len, 38 + if (!wc_status && sndbuf_desc) { 39 + diff = smc_curs_diff(sndbuf_desc->len, 38 40 &cdcpend->conn->tx_curs_fin, 39 41 &cdcpend->cursor); 40 42 /* sndbuf_space is decreased in smc_sendmsg */ ··· 115 113 struct smc_link *link = conn->lnk; 116 114 union smc_host_cursor cfed; 117 115 int rc; 118 - 119 - if (unlikely(!READ_ONCE(conn->sndbuf_desc))) 120 - return -ENOBUFS; 121 116 122 117 smc_cdc_add_pending_send(conn, pend); 123 118 ··· 384 385 smc->sk.sk_shutdown |= RCV_SHUTDOWN; 385 386 if (smc->clcsock && smc->clcsock->sk) 386 387 smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; 387 - sock_set_flag(&smc->sk, SOCK_DONE); 388 + smc_sock_set_flag(&smc->sk, SOCK_DONE); 388 389 sock_hold(&smc->sk); /* sock_put in close_work */ 389 390 if (!queue_work(smc_close_wq, &conn->close_work)) 390 391 sock_put(&smc->sk);
+3 -2
net/smc/smc_close.c
··· 116 116 struct sock *sk = &smc->sk; 117 117 118 118 release_sock(sk); 119 - cancel_work_sync(&smc->conn.close_work); 119 + if (cancel_work_sync(&smc->conn.close_work)) 120 + sock_put(sk); 120 121 cancel_delayed_work_sync(&smc->conn.tx_work); 121 122 lock_sock(sk); 122 123 } ··· 174 173 break; 175 174 } 176 175 177 - sock_set_flag(sk, SOCK_DEAD); 176 + smc_sock_set_flag(sk, SOCK_DEAD); 178 177 sk->sk_state_change(sk); 179 178 180 179 if (release_clcsock) {
+2 -6
net/socket.c
··· 1685 1685 * Therefore, __weak is needed to ensure that the call is still 1686 1686 * emitted, by telling the compiler that we don't know what the 1687 1687 * function might eventually be. 1688 - * 1689 - * __diag_* below are needed to dismiss the missing prototype warning. 1690 1688 */ 1691 1689 1692 - __diag_push(); 1693 - __diag_ignore_all("-Wmissing-prototypes", 1694 - "A fmod_ret entry point for BPF programs"); 1690 + __bpf_hook_start(); 1695 1691 1696 1692 __weak noinline int update_socket_protocol(int family, int type, int protocol) 1697 1693 { 1698 1694 return protocol; 1699 1695 } 1700 1696 1701 - __diag_pop(); 1697 + __bpf_hook_end(); 1702 1698 1703 1699 int __sys_socket(int family, int type, int protocol) 1704 1700 {
+2 -2
net/tipc/netlink.c
··· 88 88 89 89 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { 90 90 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, 91 - [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING, 91 + [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING, 92 92 .len = TIPC_MAX_LINK_NAME }, 93 93 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, 94 94 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, ··· 125 125 126 126 const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = { 127 127 [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC }, 128 - [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING, 128 + [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING, 129 129 .len = TIPC_MAX_BEARER_NAME }, 130 130 [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED }, 131 131 [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+13 -5
net/vmw_vsock/virtio_transport_common.c
··· 130 130 hdr->dst_port = cpu_to_le32(dst_port); 131 131 hdr->flags = cpu_to_le32(info->flags); 132 132 hdr->len = cpu_to_le32(payload_len); 133 + hdr->buf_alloc = cpu_to_le32(0); 134 + hdr->fwd_cnt = cpu_to_le32(0); 133 135 } 134 136 135 137 static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb, ··· 1371 1369 vsk->peer_shutdown |= RCV_SHUTDOWN; 1372 1370 if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) 1373 1371 vsk->peer_shutdown |= SEND_SHUTDOWN; 1374 - if (vsk->peer_shutdown == SHUTDOWN_MASK && 1375 - vsock_stream_has_data(vsk) <= 0 && 1376 - !sock_flag(sk, SOCK_DONE)) { 1377 - (void)virtio_transport_reset(vsk, NULL); 1378 - virtio_transport_do_close(vsk, true); 1372 + if (vsk->peer_shutdown == SHUTDOWN_MASK) { 1373 + if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) { 1374 + (void)virtio_transport_reset(vsk, NULL); 1375 + virtio_transport_do_close(vsk, true); 1376 + } 1377 + /* Remove this socket anyway because the remote peer sent 1378 + * the shutdown. This way a new connection will succeed 1379 + * if the remote peer uses the same source port, 1380 + * even if the old socket is still unreleased, but now disconnected. 1381 + */ 1382 + vsock_remove_sock(vsk); 1379 1383 } 1380 1384 if (le32_to_cpu(virtio_vsock_hdr(skb)->flags)) 1381 1385 sk->sk_state_change(sk);
+2 -4
net/xfrm/xfrm_interface_bpf.c
··· 27 27 int link; 28 28 }; 29 29 30 - __diag_push(); 31 - __diag_ignore_all("-Wmissing-prototypes", 32 - "Global functions as their definitions will be in xfrm_interface BTF"); 30 + __bpf_kfunc_start_defs(); 33 31 34 32 /* bpf_skb_get_xfrm_info - Get XFRM metadata 35 33 * ··· 91 93 return 0; 92 94 } 93 95 94 - __diag_pop() 96 + __bpf_kfunc_end_defs(); 95 97 96 98 BTF_SET8_START(xfrm_ifc_kfunc_set) 97 99 BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
+114 -6
tools/net/ynl/generated/nfsd-user.c
··· 50 50 /* Common nested types */ 51 51 /* ============== NFSD_CMD_RPC_STATUS_GET ============== */ 52 52 /* NFSD_CMD_RPC_STATUS_GET - dump */ 53 - void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp) 53 + int nfsd_rpc_status_get_rsp_dump_parse(const struct nlmsghdr *nlh, void *data) 54 54 { 55 - struct nfsd_rpc_status_get_list *next = rsp; 55 + struct nfsd_rpc_status_get_rsp_dump *dst; 56 + struct ynl_parse_arg *yarg = data; 57 + unsigned int n_compound_ops = 0; 58 + const struct nlattr *attr; 59 + int i; 60 + 61 + dst = yarg->data; 62 + 63 + if (dst->compound_ops) 64 + return ynl_error_parse(yarg, "attribute already present (rpc-status.compound-ops)"); 65 + 66 + mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { 67 + unsigned int type = mnl_attr_get_type(attr); 68 + 69 + if (type == NFSD_A_RPC_STATUS_XID) { 70 + if (ynl_attr_validate(yarg, attr)) 71 + return MNL_CB_ERROR; 72 + dst->_present.xid = 1; 73 + dst->xid = mnl_attr_get_u32(attr); 74 + } else if (type == NFSD_A_RPC_STATUS_FLAGS) { 75 + if (ynl_attr_validate(yarg, attr)) 76 + return MNL_CB_ERROR; 77 + dst->_present.flags = 1; 78 + dst->flags = mnl_attr_get_u32(attr); 79 + } else if (type == NFSD_A_RPC_STATUS_PROG) { 80 + if (ynl_attr_validate(yarg, attr)) 81 + return MNL_CB_ERROR; 82 + dst->_present.prog = 1; 83 + dst->prog = mnl_attr_get_u32(attr); 84 + } else if (type == NFSD_A_RPC_STATUS_VERSION) { 85 + if (ynl_attr_validate(yarg, attr)) 86 + return MNL_CB_ERROR; 87 + dst->_present.version = 1; 88 + dst->version = mnl_attr_get_u8(attr); 89 + } else if (type == NFSD_A_RPC_STATUS_PROC) { 90 + if (ynl_attr_validate(yarg, attr)) 91 + return MNL_CB_ERROR; 92 + dst->_present.proc = 1; 93 + dst->proc = mnl_attr_get_u32(attr); 94 + } else if (type == NFSD_A_RPC_STATUS_SERVICE_TIME) { 95 + if (ynl_attr_validate(yarg, attr)) 96 + return MNL_CB_ERROR; 97 + dst->_present.service_time = 1; 98 + dst->service_time = mnl_attr_get_u64(attr); 99 + } else if (type == NFSD_A_RPC_STATUS_SADDR4) 
{ 100 + if (ynl_attr_validate(yarg, attr)) 101 + return MNL_CB_ERROR; 102 + dst->_present.saddr4 = 1; 103 + dst->saddr4 = mnl_attr_get_u32(attr); 104 + } else if (type == NFSD_A_RPC_STATUS_DADDR4) { 105 + if (ynl_attr_validate(yarg, attr)) 106 + return MNL_CB_ERROR; 107 + dst->_present.daddr4 = 1; 108 + dst->daddr4 = mnl_attr_get_u32(attr); 109 + } else if (type == NFSD_A_RPC_STATUS_SADDR6) { 110 + unsigned int len; 111 + 112 + if (ynl_attr_validate(yarg, attr)) 113 + return MNL_CB_ERROR; 114 + 115 + len = mnl_attr_get_payload_len(attr); 116 + dst->_present.saddr6_len = len; 117 + dst->saddr6 = malloc(len); 118 + memcpy(dst->saddr6, mnl_attr_get_payload(attr), len); 119 + } else if (type == NFSD_A_RPC_STATUS_DADDR6) { 120 + unsigned int len; 121 + 122 + if (ynl_attr_validate(yarg, attr)) 123 + return MNL_CB_ERROR; 124 + 125 + len = mnl_attr_get_payload_len(attr); 126 + dst->_present.daddr6_len = len; 127 + dst->daddr6 = malloc(len); 128 + memcpy(dst->daddr6, mnl_attr_get_payload(attr), len); 129 + } else if (type == NFSD_A_RPC_STATUS_SPORT) { 130 + if (ynl_attr_validate(yarg, attr)) 131 + return MNL_CB_ERROR; 132 + dst->_present.sport = 1; 133 + dst->sport = mnl_attr_get_u16(attr); 134 + } else if (type == NFSD_A_RPC_STATUS_DPORT) { 135 + if (ynl_attr_validate(yarg, attr)) 136 + return MNL_CB_ERROR; 137 + dst->_present.dport = 1; 138 + dst->dport = mnl_attr_get_u16(attr); 139 + } else if (type == NFSD_A_RPC_STATUS_COMPOUND_OPS) { 140 + n_compound_ops++; 141 + } 142 + } 143 + 144 + if (n_compound_ops) { 145 + dst->compound_ops = calloc(n_compound_ops, sizeof(*dst->compound_ops)); 146 + dst->n_compound_ops = n_compound_ops; 147 + i = 0; 148 + mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { 149 + if (mnl_attr_get_type(attr) == NFSD_A_RPC_STATUS_COMPOUND_OPS) { 150 + dst->compound_ops[i] = mnl_attr_get_u32(attr); 151 + i++; 152 + } 153 + } 154 + } 155 + 156 + return MNL_CB_OK; 157 + } 158 + 159 + void 160 + nfsd_rpc_status_get_rsp_list_free(struct 
nfsd_rpc_status_get_rsp_list *rsp) 161 + { 162 + struct nfsd_rpc_status_get_rsp_list *next = rsp; 56 163 57 164 while ((void *)next != YNL_LIST_END) { 58 165 rsp = next; ··· 172 65 } 173 66 } 174 67 175 - struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys) 68 + struct nfsd_rpc_status_get_rsp_list * 69 + nfsd_rpc_status_get_dump(struct ynl_sock *ys) 176 70 { 177 71 struct ynl_dump_state yds = {}; 178 72 struct nlmsghdr *nlh; 179 73 int err; 180 74 181 75 yds.ys = ys; 182 - yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_list); 183 - yds.cb = nfsd_rpc_status_get_rsp_parse; 76 + yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_rsp_list); 77 + yds.cb = nfsd_rpc_status_get_rsp_dump_parse; 184 78 yds.rsp_cmd = NFSD_CMD_RPC_STATUS_GET; 185 79 yds.rsp_policy = &nfsd_rpc_status_nest; 186 80 ··· 194 86 return yds.first; 195 87 196 88 free_list: 197 - nfsd_rpc_status_get_list_free(yds.first); 89 + nfsd_rpc_status_get_rsp_list_free(yds.first); 198 90 return NULL; 199 91 } 200 92
+39 -5
tools/net/ynl/generated/nfsd-user.h
··· 21 21 /* Common nested types */ 22 22 /* ============== NFSD_CMD_RPC_STATUS_GET ============== */ 23 23 /* NFSD_CMD_RPC_STATUS_GET - dump */ 24 - struct nfsd_rpc_status_get_list { 25 - struct nfsd_rpc_status_get_list *next; 26 - struct nfsd_rpc_status_get_rsp obj __attribute__ ((aligned (8))); 24 + struct nfsd_rpc_status_get_rsp_dump { 25 + struct { 26 + __u32 xid:1; 27 + __u32 flags:1; 28 + __u32 prog:1; 29 + __u32 version:1; 30 + __u32 proc:1; 31 + __u32 service_time:1; 32 + __u32 saddr4:1; 33 + __u32 daddr4:1; 34 + __u32 saddr6_len; 35 + __u32 daddr6_len; 36 + __u32 sport:1; 37 + __u32 dport:1; 38 + } _present; 39 + 40 + __u32 xid /* big-endian */; 41 + __u32 flags; 42 + __u32 prog; 43 + __u8 version; 44 + __u32 proc; 45 + __s64 service_time; 46 + __u32 saddr4 /* big-endian */; 47 + __u32 daddr4 /* big-endian */; 48 + void *saddr6; 49 + void *daddr6; 50 + __u16 sport /* big-endian */; 51 + __u16 dport /* big-endian */; 52 + unsigned int n_compound_ops; 53 + __u32 *compound_ops; 27 54 }; 28 55 29 - void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp); 56 + struct nfsd_rpc_status_get_rsp_list { 57 + struct nfsd_rpc_status_get_rsp_list *next; 58 + struct nfsd_rpc_status_get_rsp_dump obj __attribute__((aligned(8))); 59 + }; 30 60 31 - struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys); 61 + void 62 + nfsd_rpc_status_get_rsp_list_free(struct nfsd_rpc_status_get_rsp_list *rsp); 63 + 64 + struct nfsd_rpc_status_get_rsp_list * 65 + nfsd_rpc_status_get_dump(struct ynl_sock *ys); 32 66 33 67 #endif /* _LINUX_NFSD_GEN_H */
+6 -1
tools/net/ynl/ynl-gen-c.py
··· 3 3 4 4 import argparse 5 5 import collections 6 + import filecmp 6 7 import os 7 8 import re 8 9 import shutil ··· 1169 1168 if out_file is None: 1170 1169 self._out = os.sys.stdout 1171 1170 else: 1172 - self._out = tempfile.TemporaryFile('w+') 1171 + self._out = tempfile.NamedTemporaryFile('w+') 1173 1172 self._out_file = out_file 1174 1173 1175 1174 def __del__(self): ··· 1177 1176 1178 1177 def close_out_file(self): 1179 1178 if self._out == os.sys.stdout: 1179 + return 1180 + # Avoid modifying the file if contents didn't change 1181 + self._out.flush() 1182 + if os.path.isfile(self._out_file) and filecmp.cmp(self._out.name, self._out_file, shallow=False): 1180 1183 return 1181 1184 with open(self._out_file, 'w+') as out_file: 1182 1185 self._out.seek(0)
+2 -4
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 39 39 int b; 40 40 }; 41 41 42 - __diag_push(); 43 - __diag_ignore_all("-Wmissing-prototypes", 44 - "Global functions as their definitions will be in bpf_testmod.ko BTF"); 42 + __bpf_hook_start(); 45 43 46 44 noinline int 47 45 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) { ··· 333 335 } 334 336 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); 335 337 336 - __diag_pop(); 338 + __bpf_hook_end(); 337 339 338 340 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { 339 341 .attr = { .name = "bpf_testmod", .mode = 0666, },
+5 -15
tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
··· 326 326 327 327 static int create_hash(void) 328 328 { 329 - struct bpf_map_create_opts map_opts = { 330 - .sz = sizeof(map_opts), 331 - .map_flags = BPF_F_NO_PREALLOC, 332 - }; 329 + LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC); 333 330 334 331 return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts); 335 332 } 336 333 337 334 static int create_percpu_hash(void) 338 335 { 339 - struct bpf_map_create_opts map_opts = { 340 - .sz = sizeof(map_opts), 341 - .map_flags = BPF_F_NO_PREALLOC, 342 - }; 336 + LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC); 343 337 344 338 return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts); 345 339 } ··· 350 356 351 357 static int create_lru_hash(__u32 type, __u32 map_flags) 352 358 { 353 - struct bpf_map_create_opts map_opts = { 354 - .sz = sizeof(map_opts), 355 - .map_flags = map_flags, 356 - }; 359 + LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = map_flags); 357 360 358 361 return map_create(type, "lru_hash", &map_opts); 359 362 } 360 363 361 364 static int create_hash_of_maps(void) 362 365 { 363 - struct bpf_map_create_opts map_opts = { 364 - .sz = sizeof(map_opts), 366 + LIBBPF_OPTS(bpf_map_create_opts, map_opts, 365 367 .map_flags = BPF_F_NO_PREALLOC, 366 368 .inner_map_fd = create_small_hash(), 367 - }; 369 + ); 368 370 int ret; 369 371 370 372 ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
+33
tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
··· 4 4 #include <test_progs.h> 5 5 #include <bpf/libbpf.h> 6 6 #include <bpf/btf.h> 7 + #include "iters_css_task.skel.h" 7 8 #include "cgroup_iter.skel.h" 8 9 #include "cgroup_helpers.h" 9 10 ··· 264 263 close(cgrp_fd); 265 264 } 266 265 266 + static void test_walk_self_only_css_task(void) 267 + { 268 + struct iters_css_task *skel; 269 + int err; 270 + 271 + skel = iters_css_task__open(); 272 + if (!ASSERT_OK_PTR(skel, "skel_open")) 273 + return; 274 + 275 + bpf_program__set_autoload(skel->progs.cgroup_id_printer, true); 276 + 277 + err = iters_css_task__load(skel); 278 + if (!ASSERT_OK(err, "skel_load")) 279 + goto cleanup; 280 + 281 + err = join_cgroup(cg_path[CHILD2]); 282 + if (!ASSERT_OK(err, "join_cgroup")) 283 + goto cleanup; 284 + 285 + skel->bss->target_pid = getpid(); 286 + snprintf(expected_output, sizeof(expected_output), 287 + PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]); 288 + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2], 289 + BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task"); 290 + ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt"); 291 + cleanup: 292 + iters_css_task__destroy(skel); 293 + } 294 + 267 295 void test_cgroup_iter(void) 268 296 { 269 297 struct cgroup_iter *skel = NULL; ··· 323 293 test_walk_self_only(skel); 324 294 if (test__start_subtest("cgroup_iter__dead_self_only")) 325 295 test_walk_dead_self_only(skel); 296 + if (test__start_subtest("cgroup_iter__self_only_css_task")) 297 + test_walk_self_only_css_task(); 298 + 326 299 out: 327 300 cgroup_iter__destroy(skel); 328 301 cleanup_cgroups();
+1
tools/testing/selftests/bpf/prog_tests/iters.c
··· 294 294 RUN_TESTS(iters_state_safety); 295 295 RUN_TESTS(iters_looping); 296 296 RUN_TESTS(iters); 297 + RUN_TESTS(iters_css_task); 297 298 298 299 if (env.has_testmod) 299 300 RUN_TESTS(iters_testmod_seq);
+8 -3
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
··· 8 8 #include <sys/types.h> 9 9 #include <test_progs.h> 10 10 11 - #define TDIR "/sys/kernel/debug" 11 + /* TDIR must be in a location we can create a directory in. */ 12 + #define TDIR "/tmp/test_bpffs_testdir" 12 13 13 14 static int read_iter(char *file) 14 15 { ··· 44 43 if (!ASSERT_OK(err, "mount /")) 45 44 goto out; 46 45 47 - err = umount(TDIR); 48 - if (!ASSERT_OK(err, "umount " TDIR)) 46 + err = mkdir(TDIR, 0777); 47 + /* If the directory already exists we can carry on. It may be left over 48 + * from a previous run. 49 + */ 50 + if ((err && errno != EEXIST) && !ASSERT_OK(err, "mkdir " TDIR)) 49 51 goto out; 50 52 51 53 err = mount("none", TDIR, "tmpfs", 0, NULL); ··· 142 138 rmdir(TDIR "/fs1"); 143 139 rmdir(TDIR "/fs2"); 144 140 umount(TDIR); 141 + rmdir(TDIR); 145 142 exit(err); 146 143 } 147 144
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 46 46 #include "verifier_movsx.skel.h" 47 47 #include "verifier_netfilter_ctx.skel.h" 48 48 #include "verifier_netfilter_retcode.skel.h" 49 + #include "verifier_precision.skel.h" 49 50 #include "verifier_prevent_map_lookup.skel.h" 50 51 #include "verifier_raw_stack.skel.h" 51 52 #include "verifier_raw_tp_writable.skel.h" ··· 154 153 void test_verifier_movsx(void) { RUN(verifier_movsx); } 155 154 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } 156 155 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } 156 + void test_verifier_precision(void) { RUN(verifier_precision); } 157 157 void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } 158 158 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } 159 159 void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
+55
tools/testing/selftests/bpf/progs/iters_css_task.c
··· 10 10 11 11 char _license[] SEC("license") = "GPL"; 12 12 13 + struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym; 13 14 struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; 14 15 void bpf_cgroup_release(struct cgroup *p) __ksym; 15 16 ··· 45 44 bpf_cgroup_release(cgrp); 46 45 47 46 return -EPERM; 47 + } 48 + 49 + static inline u64 cgroup_id(struct cgroup *cgrp) 50 + { 51 + return cgrp->kn->id; 52 + } 53 + 54 + SEC("?iter/cgroup") 55 + int cgroup_id_printer(struct bpf_iter__cgroup *ctx) 56 + { 57 + struct seq_file *seq = ctx->meta->seq; 58 + struct cgroup *cgrp = ctx->cgroup; 59 + struct cgroup_subsys_state *css; 60 + struct task_struct *task; 61 + 62 + /* epilogue */ 63 + if (cgrp == NULL) { 64 + BPF_SEQ_PRINTF(seq, "epilogue\n"); 65 + return 0; 66 + } 67 + 68 + /* prologue */ 69 + if (ctx->meta->seq_num == 0) 70 + BPF_SEQ_PRINTF(seq, "prologue\n"); 71 + 72 + BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp)); 73 + 74 + css = &cgrp->self; 75 + css_task_cnt = 0; 76 + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { 77 + if (task->pid == target_pid) 78 + css_task_cnt++; 79 + } 80 + 81 + return 0; 82 + } 83 + 84 + SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") 85 + int BPF_PROG(iter_css_task_for_each_sleep) 86 + { 87 + u64 cgrp_id = bpf_get_current_cgroup_id(); 88 + struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id); 89 + struct cgroup_subsys_state *css; 90 + struct task_struct *task; 91 + 92 + if (cgrp == NULL) 93 + return 0; 94 + css = &cgrp->self; 95 + 96 + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { 97 + 98 + } 99 + bpf_cgroup_release(cgrp); 100 + return 0; 48 101 }
+2 -2
tools/testing/selftests/bpf/progs/iters_task_failure.c
··· 84 84 return 0; 85 85 } 86 86 87 - SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") 88 - __failure __msg("css_task_iter is only allowed in bpf_lsm and bpf iter-s") 87 + SEC("?fentry/" SYS_PREFIX "sys_getpgid") 88 + __failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs") 89 89 int BPF_PROG(iter_css_task_for_each) 90 90 { 91 91 u64 cg_id = bpf_get_current_cgroup_id();
+93
tools/testing/selftests/bpf/progs/verifier_precision.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2023 SUSE LLC */ 3 + #include <linux/bpf.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include "bpf_misc.h" 6 + 7 + SEC("?raw_tp") 8 + __success __log_level(2) 9 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") 10 + __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2") 11 + __msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2") 12 + __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8") 13 + __naked int bpf_neg(void) 14 + { 15 + asm volatile ( 16 + "r2 = 8;" 17 + "r2 = -r2;" 18 + "if r2 != -8 goto 1f;" 19 + "r1 = r10;" 20 + "r1 += r2;" 21 + "1:" 22 + "r0 = 0;" 23 + "exit;" 24 + ::: __clobber_all); 25 + } 26 + 27 + SEC("?raw_tp") 28 + __success __log_level(2) 29 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") 30 + __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2") 31 + __msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2") 32 + __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0") 33 + __naked int bpf_end_to_le(void) 34 + { 35 + asm volatile ( 36 + "r2 = 0;" 37 + "r2 = le16 r2;" 38 + "if r2 != 0 goto 1f;" 39 + "r1 = r10;" 40 + "r1 += r2;" 41 + "1:" 42 + "r0 = 0;" 43 + "exit;" 44 + ::: __clobber_all); 45 + } 46 + 47 + 48 + SEC("?raw_tp") 49 + __success __log_level(2) 50 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") 51 + __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2") 52 + __msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2") 53 + __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0") 54 + __naked int bpf_end_to_be(void) 55 + { 56 + asm volatile ( 57 + "r2 = 0;" 58 + "r2 = be16 r2;" 59 + "if r2 != 0 goto 1f;" 60 + "r1 = r10;" 61 + "r1 += r2;" 62 + "1:" 63 + "r0 = 0;" 64 + "exit;" 65 + ::: __clobber_all); 66 + } 67 + 68 + #if 
(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ 69 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ 70 + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ 71 + __clang_major__ >= 18 72 + 73 + SEC("?raw_tp") 74 + __success __log_level(2) 75 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") 76 + __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2") 77 + __msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2") 78 + __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0") 79 + __naked int bpf_end_bswap(void) 80 + { 81 + asm volatile ( 82 + "r2 = 0;" 83 + "r2 = bswap16 r2;" 84 + "if r2 != 0 goto 1f;" 85 + "r1 = r10;" 86 + "r1 += r2;" 87 + "1:" 88 + "r0 = 0;" 89 + "exit;" 90 + ::: __clobber_all); 91 + } 92 + 93 + #endif /* v4 instruction */
+32
tools/testing/selftests/bpf/verifier/bpf_st_mem.c
··· 65 65 .expected_attach_type = BPF_SK_LOOKUP, 66 66 .runs = -1, 67 67 }, 68 + { 69 + "BPF_ST_MEM stack imm sign", 70 + /* Check if verifier correctly reasons about sign of an 71 + * immediate spilled to stack by BPF_ST instruction. 72 + * 73 + * fp[-8] = -44; 74 + * r0 = fp[-8]; 75 + * if r0 s< 0 goto ret0; 76 + * r0 = -1; 77 + * exit; 78 + * ret0: 79 + * r0 = 0; 80 + * exit; 81 + */ 82 + .insns = { 83 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, -44), 84 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 85 + BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2), 86 + BPF_MOV64_IMM(BPF_REG_0, -1), 87 + BPF_EXIT_INSN(), 88 + BPF_MOV64_IMM(BPF_REG_0, 0), 89 + BPF_EXIT_INSN(), 90 + }, 91 + /* Use prog type that requires return value in range [0, 1] */ 92 + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, 93 + .expected_attach_type = BPF_SK_LOOKUP, 94 + .result = VERBOSE_ACCEPT, 95 + .runs = -1, 96 + .errstr = "0: (7a) *(u64 *)(r10 -8) = -44 ; R10=fp0 fp-8_w=-44\ 97 + 2: (c5) if r0 s< 0x0 goto pc+2\ 98 + R0_w=-44", 99 + },
+1 -1
tools/testing/selftests/bpf/xdp_hw_metadata.c
··· 430 430 431 431 static void read_args(int argc, char *argv[]) 432 432 { 433 - char opt; 433 + int opt; 434 434 435 435 while ((opt = getopt(argc, argv, "mh")) != -1) { 436 436 switch (opt) {
+1 -1
tools/testing/selftests/net/pmtu.sh
··· 2048 2048 case $ret in 2049 2049 0) 2050 2050 all_skipped=false 2051 - [ $exitcode=$ksft_skip ] && exitcode=0 2051 + [ $exitcode -eq $ksft_skip ] && exitcode=0 2052 2052 ;; 2053 2053 $ksft_skip) 2054 2054 [ $all_skipped = true ] && exitcode=$ksft_skip
+75 -12
tools/testing/vsock/util.c
··· 85 85 close(epollfd); 86 86 } 87 87 88 + /* Bind to <bind_port>, connect to <cid, port> and return the file descriptor. */ 89 + int vsock_bind_connect(unsigned int cid, unsigned int port, unsigned int bind_port, int type) 90 + { 91 + struct sockaddr_vm sa_client = { 92 + .svm_family = AF_VSOCK, 93 + .svm_cid = VMADDR_CID_ANY, 94 + .svm_port = bind_port, 95 + }; 96 + struct sockaddr_vm sa_server = { 97 + .svm_family = AF_VSOCK, 98 + .svm_cid = cid, 99 + .svm_port = port, 100 + }; 101 + 102 + int client_fd, ret; 103 + 104 + client_fd = socket(AF_VSOCK, type, 0); 105 + if (client_fd < 0) { 106 + perror("socket"); 107 + exit(EXIT_FAILURE); 108 + } 109 + 110 + if (bind(client_fd, (struct sockaddr *)&sa_client, sizeof(sa_client))) { 111 + perror("bind"); 112 + exit(EXIT_FAILURE); 113 + } 114 + 115 + timeout_begin(TIMEOUT); 116 + do { 117 + ret = connect(client_fd, (struct sockaddr *)&sa_server, sizeof(sa_server)); 118 + timeout_check("connect"); 119 + } while (ret < 0 && errno == EINTR); 120 + timeout_end(); 121 + 122 + if (ret < 0) { 123 + perror("connect"); 124 + exit(EXIT_FAILURE); 125 + } 126 + 127 + return client_fd; 128 + } 129 + 88 130 /* Connect to <cid, port> and return the file descriptor. */ 89 131 static int vsock_connect(unsigned int cid, unsigned int port, int type) 90 132 { ··· 146 104 control_expectln("LISTENING"); 147 105 148 106 fd = socket(AF_VSOCK, type, 0); 107 + if (fd < 0) { 108 + perror("socket"); 109 + exit(EXIT_FAILURE); 110 + } 149 111 150 112 timeout_begin(TIMEOUT); 151 113 do { ··· 178 132 return vsock_connect(cid, port, SOCK_SEQPACKET); 179 133 } 180 134 181 - /* Listen on <cid, port> and return the first incoming connection. The remote 182 - * address is stored to clientaddrp. clientaddrp may be NULL. 183 - */ 184 - static int vsock_accept(unsigned int cid, unsigned int port, 185 - struct sockaddr_vm *clientaddrp, int type) 135 + /* Listen on <cid, port> and return the file descriptor. 
*/ 136 + static int vsock_listen(unsigned int cid, unsigned int port, int type) 186 137 { 187 138 union { 188 139 struct sockaddr sa; ··· 191 148 .svm_cid = cid, 192 149 }, 193 150 }; 194 - union { 195 - struct sockaddr sa; 196 - struct sockaddr_vm svm; 197 - } clientaddr; 198 - socklen_t clientaddr_len = sizeof(clientaddr.svm); 199 151 int fd; 200 - int client_fd; 201 - int old_errno; 202 152 203 153 fd = socket(AF_VSOCK, type, 0); 154 + if (fd < 0) { 155 + perror("socket"); 156 + exit(EXIT_FAILURE); 157 + } 204 158 205 159 if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) { 206 160 perror("bind"); ··· 208 168 perror("listen"); 209 169 exit(EXIT_FAILURE); 210 170 } 171 + 172 + return fd; 173 + } 174 + 175 + /* Listen on <cid, port> and return the first incoming connection. The remote 176 + * address is stored to clientaddrp. clientaddrp may be NULL. 177 + */ 178 + static int vsock_accept(unsigned int cid, unsigned int port, 179 + struct sockaddr_vm *clientaddrp, int type) 180 + { 181 + union { 182 + struct sockaddr sa; 183 + struct sockaddr_vm svm; 184 + } clientaddr; 185 + socklen_t clientaddr_len = sizeof(clientaddr.svm); 186 + int fd, client_fd, old_errno; 187 + 188 + fd = vsock_listen(cid, port, type); 211 189 212 190 control_writeln("LISTENING"); 213 191 ··· 263 205 struct sockaddr_vm *clientaddrp) 264 206 { 265 207 return vsock_accept(cid, port, clientaddrp, SOCK_STREAM); 208 + } 209 + 210 + int vsock_stream_listen(unsigned int cid, unsigned int port) 211 + { 212 + return vsock_listen(cid, port, SOCK_STREAM); 266 213 } 267 214 268 215 int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
+3
tools/testing/vsock/util.h
··· 36 36 void init_signals(void); 37 37 unsigned int parse_cid(const char *str); 38 38 int vsock_stream_connect(unsigned int cid, unsigned int port); 39 + int vsock_bind_connect(unsigned int cid, unsigned int port, 40 + unsigned int bind_port, int type); 39 41 int vsock_seqpacket_connect(unsigned int cid, unsigned int port); 40 42 int vsock_stream_accept(unsigned int cid, unsigned int port, 41 43 struct sockaddr_vm *clientaddrp); 44 + int vsock_stream_listen(unsigned int cid, unsigned int port); 42 45 int vsock_seqpacket_accept(unsigned int cid, unsigned int port, 43 46 struct sockaddr_vm *clientaddrp); 44 47 void vsock_wait_remote_close(int fd);
+50
tools/testing/vsock/vsock_test.c
··· 1180 1180 close(fd); 1181 1181 } 1182 1182 1183 + static void test_double_bind_connect_server(const struct test_opts *opts) 1184 + { 1185 + int listen_fd, client_fd, i; 1186 + struct sockaddr_vm sa_client; 1187 + socklen_t socklen_client = sizeof(sa_client); 1188 + 1189 + listen_fd = vsock_stream_listen(VMADDR_CID_ANY, 1234); 1190 + 1191 + for (i = 0; i < 2; i++) { 1192 + control_writeln("LISTENING"); 1193 + 1194 + timeout_begin(TIMEOUT); 1195 + do { 1196 + client_fd = accept(listen_fd, (struct sockaddr *)&sa_client, 1197 + &socklen_client); 1198 + timeout_check("accept"); 1199 + } while (client_fd < 0 && errno == EINTR); 1200 + timeout_end(); 1201 + 1202 + if (client_fd < 0) { 1203 + perror("accept"); 1204 + exit(EXIT_FAILURE); 1205 + } 1206 + 1207 + /* Waiting for remote peer to close connection */ 1208 + vsock_wait_remote_close(client_fd); 1209 + } 1210 + 1211 + close(listen_fd); 1212 + } 1213 + 1214 + static void test_double_bind_connect_client(const struct test_opts *opts) 1215 + { 1216 + int i, client_fd; 1217 + 1218 + for (i = 0; i < 2; i++) { 1219 + /* Wait until server is ready to accept a new connection */ 1220 + control_expectln("LISTENING"); 1221 + 1222 + client_fd = vsock_bind_connect(opts->peer_cid, 1234, 4321, SOCK_STREAM); 1223 + 1224 + close(client_fd); 1225 + } 1226 + } 1227 + 1183 1228 static struct test_case test_cases[] = { 1184 1229 { 1185 1230 .name = "SOCK_STREAM connection reset", ··· 1329 1284 .name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE", 1330 1285 .run_client = test_stream_msgzcopy_empty_errq_client, 1331 1286 .run_server = test_stream_msgzcopy_empty_errq_server, 1287 + }, 1288 + { 1289 + .name = "SOCK_STREAM double bind connect", 1290 + .run_client = test_double_bind_connect_client, 1291 + .run_server = test_double_bind_connect_server, 1332 1292 }, 1333 1293 {}, 1334 1294 };