git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'net-6.2-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from netfilter.

Current release - regressions:

- sched: sch_taprio: do not schedule in taprio_reset()

Previous releases - regressions:

- core: fix use-after-free in netns ops registration error path

- ipv4: prevent potential spectre v1 gadgets

- ipv6: fix reachability confirmation with proxy_ndp

- netfilter: fix for the set rbtree

- eth: fec: use page_pool_put_full_page when freeing rx buffers

- eth: iavf: fix temporary deadlock and failure to set MAC address

Previous releases - always broken:

- netlink: prevent potential spectre v1 gadgets

- netfilter: fixes for SCTP connection tracking

- mctp: struct sock lifetime fixes

- eth: ravb: fix a possible hang if RIS2_QFF1 happens

- eth: tg3: resolve deadlock in tg3_reset_task() during EEH

Misc:

- Mat stepped out as MPTCP co-maintainer"

* tag 'net-6.2-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (40 commits)
net: mdio-mux-meson-g12a: force internal PHY off on mux switch
docs: networking: Fix bridge documentation URL
tsnep: Fix TX queue stop/wake for multiple queues
net/tg3: resolve deadlock in tg3_reset_task() during EEH
net: mctp: mark socks as dead on unhash, prevent re-add
net: mctp: hold key reference when looking up a general key
net: mctp: move expiry timer delete to unhash
net: mctp: add an explicit reference from a mctp_sk_key to sock
net: ravb: Fix possible hang if RIS2_QFF1 happen
net: ravb: Fix lack of register setting after system resumed for Gen3
net/x25: Fix to not accept on connected socket
ice: move devlink port creation/deletion
sctp: fail if no bound addresses can be used for a given scope
net/sched: sch_taprio: do not schedule in taprio_reset()
Revert "Merge branch 'ethtool-mac-merge'"
netrom: Fix use-after-free of a listening socket.
netfilter: conntrack: unify established states for SCTP paths
Revert "netfilter: conntrack: add sctp DATA_SENT state"
netfilter: conntrack: fix bug in for_each_sctp_chunk
netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
...

+538 -403
+7
CREDITS
@@ -2489 +2489 @@
 D: XF86_8514
 D: cfdisk (curses based disk partitioning program)
 
+N: Mat Martineau
+E: mat@martineau.name
+D: MPTCP subsystem co-maintainer 2020-2023
+D: Keyctl restricted keyring and Diffie-Hellman UAPI
+D: Bluetooth L2CAP ERTM mode and AMP
+S: USA
+
 N: John S. Marvin
 E: jsm@fc.hp.com
 D: PA-RISC port
+1 -1
Documentation/networking/bridge.rst
@@ -8 +8 @@
 userspace tools.
 
 Documentation for Linux bridging is on:
-   http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+   https://wiki.linuxfoundation.org/networking/bridge
 
 The bridge-utilities are maintained at:
    git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git
+3 -7
Documentation/networking/nf_conntrack-sysctl.rst
@@ -173 +173 @@
 	default 3
 
 nf_conntrack_sctp_timeout_established - INTEGER (seconds)
-	default 432000 (5 days)
+	default 210
+
+	Default is set to (hb_interval * path_max_retrans + rto_max)
 
 nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
 	default 0.3
@@ -191 +189 @@
 
 	This timeout is used to setup conntrack entry on secondary paths.
 	Default is set to hb_interval.
-
-nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
-	default 210
-
-	This timeout is used to setup conntrack entry on secondary paths.
-	Default is set to (hb_interval * path_max_retrans + rto_max)
 
 nf_conntrack_udp_timeout - INTEGER (seconds)
 	default 30
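For context on where the new 210 comes from: with the stock SCTP defaults, which match the RFC 4960 suggested values (hb_interval = 30 s, path_max_retrans = 5, rto_max = 60 s), the formula in the updated doc text works out to

    hb_interval * path_max_retrans + rto_max = 30 * 5 + 60 = 210 seconds

i.e. an established but idle association survives one full heartbeat failure cycle before the conntrack entry expires. (Those figures are the defaults; tuned SCTP sysctls would call for a matching conntrack timeout adjustment.)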
-1
MAINTAINERS
@@ -14632 +14632 @@
 F:	net/netlabel/
 
 NETWORKING [MPTCP]
-M:	Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:	Matthieu Baerts <matthieu.baerts@tessares.net>
 L:	netdev@vger.kernel.org
 L:	mptcp@lists.linux.dev
+1 -1
drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104 +104 @@
 	},
 	{
 		.compatible = "microchip,ksz8563",
-		.data = &ksz_switch_chips[KSZ9893]
+		.data = &ksz_switch_chips[KSZ8563]
 	},
 	{
 		.compatible = "microchip,ksz9567",
+1 -1
drivers/net/ethernet/adi/adin1110.c
@@ -356 +356 @@
 
 	if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
 	    (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
-		rxb->offload_fwd_mark = 1;
+		rxb->offload_fwd_mark = port_priv->priv->forwarding;
 
 	netif_rx(rxb);
 
+4 -4
drivers/net/ethernet/broadcom/tg3.c
@@ -11166 +11166 @@
 	rtnl_lock();
 	tg3_full_lock(tp, 0);
 
-	if (!netif_running(tp->dev)) {
+	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
 		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
 		rtnl_unlock();
@@ -18101 +18101 @@
 
 	netdev_info(netdev, "PCI I/O error detected\n");
 
+	/* Want to make sure that the reset task doesn't run */
+	tg3_reset_task_cancel(tp);
+
 	rtnl_lock();
 
 	/* Could be second call or maybe we don't have netdev yet */
@@ -18119 +18116 @@
 	tg3_netif_stop(tp);
 
 	tg3_timer_stop(tp);
-
-	/* Want to make sure that the reset task doesn't run */
-	tg3_reset_task_cancel(tp);
 
 	netif_device_detach(netdev);
 
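For readers unfamiliar with the deadlock shape being fixed here: the reset worker itself takes rtnl_lock, so cancelling it synchronously from a path that already holds rtnl can block forever. A minimal userspace model of the correct ordering with POSIX threads follows; all names are invented for illustration, this is not driver code (compile with -lpthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static bool reset_pending = true;

/* Plays the role of tg3_reset_task(): it needs big_lock to run. */
static void *reset_task(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&big_lock);
	if (reset_pending)
		puts("reset task: doing reset work");
	pthread_mutex_unlock(&big_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, reset_task, NULL);

	/* Correct order, mirroring the patch: wait for the worker to
	 * finish *before* taking big_lock.  Joining while holding
	 * big_lock could deadlock, since the worker may be blocked on
	 * that very lock.
	 */
	pthread_join(worker, NULL);	/* like tg3_reset_task_cancel() */

	pthread_mutex_lock(&big_lock);	/* like rtnl_lock() in the handler */
	reset_pending = false;
	puts("error handler: safe to tear down");
	pthread_mutex_unlock(&big_lock);
	return 0;
}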
+9 -6
drivers/net/ethernet/engleder/tsnep_main.c
@@ -450 +450 @@
 		/* ring full, shall not happen because queue is stopped if full
 		 * below
 		 */
-		netif_stop_queue(tx->adapter->netdev);
+		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
 
 		spin_unlock_irqrestore(&tx->lock, flags);
 
@@ -493 +493 @@
 
 	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
 		/* ring can get full with next frame */
-		netif_stop_queue(tx->adapter->netdev);
+		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
 	}
 
 	spin_unlock_irqrestore(&tx->lock, flags);
@@ -503 +503 @@
 
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
+	struct tsnep_tx_entry *entry;
+	struct netdev_queue *nq;
 	unsigned long flags;
 	int budget = 128;
-	struct tsnep_tx_entry *entry;
-	int count;
 	int length;
+	int count;
+
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
 
 	spin_lock_irqsave(&tx->lock, flags);
 
@@ -567 +564 @@
 	} while (likely(budget));
 
 	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
-	    netif_queue_stopped(tx->adapter->netdev)) {
-		netif_wake_queue(tx->adapter->netdev);
+	    netif_tx_queue_stopped(nq)) {
+		netif_tx_wake_queue(nq);
 	}
 
 	spin_unlock_irqrestore(&tx->lock, flags);
+1 -1
drivers/net/ethernet/freescale/fec_main.c
@@ -3191 +3191 @@
 	for (q = 0; q < fep->num_rx_queues; q++) {
 		rxq = fep->rx_queue[q];
 		for (i = 0; i < rxq->bd.ring_size; i++)
-			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+			page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
 
 		for (i = 0; i < XDP_STATS_TOTAL; i++)
 			rxq->stats[i] = 0;
+1 -1
drivers/net/ethernet/intel/iavf/iavf.h
@@ -249 +249 @@
 
 /* board specific private data structure */
 struct iavf_adapter {
+	struct workqueue_struct *wq;
 	struct work_struct reset_task;
 	struct work_struct adminq_task;
 	struct delayed_work client_task;
@@ -460 +459 @@
 
 /* needed by iavf_ethtool.c */
 extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
 
 static inline const char *iavf_state_str(enum iavf_state_t state)
 {
+5 -5
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -532 +532 @@
 	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
 		if (netif_running(netdev)) {
 			adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-			queue_work(iavf_wq, &adapter->reset_task);
+			queue_work(adapter->wq, &adapter->reset_task);
 		}
 	}
 
@@ -672 +672 @@
 
 	if (netif_running(netdev)) {
 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-		queue_work(iavf_wq, &adapter->reset_task);
+		queue_work(adapter->wq, &adapter->reset_task);
 	}
 
 	return 0;
@@ -1433 +1433 @@
 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
 	spin_unlock_bh(&adapter->fdir_fltr_lock);
 
-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
 ret:
 	if (err && fltr)
@@ -1474 +1474 @@
 	spin_unlock_bh(&adapter->fdir_fltr_lock);
 
 	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
 	return err;
 }
@@ -1658 +1658 @@
 	spin_unlock_bh(&adapter->adv_rss_lock);
 
 	if (!err)
-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
 	mutex_unlock(&adapter->crit_lock);
 
+51 -62
drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -49 +49 @@
 MODULE_LICENSE("GPL v2");
 
 static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
 
 int iavf_status_to_errno(enum iavf_status status)
 {
@@ -276 +277 @@
 	if (!(adapter->flags &
 	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-		queue_work(iavf_wq, &adapter->reset_task);
+		queue_work(adapter->wq, &adapter->reset_task);
 	}
 }
@@ -290 +291 @@
 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
 {
 	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -410 +411 @@
 
 	if (adapter->state != __IAVF_REMOVE)
 		/* schedule work on the private workqueue */
-		queue_work(iavf_wq, &adapter->adminq_task);
+		queue_work(adapter->wq, &adapter->adminq_task);
 
 	return IRQ_HANDLED;
 }
@@ -1033 +1034 @@
 
 	/* schedule the watchdog task to immediately process the request */
 	if (f) {
-		queue_work(iavf_wq, &adapter->watchdog_task.work);
+		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 		return 0;
 	}
 	return -ENOMEM;
@@ -1256 +1257 @@
 	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
 	if (CLIENT_ENABLED(adapter))
 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -1413 +1414 @@
 		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
 	}
 
-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -2247 +2248 @@
 
 	if (aq_required) {
 		adapter->aq_required |= aq_required;
-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 	}
 }
@@ -2692 +2693 @@
 		goto restart_watchdog;
 	}
 
+	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+	    adapter->netdev_registered &&
+	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+	    rtnl_trylock()) {
+		netdev_update_features(adapter->netdev);
+		rtnl_unlock();
+		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+	}
+
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2708 +2700 @@
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		mutex_unlock(&adapter->crit_lock);
-		queue_work(iavf_wq, &adapter->reset_task);
+		queue_work(adapter->wq, &adapter->reset_task);
 		return;
 	}
 
@@ -2716 +2708 @@
 	case __IAVF_STARTUP:
 		iavf_startup(adapter);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(30));
 		return;
 	case __IAVF_INIT_VERSION_CHECK:
 		iavf_init_version_check(adapter);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(30));
 		return;
 	case __IAVF_INIT_GET_RESOURCES:
 		iavf_init_get_resources(adapter);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(1));
 		return;
 	case __IAVF_INIT_EXTENDED_CAPS:
 		iavf_init_process_extended_caps(adapter);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(1));
 		return;
 	case __IAVF_INIT_CONFIG_ADAPTER:
 		iavf_init_config_adapter(adapter);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(1));
 		return;
 	case __IAVF_INIT_FAILED:
@@ -2759 +2751 @@
 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 			iavf_shutdown_adminq(hw);
 			mutex_unlock(&adapter->crit_lock);
-			queue_delayed_work(iavf_wq,
+			queue_delayed_work(adapter->wq,
 					   &adapter->watchdog_task, (5 * HZ));
 			return;
 		}
 		/* Try again from failed step*/
 		iavf_change_state(adapter, adapter->last_state);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
 		return;
 	case __IAVF_COMM_FAILED:
 		if (test_bit(__IAVF_IN_REMOVE_TASK,
@@ -2797 +2789 @@
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq,
+		queue_delayed_work(adapter->wq,
 				   &adapter->watchdog_task,
 				   msecs_to_jiffies(10));
 		return;
 	case __IAVF_RESETTING:
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+				   HZ * 2);
 		return;
 	case __IAVF_DOWN:
 	case __IAVF_DOWN_PENDING:
@@ -2843 +2834 @@
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
-		queue_work(iavf_wq, &adapter->reset_task);
+		queue_work(adapter->wq, &adapter->reset_task);
 		mutex_unlock(&adapter->crit_lock);
-		queue_delayed_work(iavf_wq,
+		queue_delayed_work(adapter->wq,
 				   &adapter->watchdog_task, HZ * 2);
 		return;
 	}
@@ -2854 +2845 @@
 	mutex_unlock(&adapter->crit_lock);
 restart_watchdog:
 	if (adapter->state >= __IAVF_DOWN)
-		queue_work(iavf_wq, &adapter->adminq_task);
+		queue_work(adapter->wq, &adapter->adminq_task);
 	if (adapter->aq_required)
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(20));
 	else
-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+				   HZ * 2);
 }
 
 /**
@@ -2962 +2952 @@
 	 */
 	if (!mutex_trylock(&adapter->crit_lock)) {
 		if (adapter->state != __IAVF_REMOVE)
-			queue_work(iavf_wq, &adapter->reset_task);
+			queue_work(adapter->wq, &adapter->reset_task);
 
 		goto reset_finish;
 	}
@@ -3126 +3116 @@
 	bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
 	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
 
-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
 
 	/* We were running when the reset started, so we need to restore some
 	 * state here.
@@ -3218 +3208 @@
 		if (adapter->state == __IAVF_REMOVE)
 			return;
 
-		queue_work(iavf_wq, &adapter->adminq_task);
+		queue_work(adapter->wq, &adapter->adminq_task);
 		goto out;
 	}
@@ -3242 +3232 @@
 	} while (pending);
 	mutex_unlock(&adapter->crit_lock);
 
-	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
-		if (adapter->netdev_registered ||
-		    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
-			struct net_device *netdev = adapter->netdev;
-
-			rtnl_lock();
-			netdev_update_features(netdev);
-			rtnl_unlock();
-			/* Request VLAN offload settings */
-			if (VLAN_V2_ALLOWED(adapter))
-				iavf_set_vlan_offload_features
-					(adapter, 0, netdev->features);
-
-			iavf_set_queue_vlan_tag_loc(adapter);
-		}
-
-		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
-	}
 	if ((adapter->flags &
 	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
 	    adapter->state == __IAVF_RESETTING)
@@ -4341 +4349 @@
 
 	if (netif_running(netdev)) {
 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-		queue_work(iavf_wq, &adapter->reset_task);
+		queue_work(adapter->wq, &adapter->reset_task);
 	}
 
 	return 0;
@@ -4890 +4898 @@
 	hw = &adapter->hw;
 	hw->back = adapter;
 
+	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+					      iavf_driver_name);
+	if (!adapter->wq) {
+		err = -ENOMEM;
+		goto err_alloc_wq;
+	}
+
 	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 	iavf_change_state(adapter, __IAVF_STARTUP);
@@ -4941 +4942 @@
 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
-	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
 	/* Setup the wait queue for indicating transition to down status */
@@ -4953 +4954 @@
 	return 0;
 
 err_ioremap:
+	destroy_workqueue(adapter->wq);
+err_alloc_wq:
 	free_netdev(netdev);
 err_alloc_etherdev:
 	pci_disable_pcie_error_reporting(pdev);
@@ -5024 +5023 @@
 		return err;
 	}
 
-	queue_work(iavf_wq, &adapter->reset_task);
+	queue_work(adapter->wq, &adapter->reset_task);
 
 	netif_device_attach(adapter->netdev);
 
@@ -5171 +5170 @@
 	}
 	spin_unlock_bh(&adapter->adv_rss_lock);
 
+	destroy_workqueue(adapter->wq);
+
 	free_netdev(netdev);
 
 	pci_disable_pcie_error_reporting(pdev);
@@ -5199 +5196 @@
  **/
 static int __init iavf_init_module(void)
 {
-	int ret;
-
 	pr_info("iavf: %s\n", iavf_driver_string);
 
 	pr_info("%s\n", iavf_copyright);
 
-	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
-				  iavf_driver_name);
-	if (!iavf_wq) {
-		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
-		return -ENOMEM;
-	}
-
-	ret = pci_register_driver(&iavf_driver);
-	if (ret)
-		destroy_workqueue(iavf_wq);
-
-	return ret;
+	return pci_register_driver(&iavf_driver);
}
 
 module_init(iavf_init_module);
@@ -5217 +5227 @@
 static void __exit iavf_exit_module(void)
 {
 	pci_unregister_driver(&iavf_driver);
-	destroy_workqueue(iavf_wq);
 }
 
 module_exit(iavf_exit_module);
+9 -1
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1952 +1952 @@
 		if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
 			adapter->flags |= IAVF_FLAG_RESET_PENDING;
 			dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-			queue_work(iavf_wq, &adapter->reset_task);
+			queue_work(adapter->wq, &adapter->reset_task);
 		}
 		break;
 	default:
@@ -2226 +2226 @@
 
 		iavf_process_config(adapter);
 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+		/* Request VLAN offload settings */
+		if (VLAN_V2_ALLOWED(adapter))
+			iavf_set_vlan_offload_features(adapter, 0,
+						       netdev->features);
+
+		iavf_set_queue_vlan_tag_loc(adapter);
+
 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
 						    adapter->hw.mac.addr);
-3
drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3235 +3235 @@
 		}
 	}
 
-	if (vsi->type == ICE_VSI_PF)
-		ice_devlink_destroy_pf_port(pf);
-
 	if (vsi->type == ICE_VSI_VF &&
 	    vsi->agg_node && vsi->agg_node->valid)
 		vsi->agg_node->num_vsis--;
+17 -8
drivers/net/ethernet/intel/ice/ice_main.c
@@ -4590 +4590 @@
 }
 
 /**
- * ice_register_netdev - register netdev and devlink port
+ * ice_register_netdev - register netdev
  * @pf: pointer to the PF struct
  */
 static int ice_register_netdev(struct ice_pf *pf)
@@ -4602 +4602 @@
 	if (!vsi || !vsi->netdev)
 		return -EIO;
 
-	err = ice_devlink_create_pf_port(pf);
-	if (err)
-		goto err_devlink_create;
-
-	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
 	err = register_netdev(vsi->netdev);
 	if (err)
 		goto err_register_netdev;
@@ -4612 +4617 @@
 
 	return 0;
 err_register_netdev:
-	ice_devlink_destroy_pf_port(pf);
-err_devlink_create:
 	free_netdev(vsi->netdev);
 	vsi->netdev = NULL;
 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4629 +4636 @@
 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 {
 	struct device *dev = &pdev->dev;
+	struct ice_vsi *vsi;
 	struct ice_pf *pf;
 	struct ice_hw *hw;
 	int i, err;
@@ -4912 +4918 @@
 	pcie_print_link_status(pf->pdev);
 
 probe_done:
+	err = ice_devlink_create_pf_port(pf);
+	if (err)
+		goto err_create_pf_port;
+
+	vsi = ice_get_main_vsi(pf);
+	if (!vsi || !vsi->netdev) {
+		err = -EINVAL;
+		goto err_netdev_reg;
+	}
+
+	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
 	err = ice_register_netdev(pf);
 	if (err)
 		goto err_netdev_reg;
@@ -4961 +4955 @@
 err_devlink_reg_param:
 	ice_devlink_unregister_params(pf);
 err_netdev_reg:
+	ice_devlink_destroy_pf_port(pf);
+err_create_pf_port:
 err_send_version_unroll:
 	ice_vsi_release_all(pf);
 err_alloc_sw_unroll:
@@ -5091 +5083 @@
 	ice_setup_mc_magic_wake(pf);
 	ice_vsi_release_all(pf);
 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+	ice_devlink_destroy_pf_port(pf);
 	ice_set_wake(pf);
 	ice_free_irq_msix_misc(pf);
 	ice_for_each_vsi(pf, i) {
+8 -1
drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1259 +1259 @@
 		gic->handler = NULL;
 		gic->arg = NULL;
 
+		if (!i)
+			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+				 pci_name(pdev));
+		else
+			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+				 i - 1, pci_name(pdev));
+
 		irq = pci_irq_vector(pdev, i);
 		if (irq < 0) {
 			err = irq;
 			goto free_mask;
 		}
 
-		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+		err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
 		if (err)
 			goto free_mask;
 		irq_set_affinity_and_hint(irq, req_mask);
+8 -2
drivers/net/ethernet/renesas/ravb_main.c
@@ -1101 +1101 @@
 	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
 			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
 			priv->stats[RAVB_BE].rx_over_errors++;
 
-		 /* Receive Descriptor Empty int */
+		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF1)
 			priv->stats[RAVB_NC].rx_over_errors++;
@@ -2973 +2973 @@
 	else
 		ret = ravb_close(ndev);
 
+	if (priv->info->ccc_gac)
+		ravb_ptp_stop(ndev);
+
 	return ret;
 }
@@ -3013 +3010 @@
 
 	/* Restore descriptor base address table */
 	ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+	if (priv->info->ccc_gac)
+		ravb_ptp_init(ndev, priv->pdev);
 
 	if (netif_running(ndev)) {
 		if (priv->wol_enabled) {
+13 -9
drivers/net/ethernet/renesas/rswitch.c
@@ -1074 +1074 @@
 			port = NULL;
 			goto out;
 		}
-		if (index == rdev->etha->index)
+		if (index == rdev->etha->index) {
+			if (!of_device_is_available(port))
+				port = NULL;
 			break;
+		}
 	}
 
 out:
@@ -1109 +1106 @@
 
 	port = rswitch_get_port_node(rdev);
 	if (!port)
-		return -ENODEV;
+		return 0;	/* ignored */
 
 	err = of_get_phy_mode(port, &rdev->etha->phy_interface);
 	of_node_put(port);
@@ -1327 +1324 @@
 {
 	int i, err;
 
-	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+	rswitch_for_each_enabled_port(priv, i) {
 		err = rswitch_ether_port_init_one(priv->rdev[i]);
 		if (err)
 			goto err_init_one;
 	}
 
-	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+	rswitch_for_each_enabled_port(priv, i) {
 		err = rswitch_serdes_init(priv->rdev[i]);
 		if (err)
 			goto err_serdes;
@@ -1342 +1339 @@
 	return 0;
 
 err_serdes:
-	for (i--; i >= 0; i--)
+	rswitch_for_each_enabled_port_continue_reverse(priv, i)
 		rswitch_serdes_deinit(priv->rdev[i]);
 	i = RSWITCH_NUM_PORTS;
 
 err_init_one:
-	for (i--; i >= 0; i--)
+	rswitch_for_each_enabled_port_continue_reverse(priv, i)
 		rswitch_ether_port_deinit_one(priv->rdev[i]);
 
 	return err;
@@ -1611 +1608 @@
 	netif_napi_add(ndev, &rdev->napi, rswitch_poll);
 
 	port = rswitch_get_port_node(rdev);
+	rdev->disabled = !port;
 	err = of_get_ethdev_address(port, ndev);
 	of_node_put(port);
 	if (err) {
@@ -1711 +1707 @@
 	if (err)
 		goto err_ether_port_init_all;
 
-	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+	rswitch_for_each_enabled_port(priv, i) {
 		err = register_netdev(priv->rdev[i]->ndev);
 		if (err) {
-			for (i--; i >= 0; i--)
+			rswitch_for_each_enabled_port_continue_reverse(priv, i)
 				unregister_netdev(priv->rdev[i]->ndev);
 			goto err_register_netdev;
 		}
 	}
 
-	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+	rswitch_for_each_enabled_port(priv, i)
 		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
 			    priv->rdev[i]->ndev->dev_addr);
 
+12
drivers/net/ethernet/renesas/rswitch.h
@@ -13 +13 @@
 #define RSWITCH_MAX_NUM_QUEUES	128
 
 #define RSWITCH_NUM_PORTS	3
+#define rswitch_for_each_enabled_port(priv, i)		\
+	for (i = 0; i < RSWITCH_NUM_PORTS; i++)		\
+		if (priv->rdev[i]->disabled)		\
+			continue;			\
+		else
+
+#define rswitch_for_each_enabled_port_continue_reverse(priv, i)	\
+	for (i--; i >= 0; i--)						\
+		if (priv->rdev[i]->disabled)				\
+			continue;					\
+		else
 
 #define TX_RING_SIZE		1024
 #define RX_RING_SIZE		1024
@@ -949 +938 @@
 	struct rswitch_gwca_queue *tx_queue;
 	struct rswitch_gwca_queue *rx_queue;
 	u8 ts_tag;
+	bool disabled;
 
 	int port;
 	struct rswitch_etha *etha;
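The for-plus-if/else shape of these new macros is a standard C trick for filtered iteration: the trailing "else" makes the statement that follows the macro invocation become the loop body, while disabled entries are skipped via "continue". Without the "else", the "continue" would swallow the caller's statement entirely. A standalone sketch of the idiom, with invented names and data:

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS 3

struct port {
	bool disabled;
	const char *name;
};

/* Same structure as rswitch_for_each_enabled_port: the else binds the
 * caller's next statement (or block) as the effective loop body.
 */
#define for_each_enabled_port(ports, i)		\
	for (i = 0; i < NUM_PORTS; i++)		\
		if ((ports)[i].disabled)	\
			continue;		\
		else

int main(void)
{
	struct port ports[NUM_PORTS] = {
		{ false, "tsn0" }, { true, "tsn1" }, { false, "tsn2" },
	};
	int i;

	for_each_enabled_port(ports, i)
		printf("init %s\n", ports[i].name);	/* skips tsn1 */

	return 0;
}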
+16 -7
drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -4 +4 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/device.h>
@@ -151 +150 @@
 
 static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
 {
+	u32 value;
 	int ret;
 
 	/* Enable the phy clock */
@@ -165 +163 @@
 
 	/* Initialize ephy control */
 	writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
-	writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
-	       FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
-	       FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
-	       PHY_CNTL1_CLK_EN |
-	       PHY_CNTL1_CLKFREQ |
-	       PHY_CNTL1_PHY_ENB,
-	       priv->regs + ETH_PHY_CNTL1);
+
+	/* Make sure we get a 0 -> 1 transition on the enable bit */
+	value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+		FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+		FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+		PHY_CNTL1_CLK_EN |
+		PHY_CNTL1_CLKFREQ;
+	writel(value, priv->regs + ETH_PHY_CNTL1);
 	writel(PHY_CNTL2_USE_INTERNAL |
 	       PHY_CNTL2_SMI_SRC_MAC |
 	       PHY_CNTL2_RX_CLK_EPHY,
 	       priv->regs + ETH_PHY_CNTL2);
+
+	value |= PHY_CNTL1_PHY_ENB;
+	writel(value, priv->regs + ETH_PHY_CNTL1);
+
+	/* The phy needs a bit of time to power up */
+	mdelay(10);
 
 	return 0;
 }
+3
include/net/mana/gdma.h
@@ -336 +336 @@
 	};
 };
 
+#define MANA_IRQ_NAME_SZ 32
+
 struct gdma_irq_context {
 	void (*handler)(void *arg);
 	void *arg;
+	char name[MANA_IRQ_NAME_SZ];
 };
 
 struct gdma_context {
+1 -2
include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -15 +15 @@
 	SCTP_CONNTRACK_SHUTDOWN_RECD,
 	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
 	SCTP_CONNTRACK_HEARTBEAT_SENT,
-	SCTP_CONNTRACK_HEARTBEAT_ACKED,
-	SCTP_CONNTRACK_DATA_SENT,
+	SCTP_CONNTRACK_HEARTBEAT_ACKED,	/* no longer used */
 	SCTP_CONNTRACK_MAX
 };
+3
lib/nlattr.c
@@ -10 +10 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include <linux/skbuff.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -382 +381 @@
 	if (type <= 0 || type > maxtype)
 		return 0;
 
+	type = array_index_nospec(type, maxtype + 1);
 	pt = &policy[type];
 
 	BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -598 +596 @@
 			}
 			continue;
 		}
+		type = array_index_nospec(type, maxtype + 1);
 		if (policy) {
 			int err = validate_nla(nla, maxtype, policy,
 					       validate, extack, depth);
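The array_index_nospec() calls added here (and in the fib_semantics.c and metrics.c hunks below) clamp an already bounds-checked index with a branchless mask, so that a mispredicted bounds check cannot be used as a Spectre v1 gadget to speculatively index out of range. A minimal userspace sketch of the masking idea follows; it mirrors the generic kernel mask but with local names, and assumes 64-bit size_t, sizes below 2^63, and arithmetic right shift of negative values (as the kernel's version also does).

#include <stdint.h>
#include <stdio.h>

/* All-ones when idx < size, zero otherwise, with no conditional branch. */
static inline size_t index_mask_nospec(size_t idx, size_t size)
{
	/* sign bit of (idx | (size - 1 - idx)) is 0 iff idx < size */
	return (size_t)(~(int64_t)(idx | (size - 1 - idx)) >> 63);
}

static inline size_t index_nospec(size_t idx, size_t size)
{
	return idx & index_mask_nospec(idx, size);
}

int main(void)
{
	const int policy[4] = { 10, 20, 30, 40 };
	size_t type = 2;	/* imagine this arrived in a netlink attribute */

	if (type <= 3) {	/* architectural bounds check */
		type = index_nospec(type, 4);	/* speculation-safe clamp */
		printf("policy[%zu] = %d\n", type, policy[type]);
	}
	return 0;
}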
+1 -1
net/core/net_namespace.c
@@ -137 +137 @@
 		return 0;
 
 	if (ops->id && ops->size) {
-cleanup:
 		ng = rcu_dereference_protected(net->gen,
 					       lockdep_is_held(&pernet_ops_rwsem));
 		ng->ptr[*ops->id] = NULL;
 	}
 
+cleanup:
 	kfree(data);
 
 out:
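The one-line label move matters because the "cleanup" label previously sat inside the "if (ops->id && ops->size)" block, so error paths that jumped to it ran the id-specific teardown even when no id had been set up, which is how the use-after-free arose. A toy illustration of the unwind rule (all names invented, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* 'slot' stands in for the per-netns pointer slot (ng->ptr[*ops->id]). */
static int do_register(int *slot, int assign_id, int fail_before_id)
{
	char *data = malloc(16);	/* like the per-ops data allocation */

	if (!data)
		return -1;

	if (fail_before_id)
		goto cleanup;		/* nothing id-related to undo yet */

	if (assign_id)
		*slot = 1;		/* like ng->ptr[*ops->id] = data */

	/* ... pretend a later registration step failed here ... */
	if (assign_id)
		*slot = 0;	/* id-specific undo stays inside its guard */

cleanup:
	free(data);		/* shared undo lives after the guard */
	return -1;
}

int main(void)
{
	int slot = 0;

	do_register(&slot, 0, 1);	/* early failure: slot never touched */
	do_register(&slot, 1, 0);	/* late failure: slot set then cleared */
	printf("slot = %d\n", slot);
	return 0;
}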
+2
net/ipv4/fib_semantics.c
@@ -30 +30 @@
 #include <linux/slab.h>
 #include <linux/netlink.h>
 #include <linux/hash.h>
+#include <linux/nospec.h>
 
 #include <net/arp.h>
 #include <net/inet_dscp.h>
@@ -1023 +1022 @@
 	if (type > RTAX_MAX)
 		return false;
 
+	type = array_index_nospec(type, RTAX_MAX + 1);
 	if (type == RTAX_CC_ALGO) {
 		char tmp[TCP_CA_NAME_MAX];
 		bool ecn_ca = false;
+2
net/ipv4/metrics.c
@@ -1 +1 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/netlink.h>
+#include <linux/nospec.h>
 #include <linux/rtnetlink.h>
 #include <linux/types.h>
 #include <net/ip.h>
@@ -26 +25 @@
 			return -EINVAL;
 		}
 
+		type = array_index_nospec(type, RTAX_MAX + 1);
 		if (type == RTAX_CC_ALGO) {
 			char tmp[TCP_CA_NAME_MAX];
+14 -1
net/ipv6/ip6_output.c
@@ -547 +547 @@
 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 		int proxied = ip6_forward_proxy_check(skb);
 		if (proxied > 0) {
-			hdr->hop_limit--;
+			/* It's tempting to decrease the hop limit
+			 * here by 1, as we do at the end of the
+			 * function too.
+			 *
+			 * But that would be incorrect, as proxying is
+			 * not forwarding.  The ip6_input function
+			 * will handle this packet locally, and it
+			 * depends on the hop limit being unchanged.
+			 *
+			 * One example is the NDP hop limit, that
+			 * always has to stay 255, but other would be
+			 * similar checks around RA packets, where the
+			 * user can even change the desired limit.
+			 */
 			return ip6_input(skb);
 		} else if (proxied < 0) {
 			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+7 -3
net/mctp/af_mctp.c
@@ -544 +544 @@
 
 static void mctp_sk_close(struct sock *sk, long timeout)
 {
-	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
-
-	del_timer_sync(&msk->key_expiry);
 	sk_common_release(sk);
 }
 
@@ -577 +580 @@
 		spin_lock_irqsave(&key->lock, fl2);
 		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
 	}
+	sock_set_flag(sk, SOCK_DEAD);
 	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+	/* Since there are no more tag allocations (we have removed all of the
+	 * keys), stop any pending expiry events. the timer cannot be re-queued
+	 * as the sk is no longer observable
+	 */
+	del_timer_sync(&msk->key_expiry);
 }
 
 static struct proto mctp_proto = {
+21 -13
net/mctp/route.c
@@ -147 +147 @@
 	key->valid = true;
 	spin_lock_init(&key->lock);
 	refcount_set(&key->refs, 1);
+	sock_hold(key->sk);
 
 	return key;
 }
@@ -166 +165 @@
 	mctp_dev_release_key(key->dev, key);
 	spin_unlock_irqrestore(&key->lock, flags);
 
+	sock_put(key->sk);
 	kfree(key);
 }
 
@@ -178 +176 @@
 	int rc = 0;
 
 	spin_lock_irqsave(&net->mctp.keys_lock, flags);
+
+	if (sock_flag(&msk->sk, SOCK_DEAD)) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
 
 	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
 		if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
@@ -205 +198 @@
 		hlist_add_head(&key->sklist, &msk->keys);
 	}
 
+out_unlock:
 	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
 
 	return rc;
@@ -323 +315 @@
 
 static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
 {
+	struct mctp_sk_key *key, *any_key = NULL;
 	struct net *net = dev_net(skb->dev);
-	struct mctp_sk_key *key;
 	struct mctp_sock *msk;
 	struct mctp_hdr *mh;
 	unsigned long f;
@@ -369 +361 @@
 		 * key for reassembly - we'll create a more specific
 		 * one for future packets if required (ie, !EOM).
 		 */
-		key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
-		if (key) {
-			msk = container_of(key->sk,
+		any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+		if (any_key) {
+			msk = container_of(any_key->sk,
 					   struct mctp_sock, sk);
-			spin_unlock_irqrestore(&key->lock, f);
-			mctp_key_unref(key);
-			key = NULL;
+			spin_unlock_irqrestore(&any_key->lock, f);
 		}
 	}
 
@@ -425 +419 @@
 		 * this function.
 		 */
 		rc = mctp_key_add(key, msk);
-		if (rc) {
-			kfree(key);
-		} else {
+		if (!rc)
 			trace_mctp_key_acquire(key);
 
-			/* we don't need to release key->lock on exit */
-			mctp_key_unref(key);
-		}
+		/* we don't need to release key->lock on exit, so
+		 * clean up here and suppress the unlock via
+		 * setting to NULL
+		 */
+		mctp_key_unref(key);
 		key = NULL;
 
 	} else {
@@ -479 +473 @@
 		spin_unlock_irqrestore(&key->lock, f);
 		mctp_key_unref(key);
 	}
+	if (any_key)
+		mctp_key_unref(any_key);
 out:
 	if (rc)
 		kfree_skb(skb);
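The sock_hold()/sock_put() pair added above gives each mctp_sk_key a real reference on its socket, so the socket cannot be freed while a key lookup result is still in use. A generic userspace model of that lifetime rule with C11 atomics (this is a sketch of the refcounting idea, not the kernel's sock API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
	int value;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the previous value: last ref frees */
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		printf("freeing obj (value=%d)\n", o->value);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct obj *ref;

	if (!o)
		return 1;
	atomic_init(&o->refs, 1);	/* creator's reference */
	o->value = 42;

	ref = obj_get(o);	/* like sock_hold() when a key is created */
	obj_put(o);		/* creator drops its reference: no free yet */

	printf("still usable: %d\n", ref->value);
	obj_put(ref);		/* like sock_put() in the key destructor */
	return 0;
}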
+72 -98
net/netfilter/nf_conntrack_proto_sctp.c
@@ -27 +27 @@
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_timeout.h>
 
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
-   closely.  They're more complex. --RR
-
-   And so for me for SCTP :D -Kiran */
-
 static const char *const sctp_conntrack_names[] = {
-	"NONE",
-	"CLOSED",
-	"COOKIE_WAIT",
-	"COOKIE_ECHOED",
-	"ESTABLISHED",
-	"SHUTDOWN_SENT",
-	"SHUTDOWN_RECD",
-	"SHUTDOWN_ACK_SENT",
-	"HEARTBEAT_SENT",
-	"HEARTBEAT_ACKED",
+	[SCTP_CONNTRACK_NONE]			= "NONE",
+	[SCTP_CONNTRACK_CLOSED]			= "CLOSED",
+	[SCTP_CONNTRACK_COOKIE_WAIT]		= "COOKIE_WAIT",
+	[SCTP_CONNTRACK_COOKIE_ECHOED]		= "COOKIE_ECHOED",
+	[SCTP_CONNTRACK_ESTABLISHED]		= "ESTABLISHED",
+	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= "SHUTDOWN_SENT",
+	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= "SHUTDOWN_RECD",
+	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= "SHUTDOWN_ACK_SENT",
+	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= "HEARTBEAT_SENT",
 };
 
 #define SECS  * HZ
@@ -48 +54 @@
 	[SCTP_CONNTRACK_CLOSED]			= 10 SECS,
 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
-	[SCTP_CONNTRACK_ESTABLISHED]		= 5 DAYS,
+	[SCTP_CONNTRACK_ESTABLISHED]		= 210 SECS,
 	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
 	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
-	[SCTP_CONNTRACK_HEARTBEAT_ACKED]	= 210 SECS,
-	[SCTP_CONNTRACK_DATA_SENT]		= 30 SECS,
 };
 
 #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED	1
@@ -66 +74 @@
 #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
 #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
-#define sDS SCTP_CONNTRACK_DATA_SENT
 #define sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -88 +98 @@
 CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
 	 the SHUTDOWN chunk. Connection is closed.
 HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK/DATA/SACK in the direction
-		  opposite to that of the HEARTBEAT/DATA chunk. Secondary connection
-		  is established.
-DATA_SENT - We have seen a DATA/SACK in a new flow.
 */
 
 /* TODO
@@ -101 +115 @@
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][12][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 	{
 /* ORIGINAL */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA, sCW},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS, sCL},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA, sCL},
-/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* data/sack    */ {sDS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
 	},
 	{
 /* REPLY */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL, sIV},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR, sIV},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA, sIV},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA, sIV},
-/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sHA},
-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
-/* data/sack    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
 	}
 };
@@ -144 +160 @@
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
 for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0;	\
-	(offset) < (skb)->len &&					\
-	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));	\
+	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) &&	\
+	(sch)->length;	\
 	(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
 
 /* Some validity checks to make sure the chunks are fine */
@@ -242 +258 @@
 		pr_debug("SCTP_CID_HEARTBEAT_ACK");
 		i = 10;
 		break;
-	case SCTP_CID_DATA:
-	case SCTP_CID_SACK:
-		pr_debug("SCTP_CID_DATA/SACK");
-		i = 11;
-		break;
 	default:
 		/* Other chunks like DATA or SACK do not change the state */
 		pr_debug("Unknown chunk type, Will stay in %s\n",
@@ -295 +316 @@
 			 ih->init_tag);
 
 		ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
-	} else if (sch->type == SCTP_CID_HEARTBEAT ||
-		   sch->type == SCTP_CID_DATA ||
-		   sch->type == SCTP_CID_SACK) {
+	} else if (sch->type == SCTP_CID_HEARTBEAT) {
 		pr_debug("Setting vtag %x for secondary conntrack\n",
 			 sh->vtag);
 		ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
@@ -381 +404 @@
 
 		if (!sctp_new(ct, skb, sh, dataoff))
 			return -NF_ACCEPT;
-	} else {
-		/* Check the verification tag (Sec 8.5) */
-		if (!test_bit(SCTP_CID_INIT, map) &&
-		    !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
-		    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
-		    !test_bit(SCTP_CID_ABORT, map) &&
-		    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
-		    !test_bit(SCTP_CID_HEARTBEAT, map) &&
-		    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
-		    sh->vtag != ct->proto.sctp.vtag[dir]) {
-			pr_debug("Verification tag check failed\n");
-			goto out;
-		}
+	}
+
+	/* Check the verification tag (Sec 8.5) */
+	if (!test_bit(SCTP_CID_INIT, map) &&
+	    !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
+	    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
+	    !test_bit(SCTP_CID_ABORT, map) &&
+	    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
+	    sh->vtag != ct->proto.sctp.vtag[dir]) {
+		pr_debug("Verification tag check failed\n");
+		goto out;
 	}
 
 	old_state = new_state = SCTP_CONNTRACK_NONE;
@@ -401 +424 @@
 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
 		/* Special cases of Verification tag check (Sec 8.5.1) */
 		if (sch->type == SCTP_CID_INIT) {
-			/* Sec 8.5.1 (A) */
+			/* (A) vtag MUST be zero */
 			if (sh->vtag != 0)
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_ABORT) {
-			/* Sec 8.5.1 (B) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir])
+			/* (B) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-			/* Sec 8.5.1 (C) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir] &&
-			    sch->flags & SCTP_CHUNK_FLAG_T)
+			/* (C) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-			/* Sec 8.5.1 (D) */
+			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
 			if (sh->vtag != ct->proto.sctp.vtag[dir])
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
@@ -459 +475 @@
 				ct->proto.sctp.vtag[!dir] = 0;
 			} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
 				ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
-			}
-		} else if (sch->type == SCTP_CID_DATA || sch->type == SCTP_CID_SACK) {
-			if (ct->proto.sctp.vtag[dir] == 0) {
-				pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir);
-				ct->proto.sctp.vtag[dir] = sh->vtag;
 			}
 		}
 
@@ -497 +518 @@
 		}
 
 		ct->proto.sctp.state = new_state;
-		if (old_state != new_state)
+		if (old_state != new_state) {
 			nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+			if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+			    !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+				nf_conntrack_event_cache(IPCT_ASSURED, ct);
+		}
 	}
 	spin_unlock_bh(&ct->lock);
 
@@ -515 +532 @@
 	timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
 
 	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
-
-	if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
-	    dir == IP_CT_DIR_REPLY &&
-	    new_state == SCTP_CONNTRACK_ESTABLISHED) {
-		pr_debug("Setting assured bit\n");
-		set_bit(IPS_ASSURED_BIT, &ct->status);
-		nf_conntrack_event_cache(IPCT_ASSURED, ct);
-	}
 
 	return NF_ACCEPT;
 
@@ -676 +701 @@
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]	= { .type = NLA_U32 },
-	[CTA_TIMEOUT_SCTP_DATA_SENT]		= { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
-16
net/netfilter/nf_conntrack_standalone.c
@@ -601 +601 @@
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
-	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 	NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
@@ -885 +887 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
-	[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
-		.procname	= "nf_conntrack_sctp_timeout_heartbeat_acked",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT] = {
-		.procname	= "nf_conntrack_sctp_timeout_data_sent",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 	[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
@@ -1028 +1042 @@
 	XASSIGN(SHUTDOWN_RECD, sn);
 	XASSIGN(SHUTDOWN_ACK_SENT, sn);
 	XASSIGN(HEARTBEAT_SENT, sn);
-	XASSIGN(HEARTBEAT_ACKED, sn);
-	XASSIGN(DATA_SENT, sn);
 #undef XASSIGN
 #endif
 }
+207 -131
net/netfilter/nft_set_rbtree.c
··· 38 38 return !nft_rbtree_interval_end(rbe); 39 39 } 40 40 41 - static bool nft_rbtree_equal(const struct nft_set *set, const void *this, 42 - const struct nft_rbtree_elem *interval) 41 + static int nft_rbtree_cmp(const struct nft_set *set, 42 + const struct nft_rbtree_elem *e1, 43 + const struct nft_rbtree_elem *e2) 43 44 { 44 - return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0; 45 + return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext), 46 + set->klen); 45 47 } 46 48 47 49 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, ··· 54 52 const struct nft_rbtree_elem *rbe, *interval = NULL; 55 53 u8 genmask = nft_genmask_cur(net); 56 54 const struct rb_node *parent; 57 - const void *this; 58 55 int d; 59 56 60 57 parent = rcu_dereference_raw(priv->root.rb_node); ··· 63 62 64 63 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 65 64 66 - this = nft_set_ext_key(&rbe->ext); 67 - d = memcmp(this, key, set->klen); 65 + d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen); 68 66 if (d < 0) { 69 67 parent = rcu_dereference_raw(parent->rb_left); 70 68 if (interval && 71 - nft_rbtree_equal(set, this, interval) && 69 + !nft_rbtree_cmp(set, rbe, interval) && 72 70 nft_rbtree_interval_end(rbe) && 73 71 nft_rbtree_interval_start(interval)) 74 72 continue; ··· 215 215 return rbe; 216 216 } 217 217 218 + static int nft_rbtree_gc_elem(const struct nft_set *__set, 219 + struct nft_rbtree *priv, 220 + struct nft_rbtree_elem *rbe) 221 + { 222 + struct nft_set *set = (struct nft_set *)__set; 223 + struct rb_node *prev = rb_prev(&rbe->node); 224 + struct nft_rbtree_elem *rbe_prev; 225 + struct nft_set_gc_batch *gcb; 226 + 227 + gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); 228 + if (!gcb) 229 + return -ENOMEM; 230 + 231 + /* search for expired end interval coming before this element. */ 232 + do { 233 + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); 234 + if (nft_rbtree_interval_end(rbe_prev)) 235 + break; 236 + 237 + prev = rb_prev(prev); 238 + } while (prev != NULL); 239 + 240 + rb_erase(&rbe_prev->node, &priv->root); 241 + rb_erase(&rbe->node, &priv->root); 242 + atomic_sub(2, &set->nelems); 243 + 244 + nft_set_gc_batch_add(gcb, rbe); 245 + nft_set_gc_batch_complete(gcb); 246 + 247 + return 0; 248 + } 249 + 250 + static bool nft_rbtree_update_first(const struct nft_set *set, 251 + struct nft_rbtree_elem *rbe, 252 + struct rb_node *first) 253 + { 254 + struct nft_rbtree_elem *first_elem; 255 + 256 + first_elem = rb_entry(first, struct nft_rbtree_elem, node); 257 + /* this element is closest to where the new element is to be inserted: 258 + * update the first element for the node list path. 259 + */ 260 + if (nft_rbtree_cmp(set, rbe, first_elem) < 0) 261 + return true; 262 + 263 + return false; 264 + } 265 + 218 266 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, 219 267 struct nft_rbtree_elem *new, 220 268 struct nft_set_ext **ext) 221 269 { 222 - bool overlap = false, dup_end_left = false, dup_end_right = false; 270 + struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL; 271 + struct rb_node *node, *parent, **p, *first = NULL; 223 272 struct nft_rbtree *priv = nft_set_priv(set); 224 273 u8 genmask = nft_genmask_next(net); 225 - struct nft_rbtree_elem *rbe; 226 - struct rb_node *parent, **p; 227 - int d; 274 + int d, err; 228 275 229 - /* Detect overlaps as we descend the tree. Set the flag in these cases: 230 - * 231 - * a1. 
_ _ __>| ?_ _ __| (insert end before existing end) 232 - * a2. _ _ ___| ?_ _ _>| (insert end after existing end) 233 - * a3. _ _ ___? >|_ _ __| (insert start before existing end) 234 - * 235 - * and clear it later on, as we eventually reach the points indicated by 236 - * '?' above, in the cases described below. We'll always meet these 237 - * later, locally, due to tree ordering, and overlaps for the intervals 238 - * that are the closest together are always evaluated last. 239 - * 240 - * b1. _ _ __>| !_ _ __| (insert end before existing start) 241 - * b2. _ _ ___| !_ _ _>| (insert end after existing start) 242 - * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf) 243 - * '--' no nodes falling in this range 244 - * b4. >|_ _ ! (insert start before existing start) 245 - * 246 - * Case a3. resolves to b3.: 247 - * - if the inserted start element is the leftmost, because the '0' 248 - * element in the tree serves as end element 249 - * - otherwise, if an existing end is found immediately to the left. If 250 - * there are existing nodes in between, we need to further descend the 251 - * tree before we can conclude the new start isn't causing an overlap 252 - * 253 - * or to b4., which, preceded by a3., means we already traversed one or 254 - * more existing intervals entirely, from the right. 255 - * 256 - * For a new, rightmost pair of elements, we'll hit cases b3. and b2., 257 - * in that order. 258 - * 259 - * The flag is also cleared in two special cases: 260 - * 261 - * b5. |__ _ _!|<_ _ _ (insert start right before existing end) 262 - * b6. |__ _ >|!__ _ _ (insert end right after existing start) 263 - * 264 - * which always happen as last step and imply that no further 265 - * overlapping is possible. 266 - * 267 - * Another special case comes from the fact that start elements matching 268 - * an already existing start element are allowed: insertion is not 269 - * performed but we return -EEXIST in that case, and the error will be 270 - * cleared by the caller if NLM_F_EXCL is not present in the request. 271 - * This way, request for insertion of an exact overlap isn't reported as 272 - * error to userspace if not desired. 273 - * 274 - * However, if the existing start matches a pre-existing start, but the 275 - * end element doesn't match the corresponding pre-existing end element, 276 - * we need to report a partial overlap. This is a local condition that 277 - * can be noticed without need for a tracking flag, by checking for a 278 - * local duplicated end for a corresponding start, from left and right, 279 - * separately. 276 + /* Descend the tree to search for an existing element greater than the 277 + * key value to insert that is greater than the new element. This is the 278 + * first element to walk the ordered elements to find possible overlap. 
280 279 */ 281 - 282 280 parent = NULL; 283 281 p = &priv->root.rb_node; 284 282 while (*p != NULL) { 285 283 parent = *p; 286 284 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 287 - d = memcmp(nft_set_ext_key(&rbe->ext), 288 - nft_set_ext_key(&new->ext), 289 - set->klen); 285 + d = nft_rbtree_cmp(set, rbe, new); 286 + 290 287 if (d < 0) { 291 288 p = &parent->rb_left; 292 - 293 - if (nft_rbtree_interval_start(new)) { 294 - if (nft_rbtree_interval_end(rbe) && 295 - nft_set_elem_active(&rbe->ext, genmask) && 296 - !nft_set_elem_expired(&rbe->ext) && !*p) 297 - overlap = false; 298 - } else { 299 - if (dup_end_left && !*p) 300 - return -ENOTEMPTY; 301 - 302 - overlap = nft_rbtree_interval_end(rbe) && 303 - nft_set_elem_active(&rbe->ext, 304 - genmask) && 305 - !nft_set_elem_expired(&rbe->ext); 306 - 307 - if (overlap) { 308 - dup_end_right = true; 309 - continue; 310 - } 311 - } 312 289 } else if (d > 0) { 290 + if (!first || 291 + nft_rbtree_update_first(set, rbe, first)) 292 + first = &rbe->node; 293 + 313 294 p = &parent->rb_right; 314 - 315 - if (nft_rbtree_interval_end(new)) { 316 - if (dup_end_right && !*p) 317 - return -ENOTEMPTY; 318 - 319 - overlap = nft_rbtree_interval_end(rbe) && 320 - nft_set_elem_active(&rbe->ext, 321 - genmask) && 322 - !nft_set_elem_expired(&rbe->ext); 323 - 324 - if (overlap) { 325 - dup_end_left = true; 326 - continue; 327 - } 328 - } else if (nft_set_elem_active(&rbe->ext, genmask) && 329 - !nft_set_elem_expired(&rbe->ext)) { 330 - overlap = nft_rbtree_interval_end(rbe); 331 - } 332 295 } else { 333 - if (nft_rbtree_interval_end(rbe) && 334 - nft_rbtree_interval_start(new)) { 296 + if (nft_rbtree_interval_end(rbe)) 335 297 p = &parent->rb_left; 336 - 337 - if (nft_set_elem_active(&rbe->ext, genmask) && 338 - !nft_set_elem_expired(&rbe->ext)) 339 - overlap = false; 340 - } else if (nft_rbtree_interval_start(rbe) && 341 - nft_rbtree_interval_end(new)) { 298 + else 342 299 p = &parent->rb_right; 343 - 344 - if (nft_set_elem_active(&rbe->ext, genmask) && 345 - !nft_set_elem_expired(&rbe->ext)) 346 - overlap = false; 347 - } else if (nft_set_elem_active(&rbe->ext, genmask) && 348 - !nft_set_elem_expired(&rbe->ext)) { 349 - *ext = &rbe->ext; 350 - return -EEXIST; 351 - } else { 352 - overlap = false; 353 - if (nft_rbtree_interval_end(rbe)) 354 - p = &parent->rb_left; 355 - else 356 - p = &parent->rb_right; 357 - } 358 300 } 359 - 360 - dup_end_left = dup_end_right = false; 361 301 } 362 302 363 - if (overlap) 303 + if (!first) 304 + first = rb_first(&priv->root); 305 + 306 + /* Detect overlap by going through the list of valid tree nodes. 307 + * Values stored in the tree are in reversed order, starting from 308 + * highest to lowest value. 309 + */ 310 + for (node = first; node != NULL; node = rb_next(node)) { 311 + rbe = rb_entry(node, struct nft_rbtree_elem, node); 312 + 313 + if (!nft_set_elem_active(&rbe->ext, genmask)) 314 + continue; 315 + 316 + /* perform garbage collection to avoid bogus overlap reports. */ 317 + if (nft_set_elem_expired(&rbe->ext)) { 318 + err = nft_rbtree_gc_elem(set, priv, rbe); 319 + if (err < 0) 320 + return err; 321 + 322 + continue; 323 + } 324 + 325 + d = nft_rbtree_cmp(set, rbe, new); 326 + if (d == 0) { 327 + /* Matching end element: no need to look for an 328 + * overlapping greater or equal element. 329 + */ 330 + if (nft_rbtree_interval_end(rbe)) { 331 + rbe_le = rbe; 332 + break; 333 + } 334 + 335 + /* first element that is greater or equal to key value. 
*/ 336 + if (!rbe_ge) {
337 + rbe_ge = rbe;
338 + continue;
339 + }
340 +
341 + /* this is a closer greater-or-equal element, update it. */
342 + if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
343 + rbe_ge = rbe;
344 + continue;
345 + }
346 +
347 + /* element is equal to the key value; make sure flags are
348 + * the same, an existing greater-or-equal start element
349 + * must not be replaced by a greater-or-equal end element.
350 + */
351 + if ((nft_rbtree_interval_start(new) &&
352 + nft_rbtree_interval_start(rbe_ge)) ||
353 + (nft_rbtree_interval_end(new) &&
354 + nft_rbtree_interval_end(rbe_ge))) {
355 + rbe_ge = rbe;
356 + continue;
357 + }
358 + } else if (d > 0) {
359 + /* annotate element greater than the new element. */
360 + rbe_ge = rbe;
361 + continue;
362 + } else if (d < 0) {
363 + /* annotate element less than the new element. */
364 + rbe_le = rbe;
365 + break;
366 + }
367 + }
368 +
369 + /* - new start element matching existing start element: full overlap
370 + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
371 + */
372 + if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
373 + nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
374 + *ext = &rbe_ge->ext;
375 + return -EEXIST;
376 + }
377 +
378 + /* - new end element matching existing end element: full overlap
379 + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
380 + */
381 + if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
382 + nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
383 + *ext = &rbe_le->ext;
384 + return -EEXIST;
385 + }
386 +
387 + /* - new start element with existing closest, less or equal key value
388 + * being a start element: partial overlap, reported as -ENOTEMPTY.
389 + * Anonymous sets allow two consecutive start elements since they
390 + * are constant; skip them to avoid bogus overlap reports.
391 + */
392 + if (!nft_set_is_anonymous(set) && rbe_le &&
393 + nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
364 394 return -ENOTEMPTY;
395 +
396 + /* - new end element with existing closest, less or equal key value
397 + * being an end element: partial overlap, reported as -ENOTEMPTY. 
398 + */ 399 + if (rbe_le && 400 + nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new)) 401 + return -ENOTEMPTY; 402 + 403 + /* - new end element with existing closest, greater or equal key value 404 + * being an end element: partial overlap, reported as -ENOTEMPTY 405 + */ 406 + if (rbe_ge && 407 + nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) 408 + return -ENOTEMPTY; 409 + 410 + /* Accepted element: pick insertion point depending on key value */ 411 + parent = NULL; 412 + p = &priv->root.rb_node; 413 + while (*p != NULL) { 414 + parent = *p; 415 + rbe = rb_entry(parent, struct nft_rbtree_elem, node); 416 + d = nft_rbtree_cmp(set, rbe, new); 417 + 418 + if (d < 0) 419 + p = &parent->rb_left; 420 + else if (d > 0) 421 + p = &parent->rb_right; 422 + else if (nft_rbtree_interval_end(rbe)) 423 + p = &parent->rb_left; 424 + else 425 + p = &parent->rb_right; 426 + } 365 427 366 428 rb_link_node_rcu(&new->node, parent, p); 367 429 rb_insert_color(&new->node, &priv->root); ··· 563 501 struct nft_rbtree *priv; 564 502 struct rb_node *node; 565 503 struct nft_set *set; 504 + struct net *net; 505 + u8 genmask; 566 506 567 507 priv = container_of(work, struct nft_rbtree, gc_work.work); 568 508 set = nft_set_container_of(priv); 509 + net = read_pnet(&set->net); 510 + genmask = nft_genmask_cur(net); 569 511 570 512 write_lock_bh(&priv->lock); 571 513 write_seqcount_begin(&priv->count); 572 514 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { 573 515 rbe = rb_entry(node, struct nft_rbtree_elem, node); 574 516 517 + if (!nft_set_elem_active(&rbe->ext, genmask)) 518 + continue; 519 + 520 + /* elements are reversed in the rbtree for historical reasons, 521 + * from highest to lowest value, that is why end element is 522 + * always visited before the start element. 523 + */ 575 524 if (nft_rbtree_interval_end(rbe)) { 576 525 rbe_end = rbe; 577 526 continue; 578 527 } 579 528 if (!nft_set_elem_expired(&rbe->ext)) 580 529 continue; 581 - if (nft_set_elem_mark_busy(&rbe->ext)) 530 + 531 + if (nft_set_elem_mark_busy(&rbe->ext)) { 532 + rbe_end = NULL; 582 533 continue; 534 + } 583 535 584 536 if (rbe_prev) { 585 537 rb_erase(&rbe_prev->node, &priv->root);
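
The rewritten __nft_rbtree_insert() above drops the descend-time overlap flags in favour of a neighbour search: find the closest active elements on either side of the new key, then classify full overlap (-EEXIST) or partial overlap (-ENOTEMPTY) purely from their start/end flags, garbage-collecting expired elements on the way. Below is a minimal userspace sketch of that classification, assuming a plain array of boundary elements sorted from highest to lowest key in place of the rbtree, and omitting the genmask, expiry and anonymous-set handling; the struct and function names are invented for illustration, not kernel API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* A set interval [a, b) is stored as two boundary elements: a start
 * element with key a and an end element with key b, as in nft_rbtree. */
struct elem {
        unsigned int key;
        bool is_end;
};

/* 'e' is sorted from highest to lowest key, mirroring the reversed
 * order the kernel keeps in the tree. Returns 0, -EEXIST or -ENOTEMPTY. */
static int check_overlap(const struct elem *e, int n, const struct elem *new)
{
        const struct elem *ge = NULL, *le = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (e[i].key > new->key) {
                        ge = &e[i];             /* closest greater element so far */
                } else if (e[i].key == new->key) {
                        if (e[i].is_end) {      /* matching end element: stop */
                                le = &e[i];
                                break;
                        }
                        ge = &e[i];
                } else {
                        le = &e[i];             /* first smaller element: stop */
                        break;
                }
        }

        /* Full overlap: the exact boundary already exists. */
        if (ge && ge->key == new->key && ge->is_end == new->is_end)
                return -EEXIST;
        if (le && le->key == new->key && le->is_end == new->is_end)
                return -EEXIST;

        /* Partial overlap: two consecutive starts or two consecutive ends
         * mean the new boundary falls inside an existing interval. */
        if (le && !le->is_end && !new->is_end)
                return -ENOTEMPTY;
        if (le && le->is_end && new->is_end)
                return -ENOTEMPTY;
        if (ge && ge->is_end && new->is_end)
                return -ENOTEMPTY;

        return 0;
}

int main(void)
{
        /* one interval [10, 20): end element 20 first, then start element 10 */
        struct elem set[] = { { 20, true }, { 10, false } };
        struct elem inside = { 15, false };     /* new start inside [10, 20) */
        struct elem apart = { 30, false };      /* start of a disjoint interval */

        printf("start 15 -> %d\n", check_overlap(set, 2, &inside));  /* -ENOTEMPTY */
        printf("start 30 -> %d\n", check_overlap(set, 2, &apart));   /* 0 */
        return 0;
}

With the set holding [10, 20), a new start at 15 sits between an existing start and its matching end and is reported as a partial overlap, while a disjoint start at 30 is accepted.
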
+24 -14
net/netlink/af_netlink.c
··· 580 580 if (nlk_sk(sk)->bound) 581 581 goto err; 582 582 583 - nlk_sk(sk)->portid = portid; 583 + /* portid can be read locklessly from netlink_getname(). */ 584 + WRITE_ONCE(nlk_sk(sk)->portid, portid); 585 + 584 586 sock_hold(sk); 585 587 586 588 err = __netlink_insert(table, sk); ··· 1098 1096 return -EINVAL; 1099 1097 1100 1098 if (addr->sa_family == AF_UNSPEC) { 1101 - sk->sk_state = NETLINK_UNCONNECTED; 1102 - nlk->dst_portid = 0; 1103 - nlk->dst_group = 0; 1099 + /* paired with READ_ONCE() in netlink_getsockbyportid() */ 1100 + WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED); 1101 + /* dst_portid and dst_group can be read locklessly */ 1102 + WRITE_ONCE(nlk->dst_portid, 0); 1103 + WRITE_ONCE(nlk->dst_group, 0); 1104 1104 return 0; 1105 1105 } 1106 1106 if (addr->sa_family != AF_NETLINK) ··· 1123 1119 err = netlink_autobind(sock); 1124 1120 1125 1121 if (err == 0) { 1126 - sk->sk_state = NETLINK_CONNECTED; 1127 - nlk->dst_portid = nladdr->nl_pid; 1128 - nlk->dst_group = ffs(nladdr->nl_groups); 1122 + /* paired with READ_ONCE() in netlink_getsockbyportid() */ 1123 + WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED); 1124 + /* dst_portid and dst_group can be read locklessly */ 1125 + WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid); 1126 + WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups)); 1129 1127 } 1130 1128 1131 1129 return err; ··· 1144 1138 nladdr->nl_pad = 0; 1145 1139 1146 1140 if (peer) { 1147 - nladdr->nl_pid = nlk->dst_portid; 1148 - nladdr->nl_groups = netlink_group_mask(nlk->dst_group); 1141 + /* Paired with WRITE_ONCE() in netlink_connect() */ 1142 + nladdr->nl_pid = READ_ONCE(nlk->dst_portid); 1143 + nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group)); 1149 1144 } else { 1150 - nladdr->nl_pid = nlk->portid; 1145 + /* Paired with WRITE_ONCE() in netlink_insert() */ 1146 + nladdr->nl_pid = READ_ONCE(nlk->portid); 1151 1147 netlink_lock_table(); 1152 1148 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0; 1153 1149 netlink_unlock_table(); ··· 1176 1168 1177 1169 /* Don't bother queuing skb if kernel socket has no input function */ 1178 1170 nlk = nlk_sk(sock); 1179 - if (sock->sk_state == NETLINK_CONNECTED && 1180 - nlk->dst_portid != nlk_sk(ssk)->portid) { 1171 + /* dst_portid and sk_state can be changed in netlink_connect() */ 1172 + if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED && 1173 + READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) { 1181 1174 sock_put(sock); 1182 1175 return ERR_PTR(-ECONNREFUSED); 1183 1176 } ··· 1895 1886 goto out; 1896 1887 netlink_skb_flags |= NETLINK_SKB_DST; 1897 1888 } else { 1898 - dst_portid = nlk->dst_portid; 1899 - dst_group = nlk->dst_group; 1889 + /* Paired with WRITE_ONCE() in netlink_connect() */ 1890 + dst_portid = READ_ONCE(nlk->dst_portid); 1891 + dst_group = READ_ONCE(nlk->dst_group); 1900 1892 } 1901 1893 1902 1894 /* Paired with WRITE_ONCE() in netlink_insert() */
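
The netlink changes add no locking: bind(), connect() and sendmsg() still write portid, dst_portid, dst_group and sk_state under the socket lock, while netlink_getname() and netlink_getsockbyportid() read them without it. The WRITE_ONCE()/READ_ONCE() pairs annotate those lockless accesses so the compiler emits single, untorn loads and stores. A rough userspace analogue, assuming C11 relaxed atomics as a stand-in for the kernel macros (the struct and thread functions are made up for the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's ONCE macros: one untorn
 * access, with no ordering guarantees beyond the access itself. */
#define WRITE_ONCE(x, val) atomic_store_explicit(&(x), (val), memory_order_relaxed)
#define READ_ONCE(x)       atomic_load_explicit(&(x), memory_order_relaxed)

struct toy_sock {
        atomic_uint dst_portid;  /* written under a lock, read without one */
};

static struct toy_sock nlk;

static void *connect_side(void *arg)
{
        /* In the kernel this write happens with lock_sock() held; the
         * annotation exists for the readers that do not take the lock. */
        WRITE_ONCE(nlk.dst_portid, 4242);
        return NULL;
}

static void *getname_side(void *arg)
{
        /* Lockless read, like netlink_getname(): sees the old value or
         * the new one, never a torn mix of the two. */
        printf("dst_portid = %u\n", READ_ONCE(nlk.dst_portid));
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, connect_side, NULL);
        pthread_create(&b, NULL, getname_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

The reader may observe the old or the new portid, which is the accepted race here; what the annotation rules out is a torn or refetched value.
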
+1
net/netrom/nr_timer.c
··· 121 121 is accepted() it isn't 'dead' so doesn't get removed. */ 122 122 if (sock_flag(sk, SOCK_DESTROY) || 123 123 (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { 124 + sock_hold(sk); 124 125 bh_unlock_sock(sk); 125 126 nr_destroy_socket(sk); 126 127 goto out;
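
The one-line netrom fix is the hold-before-unlock pattern: nr_destroy_socket() can end up dropping the last reference to the sock, so the timer handler pins it with sock_hold() before releasing the lock, keeping the memory alive until the handler is done with it. A hedged userspace sketch of the same shape, using a hand-rolled refcount (all names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;
        pthread_mutex_t lock;
};

static void obj_hold(struct obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
        /* Dropping the last reference frees the object, like sock_put(). */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                pthread_mutex_destroy(&o->lock);
                free(o);
                puts("object freed");
        }
}

static void obj_teardown(struct obj *o)
{
        /* Consumes the object's own reference, roughly what the
         * nr_destroy_socket() path ends up doing for the sock. */
        obj_put(o);
}

/* Timer-expiry handler that decides the object is dead. */
static void timer_expiry(struct obj *o)
{
        pthread_mutex_lock(&o->lock);

        obj_hold(o);                    /* the fix: pin before unlocking */
        pthread_mutex_unlock(&o->lock);
        obj_teardown(o);                /* may free, but not under us */
        obj_put(o);                     /* drop the pin we took */
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcnt, 1);
        pthread_mutex_init(&o->lock, NULL);
        timer_expiry(o);
        return 0;
}
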
-1
net/sched/sch_taprio.c
··· 1700 1700 int i; 1701 1701 1702 1702 hrtimer_cancel(&q->advance_timer); 1703 - qdisc_synchronize(sch); 1704 1703 1705 1704 if (q->qdiscs) { 1706 1705 for (i = 0; i < dev->num_tx_queues; i++)
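
The taprio fix removes qdisc_synchronize() from taprio_reset() because the reset path can be entered from a context where sleeping is not allowed, and a synchronizing wait can sleep; hrtimer_cancel() remains, and any blocking wait has to live in a hook that runs in process context. A loose userspace illustration of that split, with a non-blocking reset hook and a blocking destroy hook (the threading and names are invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_t advance_timer;
static atomic_bool running = true;

static void *timer_fn(void *arg)
{
        while (atomic_load(&running))
                usleep(1000);           /* stand-in for periodic timer work */
        return NULL;
}

/* Reset-like hook: may run where blocking is forbidden, so it only
 * requests the stop and returns immediately. */
static void reset_hook(void)
{
        atomic_store(&running, false);
}

/* Destroy-like hook: process context, allowed to sleep, so the
 * blocking wait for the timer to finish lives here. */
static void destroy_hook(void)
{
        pthread_join(advance_timer, NULL);
}

int main(void)
{
        pthread_create(&advance_timer, NULL, timer_fn, NULL);
        usleep(10000);
        reset_hook();           /* non-blocking, like the fixed taprio_reset() */
        destroy_hook();         /* the wait is deferred to teardown */
        puts("done");
        return 0;
}
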
+6
net/sctp/bind_addr.c
··· 73 73 } 74 74 } 75 75 76 + /* If somehow no addresses were found that can be used with this 77 + * scope, it's an error. 78 + */ 79 + if (list_empty(&dest->address_list)) 80 + error = -ENETUNREACH; 81 + 76 82 out: 77 83 if (error) 78 84 sctp_bind_addr_clean(dest);
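
The sctp fix is a filter-then-validate step: only the addresses usable at the requested scope are copied, and the new check turns an empty result into an explicit -ENETUNREACH instead of letting a silently empty bind list propagate. A small sketch of the same shape, assuming a toy array filter in place of the kernel list walk (predicate and names invented):

#include <errno.h>
#include <stdio.h>

/* Copy the source addresses usable at the requested scope into dst;
 * fail with -ENETUNREACH when nothing survives the filter. */
static int copy_usable(const int *src, int n, int scope, int *dst, int *out_n)
{
        int i, m = 0;

        for (i = 0; i < n; i++)
                if (src[i] <= scope)    /* toy "usable at this scope" test */
                        dst[m++] = src[i];

        if (m == 0)
                return -ENETUNREACH;    /* the new check in the patch */

        *out_n = m;
        return 0;
}

int main(void)
{
        int src[] = { 3, 5, 7 }, dst[3], n;

        /* scope 1: nothing qualifies -> an error, not an empty list */
        printf("scope 1 -> %d\n", copy_usable(src, 3, 1, dst, &n));
        /* scope 5: two addresses qualify -> success */
        printf("scope 5 -> %d\n", copy_usable(src, 3, 5, dst, &n));
        return 0;
}
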
+6
net/x25/af_x25.c
··· 482 482 int rc = -EOPNOTSUPP; 483 483 484 484 lock_sock(sk); 485 + if (sock->state != SS_UNCONNECTED) { 486 + rc = -EINVAL; 487 + release_sock(sk); 488 + return rc; 489 + } 490 + 485 491 if (sk->sk_state != TCP_LISTEN) { 486 492 memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); 487 493 sk->sk_max_ack_backlog = backlog;
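
The x25 fix rejects listen() on any socket that is not in the SS_UNCONNECTED state, closing the path where a connected socket could be moved into the listening state and later accept()ed. It is an ordinary state-machine guard at the entry point; a sketch of the pattern with an invented toy socket type:

#include <errno.h>
#include <stdio.h>

enum toy_state { SS_UNCONNECTED, SS_CONNECTING, SS_CONNECTED };

struct toy_sock {
        enum toy_state state;
        int backlog;
        int listening;
};

static int toy_listen(struct toy_sock *sk, int backlog)
{
        /* Mirror of the new check: a socket that is connected (or was
         * connecting) must never enter the listening state. */
        if (sk->state != SS_UNCONNECTED)
                return -EINVAL;

        sk->backlog = backlog;
        sk->listening = 1;
        return 0;
}

int main(void)
{
        struct toy_sock fresh = { .state = SS_UNCONNECTED };
        struct toy_sock connected = { .state = SS_CONNECTED };

        printf("listen on fresh socket     -> %d\n", toy_listen(&fresh, 8));
        printf("listen on connected socket -> %d\n", toy_listen(&connected, 8));
        return 0;
}
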