Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"A quick set of bug fixes after the initial networking merge:

1) Netlink multicast group storage allocator was only tested with
nr_groups equal to 1, make it work for other values too. From
Matti Vaittinen.

2) Check build_skb() return value in macb and hip04_eth drivers, from
Weidong Wang.

3) Don't leak x25_asy on x25_asy_open() failure.

4) More DMA map/unmap fixes in 3c59x from Neil Horman.

5) Don't clobber IP skb control block during GSO segmentation, from
Konstantin Khlebnikov.

6) ECN helpers for ipv6 don't fixup the checksum, from Eric Dumazet.

7) Fix SKB segment utilization estimation in xen-netback, from David
Vrabel.

8) Fix lockdep splat in bridge addrlist handling, from Nikolay
Aleksandrov"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
bgmac: Fix reversed test of build_skb() return value.
bridge: fix lockdep addr_list_lock false positive splat
net: smsc: Add support h8300
xen-netback: free queues after freeing the net device
xen-netback: delete NAPI instance when queue fails to initialize
xen-netback: use skb to determine number of required guest Rx requests
net: sctp: Move sequence start handling into sctp_transport_get_idx()
ipv6: update skb->csum when CE mark is propagated
net: phy: turn carrier off on phy attach
net: macb: clear interrupts when disabling them
sctp: support to lookup with ep+paddr in transport rhashtable
net: hns: fixes no syscon error when init mdio
dts: hisi: fixes no syscon fault when init mdio
net: preserve IP control block during GSO segmentation
fsl/fman: Delete one function call "put_device" in dtsec_config()
hip04_eth: fix missing error handle for build_skb failed
3c59x: fix another page map/single unmap imbalance
3c59x: balance page maps and unmaps
x25_asy: Free x25_asy on x25_asy_open() failure.
mlxsw: fix SWITCHDEV_OBJ_ID_PORT_MDB
...

+158 -66
+16
Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
··· 187 187 reg = <0xb0000000 0x10000>; 188 188 }; 189 189 190 + Hisilicon HiP05 PERISUB system controller 191 + 192 + Required properties: 193 + - compatible : "hisilicon,hip05-perisubc", "syscon"; 194 + - reg : Register address and size 195 + 196 + The HiP05 PERISUB system controller is shared by peripheral controllers in 197 + HiP05 Soc to implement some basic configurations. The peripheral 198 + controllers include mdio, ddr, iic, uart, timer and so on. 199 + 200 + Example: 201 + /* for HiP05 perisub-ctrl-c system */ 202 + peri_c_subctrl: syscon@80000000 { 203 + compatible = "hisilicon,hip05-perisubc", "syscon"; 204 + reg = <0x0 0x80000000 0x0 0x10000>; 205 + }; 190 206 ----------------------------------------------------------------------- 191 207 Hisilicon CPU controller 192 208
+5
arch/arm64/boot/dts/hisilicon/hip05.dtsi
··· 246 246 clock-frequency = <200000000>; 247 247 }; 248 248 249 + peri_c_subctrl: syscon@80000000 { 250 + compatible = "hisilicon,hip05-perisubc", "syscon"; 251 + reg = < 0x0 0x80000000 0x0 0x10000>; 252 + }; 253 + 249 254 uart0: uart@80300000 { 250 255 compatible = "snps,dw-apb-uart"; 251 256 reg = <0x0 0x80300000 0x0 0x10000>;
+2 -2
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
··· 10 10 #address-cells = <1>; 11 11 #size-cells = <0>; 12 12 compatible = "hisilicon,hns-mdio"; 13 - reg = <0x0 0x803c0000 0x0 0x10000 14 - 0x0 0x80000000 0x0 0x10000>; 13 + reg = <0x0 0x803c0000 0x0 0x10000>; 14 + subctrl-vbase = <&peri_c_subctrl>; 15 15 16 16 soc0_phy0: ethernet-phy@0 { 17 17 reg = <0x0>;
+7 -2
drivers/net/ethernet/3com/3c59x.c
··· 2459 2459 struct sk_buff *skb = vp->tx_skbuff[entry]; 2460 2460 #if DO_ZEROCOPY 2461 2461 int i; 2462 - for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) 2463 - pci_unmap_single(VORTEX_PCI(vp), 2462 + pci_unmap_single(VORTEX_PCI(vp), 2463 + le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2464 + le32_to_cpu(vp->tx_ring[entry].frag[0].length), 2465 + PCI_DMA_TODEVICE); 2466 + 2467 + for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) 2468 + pci_unmap_page(VORTEX_PCI(vp), 2464 2469 le32_to_cpu(vp->tx_ring[entry].frag[i].addr), 2465 2470 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, 2466 2471 PCI_DMA_TODEVICE);
+5
drivers/net/ethernet/broadcom/bgmac.c
··· 466 466 len -= ETH_FCS_LEN; 467 467 468 468 skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); 469 + if (unlikely(!skb)) { 470 + bgmac_err(bgmac, "build_skb failed\n"); 471 + put_page(virt_to_head_page(buf)); 472 + break; 473 + } 469 474 skb_put(skb, BGMAC_RX_FRAME_OFFSET + 470 475 BGMAC_RX_BUF_OFFSET + len); 471 476 skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
+4
drivers/net/ethernet/cadence/macb.c
··· 1040 1040 /* close possible race with dev_close */ 1041 1041 if (unlikely(!netif_running(dev))) { 1042 1042 queue_writel(queue, IDR, -1); 1043 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1044 + queue_writel(queue, ISR, -1); 1043 1045 break; 1044 1046 } 1045 1047 ··· 1563 1561 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1564 1562 queue_writel(queue, IDR, -1); 1565 1563 queue_readl(queue, ISR); 1564 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1565 + queue_writel(queue, ISR, -1); 1566 1566 } 1567 1567 } 1568 1568
-1
drivers/net/ethernet/freescale/fman/fman_dtsec.c
··· 1434 1434 dtsec->tbiphy = of_phy_find_device(params->internal_phy_node); 1435 1435 if (!dtsec->tbiphy) { 1436 1436 pr_err("of_phy_find_device (TBI PHY) failed\n"); 1437 - put_device(&dtsec->tbiphy->mdio.dev); 1438 1437 goto err_dtsec_drv_param; 1439 1438 } 1440 1439
+4 -1
drivers/net/ethernet/hisilicon/hip04_eth.c
··· 500 500 while (cnt && !last) { 501 501 buf = priv->rx_buf[priv->rx_head]; 502 502 skb = build_skb(buf, priv->rx_buf_size); 503 - if (unlikely(!skb)) 503 + if (unlikely(!skb)) { 504 504 net_dbg_ratelimited("build_skb failed\n"); 505 + goto refill; 506 + } 505 507 506 508 dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], 507 509 RX_BUF_SIZE, DMA_FROM_DEVICE); ··· 530 528 rx++; 531 529 } 532 530 531 + refill: 533 532 buf = netdev_alloc_frag(priv->rx_buf_size); 534 533 if (!buf) 535 534 goto done;
+1 -1
drivers/net/ethernet/hisilicon/hns_mdio.c
··· 458 458 } 459 459 460 460 mdio_dev->subctrl_vbase = 461 - syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0)); 461 + syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0)); 462 462 if (IS_ERR(mdio_dev->subctrl_vbase)) { 463 463 dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); 464 464 mdio_dev->subctrl_vbase = NULL;
+1
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 1015 1015 case SWITCHDEV_OBJ_ID_PORT_MDB: 1016 1016 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, 1017 1017 SWITCHDEV_OBJ_PORT_MDB(obj)); 1018 + break; 1018 1019 default: 1019 1020 err = -EOPNOTSUPP; 1020 1021 break;
+2 -2
drivers/net/ethernet/smsc/Kconfig
··· 7 7 default y 8 8 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ 9 9 ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \ 10 - PCMCIA || SUPERH || XTENSA 10 + PCMCIA || SUPERH || XTENSA || H8300 11 11 ---help--- 12 12 If you have a network (Ethernet) card belonging to this class, say Y. 13 13 ··· 38 38 select MII 39 39 depends on !OF || GPIOLIB 40 40 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ 41 - M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA 41 + M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA || H8300 42 42 ---help--- 43 43 This is a driver for SMC's 91x series of Ethernet chipsets, 44 44 including the SMC91C94 and the SMC91C111. Say Y if you want it
+11
drivers/net/ethernet/smsc/smc91x.h
··· 172 172 173 173 #define SMC_IRQ_FLAGS 0 174 174 175 + #elif defined(CONFIG_H8300) 176 + #define SMC_CAN_USE_8BIT 1 177 + #define SMC_CAN_USE_16BIT 0 178 + #define SMC_CAN_USE_32BIT 0 179 + #define SMC_NOWAIT 0 180 + 181 + #define SMC_inb(a, r) ioread8((a) + (r)) 182 + #define SMC_outb(v, a, r) iowrite8(v, (a) + (r)) 183 + #define SMC_insb(a, r, p, l) ioread8_rep((a) + (r), p, l) 184 + #define SMC_outsb(a, r, p, l) iowrite8_rep((a) + (r), p, l) 185 + 175 186 #else 176 187 177 188 /*
+5
drivers/net/phy/phy_device.c
··· 901 901 902 902 phydev->state = PHY_READY; 903 903 904 + /* Initial carrier state is off as the phy is about to be 905 + * (re)initialized. 906 + */ 907 + netif_carrier_off(phydev->attached_dev); 908 + 904 909 /* Do initial configuration here, now that 905 910 * we have certain key parameters 906 911 * (dev_flags and interface)
+3 -1
drivers/net/wan/x25_asy.c
··· 571 571 572 572 /* Perform the low-level X.25 async init */ 573 573 err = x25_asy_open(sl->dev); 574 - if (err) 574 + if (err) { 575 + x25_asy_free(sl); 575 576 return err; 577 + } 576 578 /* Done. We have linked the TTY line to a channel. */ 577 579 return 0; 578 580 }
+6 -11
drivers/net/xen-netback/interface.c
··· 615 615 queue->tx_irq = 0; 616 616 err_unmap: 617 617 xenvif_unmap_frontend_rings(queue); 618 + netif_napi_del(&queue->napi); 618 619 err: 619 620 module_put(THIS_MODULE); 620 621 return err; ··· 685 684 686 685 void xenvif_free(struct xenvif *vif) 687 686 { 688 - struct xenvif_queue *queue = NULL; 687 + struct xenvif_queue *queues = vif->queues; 689 688 unsigned int num_queues = vif->num_queues; 690 689 unsigned int queue_index; 691 690 692 691 unregister_netdev(vif->dev); 693 - 694 - for (queue_index = 0; queue_index < num_queues; ++queue_index) { 695 - queue = &vif->queues[queue_index]; 696 - xenvif_deinit_queue(queue); 697 - } 698 - 699 - vfree(vif->queues); 700 - vif->queues = NULL; 701 - vif->num_queues = 0; 702 - 703 692 free_netdev(vif->dev); 693 + 694 + for (queue_index = 0; queue_index < num_queues; ++queue_index) 695 + xenvif_deinit_queue(&queues[queue_index]); 696 + vfree(queues); 704 697 705 698 module_put(THIS_MODULE); 706 699 }
+9 -11
drivers/net/xen-netback/netback.c
··· 149 149 return i & (MAX_PENDING_REQS-1); 150 150 } 151 151 152 - static int xenvif_rx_ring_slots_needed(struct xenvif *vif) 153 - { 154 - if (vif->gso_mask) 155 - return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1; 156 - else 157 - return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE); 158 - } 159 - 160 152 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) 161 153 { 162 154 RING_IDX prod, cons; 155 + struct sk_buff *skb; 163 156 int needed; 164 157 165 - needed = xenvif_rx_ring_slots_needed(queue->vif); 158 + skb = skb_peek(&queue->rx_queue); 159 + if (!skb) 160 + return false; 161 + 162 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); 163 + if (skb_is_gso(skb)) 164 + needed++; 166 165 167 166 do { 168 167 prod = queue->rx.sring->req_prod; ··· 2004 2005 2005 2006 static bool xenvif_have_rx_work(struct xenvif_queue *queue) 2006 2007 { 2007 - return (!skb_queue_empty(&queue->rx_queue) 2008 - && xenvif_rx_ring_slots_available(queue)) 2008 + return xenvif_rx_ring_slots_available(queue) 2009 2009 || (queue->vif->stall_timeout && 2010 2010 (xenvif_rx_queue_stalled(queue) 2011 2011 || xenvif_rx_queue_ready(queue)))
+2 -1
include/linux/skbuff.h
··· 3551 3551 int encap_level; 3552 3552 __u16 csum_start; 3553 3553 }; 3554 - #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) 3554 + #define SKB_SGO_CB_OFFSET 32 3555 + #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) 3555 3556 3556 3557 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) 3557 3558 {
+16 -3
include/net/inet_ecn.h
··· 111 111 112 112 struct ipv6hdr; 113 113 114 - static inline int IP6_ECN_set_ce(struct ipv6hdr *iph) 114 + /* Note: 115 + * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE, 116 + * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE 117 + * In IPv6 case, no checksum compensates the change in IPv6 header, 118 + * so we have to update skb->csum. 119 + */ 120 + static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) 115 121 { 122 + __be32 from, to; 123 + 116 124 if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph))) 117 125 return 0; 118 - *(__be32*)iph |= htonl(INET_ECN_CE << 20); 126 + 127 + from = *(__be32 *)iph; 128 + to = from | htonl(INET_ECN_CE << 20); 129 + *(__be32 *)iph = to; 130 + if (skb->ip_summed == CHECKSUM_COMPLETE) 131 + skb->csum = csum_add(csum_sub(skb->csum, from), to); 119 132 return 1; 120 133 } 121 134 ··· 155 142 case cpu_to_be16(ETH_P_IPV6): 156 143 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= 157 144 skb_tail_pointer(skb)) 158 - return IP6_ECN_set_ce(ipv6_hdr(skb)); 145 + return IP6_ECN_set_ce(skb, ipv6_hdr(skb)); 159 146 break; 160 147 } 161 148
+2
net/batman-adv/multicast.c
··· 802 802 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 1); 803 803 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 1); 804 804 805 + spin_lock_bh(&bat_priv->tt.commit_lock); 805 806 batadv_mcast_mla_tt_retract(bat_priv, NULL); 807 + spin_unlock_bh(&bat_priv->tt.commit_lock); 806 808 } 807 809 808 810 /**
+12 -6
net/batman-adv/originator.c
··· 211 211 212 212 hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu); 213 213 214 - spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 215 - hlist_del_init_rcu(&hardif_neigh->list); 216 - spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 217 - 218 214 batadv_hardif_free_ref_now(hardif_neigh->if_incoming); 219 215 kfree(hardif_neigh); 220 216 } ··· 223 227 static void 224 228 batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh) 225 229 { 226 - if (atomic_dec_and_test(&hardif_neigh->refcount)) 230 + if (atomic_dec_and_test(&hardif_neigh->refcount)) { 231 + spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 232 + hlist_del_init_rcu(&hardif_neigh->list); 233 + spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 234 + 227 235 batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu); 236 + } 228 237 } 229 238 230 239 /** ··· 239 238 */ 240 239 void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh) 241 240 { 242 - if (atomic_dec_and_test(&hardif_neigh->refcount)) 241 + if (atomic_dec_and_test(&hardif_neigh->refcount)) { 242 + spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 243 + hlist_del_init_rcu(&hardif_neigh->list); 244 + spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock); 245 + 243 246 call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu); 247 + } 244 248 } 245 249 246 250 /**
+8
net/bridge/br_device.c
··· 28 28 const struct nf_br_ops __rcu *nf_br_ops __read_mostly; 29 29 EXPORT_SYMBOL_GPL(nf_br_ops); 30 30 31 + static struct lock_class_key bridge_netdev_addr_lock_key; 32 + 31 33 /* net device transmit always called with BH disabled */ 32 34 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 33 35 { ··· 89 87 return NETDEV_TX_OK; 90 88 } 91 89 90 + static void br_set_lockdep_class(struct net_device *dev) 91 + { 92 + lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key); 93 + } 94 + 92 95 static int br_dev_init(struct net_device *dev) 93 96 { 94 97 struct net_bridge *br = netdev_priv(dev); ··· 106 99 err = br_vlan_init(br); 107 100 if (err) 108 101 free_percpu(br->stats); 102 + br_set_lockdep_class(dev); 109 103 110 104 return err; 111 105 }
+5
net/core/dev.c
··· 2695 2695 * 2696 2696 * It may return NULL if the skb requires no segmentation. This is 2697 2697 * only possible when GSO is used for verifying header integrity. 2698 + * 2699 + * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. 2698 2700 */ 2699 2701 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2700 2702 netdev_features_t features, bool tx_path) ··· 2710 2708 if (err < 0) 2711 2709 return ERR_PTR(err); 2712 2710 } 2711 + 2712 + BUILD_BUG_ON(SKB_SGO_CB_OFFSET + 2713 + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 2713 2714 2714 2715 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 2715 2716 SKB_GSO_CB(skb)->encap_level = 0;
+1
net/ipv4/ip_output.c
··· 239 239 * from host network stack. 240 240 */ 241 241 features = netif_skb_features(skb); 242 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); 242 243 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 243 244 if (IS_ERR_OR_NULL(segs)) { 244 245 kfree_skb(skb);
+1 -1
net/ipv6/xfrm6_mode_tunnel.c
··· 23 23 struct ipv6hdr *inner_iph = ipipv6_hdr(skb); 24 24 25 25 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) 26 - IP6_ECN_set_ce(inner_iph); 26 + IP6_ECN_set_ce(skb, inner_iph); 27 27 } 28 28 29 29 /* Add encapsulation header.
+1 -1
net/netlink/genetlink.c
··· 185 185 } 186 186 } 187 187 188 - if (id >= mc_groups_longs * BITS_PER_LONG) { 188 + if (id + n_groups > mc_groups_longs * BITS_PER_LONG) { 189 189 unsigned long new_longs = mc_groups_longs + 190 190 BITS_TO_LONGS(n_groups); 191 191 size_t nlen = new_longs * sizeof(unsigned long);
+1 -4
net/openvswitch/datapath.c
··· 336 336 unsigned short gso_type = skb_shinfo(skb)->gso_type; 337 337 struct sw_flow_key later_key; 338 338 struct sk_buff *segs, *nskb; 339 - struct ovs_skb_cb ovs_cb; 340 339 int err; 341 340 342 - ovs_cb = *OVS_CB(skb); 341 + BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET); 343 342 segs = __skb_gso_segment(skb, NETIF_F_SG, false); 344 - *OVS_CB(skb) = ovs_cb; 345 343 if (IS_ERR(segs)) 346 344 return PTR_ERR(segs); 347 345 if (segs == NULL) ··· 357 359 /* Queue all of the segments. */ 358 360 skb = segs; 359 361 do { 360 - *OVS_CB(skb) = ovs_cb; 361 362 if (gso_type & SKB_GSO_UDP && skb != segs) 362 363 key = &later_key; 363 364
+23 -15
net/sctp/input.c
··· 784 784 785 785 /* rhashtable for transport */ 786 786 struct sctp_hash_cmp_arg { 787 + const struct sctp_endpoint *ep; 787 788 const union sctp_addr *laddr; 788 789 const union sctp_addr *paddr; 789 790 const struct net *net; ··· 798 797 struct sctp_association *asoc = t->asoc; 799 798 const struct net *net = x->net; 800 799 801 - if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port)) 802 - return 1; 803 800 if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr)) 804 801 return 1; 805 802 if (!net_eq(sock_net(asoc->base.sk), net)) 806 803 return 1; 807 - if (!sctp_bind_addr_match(&asoc->base.bind_addr, 808 - x->laddr, sctp_sk(asoc->base.sk))) 809 - return 1; 804 + if (x->ep) { 805 + if (x->ep != asoc->ep) 806 + return 1; 807 + } else { 808 + if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port)) 809 + return 1; 810 + if (!sctp_bind_addr_match(&asoc->base.bind_addr, 811 + x->laddr, sctp_sk(asoc->base.sk))) 812 + return 1; 813 + } 810 814 811 815 return 0; 812 816 } ··· 838 832 const struct sctp_hash_cmp_arg *x = data; 839 833 const union sctp_addr *paddr = x->paddr; 840 834 const struct net *net = x->net; 841 - u16 lport = x->laddr->v4.sin_port; 835 + u16 lport; 842 836 u32 addr; 843 837 838 + lport = x->ep ? 
htons(x->ep->base.bind_addr.port) : 839 + x->laddr->v4.sin_port; 844 840 if (paddr->sa.sa_family == AF_INET6) 845 841 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 846 842 else ··· 872 864 873 865 void sctp_hash_transport(struct sctp_transport *t) 874 866 { 875 - struct sctp_sockaddr_entry *addr; 876 867 struct sctp_hash_cmp_arg arg; 877 868 878 - addr = list_entry(t->asoc->base.bind_addr.address_list.next, 879 - struct sctp_sockaddr_entry, list); 880 - arg.laddr = &addr->a; 869 + arg.ep = t->asoc->ep; 881 870 arg.paddr = &t->ipaddr; 882 871 arg.net = sock_net(t->asoc->base.sk); 883 872 ··· 896 891 const union sctp_addr *paddr) 897 892 { 898 893 struct sctp_hash_cmp_arg arg = { 894 + .ep = NULL, 899 895 .laddr = laddr, 900 896 .paddr = paddr, 901 897 .net = net, ··· 910 904 const struct sctp_endpoint *ep, 911 905 const union sctp_addr *paddr) 912 906 { 913 - struct sctp_sockaddr_entry *addr; 914 907 struct net *net = sock_net(ep->base.sk); 908 + struct sctp_hash_cmp_arg arg = { 909 + .ep = ep, 910 + .paddr = paddr, 911 + .net = net, 912 + }; 915 913 916 - addr = list_entry(ep->base.bind_addr.address_list.next, 917 - struct sctp_sockaddr_entry, list); 918 - 919 - return sctp_addrs_lookup_transport(net, &addr->a, paddr); 914 + return rhashtable_lookup_fast(&sctp_transport_hashtable, &arg, 915 + sctp_hash_params); 920 916 } 921 917 922 918 /* Look up an association. */
+3 -3
net/sctp/proc.c
··· 310 310 static struct sctp_transport *sctp_transport_get_idx(struct seq_file *seq, 311 311 loff_t pos) 312 312 { 313 - void *obj; 313 + void *obj = SEQ_START_TOKEN; 314 314 315 315 while (pos && (obj = sctp_transport_get_next(seq)) && !IS_ERR(obj)) 316 316 pos--; ··· 347 347 if (err) 348 348 return ERR_PTR(err); 349 349 350 - return *pos ? sctp_transport_get_idx(seq, *pos) : SEQ_START_TOKEN; 350 + return sctp_transport_get_idx(seq, *pos); 351 351 } 352 352 353 353 static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) ··· 462 462 if (err) 463 463 return ERR_PTR(err); 464 464 465 - return *pos ? sctp_transport_get_idx(seq, *pos) : SEQ_START_TOKEN; 465 + return sctp_transport_get_idx(seq, *pos); 466 466 } 467 467 468 468 static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+2
net/xfrm/xfrm_output.c
··· 167 167 { 168 168 struct sk_buff *segs; 169 169 170 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); 171 + BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET); 170 172 segs = skb_gso_segment(skb, 0); 171 173 kfree_skb(skb); 172 174 if (IS_ERR(segs))