[VLAN]: Avoid a 4-order allocation.

This patch splits the vlan_group struct into a multi-allocated struct. On
x86_64, the size of the original struct is a little more than 32KB, causing
a 4-order allocation, which is prone to problems caused by buddy-system
external fragmentation conditions.

I couldn't just use vmalloc() because vfree() cannot be called in the
softirq context of the RCU callback.

Signed-off-by: Dan Aloni <da-x@monatomic.org>
Acked-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Dan Aloni and committed by David S. Miller 5c15bdec b5284e5a

+102 -80
+1 -2
drivers/net/8139cp.c
··· 448 spin_lock_irqsave(&cp->lock, flags); 449 cp->cpcmd &= ~RxVlanOn; 450 cpw16(CpCmd, cp->cpcmd); 451 - if (cp->vlgrp) 452 - cp->vlgrp->vlan_devices[vid] = NULL; 453 spin_unlock_irqrestore(&cp->lock, flags); 454 } 455 #endif /* CP_VLAN_TAG_USED */
··· 448 spin_lock_irqsave(&cp->lock, flags); 449 cp->cpcmd &= ~RxVlanOn; 450 cpw16(CpCmd, cp->cpcmd); 451 + vlan_group_set_device(cp->vlgrp, vid, NULL); 452 spin_unlock_irqrestore(&cp->lock, flags); 453 } 454 #endif /* CP_VLAN_TAG_USED */
+1 -4
drivers/net/acenic.c
··· 2293 2294 local_irq_save(flags); 2295 ace_mask_irq(dev); 2296 - 2297 - if (ap->vlgrp) 2298 - ap->vlgrp->vlan_devices[vid] = NULL; 2299 - 2300 ace_unmask_irq(dev); 2301 local_irq_restore(flags); 2302 }
··· 2293 2294 local_irq_save(flags); 2295 ace_mask_irq(dev); 2296 + vlan_group_set_device(ap->vlgrp, vid, NULL); 2297 ace_unmask_irq(dev); 2298 local_irq_restore(flags); 2299 }
+1 -2
drivers/net/amd8111e.c
··· 1737 { 1738 struct amd8111e_priv *lp = netdev_priv(dev); 1739 spin_lock_irq(&lp->lock); 1740 - if (lp->vlgrp) 1741 - lp->vlgrp->vlan_devices[vid] = NULL; 1742 spin_unlock_irq(&lp->lock); 1743 } 1744 #endif
··· 1737 { 1738 struct amd8111e_priv *lp = netdev_priv(dev); 1739 spin_lock_irq(&lp->lock); 1740 + vlan_group_set_device(lp->vlgrp, vid, NULL); 1741 spin_unlock_irq(&lp->lock); 1742 } 1743 #endif
+2 -3
drivers/net/atl1/atl1_main.c
··· 1252 1253 spin_lock_irqsave(&adapter->lock, flags); 1254 /* atl1_irq_disable(adapter); */ 1255 - if (adapter->vlgrp) 1256 - adapter->vlgrp->vlan_devices[vid] = NULL; 1257 /* atl1_irq_enable(adapter); */ 1258 spin_unlock_irqrestore(&adapter->lock, flags); 1259 /* We don't do Vlan filtering */ ··· 1265 if (adapter->vlgrp) { 1266 u16 vid; 1267 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1268 - if (!adapter->vlgrp->vlan_devices[vid]) 1269 continue; 1270 atl1_vlan_rx_add_vid(adapter->netdev, vid); 1271 }
··· 1252 1253 spin_lock_irqsave(&adapter->lock, flags); 1254 /* atl1_irq_disable(adapter); */ 1255 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 1256 /* atl1_irq_enable(adapter); */ 1257 spin_unlock_irqrestore(&adapter->lock, flags); 1258 /* We don't do Vlan filtering */ ··· 1266 if (adapter->vlgrp) { 1267 u16 vid; 1268 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1269 + if (!vlan_group_get_device(adapter->vlgrp, vid)) 1270 continue; 1271 atl1_vlan_rx_add_vid(adapter->netdev, vid); 1272 }
+1 -3
drivers/net/bnx2.c
··· 4467 struct bnx2 *bp = netdev_priv(dev); 4468 4469 bnx2_netif_stop(bp); 4470 - 4471 - if (bp->vlgrp) 4472 - bp->vlgrp->vlan_devices[vid] = NULL; 4473 bnx2_set_rx_mode(dev); 4474 4475 bnx2_netif_start(bp);
··· 4467 struct bnx2 *bp = netdev_priv(dev); 4468 4469 bnx2_netif_stop(bp); 4470 + vlan_group_set_device(bp->vlgrp, vid, NULL); 4471 bnx2_set_rx_mode(dev); 4472 4473 bnx2_netif_start(bp);
+7 -7
drivers/net/bonding/bond_main.c
··· 488 /* Save and then restore vlan_dev in the grp array, 489 * since the slave's driver might clear it. 490 */ 491 - vlan_dev = bond->vlgrp->vlan_devices[vid]; 492 slave_dev->vlan_rx_kill_vid(slave_dev, vid); 493 - bond->vlgrp->vlan_devices[vid] = vlan_dev; 494 } 495 } 496 ··· 550 /* Save and then restore vlan_dev in the grp array, 551 * since the slave's driver might clear it. 552 */ 553 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 554 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); 555 - bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev; 556 } 557 558 unreg: ··· 2397 vlan_id = 0; 2398 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 2399 vlan_list) { 2400 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2401 if (vlan_dev == rt->u.dst.dev) { 2402 vlan_id = vlan->vlan_id; 2403 dprintk("basa: vlan match on %s %d\n", ··· 2444 } 2445 2446 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2447 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2448 if (vlan->vlan_ip) { 2449 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, 2450 vlan->vlan_ip, vlan->vlan_id); ··· 3371 3372 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 3373 vlan_list) { 3374 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 3375 if (vlan_dev == event_dev) { 3376 switch (event) { 3377 case NETDEV_UP:
··· 488 /* Save and then restore vlan_dev in the grp array, 489 * since the slave's driver might clear it. 490 */ 491 + vlan_dev = vlan_group_get_device(bond->vlgrp, vid); 492 slave_dev->vlan_rx_kill_vid(slave_dev, vid); 493 + vlan_group_set_device(bond->vlgrp, vid, vlan_dev); 494 } 495 } 496 ··· 550 /* Save and then restore vlan_dev in the grp array, 551 * since the slave's driver might clear it. 552 */ 553 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 554 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); 555 + vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev); 556 } 557 558 unreg: ··· 2397 vlan_id = 0; 2398 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 2399 vlan_list) { 2400 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2401 if (vlan_dev == rt->u.dst.dev) { 2402 vlan_id = vlan->vlan_id; 2403 dprintk("basa: vlan match on %s %d\n", ··· 2444 } 2445 2446 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2447 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2448 if (vlan->vlan_ip) { 2449 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, 2450 vlan->vlan_ip, vlan->vlan_id); ··· 3371 3372 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 3373 vlan_list) { 3374 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 3375 if (vlan_dev == event_dev) { 3376 switch (event) { 3377 case NETDEV_UP:
+1 -2
drivers/net/chelsio/cxgb2.c
··· 889 struct adapter *adapter = dev->priv; 890 891 spin_lock_irq(&adapter->async_lock); 892 - if (adapter->vlan_grp) 893 - adapter->vlan_grp->vlan_devices[vid] = NULL; 894 spin_unlock_irq(&adapter->async_lock); 895 } 896 #endif
··· 889 struct adapter *adapter = dev->priv; 890 891 spin_lock_irq(&adapter->async_lock); 892 + vlan_group_set_device(adapter->vlan_grp, vid, NULL); 893 spin_unlock_irq(&adapter->async_lock); 894 } 895 #endif
+4 -2
drivers/net/cxgb3/cxgb3_offload.c
··· 160 int i; 161 162 for_each_port(adapter, i) { 163 - const struct vlan_group *grp; 164 struct net_device *dev = adapter->port[i]; 165 const struct port_info *p = netdev_priv(dev); 166 167 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 168 if (vlan && vlan != VLAN_VID_MASK) { 169 grp = p->vlan_grp; 170 - dev = grp ? grp->vlan_devices[vlan] : NULL; 171 } else 172 while (dev->master) 173 dev = dev->master;
··· 160 int i; 161 162 for_each_port(adapter, i) { 163 + struct vlan_group *grp; 164 struct net_device *dev = adapter->port[i]; 165 const struct port_info *p = netdev_priv(dev); 166 167 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 168 if (vlan && vlan != VLAN_VID_MASK) { 169 grp = p->vlan_grp; 170 + dev = NULL; 171 + if (grp) 172 + dev = vlan_group_get_device(grp, vlan); 173 } else 174 while (dev->master) 175 dev = dev->master;
+5 -8
drivers/net/e1000/e1000_main.c
··· 376 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 377 uint16_t old_vid = adapter->mng_vlan_id; 378 if (adapter->vlgrp) { 379 - if (!adapter->vlgrp->vlan_devices[vid]) { 380 if (adapter->hw.mng_cookie.status & 381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 382 e1000_vlan_rx_add_vid(netdev, vid); ··· 386 387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 388 (vid != old_vid) && 389 - !adapter->vlgrp->vlan_devices[old_vid]) 390 e1000_vlan_rx_kill_vid(netdev, old_vid); 391 } else 392 adapter->mng_vlan_id = vid; ··· 1482 if ((adapter->hw.mng_cookie.status & 1483 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1484 !(adapter->vlgrp && 1485 - adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) { 1486 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1487 } 1488 ··· 4998 uint32_t vfta, index; 4999 5000 e1000_irq_disable(adapter); 5001 - 5002 - if (adapter->vlgrp) 5003 - adapter->vlgrp->vlan_devices[vid] = NULL; 5004 - 5005 e1000_irq_enable(adapter); 5006 5007 if ((adapter->hw.mng_cookie.status & ··· 5024 if (adapter->vlgrp) { 5025 uint16_t vid; 5026 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5027 - if (!adapter->vlgrp->vlan_devices[vid]) 5028 continue; 5029 e1000_vlan_rx_add_vid(adapter->netdev, vid); 5030 }
··· 376 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 377 uint16_t old_vid = adapter->mng_vlan_id; 378 if (adapter->vlgrp) { 379 + if (!vlan_group_get_device(adapter->vlgrp, vid)) { 380 if (adapter->hw.mng_cookie.status & 381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 382 e1000_vlan_rx_add_vid(netdev, vid); ··· 386 387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 388 (vid != old_vid) && 389 + !vlan_group_get_device(adapter->vlgrp, old_vid)) 390 e1000_vlan_rx_kill_vid(netdev, old_vid); 391 } else 392 adapter->mng_vlan_id = vid; ··· 1482 if ((adapter->hw.mng_cookie.status & 1483 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1484 !(adapter->vlgrp && 1485 + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { 1486 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1487 } 1488 ··· 4998 uint32_t vfta, index; 4999 5000 e1000_irq_disable(adapter); 5001 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 5002 e1000_irq_enable(adapter); 5003 5004 if ((adapter->hw.mng_cookie.status & ··· 5027 if (adapter->vlgrp) { 5028 uint16_t vid; 5029 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5030 + if (!vlan_group_get_device(adapter->vlgrp, vid)) 5031 continue; 5032 e1000_vlan_rx_add_vid(adapter->netdev, vid); 5033 }
+1 -2
drivers/net/ehea/ehea_main.c
··· 1939 int index; 1940 u64 hret; 1941 1942 - if (port->vgrp) 1943 - port->vgrp->vlan_devices[vid] = NULL; 1944 1945 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1946 if (!cb1) {
··· 1939 int index; 1940 u64 hret; 1941 1942 + vlan_group_set_device(port->vgrp, vid, NULL); 1943 1944 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1945 if (!cb1) {
+1 -2
drivers/net/gianfar.c
··· 1132 1133 spin_lock_irqsave(&priv->rxlock, flags); 1134 1135 - if (priv->vlgrp) 1136 - priv->vlgrp->vlan_devices[vid] = NULL; 1137 1138 spin_unlock_irqrestore(&priv->rxlock, flags); 1139 }
··· 1132 1133 spin_lock_irqsave(&priv->rxlock, flags); 1134 1135 + vlan_group_set_device(priv->vlgrp, vid, NULL); 1136 1137 spin_unlock_irqrestore(&priv->rxlock, flags); 1138 }
+2 -3
drivers/net/ixgb/ixgb_main.c
··· 2213 2214 ixgb_irq_disable(adapter); 2215 2216 - if(adapter->vlgrp) 2217 - adapter->vlgrp->vlan_devices[vid] = NULL; 2218 2219 ixgb_irq_enable(adapter); 2220 ··· 2233 if(adapter->vlgrp) { 2234 uint16_t vid; 2235 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2236 - if(!adapter->vlgrp->vlan_devices[vid]) 2237 continue; 2238 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2239 }
··· 2213 2214 ixgb_irq_disable(adapter); 2215 2216 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 2217 2218 ixgb_irq_enable(adapter); 2219 ··· 2234 if(adapter->vlgrp) { 2235 uint16_t vid; 2236 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2237 + if(!vlan_group_get_device(adapter->vlgrp, vid)) 2238 continue; 2239 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2240 }
+1 -2
drivers/net/ns83820.c
··· 514 515 spin_lock_irq(&dev->misc_lock); 516 spin_lock(&dev->tx_lock); 517 - if (dev->vlgrp) 518 - dev->vlgrp->vlan_devices[vid] = NULL; 519 spin_unlock(&dev->tx_lock); 520 spin_unlock_irq(&dev->misc_lock); 521 }
··· 514 515 spin_lock_irq(&dev->misc_lock); 516 spin_lock(&dev->tx_lock); 517 + vlan_group_set_device(dev->vlgrp, vid, NULL); 518 spin_unlock(&dev->tx_lock); 519 spin_unlock_irq(&dev->misc_lock); 520 }
+1 -2
drivers/net/r8169.c
··· 890 unsigned long flags; 891 892 spin_lock_irqsave(&tp->lock, flags); 893 - if (tp->vlgrp) 894 - tp->vlgrp->vlan_devices[vid] = NULL; 895 spin_unlock_irqrestore(&tp->lock, flags); 896 } 897
··· 890 unsigned long flags; 891 892 spin_lock_irqsave(&tp->lock, flags); 893 + vlan_group_set_device(tp->vlgrp, vid, NULL); 894 spin_unlock_irqrestore(&tp->lock, flags); 895 } 896
+1 -2
drivers/net/s2io.c
··· 325 unsigned long flags; 326 327 spin_lock_irqsave(&nic->tx_lock, flags); 328 - if (nic->vlgrp) 329 - nic->vlgrp->vlan_devices[vid] = NULL; 330 spin_unlock_irqrestore(&nic->tx_lock, flags); 331 } 332
··· 325 unsigned long flags; 326 327 spin_lock_irqsave(&nic->tx_lock, flags); 328 + vlan_group_set_device(nic->vlgrp, vid, NULL); 329 spin_unlock_irqrestore(&nic->tx_lock, flags); 330 } 331
+1 -2
drivers/net/sky2.c
··· 1053 1054 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 1055 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 1056 - if (sky2->vlgrp) 1057 - sky2->vlgrp->vlan_devices[vid] = NULL; 1058 1059 netif_tx_unlock_bh(dev); 1060 }
··· 1053 1054 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 1055 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 1056 + vlan_group_set_device(sky2->vlgrp, vid, NULL); 1057 1058 netif_tx_unlock_bh(dev); 1059 }
+2 -3
drivers/net/starfire.c
··· 677 spin_lock(&np->lock); 678 if (debug > 1) 679 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); 680 - if (np->vlgrp) 681 - np->vlgrp->vlan_devices[vid] = NULL; 682 set_rx_mode(dev); 683 spin_unlock(&np->lock); 684 } ··· 1737 int vlan_count = 0; 1738 void __iomem *filter_addr = ioaddr + HashTable + 8; 1739 for (i = 0; i < VLAN_VID_MASK; i++) { 1740 - if (np->vlgrp->vlan_devices[i]) { 1741 if (vlan_count >= 32) 1742 break; 1743 writew(cpu_to_be16(i), filter_addr);
··· 677 spin_lock(&np->lock); 678 if (debug > 1) 679 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); 680 + vlan_group_set_device(np->vlgrp, vid, NULL); 681 set_rx_mode(dev); 682 spin_unlock(&np->lock); 683 } ··· 1738 int vlan_count = 0; 1739 void __iomem *filter_addr = ioaddr + HashTable + 8; 1740 for (i = 0; i < VLAN_VID_MASK; i++) { 1741 + if (vlan_group_get_device(np->vlgrp, i)) { 1742 if (vlan_count >= 32) 1743 break; 1744 writew(cpu_to_be16(i), filter_addr);
+1 -2
drivers/net/tg3.c
··· 9114 tg3_netif_stop(tp); 9115 9116 tg3_full_lock(tp, 0); 9117 - if (tp->vlgrp) 9118 - tp->vlgrp->vlan_devices[vid] = NULL; 9119 tg3_full_unlock(tp); 9120 9121 if (netif_running(dev))
··· 9114 tg3_netif_stop(tp); 9115 9116 tg3_full_lock(tp, 0); 9117 + vlan_group_set_device(tp->vlgrp, vid, NULL); 9118 tg3_full_unlock(tp); 9119 9120 if (netif_running(dev))
+1 -2
drivers/net/typhoon.c
··· 746 { 747 struct typhoon *tp = netdev_priv(dev); 748 spin_lock_bh(&tp->state_lock); 749 - if(tp->vlgrp) 750 - tp->vlgrp->vlan_devices[vid] = NULL; 751 spin_unlock_bh(&tp->state_lock); 752 } 753
··· 746 { 747 struct typhoon *tp = netdev_priv(dev); 748 spin_lock_bh(&tp->state_lock); 749 + vlan_group_set_device(tp->vlgrp, vid, NULL); 750 spin_unlock_bh(&tp->state_lock); 751 } 752
+13 -12
drivers/s390/net/qeth_main.c
··· 3654 return rc; 3655 3656 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ 3657 - if (vg->vlan_devices[i] == dev){ 3658 rc = QETH_VLAN_CARD; 3659 break; 3660 } ··· 5261 QETH_DBF_TEXT(trace, 4, "frvaddr4"); 5262 5263 rcu_read_lock(); 5264 - in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]); 5265 if (!in_dev) 5266 goto out; 5267 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { ··· 5288 5289 QETH_DBF_TEXT(trace, 4, "frvaddr6"); 5290 5291 - in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]); 5292 if (!in6_dev) 5293 return; 5294 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ ··· 5360 if (!card->vlangrp) 5361 return; 5362 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5363 - if (card->vlangrp->vlan_devices[i] == NULL) 5364 continue; 5365 if (clear) 5366 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); ··· 5398 spin_lock_irqsave(&card->vlanlock, flags); 5399 /* unregister IP addresses of vlan device */ 5400 qeth_free_vlan_addresses(card, vid); 5401 - if (card->vlangrp) 5402 - card->vlangrp->vlan_devices[vid] = NULL; 5403 spin_unlock_irqrestore(&card->vlanlock, flags); 5404 if (card->options.layer2) 5405 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); ··· 5661 5662 vg = card->vlangrp; 5663 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5664 - if (vg->vlan_devices[i] == NULL || 5665 - !(vg->vlan_devices[i]->flags & IFF_UP)) 5666 continue; 5667 - in_dev = in_dev_get(vg->vlan_devices[i]); 5668 if (!in_dev) 5669 continue; 5670 read_lock(&in_dev->mc_list_lock); ··· 5749 5750 vg = card->vlangrp; 5751 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5752 - if (vg->vlan_devices[i] == NULL || 5753 - !(vg->vlan_devices[i]->flags & IFF_UP)) 5754 continue; 5755 - in_dev = in6_dev_get(vg->vlan_devices[i]); 5756 if (!in_dev) 5757 continue; 5758 read_lock(&in_dev->lock);
··· 3654 return rc; 3655 3656 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ 3657 + if (vlan_group_get_device(vg, i) == dev){ 3658 rc = QETH_VLAN_CARD; 3659 break; 3660 } ··· 5261 QETH_DBF_TEXT(trace, 4, "frvaddr4"); 5262 5263 rcu_read_lock(); 5264 + in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid)); 5265 if (!in_dev) 5266 goto out; 5267 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { ··· 5288 5289 QETH_DBF_TEXT(trace, 4, "frvaddr6"); 5290 5291 + in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); 5292 if (!in6_dev) 5293 return; 5294 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ ··· 5360 if (!card->vlangrp) 5361 return; 5362 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5363 + if (vlan_group_get_device(card->vlangrp, i) == NULL) 5364 continue; 5365 if (clear) 5366 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); ··· 5398 spin_lock_irqsave(&card->vlanlock, flags); 5399 /* unregister IP addresses of vlan device */ 5400 qeth_free_vlan_addresses(card, vid); 5401 + vlan_group_set_device(card->vlangrp, vid, NULL); 5402 spin_unlock_irqrestore(&card->vlanlock, flags); 5403 if (card->options.layer2) 5404 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); ··· 5662 5663 vg = card->vlangrp; 5664 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5665 + struct net_device *netdev = vlan_group_get_device(vg, i); 5666 + if (netdev == NULL || 5667 + !(netdev->flags & IFF_UP)) 5668 continue; 5669 + in_dev = in_dev_get(netdev); 5670 if (!in_dev) 5671 continue; 5672 read_lock(&in_dev->mc_list_lock); ··· 5749 5750 vg = card->vlangrp; 5751 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5752 + struct net_device *netdev = vlan_group_get_device(vg, i); 5753 + if (netdev == NULL || 5754 + !(netdev->flags & IFF_UP)) 5755 continue; 5756 + in_dev = in6_dev_get(netdev); 5757 if (!in_dev) 5758 continue; 5759 read_lock(&in_dev->lock);
+22 -3
include/linux/if_vlan.h
··· 70 * depends on completely exhausting the VLAN identifier space. Thus 71 * it gives constant time look-up, but in many cases it wastes memory. 72 */ 73 - #define VLAN_GROUP_ARRAY_LEN 4096 74 75 struct vlan_group { 76 int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */ 77 struct hlist_node hlist; /* linked list */ 78 - struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN]; 79 struct rcu_head rcu; 80 }; 81 82 struct vlan_priority_tci_mapping { 83 unsigned long priority; ··· 179 return NET_RX_DROP; 180 } 181 182 - skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; 183 if (skb->dev == NULL) { 184 dev_kfree_skb_any(skb); 185
··· 70 * depends on completely exhausting the VLAN identifier space. Thus 71 * it gives constant time look-up, but in many cases it wastes memory. 72 */ 73 + #define VLAN_GROUP_ARRAY_LEN 4096 74 + #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 75 + #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) 76 77 struct vlan_group { 78 int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */ 79 struct hlist_node hlist; /* linked list */ 80 + struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; 81 struct rcu_head rcu; 82 }; 83 + 84 + static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id) 85 + { 86 + struct net_device **array; 87 + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; 88 + return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN]; 89 + } 90 + 91 + static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, 92 + struct net_device *dev) 93 + { 94 + struct net_device **array; 95 + if (!vg) 96 + return; 97 + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; 98 + array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; 99 + } 100 101 struct vlan_priority_tci_mapping { 102 unsigned long priority; ··· 160 return NET_RX_DROP; 161 } 162 163 + skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK); 164 if (skb->dev == NULL) { 165 dev_kfree_skb_any(skb); 166
+32 -10
net/8021q/vlan.c
··· 184 struct vlan_group *grp = __vlan_find_group(real_dev->ifindex); 185 186 if (grp) 187 - return grp->vlan_devices[VID]; 188 189 return NULL; 190 } 191 192 static void vlan_rcu_free(struct rcu_head *rcu) 193 { 194 - kfree(container_of(rcu, struct vlan_group, rcu)); 195 } 196 197 ··· 232 ret = 0; 233 234 if (grp) { 235 - dev = grp->vlan_devices[vlan_id]; 236 if (dev) { 237 /* Remove proc entry */ 238 vlan_proc_rem_dev(dev); ··· 246 real_dev->vlan_rx_kill_vid(real_dev, vlan_id); 247 } 248 249 - grp->vlan_devices[vlan_id] = NULL; 250 synchronize_net(); 251 252 ··· 260 * group. 261 */ 262 for (i = 0; i < VLAN_VID_MASK; i++) 263 - if (grp->vlan_devices[i]) 264 break; 265 266 if (i == VLAN_VID_MASK) { ··· 388 struct net_device *new_dev; 389 struct net_device *real_dev; /* the ethernet device */ 390 char name[IFNAMSIZ]; 391 392 #ifdef VLAN_DEBUG 393 printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n", ··· 554 if (!grp) 555 goto out_free_unregister; 556 557 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 558 grp->real_dev_ifindex = real_dev->ifindex; 559 ··· 573 real_dev->vlan_rx_register(real_dev, grp); 574 } 575 576 - grp->vlan_devices[VLAN_ID] = new_dev; 577 578 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */ 579 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n", ··· 589 printk(VLAN_DBG "Allocated new device successfully, returning.\n"); 590 #endif 591 return new_dev; 592 593 out_free_unregister: 594 unregister_netdev(new_dev); ··· 628 case NETDEV_CHANGE: 629 /* Propagate real device state to vlan devices */ 630 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 631 - vlandev = grp->vlan_devices[i]; 632 if (!vlandev) 633 continue; 634 ··· 639 case NETDEV_DOWN: 640 /* Put all VLANs for this dev in the down state too. */ 641 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 642 - vlandev = grp->vlan_devices[i]; 643 if (!vlandev) 644 continue; 645 ··· 654 case NETDEV_UP: 655 /* Put all VLANs for this dev in the up state too. 
*/ 656 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 657 - vlandev = grp->vlan_devices[i]; 658 if (!vlandev) 659 continue; 660 ··· 671 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 672 int ret; 673 674 - vlandev = grp->vlan_devices[i]; 675 if (!vlandev) 676 continue; 677
··· 184 struct vlan_group *grp = __vlan_find_group(real_dev->ifindex); 185 186 if (grp) 187 + return vlan_group_get_device(grp, VID); 188 189 return NULL; 190 } 191 192 + static void vlan_group_free(struct vlan_group *grp) 193 + { 194 + int i; 195 + 196 + for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) 197 + kfree(grp->vlan_devices_arrays[i]); 198 + kfree(grp); 199 + } 200 + 201 static void vlan_rcu_free(struct rcu_head *rcu) 202 { 203 + vlan_group_free(container_of(rcu, struct vlan_group, rcu)); 204 } 205 206 ··· 223 ret = 0; 224 225 if (grp) { 226 + dev = vlan_group_get_device(grp, vlan_id); 227 if (dev) { 228 /* Remove proc entry */ 229 vlan_proc_rem_dev(dev); ··· 237 real_dev->vlan_rx_kill_vid(real_dev, vlan_id); 238 } 239 240 + vlan_group_set_device(grp, vlan_id, NULL); 241 synchronize_net(); 242 243 ··· 251 * group. 252 */ 253 for (i = 0; i < VLAN_VID_MASK; i++) 254 + if (vlan_group_get_device(grp, i)) 255 break; 256 257 if (i == VLAN_VID_MASK) { ··· 379 struct net_device *new_dev; 380 struct net_device *real_dev; /* the ethernet device */ 381 char name[IFNAMSIZ]; 382 + int i; 383 384 #ifdef VLAN_DEBUG 385 printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n", ··· 544 if (!grp) 545 goto out_free_unregister; 546 547 + for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) { 548 + grp->vlan_devices_arrays[i] = kzalloc( 549 + sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN, 550 + GFP_KERNEL); 551 + 552 + if (!grp->vlan_devices_arrays[i]) 553 + goto out_free_arrays; 554 + } 555 + 556 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 557 grp->real_dev_ifindex = real_dev->ifindex; 558 ··· 554 real_dev->vlan_rx_register(real_dev, grp); 555 } 556 557 + vlan_group_set_device(grp, VLAN_ID, new_dev); 558 559 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */ 560 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n", ··· 570 printk(VLAN_DBG "Allocated new device successfully, returning.\n"); 571 #endif 572 return new_dev; 573 + 574 + 
out_free_arrays: 575 + vlan_group_free(grp); 576 577 out_free_unregister: 578 unregister_netdev(new_dev); ··· 606 case NETDEV_CHANGE: 607 /* Propagate real device state to vlan devices */ 608 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 609 + vlandev = vlan_group_get_device(grp, i); 610 if (!vlandev) 611 continue; 612 ··· 617 case NETDEV_DOWN: 618 /* Put all VLANs for this dev in the down state too. */ 619 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 620 + vlandev = vlan_group_get_device(grp, i); 621 if (!vlandev) 622 continue; 623 ··· 632 case NETDEV_UP: 633 /* Put all VLANs for this dev in the up state too. */ 634 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 635 + vlandev = vlan_group_get_device(grp, i); 636 if (!vlandev) 637 continue; 638 ··· 649 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 650 int ret; 651 652 + vlandev = vlan_group_get_device(grp, i); 653 if (!vlandev) 654 continue; 655