Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[VLAN]: Avoid a 4-order allocation.
[HDLC] Fix dev->header_cache_update having a random value.
[NetLabel]: Verify sensitivity level has a valid CIPSO mapping
[PPPOE]: Key connections properly on local device.
[AF_UNIX]: Test against sk_max_ack_backlog properly.
[NET]: Fix bugs in "Whether sock accept queue is full" checking

+165 -130
+1 -2
drivers/net/8139cp.c
··· 448 448 spin_lock_irqsave(&cp->lock, flags); 449 449 cp->cpcmd &= ~RxVlanOn; 450 450 cpw16(CpCmd, cp->cpcmd); 451 - if (cp->vlgrp) 452 - cp->vlgrp->vlan_devices[vid] = NULL; 451 + vlan_group_set_device(cp->vlgrp, vid, NULL); 453 452 spin_unlock_irqrestore(&cp->lock, flags); 454 453 } 455 454 #endif /* CP_VLAN_TAG_USED */
+1 -4
drivers/net/acenic.c
··· 2293 2293 2294 2294 local_irq_save(flags); 2295 2295 ace_mask_irq(dev); 2296 - 2297 - if (ap->vlgrp) 2298 - ap->vlgrp->vlan_devices[vid] = NULL; 2299 - 2296 + vlan_group_set_device(ap->vlgrp, vid, NULL); 2300 2297 ace_unmask_irq(dev); 2301 2298 local_irq_restore(flags); 2302 2299 }
+1 -2
drivers/net/amd8111e.c
··· 1737 1737 { 1738 1738 struct amd8111e_priv *lp = netdev_priv(dev); 1739 1739 spin_lock_irq(&lp->lock); 1740 - if (lp->vlgrp) 1741 - lp->vlgrp->vlan_devices[vid] = NULL; 1740 + vlan_group_set_device(lp->vlgrp, vid, NULL); 1742 1741 spin_unlock_irq(&lp->lock); 1743 1742 } 1744 1743 #endif
+2 -3
drivers/net/atl1/atl1_main.c
··· 1252 1252 1253 1253 spin_lock_irqsave(&adapter->lock, flags); 1254 1254 /* atl1_irq_disable(adapter); */ 1255 - if (adapter->vlgrp) 1256 - adapter->vlgrp->vlan_devices[vid] = NULL; 1255 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 1257 1256 /* atl1_irq_enable(adapter); */ 1258 1257 spin_unlock_irqrestore(&adapter->lock, flags); 1259 1258 /* We don't do Vlan filtering */ ··· 1265 1266 if (adapter->vlgrp) { 1266 1267 u16 vid; 1267 1268 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1268 - if (!adapter->vlgrp->vlan_devices[vid]) 1269 + if (!vlan_group_get_device(adapter->vlgrp, vid)) 1269 1270 continue; 1270 1271 atl1_vlan_rx_add_vid(adapter->netdev, vid); 1271 1272 }
+1 -3
drivers/net/bnx2.c
··· 4467 4467 struct bnx2 *bp = netdev_priv(dev); 4468 4468 4469 4469 bnx2_netif_stop(bp); 4470 - 4471 - if (bp->vlgrp) 4472 - bp->vlgrp->vlan_devices[vid] = NULL; 4470 + vlan_group_set_device(bp->vlgrp, vid, NULL); 4473 4471 bnx2_set_rx_mode(dev); 4474 4472 4475 4473 bnx2_netif_start(bp);
+7 -7
drivers/net/bonding/bond_main.c
··· 488 488 /* Save and then restore vlan_dev in the grp array, 489 489 * since the slave's driver might clear it. 490 490 */ 491 - vlan_dev = bond->vlgrp->vlan_devices[vid]; 491 + vlan_dev = vlan_group_get_device(bond->vlgrp, vid); 492 492 slave_dev->vlan_rx_kill_vid(slave_dev, vid); 493 - bond->vlgrp->vlan_devices[vid] = vlan_dev; 493 + vlan_group_set_device(bond->vlgrp, vid, vlan_dev); 494 494 } 495 495 } 496 496 ··· 550 550 /* Save and then restore vlan_dev in the grp array, 551 551 * since the slave's driver might clear it. 552 552 */ 553 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 553 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 554 554 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); 555 - bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev; 555 + vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev); 556 556 } 557 557 558 558 unreg: ··· 2397 2397 vlan_id = 0; 2398 2398 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 2399 2399 vlan_list) { 2400 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2400 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2401 2401 if (vlan_dev == rt->u.dst.dev) { 2402 2402 vlan_id = vlan->vlan_id; 2403 2403 dprintk("basa: vlan match on %s %d\n", ··· 2444 2444 } 2445 2445 2446 2446 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2447 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 2447 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2448 2448 if (vlan->vlan_ip) { 2449 2449 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip, 2450 2450 vlan->vlan_ip, vlan->vlan_id); ··· 3371 3371 3372 3372 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 3373 3373 vlan_list) { 3374 - vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id]; 3374 + vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 3375 3375 if (vlan_dev == event_dev) { 3376 3376 switch (event) { 3377 3377 case NETDEV_UP:
+1 -2
drivers/net/chelsio/cxgb2.c
··· 889 889 struct adapter *adapter = dev->priv; 890 890 891 891 spin_lock_irq(&adapter->async_lock); 892 - if (adapter->vlan_grp) 893 - adapter->vlan_grp->vlan_devices[vid] = NULL; 892 + vlan_group_set_device(adapter->vlan_grp, vid, NULL); 894 893 spin_unlock_irq(&adapter->async_lock); 895 894 } 896 895 #endif
+4 -2
drivers/net/cxgb3/cxgb3_offload.c
··· 160 160 int i; 161 161 162 162 for_each_port(adapter, i) { 163 - const struct vlan_group *grp; 163 + struct vlan_group *grp; 164 164 struct net_device *dev = adapter->port[i]; 165 165 const struct port_info *p = netdev_priv(dev); 166 166 167 167 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 168 168 if (vlan && vlan != VLAN_VID_MASK) { 169 169 grp = p->vlan_grp; 170 - dev = grp ? grp->vlan_devices[vlan] : NULL; 170 + dev = NULL; 171 + if (grp) 172 + dev = vlan_group_get_device(grp, vlan); 171 173 } else 172 174 while (dev->master) 173 175 dev = dev->master;
+5 -8
drivers/net/e1000/e1000_main.c
··· 376 376 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 377 377 uint16_t old_vid = adapter->mng_vlan_id; 378 378 if (adapter->vlgrp) { 379 - if (!adapter->vlgrp->vlan_devices[vid]) { 379 + if (!vlan_group_get_device(adapter->vlgrp, vid)) { 380 380 if (adapter->hw.mng_cookie.status & 381 381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 382 382 e1000_vlan_rx_add_vid(netdev, vid); ··· 386 386 387 387 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 388 388 (vid != old_vid) && 389 - !adapter->vlgrp->vlan_devices[old_vid]) 389 + !vlan_group_get_device(adapter->vlgrp, old_vid)) 390 390 e1000_vlan_rx_kill_vid(netdev, old_vid); 391 391 } else 392 392 adapter->mng_vlan_id = vid; ··· 1482 1482 if ((adapter->hw.mng_cookie.status & 1483 1483 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1484 1484 !(adapter->vlgrp && 1485 - adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) { 1485 + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { 1486 1486 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1487 1487 } 1488 1488 ··· 4998 4998 uint32_t vfta, index; 4999 4999 5000 5000 e1000_irq_disable(adapter); 5001 - 5002 - if (adapter->vlgrp) 5003 - adapter->vlgrp->vlan_devices[vid] = NULL; 5004 - 5001 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 5005 5002 e1000_irq_enable(adapter); 5006 5003 5007 5004 if ((adapter->hw.mng_cookie.status & ··· 5024 5027 if (adapter->vlgrp) { 5025 5028 uint16_t vid; 5026 5029 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5027 - if (!adapter->vlgrp->vlan_devices[vid]) 5030 + if (!vlan_group_get_device(adapter->vlgrp, vid)) 5028 5031 continue; 5029 5032 e1000_vlan_rx_add_vid(adapter->netdev, vid); 5030 5033 }
+1 -2
drivers/net/ehea/ehea_main.c
··· 1939 1939 int index; 1940 1940 u64 hret; 1941 1941 1942 - if (port->vgrp) 1943 - port->vgrp->vlan_devices[vid] = NULL; 1942 + vlan_group_set_device(port->vgrp, vid, NULL); 1944 1943 1945 1944 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1946 1945 if (!cb1) {
+1 -2
drivers/net/gianfar.c
··· 1132 1132 1133 1133 spin_lock_irqsave(&priv->rxlock, flags); 1134 1134 1135 - if (priv->vlgrp) 1136 - priv->vlgrp->vlan_devices[vid] = NULL; 1135 + vlan_group_set_device(priv->vlgrp, vid, NULL); 1137 1136 1138 1137 spin_unlock_irqrestore(&priv->rxlock, flags); 1139 1138 }
+2 -3
drivers/net/ixgb/ixgb_main.c
··· 2213 2213 2214 2214 ixgb_irq_disable(adapter); 2215 2215 2216 - if(adapter->vlgrp) 2217 - adapter->vlgrp->vlan_devices[vid] = NULL; 2216 + vlan_group_set_device(adapter->vlgrp, vid, NULL); 2218 2217 2219 2218 ixgb_irq_enable(adapter); 2220 2219 ··· 2233 2234 if(adapter->vlgrp) { 2234 2235 uint16_t vid; 2235 2236 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2236 - if(!adapter->vlgrp->vlan_devices[vid]) 2237 + if(!vlan_group_get_device(adapter->vlgrp, vid)) 2237 2238 continue; 2238 2239 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2239 2240 }
+1 -2
drivers/net/ns83820.c
··· 514 514 515 515 spin_lock_irq(&dev->misc_lock); 516 516 spin_lock(&dev->tx_lock); 517 - if (dev->vlgrp) 518 - dev->vlgrp->vlan_devices[vid] = NULL; 517 + vlan_group_set_device(dev->vlgrp, vid, NULL); 519 518 spin_unlock(&dev->tx_lock); 520 519 spin_unlock_irq(&dev->misc_lock); 521 520 }
+32 -20
drivers/net/pppoe.c
··· 7 7 * 8 8 * Version: 0.7.0 9 9 * 10 + * 070228 : Fix to allow multiple sessions with same remote MAC and same 11 + * session id by including the local device ifindex in the 12 + * tuple identifying a session. This also ensures packets can't 13 + * be injected into a session from interfaces other than the one 14 + * specified by userspace. Florian Zumbiehl <florz@florz.de> 15 + * (Oh, BTW, this one is YYMMDD, in case you were wondering ...) 10 16 * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme 11 17 * 030700 : Fixed connect logic to allow for disconnect. 12 18 * 270700 : Fixed potential SMP problems; we must protect against ··· 133 127 * Set/get/delete/rehash items (internal versions) 134 128 * 135 129 **********************************************************************/ 136 - static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr) 130 + static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex) 137 131 { 138 132 int hash = hash_item(sid, addr); 139 133 struct pppox_sock *ret; 140 134 141 135 ret = item_hash_table[hash]; 142 136 143 - while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr)) 137 + while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex)) 144 138 ret = ret->next; 145 139 146 140 return ret; ··· 153 147 154 148 ret = item_hash_table[hash]; 155 149 while (ret) { 156 - if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa)) 150 + if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex) 157 151 return -EALREADY; 158 152 159 153 ret = ret->next; 160 154 } 161 155 162 - if (!ret) { 163 - po->next = item_hash_table[hash]; 164 - item_hash_table[hash] = po; 165 - } 156 + po->next = item_hash_table[hash]; 157 + item_hash_table[hash] = po; 166 158 167 159 return 0; 168 160 } 169 161 170 - static struct pppox_sock *__delete_item(unsigned long sid, char *addr) 162 + static struct pppox_sock *__delete_item(unsigned long 
sid, char *addr, int ifindex) 171 163 { 172 164 int hash = hash_item(sid, addr); 173 165 struct pppox_sock *ret, **src; ··· 174 170 src = &item_hash_table[hash]; 175 171 176 172 while (ret) { 177 - if (cmp_addr(&ret->pppoe_pa, sid, addr)) { 173 + if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) { 178 174 *src = ret->next; 179 175 break; 180 176 } ··· 192 188 * 193 189 **********************************************************************/ 194 190 static inline struct pppox_sock *get_item(unsigned long sid, 195 - unsigned char *addr) 191 + unsigned char *addr, int ifindex) 196 192 { 197 193 struct pppox_sock *po; 198 194 199 195 read_lock_bh(&pppoe_hash_lock); 200 - po = __get_item(sid, addr); 196 + po = __get_item(sid, addr, ifindex); 201 197 if (po) 202 198 sock_hold(sk_pppox(po)); 203 199 read_unlock_bh(&pppoe_hash_lock); ··· 207 203 208 204 static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) 209 205 { 210 - return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote); 206 + struct net_device *dev = NULL; 207 + int ifindex; 208 + 209 + dev = dev_get_by_name(sp->sa_addr.pppoe.dev); 210 + if(!dev) 211 + return NULL; 212 + ifindex = dev->ifindex; 213 + dev_put(dev); 214 + return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); 211 215 } 212 216 213 217 static inline int set_item(struct pppox_sock *po) ··· 232 220 return i; 233 221 } 234 222 235 - static inline struct pppox_sock *delete_item(unsigned long sid, char *addr) 223 + static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) 236 224 { 237 225 struct pppox_sock *ret; 238 226 239 227 write_lock_bh(&pppoe_hash_lock); 240 - ret = __delete_item(sid, addr); 228 + ret = __delete_item(sid, addr, ifindex); 241 229 write_unlock_bh(&pppoe_hash_lock); 242 230 243 231 return ret; ··· 403 391 404 392 ph = (struct pppoe_hdr *) skb->nh.raw; 405 393 406 - po = get_item((unsigned long) ph->sid, 
eth_hdr(skb)->h_source); 394 + po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 407 395 if (po != NULL) 408 396 return sk_receive_skb(sk_pppox(po), skb, 0); 409 397 drop: ··· 437 425 if (ph->code != PADT_CODE) 438 426 goto abort; 439 427 440 - po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); 428 + po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 441 429 if (po) { 442 430 struct sock *sk = sk_pppox(po); 443 431 ··· 529 517 530 518 po = pppox_sk(sk); 531 519 if (po->pppoe_pa.sid) { 532 - delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 520 + delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex); 533 521 } 534 522 535 523 if (po->pppoe_dev) ··· 551 539 int sockaddr_len, int flags) 552 540 { 553 541 struct sock *sk = sock->sk; 554 - struct net_device *dev = NULL; 542 + struct net_device *dev; 555 543 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; 556 544 struct pppox_sock *po = pppox_sk(sk); 557 545 int error; ··· 577 565 pppox_unbind_sock(sk); 578 566 579 567 /* Delete the old binding */ 580 - delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote); 568 + delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex); 581 569 582 570 if(po->pppoe_dev) 583 571 dev_put(po->pppoe_dev); ··· 717 705 break; 718 706 719 707 /* PPPoE address from the user specifies an outbound 720 - PPPoE address to which frames are forwarded to */ 708 + PPPoE address which frames are forwarded to */ 721 709 err = -EFAULT; 722 710 if (copy_from_user(&po->pppoe_relay, 723 711 (void __user *)arg,
+1 -2
drivers/net/r8169.c
··· 890 890 unsigned long flags; 891 891 892 892 spin_lock_irqsave(&tp->lock, flags); 893 - if (tp->vlgrp) 894 - tp->vlgrp->vlan_devices[vid] = NULL; 893 + vlan_group_set_device(tp->vlgrp, vid, NULL); 895 894 spin_unlock_irqrestore(&tp->lock, flags); 896 895 } 897 896
+1 -2
drivers/net/s2io.c
··· 325 325 unsigned long flags; 326 326 327 327 spin_lock_irqsave(&nic->tx_lock, flags); 328 - if (nic->vlgrp) 329 - nic->vlgrp->vlan_devices[vid] = NULL; 328 + vlan_group_set_device(nic->vlgrp, vid, NULL); 330 329 spin_unlock_irqrestore(&nic->tx_lock, flags); 331 330 } 332 331
+1 -2
drivers/net/sky2.c
··· 1053 1053 1054 1054 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 1055 1055 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 1056 - if (sky2->vlgrp) 1057 - sky2->vlgrp->vlan_devices[vid] = NULL; 1056 + vlan_group_set_device(sky2->vlgrp, vid, NULL); 1058 1057 1059 1058 netif_tx_unlock_bh(dev); 1060 1059 }
+2 -3
drivers/net/starfire.c
··· 677 677 spin_lock(&np->lock); 678 678 if (debug > 1) 679 679 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); 680 - if (np->vlgrp) 681 - np->vlgrp->vlan_devices[vid] = NULL; 680 + vlan_group_set_device(np->vlgrp, vid, NULL); 682 681 set_rx_mode(dev); 683 682 spin_unlock(&np->lock); 684 683 } ··· 1737 1738 int vlan_count = 0; 1738 1739 void __iomem *filter_addr = ioaddr + HashTable + 8; 1739 1740 for (i = 0; i < VLAN_VID_MASK; i++) { 1740 - if (np->vlgrp->vlan_devices[i]) { 1741 + if (vlan_group_get_device(np->vlgrp, i)) { 1741 1742 if (vlan_count >= 32) 1742 1743 break; 1743 1744 writew(cpu_to_be16(i), filter_addr);
+1 -2
drivers/net/tg3.c
··· 9114 9114 tg3_netif_stop(tp); 9115 9115 9116 9116 tg3_full_lock(tp, 0); 9117 - if (tp->vlgrp) 9118 - tp->vlgrp->vlan_devices[vid] = NULL; 9117 + vlan_group_set_device(tp->vlgrp, vid, NULL); 9119 9118 tg3_full_unlock(tp); 9120 9119 9121 9120 if (netif_running(dev))
+1 -2
drivers/net/typhoon.c
··· 746 746 { 747 747 struct typhoon *tp = netdev_priv(dev); 748 748 spin_lock_bh(&tp->state_lock); 749 - if(tp->vlgrp) 750 - tp->vlgrp->vlan_devices[vid] = NULL; 749 + vlan_group_set_device(tp->vlgrp, vid, NULL); 751 750 spin_unlock_bh(&tp->state_lock); 752 751 } 753 752
+23 -10
drivers/net/wan/hdlc.c
··· 38 38 #include <linux/hdlc.h> 39 39 40 40 41 - static const char* version = "HDLC support module revision 1.20"; 41 + static const char* version = "HDLC support module revision 1.21"; 42 42 43 43 #undef DEBUG_LINK 44 44 ··· 222 222 return -EINVAL; 223 223 } 224 224 225 + static void hdlc_setup_dev(struct net_device *dev) 226 + { 227 + /* Re-init all variables changed by HDLC protocol drivers, 228 + * including ether_setup() called from hdlc_raw_eth.c. 229 + */ 230 + dev->get_stats = hdlc_get_stats; 231 + dev->flags = IFF_POINTOPOINT | IFF_NOARP; 232 + dev->mtu = HDLC_MAX_MTU; 233 + dev->type = ARPHRD_RAWHDLC; 234 + dev->hard_header_len = 16; 235 + dev->addr_len = 0; 236 + dev->hard_header = NULL; 237 + dev->rebuild_header = NULL; 238 + dev->set_mac_address = NULL; 239 + dev->hard_header_cache = NULL; 240 + dev->header_cache_update = NULL; 241 + dev->change_mtu = hdlc_change_mtu; 242 + dev->hard_header_parse = NULL; 243 + } 244 + 225 245 static void hdlc_setup(struct net_device *dev) 226 246 { 227 247 hdlc_device *hdlc = dev_to_hdlc(dev); 228 248 229 - dev->get_stats = hdlc_get_stats; 230 - dev->change_mtu = hdlc_change_mtu; 231 - dev->mtu = HDLC_MAX_MTU; 232 - 233 - dev->type = ARPHRD_RAWHDLC; 234 - dev->hard_header_len = 16; 235 - 236 - dev->flags = IFF_POINTOPOINT | IFF_NOARP; 237 - 249 + hdlc_setup_dev(dev); 238 250 hdlc->carrier = 1; 239 251 hdlc->open = 0; 240 252 spin_lock_init(&hdlc->state_lock); ··· 306 294 } 307 295 kfree(hdlc->state); 308 296 hdlc->state = NULL; 297 + hdlc_setup_dev(dev); 309 298 } 310 299 311 300
-3
drivers/net/wan/hdlc_cisco.c
··· 365 365 memcpy(&state(hdlc)->settings, &new_settings, size); 366 366 dev->hard_start_xmit = hdlc->xmit; 367 367 dev->hard_header = cisco_hard_header; 368 - dev->hard_header_cache = NULL; 369 368 dev->type = ARPHRD_CISCO; 370 - dev->flags = IFF_POINTOPOINT | IFF_NOARP; 371 - dev->addr_len = 0; 372 369 netif_dormant_on(dev); 373 370 return 0; 374 371 }
-3
drivers/net/wan/hdlc_fr.c
··· 1289 1289 memcpy(&state(hdlc)->settings, &new_settings, size); 1290 1290 1291 1291 dev->hard_start_xmit = hdlc->xmit; 1292 - dev->hard_header = NULL; 1293 1292 dev->type = ARPHRD_FRAD; 1294 - dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1295 - dev->addr_len = 0; 1296 1293 return 0; 1297 1294 1298 1295 case IF_PROTO_FR_ADD_PVC:
-2
drivers/net/wan/hdlc_ppp.c
··· 127 127 if (result) 128 128 return result; 129 129 dev->hard_start_xmit = hdlc->xmit; 130 - dev->hard_header = NULL; 131 130 dev->type = ARPHRD_PPP; 132 - dev->addr_len = 0; 133 131 netif_dormant_off(dev); 134 132 return 0; 135 133 }
-3
drivers/net/wan/hdlc_raw.c
··· 88 88 return result; 89 89 memcpy(hdlc->state, &new_settings, size); 90 90 dev->hard_start_xmit = hdlc->xmit; 91 - dev->hard_header = NULL; 92 91 dev->type = ARPHRD_RAWHDLC; 93 - dev->flags = IFF_POINTOPOINT | IFF_NOARP; 94 - dev->addr_len = 0; 95 92 netif_dormant_off(dev); 96 93 return 0; 97 94 }
-2
drivers/net/wan/hdlc_x25.c
··· 215 215 x25_rx, 0)) != 0) 216 216 return result; 217 217 dev->hard_start_xmit = x25_xmit; 218 - dev->hard_header = NULL; 219 218 dev->type = ARPHRD_X25; 220 - dev->addr_len = 0; 221 219 netif_dormant_off(dev); 222 220 return 0; 223 221 }
+13 -12
drivers/s390/net/qeth_main.c
··· 3654 3654 return rc; 3655 3655 3656 3656 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ 3657 - if (vg->vlan_devices[i] == dev){ 3657 + if (vlan_group_get_device(vg, i) == dev){ 3658 3658 rc = QETH_VLAN_CARD; 3659 3659 break; 3660 3660 } ··· 5261 5261 QETH_DBF_TEXT(trace, 4, "frvaddr4"); 5262 5262 5263 5263 rcu_read_lock(); 5264 - in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]); 5264 + in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid)); 5265 5265 if (!in_dev) 5266 5266 goto out; 5267 5267 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { ··· 5288 5288 5289 5289 QETH_DBF_TEXT(trace, 4, "frvaddr6"); 5290 5290 5291 - in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]); 5291 + in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); 5292 5292 if (!in6_dev) 5293 5293 return; 5294 5294 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ ··· 5360 5360 if (!card->vlangrp) 5361 5361 return; 5362 5362 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5363 - if (card->vlangrp->vlan_devices[i] == NULL) 5363 + if (vlan_group_get_device(card->vlangrp, i) == NULL) 5364 5364 continue; 5365 5365 if (clear) 5366 5366 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); ··· 5398 5398 spin_lock_irqsave(&card->vlanlock, flags); 5399 5399 /* unregister IP addresses of vlan device */ 5400 5400 qeth_free_vlan_addresses(card, vid); 5401 - if (card->vlangrp) 5402 - card->vlangrp->vlan_devices[vid] = NULL; 5401 + vlan_group_set_device(card->vlangrp, vid, NULL); 5403 5402 spin_unlock_irqrestore(&card->vlanlock, flags); 5404 5403 if (card->options.layer2) 5405 5404 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); ··· 5661 5662 5662 5663 vg = card->vlangrp; 5663 5664 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5664 - if (vg->vlan_devices[i] == NULL || 5665 - !(vg->vlan_devices[i]->flags & IFF_UP)) 5665 + struct net_device *netdev = vlan_group_get_device(vg, i); 5666 + if (netdev == NULL || 5667 + !(netdev->flags & IFF_UP)) 5666 5668 
continue; 5667 - in_dev = in_dev_get(vg->vlan_devices[i]); 5669 + in_dev = in_dev_get(netdev); 5668 5670 if (!in_dev) 5669 5671 continue; 5670 5672 read_lock(&in_dev->mc_list_lock); ··· 5749 5749 5750 5750 vg = card->vlangrp; 5751 5751 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 5752 - if (vg->vlan_devices[i] == NULL || 5753 - !(vg->vlan_devices[i]->flags & IFF_UP)) 5752 + struct net_device *netdev = vlan_group_get_device(vg, i); 5753 + if (netdev == NULL || 5754 + !(netdev->flags & IFF_UP)) 5754 5755 continue; 5755 - in_dev = in6_dev_get(vg->vlan_devices[i]); 5756 + in_dev = in6_dev_get(netdev); 5756 5757 if (!in_dev) 5757 5758 continue; 5758 5759 read_lock(&in_dev->lock);
+22 -3
include/linux/if_vlan.h
··· 70 70 * depends on completely exhausting the VLAN identifier space. Thus 71 71 * it gives constant time look-up, but in many cases it wastes memory. 72 72 */ 73 - #define VLAN_GROUP_ARRAY_LEN 4096 73 + #define VLAN_GROUP_ARRAY_LEN 4096 74 + #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 75 + #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) 74 76 75 77 struct vlan_group { 76 78 int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */ 77 79 struct hlist_node hlist; /* linked list */ 78 - struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN]; 80 + struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; 79 81 struct rcu_head rcu; 80 82 }; 83 + 84 + static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id) 85 + { 86 + struct net_device **array; 87 + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; 88 + return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN]; 89 + } 90 + 91 + static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, 92 + struct net_device *dev) 93 + { 94 + struct net_device **array; 95 + if (!vg) 96 + return; 97 + array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; 98 + array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; 99 + } 81 100 82 101 struct vlan_priority_tci_mapping { 83 102 unsigned long priority; ··· 179 160 return NET_RX_DROP; 180 161 } 181 162 182 - skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; 163 + skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK); 183 164 if (skb->dev == NULL) { 184 165 dev_kfree_skb_any(skb); 185 166
+1 -1
include/net/sock.h
··· 426 426 427 427 static inline int sk_acceptq_is_full(struct sock *sk) 428 428 { 429 - return sk->sk_ack_backlog > sk->sk_max_ack_backlog; 429 + return sk->sk_ack_backlog >= sk->sk_max_ack_backlog; 430 430 } 431 431 432 432 /*
+32 -10
net/8021q/vlan.c
··· 184 184 struct vlan_group *grp = __vlan_find_group(real_dev->ifindex); 185 185 186 186 if (grp) 187 - return grp->vlan_devices[VID]; 187 + return vlan_group_get_device(grp, VID); 188 188 189 189 return NULL; 190 190 } 191 191 192 + static void vlan_group_free(struct vlan_group *grp) 193 + { 194 + int i; 195 + 196 + for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) 197 + kfree(grp->vlan_devices_arrays[i]); 198 + kfree(grp); 199 + } 200 + 192 201 static void vlan_rcu_free(struct rcu_head *rcu) 193 202 { 194 - kfree(container_of(rcu, struct vlan_group, rcu)); 203 + vlan_group_free(container_of(rcu, struct vlan_group, rcu)); 195 204 } 196 205 197 206 ··· 232 223 ret = 0; 233 224 234 225 if (grp) { 235 - dev = grp->vlan_devices[vlan_id]; 226 + dev = vlan_group_get_device(grp, vlan_id); 236 227 if (dev) { 237 228 /* Remove proc entry */ 238 229 vlan_proc_rem_dev(dev); ··· 246 237 real_dev->vlan_rx_kill_vid(real_dev, vlan_id); 247 238 } 248 239 249 - grp->vlan_devices[vlan_id] = NULL; 240 + vlan_group_set_device(grp, vlan_id, NULL); 250 241 synchronize_net(); 251 242 252 243 ··· 260 251 * group. 
261 252 */ 262 253 for (i = 0; i < VLAN_VID_MASK; i++) 263 - if (grp->vlan_devices[i]) 254 + if (vlan_group_get_device(grp, i)) 264 255 break; 265 256 266 257 if (i == VLAN_VID_MASK) { ··· 388 379 struct net_device *new_dev; 389 380 struct net_device *real_dev; /* the ethernet device */ 390 381 char name[IFNAMSIZ]; 382 + int i; 391 383 392 384 #ifdef VLAN_DEBUG 393 385 printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n", ··· 554 544 if (!grp) 555 545 goto out_free_unregister; 556 546 547 + for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) { 548 + grp->vlan_devices_arrays[i] = kzalloc( 549 + sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN, 550 + GFP_KERNEL); 551 + 552 + if (!grp->vlan_devices_arrays[i]) 553 + goto out_free_arrays; 554 + } 555 + 557 556 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 558 557 grp->real_dev_ifindex = real_dev->ifindex; 559 558 ··· 573 554 real_dev->vlan_rx_register(real_dev, grp); 574 555 } 575 556 576 - grp->vlan_devices[VLAN_ID] = new_dev; 557 + vlan_group_set_device(grp, VLAN_ID, new_dev); 577 558 578 559 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */ 579 560 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n", ··· 589 570 printk(VLAN_DBG "Allocated new device successfully, returning.\n"); 590 571 #endif 591 572 return new_dev; 573 + 574 + out_free_arrays: 575 + vlan_group_free(grp); 592 576 593 577 out_free_unregister: 594 578 unregister_netdev(new_dev); ··· 628 606 case NETDEV_CHANGE: 629 607 /* Propagate real device state to vlan devices */ 630 608 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 631 - vlandev = grp->vlan_devices[i]; 609 + vlandev = vlan_group_get_device(grp, i); 632 610 if (!vlandev) 633 611 continue; 634 612 ··· 639 617 case NETDEV_DOWN: 640 618 /* Put all VLANs for this dev in the down state too. 
*/ 641 619 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 642 - vlandev = grp->vlan_devices[i]; 620 + vlandev = vlan_group_get_device(grp, i); 643 621 if (!vlandev) 644 622 continue; 645 623 ··· 654 632 case NETDEV_UP: 655 633 /* Put all VLANs for this dev in the up state too. */ 656 634 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 657 - vlandev = grp->vlan_devices[i]; 635 + vlandev = vlan_group_get_device(grp, i); 658 636 if (!vlandev) 659 637 continue; 660 638 ··· 671 649 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 672 650 int ret; 673 651 674 - vlandev = grp->vlan_devices[i]; 652 + vlandev = vlan_group_get_device(grp, i); 675 653 if (!vlandev) 676 654 continue; 677 655
+4 -3
net/ipv4/cipso_ipv4.c
··· 732 732 *net_lvl = host_lvl; 733 733 return 0; 734 734 case CIPSO_V4_MAP_STD: 735 - if (host_lvl < doi_def->map.std->lvl.local_size) { 735 + if (host_lvl < doi_def->map.std->lvl.local_size && 736 + doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { 736 737 *net_lvl = doi_def->map.std->lvl.local[host_lvl]; 737 738 return 0; 738 739 } 739 - break; 740 + return -EPERM; 740 741 } 741 742 742 743 return -EINVAL; ··· 772 771 *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; 773 772 return 0; 774 773 } 775 - break; 774 + return -EPERM; 776 775 } 777 776 778 777 return -EINVAL;
+3 -3
net/unix/af_unix.c
··· 934 934 935 935 sched = !sock_flag(other, SOCK_DEAD) && 936 936 !(other->sk_shutdown & RCV_SHUTDOWN) && 937 - (skb_queue_len(&other->sk_receive_queue) > 937 + (skb_queue_len(&other->sk_receive_queue) >= 938 938 other->sk_max_ack_backlog); 939 939 940 940 unix_state_runlock(other); ··· 1008 1008 if (other->sk_state != TCP_LISTEN) 1009 1009 goto out_unlock; 1010 1010 1011 - if (skb_queue_len(&other->sk_receive_queue) > 1011 + if (skb_queue_len(&other->sk_receive_queue) >= 1012 1012 other->sk_max_ack_backlog) { 1013 1013 err = -EAGAIN; 1014 1014 if (!timeo) ··· 1381 1381 } 1382 1382 1383 1383 if (unix_peer(other) != sk && 1384 - (skb_queue_len(&other->sk_receive_queue) > 1384 + (skb_queue_len(&other->sk_receive_queue) >= 1385 1385 other->sk_max_ack_backlog)) { 1386 1386 if (!timeo) { 1387 1387 err = -EAGAIN;