Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
tproxy: fix a possible read from an invalid location in the socket match
zd1211rw: use unaligned safe memcmp() in place of compare_ether_addr()
mac80211: use unaligned safe memcmp() in place of compare_ether_addr()
ipw2200: fix netif_*_queue() removal regression
iwlwifi: clean key table in iwl_clear_stations_table function
tcp: tcp_vegas ssthresh bug fix
can: omit received RTR frames for single ID filter lists
ATM: CVE-2008-5079: duplicate listen() on socket corrupts the vcc table
netx-eth: initialize per device spinlock
tcp: make urg+gso work for real this time
enc28j60: Fix sporadic packet loss (corrected again)
hysdn: fix writing outside the field on 64 bits
b1isa: fix b1isa_exit() to really remove registered capi controllers
can: Fix CAN_(EFF|RTR)_FLAG handling in can_filter
Phonet: do not dump addresses from other namespaces
netlabel: Fix a potential NULL pointer dereference
bnx2: Add workaround to handle missed MSI.
xfrm: Fix kernel panic when flush and dump SPD entries

+173 -54
+2 -4
drivers/isdn/hardware/avm/b1isa.c
··· 233 233 int i; 234 234 235 235 for (i = 0; i < MAX_CARDS; i++) { 236 - if (!io[i]) 237 - break; 238 - 239 - b1isa_remove(&isa_dev[i]); 236 + if (isa_dev[i].resource[0].start) 237 + b1isa_remove(&isa_dev[i]); 240 238 } 241 239 unregister_capi_driver(&capi_driver_b1isa); 242 240 }
+2 -2
drivers/isdn/hysdn/hysdn_net.c
··· 83 83 84 84 /* Fill in the MAC-level header (if not already set) */ 85 85 if (!card->mac_addr[0]) { 86 - for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++) 86 + for (i = 0; i < ETH_ALEN; i++) 87 87 dev->dev_addr[i] = 0xfc; 88 88 if ((in_dev = dev->ip_ptr) != NULL) { 89 89 struct in_ifaddr *ifa = in_dev->ifa_list; 90 90 if (ifa != NULL) 91 - memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long)); 91 + memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local)); 92 92 } 93 93 } else 94 94 memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
+32 -3
drivers/net/bnx2.c
··· 3144 3144 return 0; 3145 3145 } 3146 3146 3147 + static void 3148 + bnx2_chk_missed_msi(struct bnx2 *bp) 3149 + { 3150 + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 3151 + u32 msi_ctrl; 3152 + 3153 + if (bnx2_has_work(bnapi)) { 3154 + msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL); 3155 + if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) 3156 + return; 3157 + 3158 + if (bnapi->last_status_idx == bp->idle_chk_status_idx) { 3159 + REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & 3160 + ~BNX2_PCICFG_MSI_CONTROL_ENABLE); 3161 + REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); 3162 + bnx2_msi(bp->irq_tbl[0].vector, bnapi); 3163 + } 3164 + } 3165 + 3166 + bp->idle_chk_status_idx = bnapi->last_status_idx; 3167 + } 3168 + 3147 3169 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3148 3170 { 3149 3171 struct status_block *sblk = bnapi->status_blk.msi; ··· 3240 3218 3241 3219 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3242 3220 3243 - if (unlikely(work_done >= budget)) 3244 - break; 3245 - 3246 3221 /* bnapi->last_status_idx is used below to tell the hw how 3247 3222 * much work has been processed, so we must read it before 3248 3223 * checking for more work. 3249 3224 */ 3250 3225 bnapi->last_status_idx = sblk->status_idx; 3226 + 3227 + if (unlikely(work_done >= budget)) 3228 + break; 3229 + 3251 3230 rmb(); 3252 3231 if (likely(!bnx2_has_work(bnapi))) { 3253 3232 netif_rx_complete(bp->dev, napi); ··· 4593 4570 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 4594 4571 bp->bnx2_napi[i].last_status_idx = 0; 4595 4572 4573 + bp->idle_chk_status_idx = 0xffff; 4574 + 4596 4575 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; 4597 4576 4598 4577 /* Set up how to generate a link change interrupt. 
*/ ··· 5742 5717 5743 5718 if (atomic_read(&bp->intr_sem) != 0) 5744 5719 goto bnx2_restart_timer; 5720 + 5721 + if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == 5722 + BNX2_FLAG_USING_MSI) 5723 + bnx2_chk_missed_msi(bp); 5745 5724 5746 5725 bnx2_send_heart_beat(bp); 5747 5726
+6
drivers/net/bnx2.h
··· 378 378 * pci_config_l definition 379 379 * offset: 0000 380 380 */ 381 + #define BNX2_PCICFG_MSI_CONTROL 0x00000058 382 + #define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16) 383 + 381 384 #define BNX2_PCICFG_MISC_CONFIG 0x00000068 382 385 #define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) 383 386 #define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) ··· 6866 6863 6867 6864 u8 num_tx_rings; 6868 6865 u8 num_rx_rings; 6866 + 6867 + u32 idle_chk_status_idx; 6868 + 6869 6869 }; 6870 6870 6871 6871 #define REG_RD(bp, offset) \
+14 -2
drivers/net/enc28j60.c
··· 568 568 return erxrdpt; 569 569 } 570 570 571 + /* 572 + * Calculate wrap around when reading beyond the end of the RX buffer 573 + */ 574 + static u16 rx_packet_start(u16 ptr) 575 + { 576 + if (ptr + RSV_SIZE > RXEND_INIT) 577 + return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1); 578 + else 579 + return ptr + RSV_SIZE; 580 + } 581 + 571 582 static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) 572 583 { 573 584 u16 erxrdpt; ··· 949 938 skb->dev = ndev; 950 939 skb_reserve(skb, NET_IP_ALIGN); 951 940 /* copy the packet from the receive buffer */ 952 - enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 953 - len, skb_put(skb, len)); 941 + enc28j60_mem_read(priv, 942 + rx_packet_start(priv->next_pk_ptr), 943 + len, skb_put(skb, len)); 954 944 if (netif_msg_pktdata(priv)) 955 945 dump_packet(__func__, skb->len, skb->data); 956 946 skb->protocol = eth_type_trans(skb, ndev);
+2
drivers/net/netx-eth.c
··· 401 401 priv->xmac_base = priv->xc->xmac_base; 402 402 priv->sram_base = priv->xc->sram_base; 403 403 404 + spin_lock_init(&priv->lock); 405 + 404 406 ret = pfifo_request(PFIFO_MASK(priv->id)); 405 407 if (ret) { 406 408 printk("unable to request PFIFO\n");
+4
drivers/net/wireless/ipw2200.c
··· 3897 3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3898 3898 return 0; 3899 3899 ipw_send_disassociate(data, 0); 3900 + netif_carrier_off(priv->net_dev); 3900 3901 return 1; 3901 3902 } 3902 3903 ··· 10190 10189 u8 id, hdr_len, unicast; 10191 10190 u16 remaining_bytes; 10192 10191 int fc; 10192 + 10193 + if (!(priv->status & STATUS_ASSOCIATED)) 10194 + goto drop; 10193 10195 10194 10196 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10195 10197 switch (priv->ieee->iw_mode) {
+3
drivers/net/wireless/iwlwifi/iwl-core.c
··· 290 290 priv->num_stations = 0; 291 291 memset(priv->stations, 0, sizeof(priv->stations)); 292 292 293 + /* clean ucode key table bit map */ 294 + priv->ucode_key_table = 0; 295 + 293 296 spin_unlock_irqrestore(&priv->sta_lock, flags); 294 297 } 295 298 EXPORT_SYMBOL(iwl_clear_stations_table);
+21 -3
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 475 475 if (!test_and_set_bit(i, &priv->ucode_key_table)) 476 476 return i; 477 477 478 - return -1; 478 + return WEP_INVALID_OFFSET; 479 479 } 480 480 481 481 int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) ··· 620 620 /* else, we are overriding an existing key => no need to allocated room 621 621 * in uCode. */ 622 622 623 + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 624 + "no space for new kew"); 625 + 623 626 priv->stations[sta_id].sta.key.key_flags = key_flags; 624 627 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 625 628 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; ··· 640 637 { 641 638 unsigned long flags; 642 639 __le16 key_flags = 0; 640 + int ret; 643 641 644 642 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 645 643 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); ··· 668 664 /* else, we are overriding an existing key => no need to allocated room 669 665 * in uCode. */ 670 666 667 + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 668 + "no space for new kew"); 669 + 671 670 priv->stations[sta_id].sta.key.key_flags = key_flags; 672 671 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 673 672 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 674 673 674 + ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 675 + 675 676 spin_unlock_irqrestore(&priv->sta_lock, flags); 676 677 677 - IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 678 - return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 678 + return ret; 679 679 } 680 680 681 681 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, ··· 703 695 iwl_get_free_ucode_key_index(priv); 704 696 /* else, we are overriding an existing key => no need to allocated room 705 697 * in uCode. 
*/ 698 + 699 + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 700 + "no space for new kew"); 706 701 707 702 /* This copy is acutally not needed: we get the key with each TX */ 708 703 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); ··· 741 730 * been replaced by another one with different index. 742 731 * Don't do anything and return ok 743 732 */ 733 + spin_unlock_irqrestore(&priv->sta_lock, flags); 734 + return 0; 735 + } 736 + 737 + if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { 738 + IWL_WARNING("Removing wrong key %d 0x%x\n", 739 + keyconf->keyidx, key_flags); 744 740 spin_unlock_irqrestore(&priv->sta_lock, flags); 745 741 return 0; 746 742 }
+1 -1
drivers/net/wireless/zd1211rw/zd_mac.c
··· 615 615 struct ieee80211_hdr *tx_hdr; 616 616 617 617 tx_hdr = (struct ieee80211_hdr *)skb->data; 618 - if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 618 + if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) 619 619 { 620 620 __skb_unlink(skb, q); 621 621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
+1 -1
include/linux/can/core.h
··· 19 19 #include <linux/skbuff.h> 20 20 #include <linux/netdevice.h> 21 21 22 - #define CAN_VERSION "20071116" 22 + #define CAN_VERSION "20081130" 23 23 24 24 /* increment this number each time you change some user-space interface */ 25 25 #define CAN_ABI_VERSION "8"
+5 -1
net/atm/svc.c
··· 293 293 error = -EINVAL; 294 294 goto out; 295 295 } 296 - vcc_insert_socket(sk); 296 + if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { 297 + error = -EADDRINUSE; 298 + goto out; 299 + } 297 300 set_bit(ATM_VF_WAITING, &vcc->flags); 298 301 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 299 302 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); ··· 310 307 goto out; 311 308 } 312 309 set_bit(ATM_VF_LISTEN,&vcc->flags); 310 + vcc_insert_socket(sk); 313 311 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; 314 312 error = -sk->sk_err; 315 313 out:
+52 -16
net/can/af_can.c
··· 319 319 return n ? d : NULL; 320 320 } 321 321 322 + /** 323 + * find_rcv_list - determine optimal filterlist inside device filter struct 324 + * @can_id: pointer to CAN identifier of a given can_filter 325 + * @mask: pointer to CAN mask of a given can_filter 326 + * @d: pointer to the device filter struct 327 + * 328 + * Description: 329 + * Returns the optimal filterlist to reduce the filter handling in the 330 + * receive path. This function is called by service functions that need 331 + * to register or unregister a can_filter in the filter lists. 332 + * 333 + * A filter matches in general, when 334 + * 335 + * <received_can_id> & mask == can_id & mask 336 + * 337 + * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe 338 + * relevant bits for the filter. 339 + * 340 + * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 341 + * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames 342 + * there is a special filterlist and a special rx path filter handling. 343 + * 344 + * Return: 345 + * Pointer to optimal filterlist for the given can_id/mask pair. 346 + * Constistency checked mask. 347 + * Reduced can_id to have a preprocessed filter compare value. 
348 + */ 322 349 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, 323 350 struct dev_rcv_lists *d) 324 351 { 325 352 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ 326 353 327 - /* filter error frames */ 354 + /* filter for error frames in extra filterlist */ 328 355 if (*mask & CAN_ERR_FLAG) { 329 - /* clear CAN_ERR_FLAG in list entry */ 356 + /* clear CAN_ERR_FLAG in filter entry */ 330 357 *mask &= CAN_ERR_MASK; 331 358 return &d->rx[RX_ERR]; 332 359 } 333 360 334 - /* ensure valid values in can_mask */ 335 - if (*mask & CAN_EFF_FLAG) 336 - *mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG); 337 - else 338 - *mask &= (CAN_SFF_MASK | CAN_RTR_FLAG); 361 + /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */ 362 + 363 + #define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG) 364 + 365 + /* ensure valid values in can_mask for 'SFF only' frame filtering */ 366 + if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG)) 367 + *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS); 339 368 340 369 /* reduce condition testing at receive time */ 341 370 *can_id &= *mask; ··· 377 348 if (!(*mask)) 378 349 return &d->rx[RX_ALL]; 379 350 380 - /* use extra filterset for the subscription of exactly *ONE* can_id */ 381 - if (*can_id & CAN_EFF_FLAG) { 382 - if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) { 383 - /* RFC: a use-case for hash-tables in the future? */ 384 - return &d->rx[RX_EFF]; 351 + /* extra filterlists for the subscription of a single non-RTR can_id */ 352 + if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) 353 + && !(*can_id & CAN_RTR_FLAG)) { 354 + 355 + if (*can_id & CAN_EFF_FLAG) { 356 + if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { 357 + /* RFC: a future use-case for hash-tables? 
*/ 358 + return &d->rx[RX_EFF]; 359 + } 360 + } else { 361 + if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) 362 + return &d->rx_sff[*can_id]; 385 363 } 386 - } else { 387 - if (*mask == CAN_SFF_MASK) 388 - return &d->rx_sff[*can_id]; 389 364 } 390 365 391 366 /* default: filter via can_id/can_mask */ ··· 622 589 } 623 590 } 624 591 625 - /* check CAN_ID specific entries */ 592 + /* check filterlists for single non-RTR can_ids */ 593 + if (can_id & CAN_RTR_FLAG) 594 + return matches; 595 + 626 596 if (can_id & CAN_EFF_FLAG) { 627 597 hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { 628 598 if (r->can_id == can_id) {
+4 -3
net/can/bcm.c
··· 64 64 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ 65 65 66 66 /* get best masking value for can_rx_register() for a given single can_id */ 67 - #define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \ 68 - (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK)) 67 + #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \ 68 + (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 69 + (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 69 70 70 - #define CAN_BCM_VERSION "20080415" 71 + #define CAN_BCM_VERSION CAN_VERSION 71 72 static __initdata const char banner[] = KERN_INFO 72 73 "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; 73 74
+10 -12
net/ipv4/tcp_output.c
··· 722 722 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, 723 723 unsigned int mss_now) 724 724 { 725 - if (skb->len <= mss_now || !sk_can_gso(sk) || 726 - tcp_urg_mode(tcp_sk(sk))) { 725 + if (skb->len <= mss_now || !sk_can_gso(sk)) { 727 726 /* Avoid the costly divide in the normal 728 727 * non-TSO case. 729 728 */ ··· 1028 1029 1029 1030 /* Compute the current effective MSS, taking SACKs and IP options, 1030 1031 * and even PMTU discovery events into account. 1031 - * 1032 - * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up 1033 - * cannot be large. However, taking into account rare use of URG, this 1034 - * is not a big flaw. 1035 1032 */ 1036 1033 unsigned int tcp_current_mss(struct sock *sk, int large_allowed) 1037 1034 { ··· 1042 1047 1043 1048 mss_now = tp->mss_cache; 1044 1049 1045 - if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) 1050 + if (large_allowed && sk_can_gso(sk)) 1046 1051 doing_tso = 1; 1047 1052 1048 1053 if (dst) { ··· 1159 1164 { 1160 1165 int tso_segs = tcp_skb_pcount(skb); 1161 1166 1162 - if (!tso_segs || 1163 - (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now || 1164 - tcp_urg_mode(tcp_sk(sk))))) { 1167 + if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1165 1168 tcp_set_skb_tso_segs(sk, skb, mss_now); 1166 1169 tso_segs = tcp_skb_pcount(skb); 1167 1170 } ··· 1512 1519 * send_head. This happens as incoming acks open up the remote 1513 1520 * window for us. 1514 1521 * 1522 + * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1523 + * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1524 + * account rare use of URG, this is not a big flaw. 1525 + * 1515 1526 * Returns 1, if no segments are in flight and we have queued segments, but 1516 1527 * cannot send anything now because of SWS or another problem. 
1517 1528 */ ··· 1567 1570 } 1568 1571 1569 1572 limit = mss_now; 1570 - if (tso_segs > 1) 1573 + if (tso_segs > 1 && !tcp_urg_mode(tp)) 1571 1574 limit = tcp_mss_split_point(sk, skb, mss_now, 1572 1575 cwnd_quota); 1573 1576 ··· 1616 1619 */ 1617 1620 void tcp_push_one(struct sock *sk, unsigned int mss_now) 1618 1621 { 1622 + struct tcp_sock *tp = tcp_sk(sk); 1619 1623 struct sk_buff *skb = tcp_send_head(sk); 1620 1624 unsigned int tso_segs, cwnd_quota; 1621 1625 ··· 1631 1633 BUG_ON(!tso_segs); 1632 1634 1633 1635 limit = mss_now; 1634 - if (tso_segs > 1) 1636 + if (tso_segs > 1 && !tcp_urg_mode(tp)) 1635 1637 limit = tcp_mss_split_point(sk, skb, mss_now, 1636 1638 cwnd_quota); 1637 1639
+2
net/ipv4/tcp_vegas.c
··· 326 326 tp->snd_cwnd = 2; 327 327 else if (tp->snd_cwnd > tp->snd_cwnd_clamp) 328 328 tp->snd_cwnd = tp->snd_cwnd_clamp; 329 + 330 + tp->snd_ssthresh = tcp_current_ssthresh(sk); 329 331 } 330 332 331 333 /* Wipe the slate clean for the next RTT. */
+1 -1
net/mac80211/sta_info.c
··· 99 99 100 100 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 101 101 while (sta) { 102 - if (compare_ether_addr(sta->sta.addr, addr) == 0) 102 + if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 103 103 break; 104 104 sta = rcu_dereference(sta->hnext); 105 105 }
+1 -1
net/netfilter/xt_socket.c
··· 141 141 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, 142 142 saddr, daddr, sport, dport, par->in, false); 143 143 if (sk != NULL) { 144 - bool wildcard = (inet_sk(sk)->rcv_saddr == 0); 144 + bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0); 145 145 146 146 nf_tproxy_put_sock(sk); 147 147 if (wildcard)
+6 -4
net/netlabel/netlabel_unlabeled.c
··· 574 574 list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, 575 575 &iface->addr4_list); 576 576 spin_unlock(&netlbl_unlhsh_lock); 577 - if (list_entry == NULL) 577 + if (list_entry != NULL) 578 + entry = netlbl_unlhsh_addr4_entry(list_entry); 579 + else 578 580 ret_val = -ENOENT; 579 - entry = netlbl_unlhsh_addr4_entry(list_entry); 580 581 581 582 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, 582 583 audit_info); ··· 635 634 spin_lock(&netlbl_unlhsh_lock); 636 635 list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); 637 636 spin_unlock(&netlbl_unlhsh_lock); 638 - if (list_entry == NULL) 637 + if (list_entry != NULL) 638 + entry = netlbl_unlhsh_addr6_entry(list_entry); 639 + else 639 640 ret_val = -ENOENT; 640 - entry = netlbl_unlhsh_addr6_entry(list_entry); 641 641 642 642 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, 643 643 audit_info);
+3
net/phonet/pn_netlink.c
··· 123 123 124 124 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 125 125 { 126 + struct net *net = sock_net(skb->sk); 126 127 struct phonet_device *pnd; 127 128 int dev_idx = 0, dev_start_idx = cb->args[0]; 128 129 int addr_idx = 0, addr_start_idx = cb->args[1]; ··· 132 131 list_for_each_entry(pnd, &pndevs.list, list) { 133 132 u8 addr; 134 133 134 + if (!net_eq(dev_net(pnd->netdev), net)) 135 + continue; 135 136 if (dev_idx > dev_start_idx) 136 137 addr_start_idx = 0; 137 138 if (dev_idx++ < dev_start_idx)
+1
net/xfrm/xfrm_policy.c
··· 817 817 continue; 818 818 hlist_del(&pol->bydst); 819 819 hlist_del(&pol->byidx); 820 + list_del(&pol->walk.all); 820 821 write_unlock_bh(&xfrm_policy_lock); 821 822 822 823 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,