
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/soft-interface.c
net/mac80211/mlme.c

With merge help from Antonio Quartulli (batman-adv) and
Stephen Rothwell (drivers/net/usb/qmi_wwan.c).

The net/mac80211/mlme.c conflict seemed easy enough, accounting for a
conversion to some new tracing macros.

Signed-off-by: David S. Miller <davem@davemloft.net>

+137 -90
+8 -4
drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@
 
         skb_frag_size_set(frag, size);
         skb->data_len += size;
-        skb->truesize += size;
+        skb->truesize += PAGE_SIZE;
     } else
         skb_put(skb, length);
 
@@ -156,14 +156,18 @@
     struct ipoib_dev_priv *priv = netdev_priv(dev);
     struct sk_buff *skb;
     int buf_size;
+    int tailroom;
     u64 *mapping;
 
-    if (ipoib_ud_need_sg(priv->max_ib_mtu))
+    if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
         buf_size = IPOIB_UD_HEAD_SIZE;
-    else
+        tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+    } else {
         buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+        tailroom = 0;
+    }
 
-    skb = dev_alloc_skb(buf_size + 4);
+    skb = dev_alloc_skb(buf_size + tailroom + 4);
     if (unlikely(!skb))
         return NULL;
 
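
The truesize hunk is worth a note: skb->truesize must account for the memory an skb actually pins, so when a whole page is attached as a fragment the accounting charges PAGE_SIZE even if only part of the page is filled. A minimal sketch of the pattern, with placeholder skb/page/size variables (generic skb API, not the ipoib helper itself):

    /* Attach one page as fragment 0 and charge the full page. */
    skb_fill_page_desc(skb, 0, page, 0, size);
    skb->len      += size;
    skb->data_len += size;
    skb->truesize += PAGE_SIZE;  /* the whole page is held, not just 'size' */
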
+2 -2
drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@
             skb = NULL;
         else if (*debug & DEBUG_SEND_ERR)
             printk(KERN_DEBUG
-                   "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-                   __func__, ch->nr, hh->prim, ch->addr, ret);
+                   "%s mgr prim(%x) err %d\n",
+                   __func__, hh->prim, ret);
     }
 out:
     mutex_unlock(&st->lmutex);
+1 -1
drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+6 -3
drivers/net/bonding/bond_main.c
@@ -3230,6 +3230,12 @@
     switch (event) {
     case NETDEV_CHANGENAME:
         return bond_event_changename(event_bond);
+    case NETDEV_UNREGISTER:
+        bond_remove_proc_entry(event_bond);
+        break;
+    case NETDEV_REGISTER:
+        bond_create_proc_entry(event_bond);
+        break;
     default:
         break;
     }
@@ -4420,8 +4414,6 @@
 
     bond_work_cancel_all(bond);
 
-    bond_remove_proc_entry(bond);
-
     bond_debug_unregister(bond);
 
     __hw_addr_flush(&bond->mc_list);
@@ -4821,7 +4817,6 @@
 
     bond_set_lockdep_class(bond_dev);
 
-    bond_create_proc_entry(bond);
     list_add_tail(&bond->bond_list, &bn->dev_list);
 
     bond_prepare_sysfs_group(bond);
-1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@
     if ((phy_data & BMSR_LSTATUS) == 0) {
         /* link down */
         netif_carrier_off(netdev);
-        netif_stop_queue(netdev);
         hw->hibernate = true;
         if (atl1c_reset_mac(hw) != 0)
             if (netif_msg_hw(adapter))
+2 -2
drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@
         dma_unmap_single(bp->sdev->dma_dev, mapping,
                  RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
         dev_kfree_skb_any(skb);
-        skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+        skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
         if (skb == NULL)
             return -ENOMEM;
         mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@
         dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                  DMA_TO_DEVICE);
 
-        bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+        bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
         if (!bounce_skb)
             goto err_out;
 
+3 -3
drivers/net/ethernet/broadcom/bnx2.c
@@ -5377,7 +5377,7 @@
         int k, last;
 
         if (skb == NULL) {
-            j++;
+            j = NEXT_TX_BD(j);
             continue;
         }
 
@@ -5389,8 +5389,8 @@
         tx_buf->skb = NULL;
 
         last = tx_buf->nr_frags;
-        j++;
-        for (k = 0; k < last; k++, j++) {
+        j = NEXT_TX_BD(j);
+        for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
             tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
             dma_unmap_page(&bp->pdev->dev,
                 dma_unmap_addr(tx_buf, mapping),
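
Here a bare j++ could step onto the chain descriptor that terminates each ring page; NEXT_TX_BD() advances the index in a wrap-aware way. The real macro lives in bnx2.h; the following is only an illustrative reimplementation of the idea, under the assumption that the last slot of each ring page holds a chain descriptor:

    /* Hypothetical sketch, not the driver's macro: skip the final
     * (chain) slot of each descriptor page when advancing the index.
     */
    #define DESC_PER_PAGE   256  /* assumed ring-page capacity */
    #define NEXT_BD(x)      ((((x) & (DESC_PER_PAGE - 1)) == \
                              (DESC_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
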
+2 -1
drivers/net/ethernet/broadcom/cnic.c
@@ -542,7 +542,8 @@
     }
 
     if (atomic_read(&ulp_ops->ref_count) != 0)
-        netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+        pr_warn("%s: Failed waiting for ref count to go to zero\n",
+            __func__);
     return 0;
 
 out_unlock:
+3 -4
drivers/net/ethernet/freescale/gianfar.c
@@ -2077,10 +2077,9 @@
             return NETDEV_TX_OK;
         }
 
-        /* Steal sock reference for processing TX time stamps */
-        swap(skb_new->sk, skb->sk);
-        swap(skb_new->destructor, skb->destructor);
-        kfree_skb(skb);
+        if (skb->sk)
+            skb_set_owner_w(skb_new, skb->sk);
+        consume_skb(skb);
         skb = skb_new;
     }
 
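
The gianfar change replaces a hand-rolled sk/destructor swap with the proper helpers: skb_set_owner_w() transfers socket ownership (and memory accounting) to the new skb, and consume_skb() frees the old one without counting it as a drop. A sketch of the general replace-an-skb pattern, with a hypothetical reallocation step:

    new_skb = skb_realloc_headroom(skb, needed);  /* hypothetical reason to replace */
    if (new_skb) {
            if (skb->sk)
                    skb_set_owner_w(new_skb, skb->sk);  /* keep socket accounting */
            consume_skb(skb);  /* freed normally, not recorded as a drop */
            skb = new_skb;
    }
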
+5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6640,6 +6640,11 @@
         return -EINVAL;
     }
 
+    if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+        e_err(drv, "Enable failed, SR-IOV enabled\n");
+        return -EINVAL;
+    }
+
     /* Hardware supports up to 8 traffic classes */
     if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
         (hw->mac.type == ixgbe_mac_82598EB &&
+2 -1
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@
         desc->des3 = desc->des2 + BUF_SIZE_4KiB;
         priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
                         csum);
-
+        wmb();
         entry = (++priv->cur_tx) % txsize;
         desc = priv->dma_tx + entry;
 
@@ -59,6 +59,7 @@
                         len, DMA_TO_DEVICE);
         desc->des3 = desc->des2 + BUF_SIZE_4KiB;
         priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+        wmb();
         priv->hw->desc->set_tx_owner(desc);
         priv->tx_skbuff[entry] = NULL;
     } else {
+3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1334,6 +1334,7 @@
             priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
             wmb();
             priv->hw->desc->set_tx_owner(desc);
+            wmb();
         }
 
         /* Interrupt on completition only for the latest segment */
@@ -1350,6 +1349,7 @@
 
     /* To avoid raise condition */
     priv->hw->desc->set_tx_owner(first);
+    wmb();
 
     priv->cur_tx++;
 
@@ -1414,6 +1412,7 @@
         }
         wmb();
         priv->hw->desc->set_rx_owner(p + entry);
+        wmb();
     }
 }
 
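
All of these wmb() additions enforce the standard DMA descriptor handoff protocol: every descriptor field must be globally visible before the ownership bit flips, or the NIC can race against a half-written descriptor. A generic sketch of the ordering (hypothetical descriptor layout, not the stmmac one):

    desc->buf = dma_addr;       /* 1. fill in the descriptor          */
    desc->len = len;
    wmb();                      /* 2. order the writes above ...      */
    desc->status |= DESC_OWN;   /* 3. ... before handing it to the HW */
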
+8 -2
drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@
     struct mdio_mux_parent_bus *pb = cb->parent;
     int r;
 
-    mutex_lock(&pb->mii_bus->mdio_lock);
+    /* In theory multiple mdio_mux could be stacked, thus creating
+     * more than a single level of nesting.  But in practice,
+     * SINGLE_DEPTH_NESTING will cover the vast majority of use
+     * cases.  We use it, instead of trying to handle the general
+     * case.
+     */
+    mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
     r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
     if (r)
         goto out;
@@ -77,7 +71,7 @@
 
     int r;
 
-    mutex_lock(&pb->mii_bus->mdio_lock);
+    mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
     r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
     if (r)
         goto out;
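
mutex_lock_nested() is the lockdep annotation for intentionally acquiring two locks of the same class, as happens when a muxed MDIO bus nests inside its parent bus; without it, lockdep reports a false recursive deadlock. A minimal sketch with hypothetical parent/child objects whose mutexes share a lock class:

    mutex_lock(&parent->mdio_lock);
    /* Same lock class again: tell lockdep this is one nesting level. */
    mutex_lock_nested(&child->mdio_lock, SINGLE_DEPTH_NESTING);
    /* ... perform the muxed access ... */
    mutex_unlock(&child->mdio_lock);
    mutex_unlock(&parent->mdio_lock);
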
+18
drivers/net/usb/qmi_wwan.c
@@ -374,6 +374,15 @@
     .data = BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+    .description = "Qualcomm WWAN/QMI device",
+    .flags = FLAG_WWAN,
+    .bind = qmi_wwan_bind_shared,
+    .unbind = qmi_wwan_unbind,
+    .manage_power = qmi_wwan_manage_power,
+    .data = BIT(2), /* interface whitelist bitmap */
+};
+
 static const struct driver_info qmi_wwan_force_int3 = {
     .description = "Qualcomm WWAN/QMI device",
     .flags = FLAG_WWAN,
@@ -534,6 +525,15 @@
     .bInterfaceSubClass = 0xff,
     .bInterfaceProtocol = 0xff,
     .driver_info = (unsigned long)&qmi_wwan_force_int4,
+},
+{ /* ZTE MF60 */
+    .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+    .idVendor = 0x19d2,
+    .idProduct = 0x1402,
+    .bInterfaceClass = 0xff,
+    .bInterfaceSubClass = 0xff,
+    .bInterfaceProtocol = 0xff,
+    .driver_info = (unsigned long)&qmi_wwan_force_int2,
 },
 { /* Sierra Wireless MC77xx in QMI mode */
     .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+1 -1
drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@
     meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
     /* create a bounce buffer in zone_dma on mapping failure. */
     if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-        bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+        bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
         if (!bounce_skb) {
             ring->current_slot = old_top_slot;
             ring->used_slots = old_used_slots;
+2 -2
drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@
         return 0;
     }
 
-    if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+    if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
         IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
             key_flags);
         spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@
     memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
     il->stations[sta_id].sta.key.key_flags =
         STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-    il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+    il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
     il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
     il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
+6 -8
drivers/net/wireless/iwlegacy/common.c
@@ -4768,14 +4768,12 @@
         return;
 
     /* monitor and check for other stuck queues */
-    if (il_is_any_associated(il)) {
-        for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-            /* skip as we already checked the command queue */
-            if (cnt == il->cmd_queue)
-                continue;
-            if (il_check_stuck_queue(il, cnt))
-                return;
-        }
+    for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+        /* skip as we already checked the command queue */
+        if (cnt == il->cmd_queue)
+            continue;
+        if (il_check_stuck_queue(il, cnt))
+            return;
     }
 
     mod_timer(&il->watchdog,
+1
drivers/net/wireless/mwifiex/cfg80211.c
@@ -976,6 +976,7 @@
     case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
         /* firmware doesn't support this type of hidden SSID */
     default:
+        kfree(bss_cfg);
         return -EINVAL;
     }
 
+1 -1
drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@
     case QID_RX:
         if (!rt2x00queue_full(queue))
             rt2x00queue_for_each_entry(queue,
-                           Q_INDEX_DONE,
                            Q_INDEX,
+                           Q_INDEX_DONE,
                            NULL,
                            rt2x00usb_kick_rx_entry);
         break;
+1 -1
include/net/netfilter/nf_conntrack_ecache.h
@@ -78,7 +78,7 @@
     struct net *net = nf_ct_net(ct);
     struct nf_conntrack_ecache *e;
 
-    if (net->ct.nf_conntrack_event_cb == NULL)
+    if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
         return;
 
     e = nf_ct_ecache_find(ct);
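
nf_conntrack_event_cb is an __rcu pointer, so even a pure NULL test should go through the RCU accessors; rcu_access_pointer() fetches the pointer value without requiring a read-side critical section, which is correct when the value is only compared and never dereferenced. The general pattern, sketched with a hypothetical __rcu pointer p:

    if (!rcu_access_pointer(p))     /* value-only check, no rcu_read_lock() */
            return;

    rcu_read_lock();
    v = rcu_dereference(p);         /* an actual dereference needs the lock */
    if (v)
            v->notify(v);           /* hypothetical callback */
    rcu_read_unlock();
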
+3
net/8021q/vlan.c
@@ -403,6 +403,9 @@
         break;
 
     case NETDEV_DOWN:
+        if (dev->features & NETIF_F_HW_VLAN_FILTER)
+            vlan_vid_del(dev, 0);
+
         /* Put all VLANs for this dev in the down state too. */
         for (i = 0; i < VLAN_N_VID; i++) {
             vlandev = vlan_group_get_device(grp, i);
+11 -4
net/batman-adv/bridge_loop_avoidance.c
@@ -1381,6 +1381,7 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1391,7 +1390,8 @@
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast)
 {
     struct ethhdr *ethhdr;
     struct batadv_claim search_claim, *claim = NULL;
@@ -1411,7 +1409,7 @@
 
     if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
         /* don't allow broadcasts while requests are in flight */
-        if (is_multicast_ether_addr(ethhdr->h_dest))
+        if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
             goto handled;
 
     memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1437,8 +1435,13 @@
     }
 
     /* if it is a broadcast ... */
-    if (is_multicast_ether_addr(ethhdr->h_dest)) {
-        /* ... drop it. the responsible gateway is in charge. */
+    if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+        /* ... drop it. the responsible gateway is in charge.
+         *
+         * We need to check is_bcast because with the gateway
+         * feature, broadcasts (like DHCP requests) may be sent
+         * using a unicast packet type.
+         */
         goto handled;
     } else {
         /* seems the client considers us as its best gateway.
+4 -2
net/batman-adv/bridge_loop_avoidance.h
@@ -21,7 +21,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast);
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                   struct batadv_orig_node *orig_node, int hdr_size);
@@ -41,7 +40,8 @@
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                struct sk_buff *skb, short vid)
+                struct sk_buff *skb, short vid,
+                bool is_bcast)
 {
     return 0;
 }
+5 -1
net/batman-adv/soft-interface.c
@@ -267,8 +267,12 @@
     struct batadv_priv *bat_priv = netdev_priv(soft_iface);
     struct ethhdr *ethhdr;
     struct vlan_ethhdr *vhdr;
+    struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
     short vid __maybe_unused = -1;
     __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+    bool is_bcast;
+
+    is_bcast = (batadv_header->packet_type == BATADV_BCAST);
 
     /* check if enough space is available for pulling, and pull */
     if (!pskb_may_pull(skb, hdr_size))
@@ -319,7 +315,7 @@
     /* Let the bridge loop avoidance check the packet. If will
      * not handle it, we can safely push it up.
      */
-    if (batadv_bla_rx(bat_priv, skb, vid))
+    if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
         goto out;
 
     netif_rx(skb);
+6 -2
net/core/dev.c
@@ -2457,8 +2457,12 @@
 {
     struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-    if ((!skb->priority) && (skb->sk) && map)
-        skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+    if (!skb->priority && skb->sk && map) {
+        unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+        if (prioidx < map->priomap_len)
+            skb->priority = map->priomap[prioidx];
+    }
 }
 #else
 #define skb_update_prio(skb)
+4 -3
net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@
         return -ENOSPC;
     }
     set_bit(prioidx, prioidx_map);
+    if (atomic_read(&max_prioidx) < prioidx)
+        atomic_set(&max_prioidx, prioidx);
     spin_unlock_irqrestore(&prioidx_map_lock, flags);
-    atomic_set(&max_prioidx, prioidx);
     *prio = prioidx;
     return 0;
 }
@@ -142,7 +141,7 @@
     rtnl_lock();
     for_each_netdev(&init_net, dev) {
         map = rtnl_dereference(dev->priomap);
-        if (map)
+        if (map && cs->prioidx < map->priomap_len)
             map->priomap[cs->prioidx] = 0;
     }
     rtnl_unlock();
@@ -166,7 +165,7 @@
     rcu_read_lock();
     for_each_netdev_rcu(&init_net, dev) {
         map = rcu_dereference(dev->priomap);
-        priority = map ? map->priomap[prioidx] : 0;
+        priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
         cb->fill(cb, dev->name, priority);
     }
     rcu_read_unlock();
+6 -6
net/ieee802154/dgram.c
@@ -230,6 +230,12 @@
     mtu = dev->mtu;
     pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+    if (size > mtu) {
+        pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+        err = -EINVAL;
+        goto out_dev;
+    }
+
     hlen = LL_RESERVED_SPACE(dev);
     tlen = dev->needed_tailroom;
     skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -263,12 +257,6 @@
     err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
     if (err < 0)
         goto out_skb;
-
-    if (size > mtu) {
-        pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-        err = -EINVAL;
-        goto out_skb;
-    }
 
     skb->dev = dev;
     skb->sk = sk;
+2 -4
net/mac80211/mlme.c
@@ -2152,15 +2152,13 @@
                mgmt->sa, status_code);
         ieee80211_destroy_assoc_data(sdata, false);
     } else {
-        sdata_info(sdata, "associated\n");
-
         if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
             /* oops -- internal error -- send timeout for now */
-            ieee80211_destroy_assoc_data(sdata, true);
-            sta_info_destroy_addr(sdata, mgmt->bssid);
+            ieee80211_destroy_assoc_data(sdata, false);
             cfg80211_put_bss(*bss);
             return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
         }
+        sdata_info(sdata, "associated\n");
 
         /*
          * destroy assoc_data afterwards, as otherwise an idle
+1 -1
net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@
         max_rates = sband->n_bitrates;
     }
 
-    msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+    msp = kzalloc(sizeof(*msp), gfp);
     if (!msp)
         return NULL;
 
+3 -1
net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -311,7 +310,8 @@
             info->del_set.flags, 0, UINT_MAX);
 
     /* Normalize to fit into jiffies */
-    if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+    if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+        add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
         add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
     if (info->add_set.index != IPSET_INVALID_ID)
         ip_set_add(info->add_set.index, skb, par, &add_opt);
+1 -1
net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@
 
     pr_debug("%p\n", sk);
 
-    if (llcp_sock == NULL)
+    if (llcp_sock == NULL || llcp_sock->dev == NULL)
         return -EBADFD;
 
     addr->sa_family = AF_NFC;
+1 -1
net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@
     return peer;
 
 new_UDP_peer:
-    _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+    _net("Rx UDP DGRAM from NEW peer");
     read_unlock_bh(&rxrpc_peer_lock);
     _leave(" = -EBUSY [new]");
     return ERR_PTR(-EBUSY);
+15 -27
net/sched/sch_netem.c
@@ -331,27 +331,20 @@
     return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
     struct sk_buff_head *list = &sch->q;
     psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-    struct sk_buff *skb;
+    struct sk_buff *skb = skb_peek_tail(list);
 
-    if (likely(skb_queue_len(list) < sch->limit)) {
-        skb = skb_peek_tail(list);
-        /* Optimize for add at tail */
-        if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-            return qdisc_enqueue_tail(nskb, sch);
+    /* Optimize for add at tail */
+    if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+        return __skb_queue_tail(list, nskb);
 
-        skb_queue_reverse_walk(list, skb) {
-            if (tnext >= netem_skb_cb(skb)->time_to_send)
-                break;
-        }
-
-        __skb_queue_after(list, skb, nskb);
-        sch->qstats.backlog += qdisc_pkt_len(nskb);
-        return NET_XMIT_SUCCESS;
+    skb_queue_reverse_walk(list, skb) {
+        if (tnext >= netem_skb_cb(skb)->time_to_send)
+            break;
     }
 
-    return qdisc_reshape_fail(nskb, sch);
+    __skb_queue_after(list, skb, nskb);
 }
@@ -361,7 +368,6 @@
     /* We don't fill cb now as skb_unshare() may invalidate it */
     struct netem_skb_cb *cb;
     struct sk_buff *skb2;
-    int ret;
     int count = 1;
 
     /* Random duplication */
@@ -411,6 +419,11 @@
             skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
     }
 
+    if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+        return qdisc_reshape_fail(skb, sch);
+
+    sch->qstats.backlog += qdisc_pkt_len(skb);
+
     cb = netem_skb_cb(skb);
     if (q->gap == 0 ||        /* not doing reordering */
         q->counter < q->gap - 1 ||    /* inside last reordering gap */
@@ -447,7 +450,7 @@
 
         cb->time_to_send = now + delay;
         ++q->counter;
-        ret = tfifo_enqueue(skb, sch);
+        tfifo_enqueue(skb, sch);
     } else {
         /*
          * Do re-ordering by putting one out of N packets at the front
@@ -457,16 +460,7 @@
         q->counter = 0;
 
         __skb_queue_head(&sch->q, skb);
-        sch->qstats.backlog += qdisc_pkt_len(skb);
         sch->qstats.requeues++;
-        ret = NET_XMIT_SUCCESS;
-    }
-
-    if (ret != NET_XMIT_SUCCESS) {
-        if (net_xmit_drop_count(ret)) {
-            sch->qstats.drops++;
-            return ret;
-        }
     }
 
     return NET_XMIT_SUCCESS;
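
After this refactor, tfifo_enqueue() cannot fail: the queue-limit check (qdisc_reshape_fail) and backlog accounting moved up into netem_enqueue(), and the helper only performs a time-ordered insert. The insert scans from the tail because most packets carry the newest time_to_send, so the walk usually stops at the first comparison. A sketch of that sorted-insert idiom, using a hypothetical timestamp accessor when():

    /* Insert nskb keeping the list sorted by send time. */
    struct sk_buff *skb = skb_peek_tail(list);

    if (!skb || when(nskb) >= when(skb))
            return __skb_queue_tail(list, nskb);    /* common fast path */

    skb_queue_reverse_walk(list, skb) {
            if (when(nskb) >= when(skb))
                    break;                          /* insert after this one */
    }
    __skb_queue_after(list, skb, nskb);
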