Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/iwlwifi/dvm/tx.c
net/ipv6/route.c

The ipv6 route.c conflict is simple; just ignore the 'net' side change
as we fixed the same problem in 'net-next' by eliminating cached
neighbours from ipv6 routes.

The e1000e conflict is an addition of a new statistic in the ethtool
code, trivial.

The vmxnet3 conflict is about one change in 'net' removing a guarding
conditional, whilst in 'net-next' we had a netdev_info() conversion.

The iwlwifi conflict is dealing with a WARN_ON() conversion in
'net-next' vs. a revert happening in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>

+290 -165
+1 -1
drivers/bcma/driver_chipcommon_nflash.c
··· 21 21 struct bcma_bus *bus = cc->core->bus; 22 22 23 23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && 24 - cc->core->id.rev != 0x38) { 24 + cc->core->id.rev != 38) { 25 25 bcma_err(bus, "NAND flash on unsupported board!\n"); 26 26 return -ENOTSUPP; 27 27 }
+1
drivers/net/bonding/bond_sysfs.c
··· 1053 1053 pr_info("%s: Setting primary slave to None.\n", 1054 1054 bond->dev->name); 1055 1055 bond->primary_slave = NULL; 1056 + memset(bond->params.primary, 0, sizeof(bond->params.primary)); 1056 1057 bond_select_active_slave(bond); 1057 1058 goto out; 1058 1059 }
+5 -1
drivers/net/can/c_can/c_can.c
··· 491 491 492 492 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 493 493 IFX_WRITE_LOW_16BIT(mask)); 494 + 495 + /* According to C_CAN documentation, the reserved bit 496 + * in IFx_MASK2 register is fixed 1 497 + */ 494 498 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), 495 - IFX_WRITE_HIGH_16BIT(mask)); 499 + IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); 496 500 497 501 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 498 502 IFX_WRITE_LOW_16BIT(id));
+4 -4
drivers/net/ethernet/emulex/benet/be.h
··· 36 36 37 37 #define DRV_VER "4.6.62.0u" 38 38 #define DRV_NAME "be2net" 39 - #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 40 - #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 41 - #define OC_NAME "Emulex OneConnect 10Gbps NIC" 39 + #define BE_NAME "Emulex BladeEngine2" 40 + #define BE3_NAME "Emulex BladeEngine3" 41 + #define OC_NAME "Emulex OneConnect" 42 42 #define OC_NAME_BE OC_NAME "(be3)" 43 43 #define OC_NAME_LANCER OC_NAME "(Lancer)" 44 44 #define OC_NAME_SH OC_NAME "(Skyhawk)" 45 - #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 45 + #define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" 46 46 47 47 #define BE_VENDOR_ID 0x19a2 48 48 #define EMULEX_VENDOR_ID 0x10df
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 25 25 MODULE_VERSION(DRV_VER); 26 26 MODULE_DEVICE_TABLE(pci, be_dev_ids); 27 27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); 28 - MODULE_AUTHOR("ServerEngines Corporation"); 28 + MODULE_AUTHOR("Emulex Corporation"); 29 29 MODULE_LICENSE("GPL"); 30 30 31 31 static unsigned int num_vfs;
+2 -6
drivers/net/ethernet/via/via-rhine.c
··· 1812 1812 rp->tx_skbuff[entry]->len, 1813 1813 PCI_DMA_TODEVICE); 1814 1814 } 1815 - dev_kfree_skb_irq(rp->tx_skbuff[entry]); 1815 + dev_kfree_skb(rp->tx_skbuff[entry]); 1816 1816 rp->tx_skbuff[entry] = NULL; 1817 1817 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1818 1818 } ··· 2024 2024 if (intr_status & IntrPCIErr) 2025 2025 netif_warn(rp, hw, dev, "PCI error\n"); 2026 2026 2027 - napi_disable(&rp->napi); 2028 - rhine_irq_disable(rp); 2029 - /* Slow and safe. Consider __napi_schedule as a replacement ? */ 2030 - napi_enable(&rp->napi); 2031 - napi_schedule(&rp->napi); 2027 + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); 2032 2028 2033 2029 out_unlock: 2034 2030 mutex_unlock(&rp->task_lock);
+24 -14
drivers/net/tun.c
··· 298 298 } 299 299 300 300 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 301 - u16 queue_index) 301 + struct tun_file *tfile) 302 302 { 303 303 struct hlist_head *head; 304 304 struct tun_flow_entry *e; 305 305 unsigned long delay = tun->ageing_time; 306 + u16 queue_index = tfile->queue_index; 306 307 307 308 if (!rxhash) 308 309 return; ··· 312 311 313 312 rcu_read_lock(); 314 313 315 - if (tun->numqueues == 1) 314 + /* We may get a very small possibility of OOO during switching, not 315 + * worth to optimize.*/ 316 + if (tun->numqueues == 1 || tfile->detached) 316 317 goto unlock; 317 318 318 319 e = tun_flow_find(head, rxhash); ··· 414 411 415 412 tun = rtnl_dereference(tfile->tun); 416 413 417 - if (tun) { 414 + if (tun && !tfile->detached) { 418 415 u16 index = tfile->queue_index; 419 416 BUG_ON(index >= tun->numqueues); 420 417 dev = tun->dev; 421 418 422 419 rcu_assign_pointer(tun->tfiles[index], 423 420 tun->tfiles[tun->numqueues - 1]); 424 - rcu_assign_pointer(tfile->tun, NULL); 425 421 ntfile = rtnl_dereference(tun->tfiles[index]); 426 422 ntfile->queue_index = index; 427 423 428 424 --tun->numqueues; 429 - if (clean) 425 + if (clean) { 426 + rcu_assign_pointer(tfile->tun, NULL); 430 427 sock_put(&tfile->sk); 431 - else 428 + } else 432 429 tun_disable_queue(tun, tfile); 433 430 434 431 synchronize_net(); ··· 442 439 } 443 440 444 441 if (clean) { 445 - if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && 446 - !(tun->flags & TUN_PERSIST)) 447 - if (tun->dev->reg_state == NETREG_REGISTERED) 442 + if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 443 + netif_carrier_off(tun->dev); 444 + 445 + if (!(tun->flags & TUN_PERSIST) && 446 + tun->dev->reg_state == NETREG_REGISTERED) 448 447 unregister_netdevice(tun->dev); 448 + } 449 449 450 450 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, 451 451 &tfile->socket.flags)); ··· 475 469 wake_up_all(&tfile->wq.wait); 476 470 rcu_assign_pointer(tfile->tun, NULL); 477 471 
--tun->numqueues; 472 + } 473 + list_for_each_entry(tfile, &tun->disabled, next) { 474 + wake_up_all(&tfile->wq.wait); 475 + rcu_assign_pointer(tfile->tun, NULL); 478 476 } 479 477 BUG_ON(tun->numqueues != 0); 480 478 ··· 510 500 goto out; 511 501 512 502 err = -EINVAL; 513 - if (rtnl_dereference(tfile->tun)) 503 + if (rtnl_dereference(tfile->tun) && !tfile->detached) 514 504 goto out; 515 505 516 506 err = -EBUSY; ··· 1213 1203 tun->dev->stats.rx_packets++; 1214 1204 tun->dev->stats.rx_bytes += len; 1215 1205 1216 - tun_flow_update(tun, rxhash, tfile->queue_index); 1206 + tun_flow_update(tun, rxhash, tfile); 1217 1207 return total_len; 1218 1208 } 1219 1209 ··· 1672 1662 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1673 1663 device_create_file(&tun->dev->dev, &dev_attr_group)) 1674 1664 pr_err("Failed to create tun sysfs files\n"); 1675 - 1676 - netif_carrier_on(tun->dev); 1677 1665 } 1666 + 1667 + netif_carrier_on(tun->dev); 1678 1668 1679 1669 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 1680 1670 ··· 1827 1817 ret = tun_attach(tun, file); 1828 1818 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1829 1819 tun = rtnl_dereference(tfile->tun); 1830 - if (!tun || !(tun->flags & TUN_TAP_MQ)) 1820 + if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) 1831 1821 ret = -EINVAL; 1832 1822 else 1833 1823 __tun_detach(tfile, false);
+1
drivers/net/usb/qmi_wwan.c
··· 461 461 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 462 462 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 463 463 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 464 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 464 465 465 466 /* 4. Gobi 1000 devices */ 466 467 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+29 -6
drivers/net/usb/usbnet.c
··· 380 380 unsigned long lockflags; 381 381 size_t size = dev->rx_urb_size; 382 382 383 + /* prevent rx skb allocation when error ratio is high */ 384 + if (test_bit(EVENT_RX_KILL, &dev->flags)) { 385 + usb_free_urb(urb); 386 + return -ENOLINK; 387 + } 388 + 383 389 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 384 390 if (!skb) { 385 391 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); ··· 543 537 dev->net->stats.rx_errors++; 544 538 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 545 539 break; 540 + } 541 + 542 + /* stop rx if packet error rate is high */ 543 + if (++dev->pkt_cnt > 30) { 544 + dev->pkt_cnt = 0; 545 + dev->pkt_err = 0; 546 + } else { 547 + if (state == rx_cleanup) 548 + dev->pkt_err++; 549 + if (dev->pkt_err > 20) 550 + set_bit(EVENT_RX_KILL, &dev->flags); 546 551 } 547 552 548 553 state = defer_bh(dev, skb, &dev->rxq, state); ··· 807 790 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" : 808 791 (dev->driver_info->flags & FLAG_FRAMING_AX) ? 
"ASIX" : 809 792 "simple"); 793 + 794 + /* reset rx error state */ 795 + dev->pkt_cnt = 0; 796 + dev->pkt_err = 0; 797 + clear_bit(EVENT_RX_KILL, &dev->flags); 810 798 811 799 // delay posting reads until we're fully open 812 800 tasklet_schedule (&dev->bh); ··· 1125 1103 if (info->tx_fixup) { 1126 1104 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1127 1105 if (!skb) { 1128 - if (netif_msg_tx_err(dev)) { 1129 - netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1130 - goto drop; 1131 - } else { 1132 - /* cdc_ncm collected packet; waits for more */ 1106 + /* packet collected; minidriver waiting for more */ 1107 + if (info->flags & FLAG_MULTI_PACKET) 1133 1108 goto not_drop; 1134 - } 1109 + netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1110 + goto drop; 1135 1111 } 1136 1112 } 1137 1113 length = skb->len; ··· 1273 1253 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); 1274 1254 } 1275 1255 } 1256 + 1257 + /* restart RX again after disabling due to high error rate */ 1258 + clear_bit(EVENT_RX_KILL, &dev->flags); 1276 1259 1277 1260 // waiting for all pending urbs to complete? 1278 1261 if (dev->wait) {
+3 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 150 150 if (ret & 1) { /* Link is up. */ 151 151 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", 152 152 adapter->link_speed); 153 - if (!netif_carrier_ok(adapter->netdev)) 154 - netif_carrier_on(adapter->netdev); 153 + netif_carrier_on(adapter->netdev); 155 154 156 155 if (affectTxQueue) { 157 156 for (i = 0; i < adapter->num_tx_queues; i++) ··· 159 160 } 160 161 } else { 161 162 netdev_info(adapter->netdev, "NIC Link is Down\n"); 162 - if (netif_carrier_ok(adapter->netdev)) 163 - netif_carrier_off(adapter->netdev); 163 + netif_carrier_off(adapter->netdev); 164 164 165 165 if (affectTxQueue) { 166 166 for (i = 0; i < adapter->num_tx_queues; i++) ··· 3058 3060 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3059 3061 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3060 3062 3063 + netif_carrier_off(netdev); 3061 3064 err = register_netdev(netdev); 3062 3065 3063 3066 if (err) {
+9 -16
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 1027 1027 static bool 1028 1028 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) 1029 1029 { 1030 - bool morepending = false; 1031 1030 struct bcma_device *core; 1032 1031 struct tx_status txstatus, *txs; 1033 1032 u32 s1, s2; ··· 1040 1041 txs = &txstatus; 1041 1042 core = wlc_hw->d11core; 1042 1043 *fatal = false; 1043 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1044 - while (!(*fatal) 1045 - && (s1 & TXS_V)) { 1046 - /* !give others some time to run! */ 1047 - if (n >= max_tx_num) { 1048 - morepending = true; 1049 - break; 1050 - } 1051 1044 1045 + while (n < max_tx_num) { 1046 + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1052 1047 if (s1 == 0xffffffff) { 1053 1048 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, 1054 1049 __func__); 1055 1050 *fatal = true; 1056 1051 return false; 1057 1052 } 1058 - s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1053 + /* only process when valid */ 1054 + if (!(s1 & TXS_V)) 1055 + break; 1059 1056 1057 + s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1060 1058 txs->status = s1 & TXS_STATUS_MASK; 1061 1059 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; 1062 1060 txs->sequence = s2 & TXS_SEQ_MASK; ··· 1061 1065 txs->lasttxtime = 0; 1062 1066 1063 1067 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); 1064 - 1065 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1068 + if (*fatal == true) 1069 + return false; 1066 1070 n++; 1067 1071 } 1068 1072 1069 - if (*fatal) 1070 - return false; 1071 - 1072 - return morepending; 1073 + return n >= max_tx_num; 1073 1074 } 1074 1075 1075 1076 static void brcms_c_tbtt(struct brcms_c_info *wlc)
+8 -19
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 1145 1145 next_reclaimed = ssn; 1146 1146 } 1147 1147 1148 + if (tid != IWL_TID_NON_QOS) { 1149 + priv->tid_data[sta_id][tid].next_reclaimed = 1150 + next_reclaimed; 1151 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1152 + next_reclaimed); 1153 + } 1154 + 1148 1155 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1149 1156 1150 1157 iwlagn_check_ratid_empty(priv, sta_id, tid); ··· 1202 1195 if (!is_agg) 1203 1196 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1204 1197 1205 - /* 1206 - * W/A for FW bug - the seq_ctl isn't updated when the 1207 - * queues are flushed. Fetch it from the packet itself 1208 - */ 1209 - if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { 1210 - next_reclaimed = le16_to_cpu(hdr->seq_ctrl); 1211 - next_reclaimed = 1212 - SEQ_TO_SN(next_reclaimed + 0x10); 1213 - } 1214 - 1215 1198 is_offchannel_skb = 1216 1199 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); 1217 1200 freed++; 1218 1201 } 1219 1202 1220 - if (tid != IWL_TID_NON_QOS) { 1221 - priv->tid_data[sta_id][tid].next_reclaimed = 1222 - next_reclaimed; 1223 - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1224 - next_reclaimed); 1225 - } 1226 - 1227 - if (!is_agg && freed != 1) 1228 - IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); 1203 + WARN_ON(!is_agg && freed != 1); 1229 1204 1230 1205 /* 1231 1206 * An offchannel frame can be send only on the AUX queue, where
+5 -4
drivers/net/wireless/mwifiex/scan.c
··· 1557 1557 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", 1558 1558 scan_rsp->number_of_sets); 1559 1559 ret = -1; 1560 - goto done; 1560 + goto check_next_scan; 1561 1561 } 1562 1562 1563 1563 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); ··· 1628 1628 if (!beacon_size || beacon_size > bytes_left) { 1629 1629 bss_info += bytes_left; 1630 1630 bytes_left = 0; 1631 - return -1; 1631 + ret = -1; 1632 + goto check_next_scan; 1632 1633 } 1633 1634 1634 1635 /* Initialize the current working beacon pointer for this BSS ··· 1685 1684 dev_err(priv->adapter->dev, 1686 1685 "%s: bytes left < IE length\n", 1687 1686 __func__); 1688 - goto done; 1687 + goto check_next_scan; 1689 1688 } 1690 1689 if (element_id == WLAN_EID_DS_PARAMS) { 1691 1690 channel = *(current_ptr + sizeof(struct ieee_types_header)); ··· 1748 1747 } 1749 1748 } 1750 1749 1750 + check_next_scan: 1751 1751 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 1752 1752 if (list_empty(&adapter->scan_pending_q)) { 1753 1753 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); ··· 1809 1807 } 1810 1808 } 1811 1809 1812 - done: 1813 1810 return ret; 1814 1811 } 1815 1812
+2 -2
drivers/net/wireless/rtlwifi/usb.c
··· 542 542 WARN_ON(skb_queue_empty(&rx_queue)); 543 543 while (!skb_queue_empty(&rx_queue)) { 544 544 _skb = skb_dequeue(&rx_queue); 545 - _rtl_usb_rx_process_agg(hw, skb); 546 - ieee80211_rx_irqsafe(hw, skb); 545 + _rtl_usb_rx_process_agg(hw, _skb); 546 + ieee80211_rx_irqsafe(hw, _skb); 547 547 } 548 548 } 549 549
+28 -13
drivers/vhost/net.c
··· 165 165 } 166 166 167 167 /* Caller must have TX VQ lock */ 168 - static void tx_poll_start(struct vhost_net *net, struct socket *sock) 168 + static int tx_poll_start(struct vhost_net *net, struct socket *sock) 169 169 { 170 + int ret; 171 + 170 172 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) 171 - return; 172 - vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 173 - net->tx_poll_state = VHOST_NET_POLL_STARTED; 173 + return 0; 174 + ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 175 + if (!ret) 176 + net->tx_poll_state = VHOST_NET_POLL_STARTED; 177 + return ret; 174 178 } 175 179 176 180 /* In case of DMA done not in order in lower device driver for some reason. ··· 646 642 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); 647 643 } 648 644 649 - static void vhost_net_enable_vq(struct vhost_net *n, 645 + static int vhost_net_enable_vq(struct vhost_net *n, 650 646 struct vhost_virtqueue *vq) 651 647 { 652 648 struct socket *sock; 649 + int ret; 653 650 654 651 sock = rcu_dereference_protected(vq->private_data, 655 652 lockdep_is_held(&vq->mutex)); 656 653 if (!sock) 657 - return; 654 + return 0; 658 655 if (vq == n->vqs + VHOST_NET_VQ_TX) { 659 656 n->tx_poll_state = VHOST_NET_POLL_STOPPED; 660 - tx_poll_start(n, sock); 657 + ret = tx_poll_start(n, sock); 661 658 } else 662 - vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 659 + ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 660 + 661 + return ret; 663 662 } 664 663 665 664 static struct socket *vhost_net_stop_vq(struct vhost_net *n, ··· 834 827 r = PTR_ERR(ubufs); 835 828 goto err_ubufs; 836 829 } 837 - oldubufs = vq->ubufs; 838 - vq->ubufs = ubufs; 830 + 839 831 vhost_net_disable_vq(n, vq); 840 832 rcu_assign_pointer(vq->private_data, sock); 841 - vhost_net_enable_vq(n, vq); 842 - 843 833 r = vhost_init_used(vq); 844 834 if (r) 845 - goto err_vq; 835 + goto err_used; 836 + r = vhost_net_enable_vq(n, vq); 837 + if (r) 838 + goto err_used; 839 + 840 + 
oldubufs = vq->ubufs; 841 + vq->ubufs = ubufs; 846 842 847 843 n->tx_packets = 0; 848 844 n->tx_zcopy_err = 0; ··· 869 859 mutex_unlock(&n->dev.mutex); 870 860 return 0; 871 861 862 + err_used: 863 + rcu_assign_pointer(vq->private_data, oldsock); 864 + vhost_net_enable_vq(n, vq); 865 + if (ubufs) 866 + vhost_ubuf_put_and_wait(ubufs); 872 867 err_ubufs: 873 868 fput(sock->file); 874 869 err_vq:
+15 -3
drivers/vhost/vhost.c
··· 77 77 init_poll_funcptr(&poll->table, vhost_poll_func); 78 78 poll->mask = mask; 79 79 poll->dev = dev; 80 + poll->wqh = NULL; 80 81 81 82 vhost_work_init(&poll->work, fn); 82 83 } 83 84 84 85 /* Start polling a file. We add ourselves to file's wait queue. The caller must 85 86 * keep a reference to a file until after vhost_poll_stop is called. */ 86 - void vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 + int vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 88 { 88 89 unsigned long mask; 90 + int ret = 0; 89 91 90 92 mask = file->f_op->poll(file, &poll->table); 91 93 if (mask) 92 94 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 95 + if (mask & POLLERR) { 96 + if (poll->wqh) 97 + remove_wait_queue(poll->wqh, &poll->wait); 98 + ret = -EINVAL; 99 + } 100 + 101 + return ret; 93 102 } 94 103 95 104 /* Stop polling a file. After this function returns, it becomes safe to drop the 96 105 * file reference. You must also flush afterwards. */ 97 106 void vhost_poll_stop(struct vhost_poll *poll) 98 107 { 99 - remove_wait_queue(poll->wqh, &poll->wait); 108 + if (poll->wqh) { 109 + remove_wait_queue(poll->wqh, &poll->wait); 110 + poll->wqh = NULL; 111 + } 100 112 } 101 113 102 114 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, ··· 804 792 fput(filep); 805 793 806 794 if (pollstart && vq->handle_kick) 807 - vhost_poll_start(&vq->poll, vq->kick); 795 + r = vhost_poll_start(&vq->poll, vq->kick); 808 796 809 797 mutex_unlock(&vq->mutex); 810 798
+1 -1
drivers/vhost/vhost.h
··· 42 42 43 43 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 44 44 unsigned long mask, struct vhost_dev *dev); 45 - void vhost_poll_start(struct vhost_poll *poll, struct file *file); 45 + int vhost_poll_start(struct vhost_poll *poll, struct file *file); 46 46 void vhost_poll_stop(struct vhost_poll *poll); 47 47 void vhost_poll_flush(struct vhost_poll *poll); 48 48 void vhost_poll_queue(struct vhost_poll *poll);
+2
include/linux/usb/usbnet.h
··· 33 33 wait_queue_head_t *wait; 34 34 struct mutex phy_mutex; 35 35 unsigned char suspend_count; 36 + unsigned char pkt_cnt, pkt_err; 36 37 37 38 /* i/o info: pipes etc */ 38 39 unsigned in, out; ··· 71 70 # define EVENT_DEV_OPEN 7 72 71 # define EVENT_DEVICE_REPORT_IDLE 8 73 72 # define EVENT_NO_RUNTIME_PM 9 73 + # define EVENT_RX_KILL 10 74 74 }; 75 75 76 76 static inline struct usb_driver *driver_of(struct usb_interface *intf)
+10 -10
include/net/transp_v6.h
··· 34 34 struct sockaddr *uaddr, 35 35 int addr_len); 36 36 37 - extern int datagram_recv_ctl(struct sock *sk, 38 - struct msghdr *msg, 39 - struct sk_buff *skb); 37 + extern int ip6_datagram_recv_ctl(struct sock *sk, 38 + struct msghdr *msg, 39 + struct sk_buff *skb); 40 40 41 - extern int datagram_send_ctl(struct net *net, 42 - struct sock *sk, 43 - struct msghdr *msg, 44 - struct flowi6 *fl6, 45 - struct ipv6_txoptions *opt, 46 - int *hlimit, int *tclass, 47 - int *dontfrag); 41 + extern int ip6_datagram_send_ctl(struct net *net, 42 + struct sock *sk, 43 + struct msghdr *msg, 44 + struct flowi6 *fl6, 45 + struct ipv6_txoptions *opt, 46 + int *hlimit, int *tclass, 47 + int *dontfrag); 48 48 49 49 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) 50 50
+6 -3
net/core/pktgen.c
··· 1790 1790 return -EFAULT; 1791 1791 i += len; 1792 1792 mutex_lock(&pktgen_thread_lock); 1793 - pktgen_add_device(t, f); 1793 + ret = pktgen_add_device(t, f); 1794 1794 mutex_unlock(&pktgen_thread_lock); 1795 - ret = count; 1796 - sprintf(pg_result, "OK: add_device=%s", f); 1795 + if (!ret) { 1796 + ret = count; 1797 + sprintf(pg_result, "OK: add_device=%s", f); 1798 + } else 1799 + sprintf(pg_result, "ERROR: can not add device %s", f); 1797 1800 goto out; 1798 1801 } 1799 1802
+1 -1
net/core/skbuff.c
··· 686 686 new->network_header = old->network_header; 687 687 new->mac_header = old->mac_header; 688 688 new->inner_transport_header = old->inner_transport_header; 689 - new->inner_network_header = old->inner_transport_header; 689 + new->inner_network_header = old->inner_network_header; 690 690 skb_dst_copy(new, old); 691 691 new->rxhash = old->rxhash; 692 692 new->ooo_okay = old->ooo_okay;
+10 -4
net/ipv4/tcp_cong.c
··· 310 310 { 311 311 int cnt; /* increase in packets */ 312 312 unsigned int delta = 0; 313 + u32 snd_cwnd = tp->snd_cwnd; 314 + 315 + if (unlikely(!snd_cwnd)) { 316 + pr_err_once("snd_cwnd is nul, please report this bug.\n"); 317 + snd_cwnd = 1U; 318 + } 313 319 314 320 /* RFC3465: ABC Slow start 315 321 * Increase only after a full MSS of bytes is acked ··· 330 324 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 331 325 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 332 326 else 333 - cnt = tp->snd_cwnd; /* exponential increase */ 327 + cnt = snd_cwnd; /* exponential increase */ 334 328 335 329 /* RFC3465: ABC 336 330 * We MAY increase by 2 if discovered delayed ack ··· 340 334 tp->bytes_acked = 0; 341 335 342 336 tp->snd_cwnd_cnt += cnt; 343 - while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 344 - tp->snd_cwnd_cnt -= tp->snd_cwnd; 337 + while (tp->snd_cwnd_cnt >= snd_cwnd) { 338 + tp->snd_cwnd_cnt -= snd_cwnd; 345 339 delta++; 346 340 } 347 - tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); 341 + tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); 348 342 } 349 343 EXPORT_SYMBOL_GPL(tcp_slow_start); 350 344
+3 -3
net/ipv4/tcp_input.c
··· 3482 3482 ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) 3483 3483 tp->undo_marker = 0; 3484 3484 3485 - if (!before(tp->snd_una, tp->frto_highmark)) { 3485 + if (!before(tp->snd_una, tp->frto_highmark) || 3486 + !tcp_packets_in_flight(tp)) { 3486 3487 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); 3487 3488 return true; 3488 3489 } ··· 5648 5647 * the remote receives only the retransmitted (regular) SYNs: either 5649 5648 * the original SYN-data or the corresponding SYN-ACK is lost. 5650 5649 */ 5651 - syn_drop = (cookie->len <= 0 && data && 5652 - inet_csk(sk)->icsk_retransmits); 5650 + syn_drop = (cookie->len <= 0 && data && tp->total_retrans); 5653 5651 5654 5652 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5655 5653
+5 -1
net/ipv4/tcp_ipv4.c
··· 496 496 * errors returned from accept(). 497 497 */ 498 498 inet_csk_reqsk_queue_drop(sk, req, prev); 499 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 499 500 goto out; 500 501 501 502 case TCP_SYN_SENT: ··· 1502 1501 * clogging syn queue with openreqs with exponentially increasing 1503 1502 * timeout. 1504 1503 */ 1505 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1504 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 1505 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1506 1506 goto drop; 1507 + } 1507 1508 1508 1509 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1509 1510 if (!req) ··· 1670 1667 drop_and_free: 1671 1668 reqsk_free(req); 1672 1669 drop: 1670 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1673 1671 return 0; 1674 1672 } 1675 1673 EXPORT_SYMBOL(tcp_v4_conn_request);
+1
net/ipv6/addrconf.c
··· 1656 1656 if (dev->addr_len != IEEE802154_ADDR_LEN) 1657 1657 return -1; 1658 1658 memcpy(eui, dev->dev_addr, 8); 1659 + eui[0] ^= 2; 1659 1660 return 0; 1660 1661 } 1661 1662
+9 -7
net/ipv6/datagram.c
··· 380 380 if (skb->protocol == htons(ETH_P_IPV6)) { 381 381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 382 382 if (np->rxopt.all) 383 - datagram_recv_ctl(sk, msg, skb); 383 + ip6_datagram_recv_ctl(sk, msg, skb); 384 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 385 385 sin->sin6_scope_id = IP6CB(skb)->iif; 386 386 } else { ··· 468 468 } 469 469 470 470 471 - int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471 + int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, 472 + struct sk_buff *skb) 472 473 { 473 474 struct ipv6_pinfo *np = inet6_sk(sk); 474 475 struct inet6_skb_parm *opt = IP6CB(skb); ··· 599 598 } 600 599 return 0; 601 600 } 601 + EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); 602 602 603 - int datagram_send_ctl(struct net *net, struct sock *sk, 604 - struct msghdr *msg, struct flowi6 *fl6, 605 - struct ipv6_txoptions *opt, 606 - int *hlimit, int *tclass, int *dontfrag) 603 + int ip6_datagram_send_ctl(struct net *net, struct sock *sk, 604 + struct msghdr *msg, struct flowi6 *fl6, 605 + struct ipv6_txoptions *opt, 606 + int *hlimit, int *tclass, int *dontfrag) 607 607 { 608 608 struct in6_pktinfo *src_info; 609 609 struct cmsghdr *cmsg; ··· 874 872 exit_f: 875 873 return err; 876 874 } 877 - EXPORT_SYMBOL_GPL(datagram_send_ctl); 875 + EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+2 -2
net/ipv6/ip6_flowlabel.c
··· 390 390 msg.msg_control = (void*)(fl->opt+1); 391 391 memset(&flowi6, 0, sizeof(flowi6)); 392 392 393 - err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 394 - &junk, &junk); 393 + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, 394 + &junk, &junk, &junk); 395 395 if (err) 396 396 goto done; 397 397 err = -EINVAL;
+3 -3
net/ipv6/ipv6_sockglue.c
··· 476 476 msg.msg_controllen = optlen; 477 477 msg.msg_control = (void*)(opt+1); 478 478 479 - retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 480 - &junk); 479 + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, 480 + &junk, &junk); 481 481 if (retv) 482 482 goto done; 483 483 update: ··· 1002 1002 release_sock(sk); 1003 1003 1004 1004 if (skb) { 1005 - int err = datagram_recv_ctl(sk, &msg, skb); 1005 + int err = ip6_datagram_recv_ctl(sk, &msg, skb); 1006 1006 kfree_skb(skb); 1007 1007 if (err) 1008 1008 return err;
+3 -3
net/ipv6/raw.c
··· 507 507 sock_recv_ts_and_drops(msg, sk, skb); 508 508 509 509 if (np->rxopt.all) 510 - datagram_recv_ctl(sk, msg, skb); 510 + ip6_datagram_recv_ctl(sk, msg, skb); 511 511 512 512 err = copied; 513 513 if (flags & MSG_TRUNC) ··· 822 822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 823 823 opt->tot_len = sizeof(struct ipv6_txoptions); 824 824 825 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 - &hlimit, &tclass, &dontfrag); 825 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 + &hlimit, &tclass, &dontfrag); 827 827 if (err < 0) { 828 828 fl6_sock_release(flowlabel); 829 829 return err;
+5 -1
net/ipv6/tcp_ipv6.c
··· 423 423 } 424 424 425 425 inet_csk_reqsk_queue_drop(sk, req, prev); 426 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 426 427 goto out; 427 428 428 429 case TCP_SYN_SENT: ··· 960 959 goto drop; 961 960 } 962 961 963 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 962 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 963 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 964 964 goto drop; 965 + } 965 966 966 967 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 967 968 if (req == NULL) ··· 1112 1109 drop_and_free: 1113 1110 reqsk_free(req); 1114 1111 drop: 1112 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1115 1113 return 0; /* don't send reset */ 1116 1114 } 1117 1115
+3 -3
net/ipv6/udp.c
··· 467 467 ip_cmsg_recv(msg, skb); 468 468 } else { 469 469 if (np->rxopt.all) 470 - datagram_recv_ctl(sk, msg, skb); 470 + ip6_datagram_recv_ctl(sk, msg, skb); 471 471 } 472 472 473 473 err = copied; ··· 1143 1143 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1144 1144 opt->tot_len = sizeof(*opt); 1145 1145 1146 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1147 - &hlimit, &tclass, &dontfrag); 1146 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1147 + &hlimit, &tclass, &dontfrag); 1148 1148 if (err < 0) { 1149 1149 fl6_sock_release(flowlabel); 1150 1150 return err;
+65 -11
net/l2tp/l2tp_core.c
··· 168 168 169 169 } 170 170 171 + /* Lookup the tunnel socket, possibly involving the fs code if the socket is 172 + * owned by userspace. A struct sock returned from this function must be 173 + * released using l2tp_tunnel_sock_put once you're done with it. 174 + */ 175 + struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) 176 + { 177 + int err = 0; 178 + struct socket *sock = NULL; 179 + struct sock *sk = NULL; 180 + 181 + if (!tunnel) 182 + goto out; 183 + 184 + if (tunnel->fd >= 0) { 185 + /* Socket is owned by userspace, who might be in the process 186 + * of closing it. Look the socket up using the fd to ensure 187 + * consistency. 188 + */ 189 + sock = sockfd_lookup(tunnel->fd, &err); 190 + if (sock) 191 + sk = sock->sk; 192 + } else { 193 + /* Socket is owned by kernelspace */ 194 + sk = tunnel->sock; 195 + } 196 + 197 + out: 198 + return sk; 199 + } 200 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); 201 + 202 + /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ 203 + void l2tp_tunnel_sock_put(struct sock *sk) 204 + { 205 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 206 + if (tunnel) { 207 + if (tunnel->fd >= 0) { 208 + /* Socket is owned by userspace */ 209 + sockfd_put(sk->sk_socket); 210 + } 211 + sock_put(sk); 212 + } 213 + } 214 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215 + 171 216 /* Lookup a session by id in the global session list 172 217 */ 173 218 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) ··· 1652 1607 tunnel->old_sk_destruct = sk->sk_destruct; 1653 1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1654 1609 tunnel->sock = sk; 1610 + tunnel->fd = fd; 1655 1611 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1656 1612 1657 1613 sk->sk_allocation = GFP_ATOMIC; ··· 1688 1642 */ 1689 1643 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1690 1644 { 1691 - int err = 0; 1692 - struct socket *sock = tunnel->sock ? 
tunnel->sock->sk_socket : NULL; 1645 + int err = -EBADF; 1646 + struct socket *sock = NULL; 1647 + struct sock *sk = NULL; 1648 + 1649 + sk = l2tp_tunnel_sock_lookup(tunnel); 1650 + if (!sk) 1651 + goto out; 1652 + 1653 + sock = sk->sk_socket; 1654 + BUG_ON(!sock); 1693 1655 1694 1656 /* Force the tunnel socket to close. This will eventually 1695 1657 * cause the tunnel to be deleted via the normal socket close 1696 1658 * mechanisms when userspace closes the tunnel socket. 1697 1659 */ 1698 - if (sock != NULL) { 1699 - err = inet_shutdown(sock, 2); 1660 + err = inet_shutdown(sock, 2); 1700 1661 1701 - /* If the tunnel's socket was created by the kernel, 1702 - * close the socket here since the socket was not 1703 - * created by userspace. 1704 - */ 1705 - if (sock->file == NULL) 1706 - err = inet_release(sock); 1707 - } 1662 + /* If the tunnel's socket was created by the kernel, 1663 + * close the socket here since the socket was not 1664 + * created by userspace. 1665 + */ 1666 + if (sock->file == NULL) 1667 + err = inet_release(sock); 1708 1668 1669 + l2tp_tunnel_sock_put(sk); 1670 + out: 1709 1671 return err; 1710 1672 } 1711 1673 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+4 -1
net/l2tp/l2tp_core.h
··· 188 188 int (*recv_payload_hook)(struct sk_buff *skb); 189 189 void (*old_sk_destruct)(struct sock *); 190 190 struct sock *sock; /* Parent socket */ 191 - int fd; 191 + int fd; /* Parent fd, if tunnel socket 192 + * was created by userspace */ 192 193 193 194 uint8_t priv[0]; /* private data */ 194 195 }; ··· 229 228 return tunnel; 230 229 } 231 230 231 + extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); 232 + extern void l2tp_tunnel_sock_put(struct sock *sk); 232 233 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 233 234 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 234 235 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+5 -5
net/l2tp/l2tp_ip6.c
··· 554 554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 555 555 opt->tot_len = sizeof(struct ipv6_txoptions); 556 556 557 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 - &hlimit, &tclass, &dontfrag); 557 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 + &hlimit, &tclass, &dontfrag); 559 559 if (err < 0) { 560 560 fl6_sock_release(flowlabel); 561 561 return err; ··· 646 646 struct msghdr *msg, size_t len, int noblock, 647 647 int flags, int *addr_len) 648 648 { 649 - struct inet_sock *inet = inet_sk(sk); 649 + struct ipv6_pinfo *np = inet6_sk(sk); 650 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 651 651 size_t copied = 0; 652 652 int err = -EOPNOTSUPP; ··· 688 688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 689 689 } 690 690 691 - if (inet->cmsg_flags) 692 - ip_cmsg_recv(msg, skb); 691 + if (np->rxopt.all) 692 + ip6_datagram_recv_ctl(sk, msg, skb); 693 693 694 694 if (flags & MSG_TRUNC) 695 695 copied = skb->len;
+6 -4
net/packet/af_packet.c
··· 2361 2361 2362 2362 packet_flush_mclist(sk); 2363 2363 2364 - memset(&req_u, 0, sizeof(req_u)); 2365 - 2366 - if (po->rx_ring.pg_vec) 2364 + if (po->rx_ring.pg_vec) { 2365 + memset(&req_u, 0, sizeof(req_u)); 2367 2366 packet_set_ring(sk, &req_u, 1, 0); 2367 + } 2368 2368 2369 - if (po->tx_ring.pg_vec) 2369 + if (po->tx_ring.pg_vec) { 2370 + memset(&req_u, 0, sizeof(req_u)); 2370 2371 packet_set_ring(sk, &req_u, 1, 1); 2372 + } 2371 2373 2372 2374 fanout_release(sk); 2373 2375
+6 -6
net/sched/sch_netem.c
··· 438 438 if (q->rate) { 439 439 struct sk_buff_head *list = &sch->q; 440 440 441 - delay += packet_len_2_sched_time(skb->len, q); 442 - 443 441 if (!skb_queue_empty(list)) { 444 442 /* 445 - * Last packet in queue is reference point (now). 446 - * First packet in queue is already in flight, 447 - * calculate this time bonus and substract 443 + * Last packet in queue is reference point (now), 444 + * calculate this time bonus and subtract 448 445 * from delay. 449 446 */ 450 - delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 + delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; 448 + delay = max_t(psched_tdiff_t, 0, delay); 451 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 452 450 } 451 + 452 + delay += packet_len_2_sched_time(skb->len, q); 453 453 } 454 454 455 455 cb->time_to_send = now + delay;
+1 -1
net/sunrpc/svcsock.c
··· 465 465 } 466 466 467 467 /* 468 - * See net/ipv6/datagram.c : datagram_recv_ctl 468 + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl 469 469 */ 470 470 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 471 471 struct cmsghdr *cmh)
+1 -1
net/wireless/scan.c
··· 1358 1358 &iwe, IW_EV_UINT_LEN); 1359 1359 } 1360 1360 1361 - buf = kmalloc(30, GFP_ATOMIC); 1361 + buf = kmalloc(31, GFP_ATOMIC); 1362 1362 if (buf) { 1363 1363 memset(&iwe, 0, sizeof(iwe)); 1364 1364 iwe.cmd = IWEVCUSTOM;