Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Various VTI tunnel (mark handling, PMTU) bug fixes from Alexander
Duyck and Steffen Klassert.

2) Revert ethtool PHY query change; it wasn't correct. The PHY address
selected by the driver running the PHY to MAC connection decides
what PHY address GET ethtool operations return information from.

3) Fix handling of sequence number bits for encryption IV generation in
ESP driver, from Herbert Xu.

4) UDP can return -EAGAIN when we hit a bad checksum on receive, even
when there are other packets in the receive queue, which is wrong.
Just respect the error returned from the generic socket recv
datagram helper. From Eric Dumazet.

5) Fix BNA driver firmware loading on big-endian systems, from Ivan
Vecera.

6) Fix a regression where we were inheriting the congestion control of
the listening socket for new connections; the intended behavior
was always to use the default in this case. From Neal Cardwell.

7) Fix NULL deref in brcmfmac driver, from Arend van Spriel.

8) OTP parsing fix in iwlwifi from Liad Kaufman.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
vti6: Add pmtu handling to vti6_xmit.
Revert "net: core: 'ethtool' issue with querying phy settings"
bnx2x: Move statistics implementation into semaphores
xen: netback: read hotplug script once at start of day.
xen: netback: fix printf format string warning
Revert "netfilter: ensure number of counters is >0 in do_replace()"
net: dsa: Properly propagate errors from dsa_switch_setup_one
tcp: fix child sockets to use system default congestion control if not set
udp: fix behavior of wrong checksums
sfc: free multiple Rx buffers when required
bna: fix soft lock-up during firmware initialization failure
bna: remove unreasonable iocpf timer start
bna: fix firmware loading on big-endian machines
bridge: fix br_multicast_query_expired() bug
via-rhine: Resigning as maintainer
brcmfmac: avoid null pointer access when brcmf_msgbuf_get_pktid() fails
mac80211: Fix mac80211.h docbook comments
iwlwifi: nvm: fix otp parsing in 8000 hw family
iwlwifi: pcie: fix tracking of cmd_in_flight
ip_vti/ip6_vti: Preserve skb->mark after rcv_cb call
...

+174 -119
+1 -2
MAINTAINERS
··· 10587 10587 F: include/uapi/linux/virtio_input.h 10588 10588 10589 10589 VIA RHINE NETWORK DRIVER 10590 - M: Roger Luethi <rl@hellgate.ch> 10591 - S: Maintained 10590 + S: Orphan 10592 10591 F: drivers/net/ethernet/via/via-rhine.c 10593 10592 10594 10593 VIA SD/MMC CARD CONTROLLER DRIVER
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 1774 1774 int stats_state; 1775 1775 1776 1776 /* used for synchronization of concurrent threads statistics handling */ 1777 - struct mutex stats_lock; 1777 + struct semaphore stats_lock; 1778 1778 1779 1779 /* used by dmae command loader */ 1780 1780 struct dmae_command stats_dmae;
+5 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12054 12054 mutex_init(&bp->port.phy_mutex); 12055 12055 mutex_init(&bp->fw_mb_mutex); 12056 12056 mutex_init(&bp->drv_info_mutex); 12057 - mutex_init(&bp->stats_lock); 12057 + sema_init(&bp->stats_lock, 1); 12058 12058 bp->drv_info_mng_owner = false; 12059 12059 12060 12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); ··· 13690 13690 cancel_delayed_work_sync(&bp->sp_task); 13691 13691 cancel_delayed_work_sync(&bp->period_task); 13692 13692 13693 - mutex_lock(&bp->stats_lock); 13694 - bp->stats_state = STATS_STATE_DISABLED; 13695 - mutex_unlock(&bp->stats_lock); 13693 + if (!down_timeout(&bp->stats_lock, HZ / 10)) { 13694 + bp->stats_state = STATS_STATE_DISABLED; 13695 + up(&bp->stats_lock); 13696 + } 13696 13697 13697 13698 bnx2x_save_statistics(bp); 13698 13699
+14 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
··· 1372 1372 * that context in case someone is in the middle of a transition. 1373 1373 * For other events, wait a bit until lock is taken. 1374 1374 */ 1375 - if (!mutex_trylock(&bp->stats_lock)) { 1375 + if (down_trylock(&bp->stats_lock)) { 1376 1376 if (event == STATS_EVENT_UPDATE) 1377 1377 return; 1378 1378 1379 1379 DP(BNX2X_MSG_STATS, 1380 1380 "Unlikely stats' lock contention [event %d]\n", event); 1381 - mutex_lock(&bp->stats_lock); 1381 + if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { 1382 + BNX2X_ERR("Failed to take stats lock [event %d]\n", 1383 + event); 1384 + return; 1385 + } 1382 1386 } 1383 1387 1384 1388 bnx2x_stats_stm[state][event].action(bp); 1385 1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1386 1390 1387 - mutex_unlock(&bp->stats_lock); 1391 + up(&bp->stats_lock); 1388 1392 1389 1393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1390 1394 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", ··· 1974 1970 /* Wait for statistics to end [while blocking further requests], 1975 1971 * then run supplied function 'safely'. 1976 1972 */ 1977 - mutex_lock(&bp->stats_lock); 1973 + rc = down_timeout(&bp->stats_lock, HZ / 10); 1974 + if (unlikely(rc)) { 1975 + BNX2X_ERR("Failed to take statistics lock for safe execution\n"); 1976 + goto out_no_lock; 1977 + } 1978 1978 1979 1979 bnx2x_stats_comp(bp); 1980 1980 while (bp->stats_pending && cnt--) ··· 1996 1988 /* No need to restart statistics - if they're enabled, the timer 1997 1989 * will restart the statistics. 1998 1990 */ 1999 - mutex_unlock(&bp->stats_lock); 2000 - 1991 + up(&bp->stats_lock); 1992 + out_no_lock: 2001 1993 return rc; 2002 1994 }
+2 -2
drivers/net/ethernet/brocade/bna/bfa_ioc.c
··· 2414 2414 if (status == BFA_STATUS_OK) 2415 2415 bfa_ioc_lpu_start(ioc); 2416 2416 else 2417 - bfa_nw_iocpf_timeout(ioc); 2417 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2418 2418 2419 2419 return status; 2420 2420 } ··· 3029 3029 } 3030 3030 3031 3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { 3032 - bfa_nw_iocpf_timeout(ioc); 3032 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 3033 3033 } else { 3034 3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 3035 3035 mod_timer(&ioc->iocpf_timer, jiffies +
-4
drivers/net/ethernet/brocade/bna/bnad.c
··· 3701 3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, 3702 3702 ((unsigned long)bnad)); 3703 3703 3704 - /* Now start the timer before calling IOC */ 3705 - mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, 3706 - jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3707 - 3708 3704 /* 3709 3705 * Start the chip 3710 3706 * If the call back comes with error, we bail out.
+7
drivers/net/ethernet/brocade/bna/cna_fwimg.c
··· 30 30 u32 *bfi_image_size, char *fw_name) 31 31 { 32 32 const struct firmware *fw; 33 + u32 n; 33 34 34 35 if (request_firmware(&fw, fw_name, &pdev->dev)) { 35 36 pr_alert("Can't locate firmware %s\n", fw_name); ··· 40 39 *bfi_image = (u32 *)fw->data; 41 40 *bfi_image_size = fw->size/sizeof(u32); 42 41 bfi_fw = fw; 42 + 43 + /* Convert loaded firmware to host order as it is stored in file 44 + * as sequence of LE32 integers. 45 + */ 46 + for (n = 0; n < *bfi_image_size; n++) 47 + le32_to_cpus(*bfi_image + n); 43 48 44 49 return *bfi_image; 45 50 error:
+25 -17
drivers/net/ethernet/sfc/rx.c
··· 224 224 } 225 225 } 226 226 227 - static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) 227 + static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, 228 + struct efx_rx_buffer *rx_buf, 229 + unsigned int num_bufs) 228 230 { 229 - if (rx_buf->page) { 230 - put_page(rx_buf->page); 231 - rx_buf->page = NULL; 232 - } 231 + do { 232 + if (rx_buf->page) { 233 + put_page(rx_buf->page); 234 + rx_buf->page = NULL; 235 + } 236 + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 237 + } while (--num_bufs); 233 238 } 234 239 235 240 /* Attempt to recycle the page if there is an RX recycle ring; the page can ··· 283 278 /* If this is the last buffer in a page, unmap and free it. */ 284 279 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 285 280 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 286 - efx_free_rx_buffer(rx_buf); 281 + efx_free_rx_buffers(rx_queue, rx_buf, 1); 287 282 } 288 283 rx_buf->page = NULL; 289 284 } ··· 309 304 310 305 efx_recycle_rx_pages(channel, rx_buf, n_frags); 311 306 312 - do { 313 - efx_free_rx_buffer(rx_buf); 314 - rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 315 - } while (--n_frags); 307 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 316 308 } 317 309 318 310 /** ··· 433 431 434 432 skb = napi_get_frags(napi); 435 433 if (unlikely(!skb)) { 436 - while (n_frags--) { 437 - put_page(rx_buf->page); 438 - rx_buf->page = NULL; 439 - rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 440 - } 434 + struct efx_rx_queue *rx_queue; 435 + 436 + rx_queue = efx_channel_get_rx_queue(channel); 437 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 441 438 return; 442 439 } 443 440 ··· 623 622 624 623 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); 625 624 if (unlikely(skb == NULL)) { 626 - efx_free_rx_buffer(rx_buf); 625 + struct efx_rx_queue *rx_queue; 626 + 627 + rx_queue = efx_channel_get_rx_queue(channel); 628 + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 627 629 return; 628 630 } 629 631 skb_record_rx_queue(skb, 
channel->rx_queue.core_index); ··· 665 661 * loopback layer, and free the rx_buf here 666 662 */ 667 663 if (unlikely(efx->loopback_selftest)) { 664 + struct efx_rx_queue *rx_queue; 665 + 668 666 efx_loopback_rx_packet(efx, eh, rx_buf->len); 669 - efx_free_rx_buffer(rx_buf); 667 + rx_queue = efx_channel_get_rx_queue(channel); 668 + efx_free_rx_buffers(rx_queue, rx_buf, 669 + channel->rx_pkt_n_frags); 670 670 goto out; 671 671 } 672 672
+5 -7
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
··· 511 511 msgbuf->rx_pktids, 512 512 msgbuf->ioctl_resp_pktid); 513 513 if (msgbuf->ioctl_resp_ret_len != 0) { 514 - if (!skb) { 515 - brcmf_err("Invalid packet id idx recv'd %d\n", 516 - msgbuf->ioctl_resp_pktid); 514 + if (!skb) 517 515 return -EBADF; 518 - } 516 + 519 517 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 520 518 len : msgbuf->ioctl_resp_ret_len); 521 519 } ··· 872 874 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 873 875 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 874 876 msgbuf->tx_pktids, idx); 875 - if (!skb) { 876 - brcmf_err("Invalid packet id idx recv'd %d\n", idx); 877 + if (!skb) 877 878 return; 878 - } 879 879 880 880 set_bit(flowid, msgbuf->txstatus_done_map); 881 881 commonring = msgbuf->flowrings[flowid]; ··· 1152 1156 1153 1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1154 1158 msgbuf->rx_pktids, idx); 1159 + if (!skb) 1160 + return; 1155 1161 1156 1162 if (data_offset) 1157 1163 skb_pull(skb, data_offset);
+1 -1
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
··· 471 471 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 472 472 return le16_to_cpup(nvm_sw + RADIO_CFG); 473 473 474 - return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 474 + return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); 475 475 476 476 } 477 477
+3 -3
drivers/net/wireless/iwlwifi/pcie/internal.h
··· 1 1 /****************************************************************************** 2 2 * 3 - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 3 + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 4 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 5 * 6 6 * Portions of this file are derived from the ipw3945 project, as well 7 7 * as portions of the ieee80211 subsystem header files. ··· 320 320 321 321 /*protect hw register */ 322 322 spinlock_t reg_lock; 323 - bool cmd_in_flight; 323 + bool cmd_hold_nic_awake; 324 324 bool ref_cmd_in_flight; 325 325 326 326 /* protect ref counter */
+2 -2
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 1372 1372 1373 1373 spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1374 1374 1375 - if (trans_pcie->cmd_in_flight) 1375 + if (trans_pcie->cmd_hold_nic_awake) 1376 1376 goto out; 1377 1377 1378 1378 /* this bit wakes up the NIC */ ··· 1438 1438 */ 1439 1439 __acquire(&trans_pcie->reg_lock); 1440 1440 1441 - if (trans_pcie->cmd_in_flight) 1441 + if (trans_pcie->cmd_hold_nic_awake) 1442 1442 goto out; 1443 1443 1444 1444 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+9 -14
drivers/net/wireless/iwlwifi/pcie/tx.c
··· 1039 1039 iwl_trans_pcie_ref(trans); 1040 1040 } 1041 1041 1042 - if (trans_pcie->cmd_in_flight) 1043 - return 0; 1044 - 1045 - trans_pcie->cmd_in_flight = true; 1046 - 1047 1042 /* 1048 1043 * wake up the NIC to make sure that the firmware will see the host 1049 1044 * command - we will let the NIC sleep once all the host commands 1050 1045 * returned. This needs to be done only on NICs that have 1051 1046 * apmg_wake_up_wa set. 1052 1047 */ 1053 - if (trans->cfg->base_params->apmg_wake_up_wa) { 1048 + if (trans->cfg->base_params->apmg_wake_up_wa && 1049 + !trans_pcie->cmd_hold_nic_awake) { 1054 1050 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1055 1051 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1056 1052 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) ··· 1060 1064 if (ret < 0) { 1061 1065 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1062 1066 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1063 - trans_pcie->cmd_in_flight = false; 1064 1067 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1065 1068 return -EIO; 1066 1069 } 1070 + trans_pcie->cmd_hold_nic_awake = true; 1067 1071 } 1068 1072 1069 1073 return 0; ··· 1081 1085 iwl_trans_pcie_unref(trans); 1082 1086 } 1083 1087 1084 - if (WARN_ON(!trans_pcie->cmd_in_flight)) 1085 - return 0; 1088 + if (trans->cfg->base_params->apmg_wake_up_wa) { 1089 + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 1090 + return 0; 1086 1091 1087 - trans_pcie->cmd_in_flight = false; 1088 - 1089 - if (trans->cfg->base_params->apmg_wake_up_wa) 1092 + trans_pcie->cmd_hold_nic_awake = false; 1090 1093 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1091 - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1092 - 1094 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1095 + } 1093 1096 return 0; 1094 1097 } 1095 1098
+1 -1
drivers/net/xen-netback/netback.c
··· 1250 1250 netdev_err(queue->vif->dev, 1251 1251 "txreq.offset: %x, size: %u, end: %lu\n", 1252 1252 txreq.offset, txreq.size, 1253 - (txreq.offset&~PAGE_MASK) + txreq.size); 1253 + (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); 1254 1254 xenvif_fatal_tx_err(queue->vif); 1255 1255 break; 1256 1256 }
+19 -14
drivers/net/xen-netback/xenbus.c
··· 34 34 enum xenbus_state frontend_state; 35 35 struct xenbus_watch hotplug_status_watch; 36 36 u8 have_hotplug_status_watch:1; 37 + 38 + const char *hotplug_script; 37 39 }; 38 40 39 41 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); ··· 240 238 xenvif_free(be->vif); 241 239 be->vif = NULL; 242 240 } 241 + kfree(be->hotplug_script); 243 242 kfree(be); 244 243 dev_set_drvdata(&dev->dev, NULL); 245 244 return 0; ··· 258 255 struct xenbus_transaction xbt; 259 256 int err; 260 257 int sg; 258 + const char *script; 261 259 struct backend_info *be = kzalloc(sizeof(struct backend_info), 262 260 GFP_KERNEL); 263 261 if (!be) { ··· 351 347 if (err) 352 348 pr_debug("Error writing multi-queue-max-queues\n"); 353 349 350 + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); 351 + if (IS_ERR(script)) { 352 + err = PTR_ERR(script); 353 + xenbus_dev_fatal(dev, err, "reading script"); 354 + goto fail; 355 + } 356 + 357 + be->hotplug_script = script; 358 + 354 359 err = xenbus_switch_state(dev, XenbusStateInitWait); 355 360 if (err) 356 361 goto fail; ··· 392 379 struct kobj_uevent_env *env) 393 380 { 394 381 struct backend_info *be = dev_get_drvdata(&xdev->dev); 395 - char *val; 396 382 397 - val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 398 - if (IS_ERR(val)) { 399 - int err = PTR_ERR(val); 400 - xenbus_dev_fatal(xdev, err, "reading script"); 401 - return err; 402 - } else { 403 - if (add_uevent_var(env, "script=%s", val)) { 404 - kfree(val); 405 - return -ENOMEM; 406 - } 407 - kfree(val); 408 - } 383 + if (!be) 384 + return 0; 409 385 410 - if (!be || !be->vif) 386 + if (add_uevent_var(env, "script=%s", be->hotplug_script)) 387 + return -ENOMEM; 388 + 389 + if (!be->vif) 411 390 return 0; 412 391 413 392 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
+2 -1
include/net/inet_connection_sock.h
··· 98 98 const struct tcp_congestion_ops *icsk_ca_ops; 99 99 const struct inet_connection_sock_af_ops *icsk_af_ops; 100 100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 101 - __u8 icsk_ca_state:7, 101 + __u8 icsk_ca_state:6, 102 + icsk_ca_setsockopt:1, 102 103 icsk_ca_dst_locked:1; 103 104 __u8 icsk_retransmits; 104 105 __u8 icsk_pending;
+4 -3
include/net/mac80211.h
··· 354 354 }; 355 355 356 356 /** 357 - * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT 357 + * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT 358 358 * @data: See &enum ieee80211_rssi_event_data 359 359 */ 360 360 struct ieee80211_rssi_event { ··· 388 388 }; 389 389 390 390 /** 391 - * enum ieee80211_mlme_event - data attached to an %MLME_EVENT 391 + * struct ieee80211_mlme_event - data attached to an %MLME_EVENT 392 392 * @data: See &enum ieee80211_mlme_event_data 393 393 * @status: See &enum ieee80211_mlme_event_status 394 394 * @reason: the reason code if applicable ··· 401 401 402 402 /** 403 403 * struct ieee80211_event - event to be sent to the driver 404 - * @type The event itself. See &enum ieee80211_event_type. 404 + * @type: The event itself. See &enum ieee80211_event_type. 405 405 * @rssi: relevant if &type is %RSSI_EVENT 406 406 * @mlme: relevant if &type is %AUTH_EVENT 407 + * @u: union holding the above two fields 407 408 */ 408 409 struct ieee80211_event { 409 410 enum ieee80211_event_type type;
+1 -1
net/bridge/br_multicast.c
··· 1822 1822 if (query->startup_sent < br->multicast_startup_query_count) 1823 1823 query->startup_sent++; 1824 1824 1825 - RCU_INIT_POINTER(querier, NULL); 1825 + RCU_INIT_POINTER(querier->port, NULL); 1826 1826 br_multicast_send_query(br, NULL, query); 1827 1827 spin_unlock(&br->multicast_lock); 1828 1828 }
-4
net/bridge/netfilter/ebtables.c
··· 1117 1117 return -ENOMEM; 1118 1118 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1119 1119 return -ENOMEM; 1120 - if (tmp.num_counters == 0) 1121 - return -EINVAL; 1122 1120 1123 1121 tmp.name[sizeof(tmp.name) - 1] = 0; 1124 1122 ··· 2159 2161 return -ENOMEM; 2160 2162 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 2161 2163 return -ENOMEM; 2162 - if (tmp.num_counters == 0) 2163 - return -EINVAL; 2164 2164 2165 2165 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); 2166 2166
+1 -9
net/core/ethtool.c
··· 359 359 int err; 360 360 struct ethtool_cmd cmd; 361 361 362 - if (!dev->ethtool_ops->get_settings) 363 - return -EOPNOTSUPP; 364 - 365 - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 366 - return -EFAULT; 367 - 368 - cmd.cmd = ETHTOOL_GSET; 369 - 370 - err = dev->ethtool_ops->get_settings(dev, &cmd); 362 + err = __ethtool_get_settings(dev, &cmd); 371 363 if (err < 0) 372 364 return err; 373 365
+2 -2
net/dsa/dsa.c
··· 359 359 */ 360 360 ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); 361 361 if (ds == NULL) 362 - return NULL; 362 + return ERR_PTR(-ENOMEM); 363 363 364 364 ds->dst = dst; 365 365 ds->index = index; ··· 370 370 371 371 ret = dsa_switch_setup_one(ds, parent); 372 372 if (ret) 373 - return NULL; 373 + return ERR_PTR(ret); 374 374 375 375 return ds; 376 376 }
+2 -1
net/ipv4/esp4.c
··· 256 256 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 257 257 aead_givcrypt_set_assoc(req, asg, assoclen); 258 258 aead_givcrypt_set_giv(req, esph->enc_data, 259 - XFRM_SKB_CB(skb)->seq.output.low); 259 + XFRM_SKB_CB(skb)->seq.output.low + 260 + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); 260 261 261 262 ESP_SKB_CB(skb)->tmp = tmp; 262 263 err = crypto_aead_givencrypt(req);
+10 -4
net/ipv4/ip_vti.c
··· 65 65 goto drop; 66 66 67 67 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; 68 - skb->mark = be32_to_cpu(tunnel->parms.i_key); 69 68 70 69 return xfrm_input(skb, nexthdr, spi, encap_type); 71 70 } ··· 90 91 struct pcpu_sw_netstats *tstats; 91 92 struct xfrm_state *x; 92 93 struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; 94 + u32 orig_mark = skb->mark; 95 + int ret; 93 96 94 97 if (!tunnel) 95 98 return 1; ··· 108 107 x = xfrm_input_state(skb); 109 108 family = x->inner_mode->afinfo->family; 110 109 111 - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) 110 + skb->mark = be32_to_cpu(tunnel->parms.i_key); 111 + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 112 + skb->mark = orig_mark; 113 + 114 + if (!ret) 112 115 return -EPERM; 113 116 114 117 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev))); ··· 221 216 222 217 memset(&fl, 0, sizeof(fl)); 223 218 224 - skb->mark = be32_to_cpu(tunnel->parms.o_key); 225 - 226 219 switch (skb->protocol) { 227 220 case htons(ETH_P_IP): 228 221 xfrm_decode_session(skb, &fl, AF_INET); ··· 235 232 dev_kfree_skb(skb); 236 233 return NETDEV_TX_OK; 237 234 } 235 + 236 + /* override mark with tunnel output key */ 237 + fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); 238 238 239 239 return vti_xmit(skb, dev, &fl); 240 240 }
+4 -1
net/ipv4/tcp_cong.c
··· 187 187 188 188 tcp_cleanup_congestion_control(sk); 189 189 icsk->icsk_ca_ops = ca; 190 + icsk->icsk_ca_setsockopt = 1; 190 191 191 192 if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) 192 193 icsk->icsk_ca_ops->init(sk); ··· 336 335 rcu_read_lock(); 337 336 ca = __tcp_ca_find_autoload(name); 338 337 /* No change asking for existing value */ 339 - if (ca == icsk->icsk_ca_ops) 338 + if (ca == icsk->icsk_ca_ops) { 339 + icsk->icsk_ca_setsockopt = 1; 340 340 goto out; 341 + } 341 342 if (!ca) 342 343 err = -ENOENT; 343 344 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+4 -1
net/ipv4/tcp_minisocks.c
··· 420 420 rcu_read_unlock(); 421 421 } 422 422 423 - if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner)) 423 + /* If no valid choice made yet, assign current system default ca. */ 424 + if (!ca_got_dst && 425 + (!icsk->icsk_ca_setsockopt || 426 + !try_module_get(icsk->icsk_ca_ops->owner))) 424 427 tcp_assign_congestion_control(sk); 425 428 426 429 tcp_set_ca_state(sk, TCP_CA_Open);
+2 -4
net/ipv4/udp.c
··· 1345 1345 } 1346 1346 unlock_sock_fast(sk, slow); 1347 1347 1348 - if (noblock) 1349 - return -EAGAIN; 1350 - 1351 - /* starting over for a new packet */ 1348 + /* starting over for a new packet, but check if we need to yield */ 1349 + cond_resched(); 1352 1350 msg->msg_flags &= ~MSG_TRUNC; 1353 1351 goto try_again; 1354 1352 }
+2 -1
net/ipv6/esp6.c
··· 248 248 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 249 249 aead_givcrypt_set_assoc(req, asg, assoclen); 250 250 aead_givcrypt_set_giv(req, esph->enc_data, 251 - XFRM_SKB_CB(skb)->seq.output.low); 251 + XFRM_SKB_CB(skb)->seq.output.low + 252 + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); 252 253 253 254 ESP_SKB_CB(skb)->tmp = tmp; 254 255 err = crypto_aead_givencrypt(req);
+24 -3
net/ipv6/ip6_vti.c
··· 322 322 } 323 323 324 324 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; 325 - skb->mark = be32_to_cpu(t->parms.i_key); 326 325 327 326 rcu_read_unlock(); 328 327 ··· 341 342 struct pcpu_sw_netstats *tstats; 342 343 struct xfrm_state *x; 343 344 struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; 345 + u32 orig_mark = skb->mark; 346 + int ret; 344 347 345 348 if (!t) 346 349 return 1; ··· 359 358 x = xfrm_input_state(skb); 360 359 family = x->inner_mode->afinfo->family; 361 360 362 - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) 361 + skb->mark = be32_to_cpu(t->parms.i_key); 362 + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 363 + skb->mark = orig_mark; 364 + 365 + if (!ret) 363 366 return -EPERM; 364 367 365 368 skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev))); ··· 435 430 struct net_device *tdev; 436 431 struct xfrm_state *x; 437 432 int err = -1; 433 + int mtu; 438 434 439 435 if (!dst) 440 436 goto tx_err_link_failure; ··· 469 463 skb_dst_set(skb, dst); 470 464 skb->dev = skb_dst(skb)->dev; 471 465 466 + mtu = dst_mtu(dst); 467 + if (!skb->ignore_df && skb->len > mtu) { 468 + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); 469 + 470 + if (skb->protocol == htons(ETH_P_IPV6)) 471 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 472 + else 473 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 474 + htonl(mtu)); 475 + 476 + return -EMSGSIZE; 477 + } 478 + 472 479 err = dst_output(skb); 473 480 if (net_xmit_eval(err) == 0) { 474 481 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); ··· 514 495 int ret; 515 496 516 497 memset(&fl, 0, sizeof(fl)); 517 - skb->mark = be32_to_cpu(t->parms.o_key); 518 498 519 499 switch (skb->protocol) { 520 500 case htons(ETH_P_IPV6): ··· 533 515 default: 534 516 goto tx_err; 535 517 } 518 + 519 + /* override mark with tunnel output key */ 520 + fl.flowi_mark = be32_to_cpu(t->parms.o_key); 536 521 537 522 ret = vti6_xmit(skb, dev, &fl); 538 523 if (ret < 0)
+2 -4
net/ipv6/udp.c
··· 525 525 } 526 526 unlock_sock_fast(sk, slow); 527 527 528 - if (noblock) 529 - return -EAGAIN; 530 - 531 - /* starting over for a new packet */ 528 + /* starting over for a new packet, but check if we need to yield */ 529 + cond_resched(); 532 530 msg->msg_flags &= ~MSG_TRUNC; 533 531 goto try_again; 534 532 }
+16 -1
net/xfrm/xfrm_input.c
··· 13 13 #include <net/dst.h> 14 14 #include <net/ip.h> 15 15 #include <net/xfrm.h> 16 + #include <net/ip_tunnels.h> 17 + #include <net/ip6_tunnel.h> 16 18 17 19 static struct kmem_cache *secpath_cachep __read_mostly; 18 20 ··· 188 186 struct xfrm_state *x = NULL; 189 187 xfrm_address_t *daddr; 190 188 struct xfrm_mode *inner_mode; 189 + u32 mark = skb->mark; 191 190 unsigned int family; 192 191 int decaps = 0; 193 192 int async = 0; ··· 205 202 daddr = (xfrm_address_t *)(skb_network_header(skb) + 206 203 XFRM_SPI_SKB_CB(skb)->daddroff); 207 204 family = XFRM_SPI_SKB_CB(skb)->family; 205 + 206 + /* if tunnel is present override skb->mark value with tunnel i_key */ 207 + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { 208 + switch (family) { 209 + case AF_INET: 210 + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); 211 + break; 212 + case AF_INET6: 213 + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); 214 + break; 215 + } 216 + } 208 217 209 218 /* Allocate new secpath or COW existing one. */ 210 219 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { ··· 244 229 goto drop; 245 230 } 246 231 247 - x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); 232 + x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); 248 233 if (x == NULL) { 249 234 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 250 235 xfrm_audit_state_notfound(skb, family, spi, seq);
+2
net/xfrm/xfrm_replay.c
··· 99 99 100 100 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 101 101 XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; 102 + XFRM_SKB_CB(skb)->seq.output.hi = 0; 102 103 if (unlikely(x->replay.oseq == 0)) { 103 104 x->replay.oseq--; 104 105 xfrm_audit_state_replay_overflow(x, skb); ··· 178 177 179 178 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 180 179 XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; 180 + XFRM_SKB_CB(skb)->seq.output.hi = 0; 181 181 if (unlikely(replay_esn->oseq == 0)) { 182 182 replay_esn->oseq--; 183 183 xfrm_audit_state_replay_overflow(x, skb);
+1 -1
net/xfrm/xfrm_state.c
··· 927 927 x->id.spi != spi) 928 928 continue; 929 929 930 - spin_unlock_bh(&net->xfrm.xfrm_state_lock); 931 930 xfrm_state_hold(x); 931 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 932 932 return x; 933 933 } 934 934 spin_unlock_bh(&net->xfrm.xfrm_state_lock);