Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Need to access netdev->num_rx_queues behind an accessor in netvsc
driver otherwise the build breaks with some configs, from Arnd
Bergmann.

2) Add dummy xfrm_dev_event() so that build doesn't fail when
CONFIG_XFRM_OFFLOAD is not set. From Hangbin Liu.

3) Don't OOPS when pfkey_msg2xfrm_state() signals an error, from Dan
Carpenter.

4) Fix MCDI command size for filter operations in sfc driver, from
Martin Habets.

5) Fix UFO segmenting so that we don't calculate incorrect checksums,
from Michal Kubecek.

6) When ipv6 datagram connects fail, reset destination address and
port. From Wei Wang.

7) TCP disconnect must reset the cached receive DST, from WANG Cong.

8) Fix sign extension bug on 32-bit in dev_get_stats(), from Eric
Dumazet.

9) fman driver has to depend on HAS_DMA, from Madalin Bucur.

10) Fix bpf pointer leak with xadd in verifier, from Daniel Borkmann.

11) Fix negative page counts with GRO, from Michal Kubecek.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
sfc: fix attempt to translate invalid filter ID
net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish()
bpf: prevent leaking pointer via xadd on unpriviledged
arcnet: com20020-pci: add missing pdev setup in netdev structure
arcnet: com20020-pci: fix dev_id calculation
arcnet: com20020: remove needless base_addr assignment
Trivial fix to spelling mistake in arc_printk message
arcnet: change irq handler to lock irqsave
rocker: move dereference before free
mlxsw: spectrum_router: Fix NULL pointer dereference
net: sched: Fix one possible panic when no destroy callback
virtio-net: serialize tx routine during reset
net: usb: asix88179_178a: Add support for the Belkin B2B128
fsl/fman: add dependency on HAS_DMA
net: prevent sign extension in dev_get_stats()
tcp: reset sk_rx_dst in tcp_disconnect()
net: ipv6: reset daddr and dport in sk if connect() fails
bnx2x: Don't log mc removal needlessly
bnxt_en: Fix netpoll handling.
bnxt_en: Add missing logic to handle TPA end error conditions.
...

+350 -84
+4 -3
drivers/net/arcnet/arcnet.c
··· 756 756 struct net_device *dev = dev_id; 757 757 struct arcnet_local *lp; 758 758 int recbuf, status, diagstatus, didsomething, boguscount; 759 + unsigned long flags; 759 760 int retval = IRQ_NONE; 760 761 761 762 arc_printk(D_DURING, dev, "\n"); ··· 766 765 lp = netdev_priv(dev); 767 766 BUG_ON(!lp); 768 767 769 - spin_lock(&lp->lock); 768 + spin_lock_irqsave(&lp->lock, flags); 770 769 771 770 /* RESET flag was enabled - if device is not running, we must 772 771 * clear it right away (but nothing else). ··· 775 774 if (lp->hw.status(dev) & RESETflag) 776 775 lp->hw.command(dev, CFLAGScmd | RESETclear); 777 776 lp->hw.intmask(dev, 0); 778 - spin_unlock(&lp->lock); 777 + spin_unlock_irqrestore(&lp->lock, flags); 779 778 return retval; 780 779 } 781 780 ··· 999 998 udelay(1); 1000 999 lp->hw.intmask(dev, lp->intmask); 1001 1000 1002 - spin_unlock(&lp->lock); 1001 + spin_unlock_irqrestore(&lp->lock, flags); 1003 1002 return retval; 1004 1003 } 1005 1004 EXPORT_SYMBOL(arcnet_interrupt);
+1 -1
drivers/net/arcnet/capmode.c
··· 212 212 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ 213 213 ackpkt->soft.cap.mes.ack = acked; 214 214 215 - arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n", 215 + arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n", 216 216 *((int *)&ackpkt->soft.cap.cookie[0])); 217 217 218 218 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
+4 -2
drivers/net/arcnet/com20020-pci.c
··· 135 135 for (i = 0; i < ci->devcount; i++) { 136 136 struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; 137 137 struct com20020_dev *card; 138 + int dev_id_mask = 0xf; 138 139 139 140 dev = alloc_arcdev(device); 140 141 if (!dev) { ··· 167 166 arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); 168 167 arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); 169 168 169 + SET_NETDEV_DEV(dev, &pdev->dev); 170 170 dev->base_addr = ioaddr; 171 171 dev->dev_addr[0] = node; 172 172 dev->irq = pdev->irq; ··· 181 179 182 180 /* Get the dev_id from the PLX rotary coder */ 183 181 if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) 184 - dev->dev_id = 0xc; 185 - dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; 182 + dev_id_mask = 0x3; 183 + dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; 186 184 187 185 snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); 188 186
-2
drivers/net/arcnet/com20020.c
··· 246 246 return -ENODEV; 247 247 } 248 248 249 - dev->base_addr = ioaddr; 250 - 251 249 arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", 252 250 lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); 253 251
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12729 12729 } else { 12730 12730 /* If no mc addresses are required, flush the configuration */ 12731 12731 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12732 - if (rc) 12732 + if (rc < 0) 12733 12733 BNX2X_ERR("Failed to clear multicast configuration %d\n", 12734 12734 rc); 12735 12735 }
+52 -9
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1301 1301 cp_cons = NEXT_CMP(cp_cons); 1302 1302 } 1303 1303 1304 - if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { 1304 + if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1305 1305 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1306 - netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1307 - agg_bufs, (int)MAX_SKB_FRAGS); 1306 + if (agg_bufs > MAX_SKB_FRAGS) 1307 + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1308 + agg_bufs, (int)MAX_SKB_FRAGS); 1308 1309 return NULL; 1309 1310 } 1310 1311 ··· 1563 1562 return rc; 1564 1563 } 1565 1564 1565 + /* In netpoll mode, if we are using a combined completion ring, we need to 1566 + * discard the rx packets and recycle the buffers. 1567 + */ 1568 + static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, 1569 + u32 *raw_cons, u8 *event) 1570 + { 1571 + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1572 + u32 tmp_raw_cons = *raw_cons; 1573 + struct rx_cmp_ext *rxcmp1; 1574 + struct rx_cmp *rxcmp; 1575 + u16 cp_cons; 1576 + u8 cmp_type; 1577 + 1578 + cp_cons = RING_CMP(tmp_raw_cons); 1579 + rxcmp = (struct rx_cmp *) 1580 + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1581 + 1582 + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1583 + cp_cons = RING_CMP(tmp_raw_cons); 1584 + rxcmp1 = (struct rx_cmp_ext *) 1585 + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1586 + 1587 + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1588 + return -EBUSY; 1589 + 1590 + cmp_type = RX_CMP_TYPE(rxcmp); 1591 + if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1592 + rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1593 + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1594 + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1595 + struct rx_tpa_end_cmp_ext *tpa_end1; 1596 + 1597 + tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1598 + tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1599 + cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1600 + } 1601 + return bnxt_rx_pkt(bp, bnapi, raw_cons, event); 1602 + } 1603 + 
1566 1604 #define BNXT_GET_EVENT_PORT(data) \ 1567 1605 ((data) & \ 1568 1606 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) ··· 1784 1744 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1785 1745 rx_pkts = budget; 1786 1746 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1787 - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1747 + if (likely(budget)) 1748 + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1749 + else 1750 + rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, 1751 + &event); 1788 1752 if (likely(rc >= 0)) 1789 1753 rx_pkts += rc; 1790 1754 else if (rc == -EBUSY) /* partial completion */ ··· 6707 6663 struct bnxt *bp = netdev_priv(dev); 6708 6664 int i; 6709 6665 6710 - for (i = 0; i < bp->cp_nr_rings; i++) { 6711 - struct bnxt_irq *irq = &bp->irq_tbl[i]; 6666 + /* Only process tx rings/combined rings in netpoll mode. */ 6667 + for (i = 0; i < bp->tx_nr_rings; i++) { 6668 + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6712 6669 6713 - disable_irq(irq->vector); 6714 - irq->handler(irq->vector, bp->bnapi[i]); 6715 - enable_irq(irq->vector); 6670 + napi_schedule(&txr->bnapi->napi); 6716 6671 } 6717 6672 } 6718 6673 #endif
+5 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 374 374 375 375 __le32 rx_tpa_end_cmp_errors_v2; 376 376 #define RX_TPA_END_CMP_V2 (0x1 << 0) 377 - #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) 377 + #define RX_TPA_END_CMP_ERRORS (0x3 << 1) 378 378 #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 379 379 380 380 u32 rx_tpa_end_cmp_start_opaque; 381 381 }; 382 + 383 + #define TPA_END_ERRORS(rx_tpa_end_ext) \ 384 + ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ 385 + cpu_to_le32(RX_TPA_END_CMP_ERRORS)) 382 386 383 387 #define DB_IDX_MASK 0xffffff 384 388 #define DB_IDX_VALID (0x1 << 26)
+1
drivers/net/ethernet/freescale/fman/Kconfig
··· 2 2 tristate "FMan support" 3 3 depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST 4 4 select GENERIC_ALLOCATOR 5 + depends on HAS_DMA 5 6 select PHYLIB 6 7 default n 7 8 help
+3
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 3334 3334 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 3335 3335 u16 vid = vlan_dev_vlan_id(vlan_dev); 3336 3336 3337 + if (netif_is_bridge_port(vlan_dev)) 3338 + return 0; 3339 + 3337 3340 if (mlxsw_sp_port_dev_check(real_dev)) 3338 3341 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, 3339 3342 vid);
+1 -1
drivers/net/ethernet/rocker/rocker_ofdpa.c
··· 1505 1505 *index = entry->index; 1506 1506 resolved = false; 1507 1507 } else if (removing) { 1508 - ofdpa_neigh_del(trans, found); 1509 1508 *index = found->index; 1509 + ofdpa_neigh_del(trans, found); 1510 1510 } else if (updating) { 1511 1511 ofdpa_neigh_update(found, trans, NULL, false); 1512 1512 resolved = !is_zero_ether_addr(found->eth_dst);
+8 -7
drivers/net/ethernet/sfc/ef10.c
··· 4172 4172 * recipients 4173 4173 */ 4174 4174 if (is_mc_recip) { 4175 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4175 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4176 4176 unsigned int depth, i; 4177 4177 4178 4178 memset(inbuf, 0, sizeof(inbuf)); ··· 4320 4320 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4321 4321 } else { 4322 4322 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4323 - MC_CMD_FILTER_OP_IN_LEN, 4323 + MC_CMD_FILTER_OP_EXT_IN_LEN, 4324 4324 NULL, 0, rc); 4325 4325 } 4326 4326 } ··· 4453 4453 struct efx_filter_spec *spec) 4454 4454 { 4455 4455 struct efx_ef10_filter_table *table = efx->filter_state; 4456 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4456 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4457 4457 struct efx_filter_spec *saved_spec; 4458 4458 unsigned int hash, i, depth = 1; 4459 4459 bool replacing = false; ··· 4940 4940 static void efx_ef10_filter_table_remove(struct efx_nic *efx) 4941 4941 { 4942 4942 struct efx_ef10_filter_table *table = efx->filter_state; 4943 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4943 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4944 4944 struct efx_filter_spec *spec; 4945 4945 unsigned int filter_idx; 4946 4946 int rc; ··· 5105 5105 5106 5106 /* Insert/renew filters */ 5107 5107 for (i = 0; i < addr_count; i++) { 5108 + EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5108 5109 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5109 5110 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5110 5111 rc = efx_ef10_filter_insert(efx, &spec, true); ··· 5123 5122 } 5124 5123 return rc; 5125 5124 } else { 5126 - /* mark as not inserted, and carry on */ 5127 - rc = EFX_EF10_FILTER_ID_INVALID; 5125 + /* keep invalid ID, and carry on */ 5128 5126 } 5127 + } else { 5128 + ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5129 5129 } 5130 - ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5131 5130 } 5132 5131 5133 5132 if 
(multicast && rollback) {
+1 -1
drivers/net/ethernet/ti/cpsw-common.c
··· 90 90 if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) 91 91 return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); 92 92 93 - if (of_machine_is_compatible("ti,am4372")) 93 + if (of_machine_is_compatible("ti,am43")) 94 94 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 95 95 96 96 if (of_machine_is_compatible("ti,dra7"))
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 776 776 channels->rx_count || channels->tx_count || channels->other_count) 777 777 return -EINVAL; 778 778 779 - if (count > net->num_tx_queues || count > net->num_rx_queues) 779 + if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) 780 780 return -EINVAL; 781 781 782 782 if (!nvdev || nvdev->destroy) ··· 1203 1203 rndis_dev = ndev->extension; 1204 1204 if (indir) { 1205 1205 for (i = 0; i < ITAB_NUM; i++) 1206 - if (indir[i] >= dev->num_rx_queues) 1206 + if (indir[i] >= VRSS_CHANNEL_MAX) 1207 1207 return -EINVAL; 1208 1208 1209 1209 for (i = 0; i < ITAB_NUM; i++)
+71 -14
drivers/net/macvlan.c
··· 39 39 #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) 40 40 #define MACVLAN_BC_QUEUE_LEN 1000 41 41 42 + #define MACVLAN_F_PASSTHRU 1 43 + #define MACVLAN_F_ADDRCHANGE 2 44 + 42 45 struct macvlan_port { 43 46 struct net_device *dev; 44 47 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 45 48 struct list_head vlans; 46 49 struct sk_buff_head bc_queue; 47 50 struct work_struct bc_work; 48 - bool passthru; 51 + u32 flags; 49 52 int count; 50 53 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; 51 54 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); 55 + unsigned char perm_addr[ETH_ALEN]; 52 56 }; 53 57 54 58 struct macvlan_source_entry { ··· 69 65 #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) 70 66 71 67 static void macvlan_port_destroy(struct net_device *dev); 68 + 69 + static inline bool macvlan_passthru(const struct macvlan_port *port) 70 + { 71 + return port->flags & MACVLAN_F_PASSTHRU; 72 + } 73 + 74 + static inline void macvlan_set_passthru(struct macvlan_port *port) 75 + { 76 + port->flags |= MACVLAN_F_PASSTHRU; 77 + } 78 + 79 + static inline bool macvlan_addr_change(const struct macvlan_port *port) 80 + { 81 + return port->flags & MACVLAN_F_ADDRCHANGE; 82 + } 83 + 84 + static inline void macvlan_set_addr_change(struct macvlan_port *port) 85 + { 86 + port->flags |= MACVLAN_F_ADDRCHANGE; 87 + } 88 + 89 + static inline void macvlan_clear_addr_change(struct macvlan_port *port) 90 + { 91 + port->flags &= ~MACVLAN_F_ADDRCHANGE; 92 + } 72 93 73 94 /* Hash Ethernet address */ 74 95 static u32 macvlan_eth_hash(const unsigned char *addr) ··· 210 181 static bool macvlan_addr_busy(const struct macvlan_port *port, 211 182 const unsigned char *addr) 212 183 { 213 - /* Test to see if the specified multicast address is 184 + /* Test to see if the specified address is 214 185 * currently in use by the underlying device or 215 186 * another macvlan. 
216 187 */ 217 - if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) 188 + if (!macvlan_passthru(port) && !macvlan_addr_change(port) && 189 + ether_addr_equal_64bits(port->dev->dev_addr, addr)) 218 190 return true; 219 191 220 192 if (macvlan_hash_lookup(port, addr)) ··· 475 445 } 476 446 477 447 macvlan_forward_source(skb, port, eth->h_source); 478 - if (port->passthru) 448 + if (macvlan_passthru(port)) 479 449 vlan = list_first_or_null_rcu(&port->vlans, 480 450 struct macvlan_dev, list); 481 451 else ··· 604 574 struct net_device *lowerdev = vlan->lowerdev; 605 575 int err; 606 576 607 - if (vlan->port->passthru) { 577 + if (macvlan_passthru(vlan->port)) { 608 578 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { 609 579 err = dev_set_promiscuity(lowerdev, 1); 610 580 if (err < 0) ··· 679 649 dev_uc_unsync(lowerdev, dev); 680 650 dev_mc_unsync(lowerdev, dev); 681 651 682 - if (vlan->port->passthru) { 652 + if (macvlan_passthru(vlan->port)) { 683 653 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) 684 654 dev_set_promiscuity(lowerdev, -1); 685 655 goto hash_del; ··· 702 672 { 703 673 struct macvlan_dev *vlan = netdev_priv(dev); 704 674 struct net_device *lowerdev = vlan->lowerdev; 675 + struct macvlan_port *port = vlan->port; 705 676 int err; 706 677 707 678 if (!(dev->flags & IFF_UP)) { ··· 713 682 if (macvlan_addr_busy(vlan->port, addr)) 714 683 return -EBUSY; 715 684 716 - if (!vlan->port->passthru) { 685 + if (!macvlan_passthru(port)) { 717 686 err = dev_uc_add(lowerdev, addr); 718 687 if (err) 719 688 return err; ··· 723 692 724 693 macvlan_hash_change_addr(vlan, addr); 725 694 } 695 + if (macvlan_passthru(port) && !macvlan_addr_change(port)) { 696 + /* Since addr_change isn't set, we are here due to lower 697 + * device change. Save the lower-dev address so we can 698 + * restore it later. 
699 + */ 700 + ether_addr_copy(vlan->port->perm_addr, 701 + lowerdev->dev_addr); 702 + } 703 + macvlan_clear_addr_change(port); 726 704 return 0; 727 705 } 728 706 ··· 743 703 if (!is_valid_ether_addr(addr->sa_data)) 744 704 return -EADDRNOTAVAIL; 745 705 706 + /* If the addresses are the same, this is a no-op */ 707 + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) 708 + return 0; 709 + 746 710 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 711 + macvlan_set_addr_change(vlan->port); 747 712 dev_set_mac_address(vlan->lowerdev, addr); 748 713 return 0; 749 714 } ··· 973 928 /* Support unicast filter only on passthru devices. 974 929 * Multicast filter should be allowed on all devices. 975 930 */ 976 - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 931 + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 977 932 return -EOPNOTSUPP; 978 933 979 934 if (flags & NLM_F_REPLACE) ··· 997 952 /* Support unicast filter only on passthru devices. 998 953 * Multicast filter should be allowed on all devices. 999 954 */ 1000 - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 955 + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 1001 956 return -EOPNOTSUPP; 1002 957 1003 958 if (is_unicast_ether_addr(addr)) ··· 1165 1120 if (port == NULL) 1166 1121 return -ENOMEM; 1167 1122 1168 - port->passthru = false; 1169 1123 port->dev = dev; 1124 + ether_addr_copy(port->perm_addr, dev->dev_addr); 1170 1125 INIT_LIST_HEAD(&port->vlans); 1171 1126 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 1172 1127 INIT_HLIST_HEAD(&port->vlan_hash[i]); ··· 1204 1159 dev_put(src->dev); 1205 1160 1206 1161 kfree_skb(skb); 1162 + } 1163 + 1164 + /* If the lower device address has been changed by passthru 1165 + * macvlan, put it back. 
1166 + */ 1167 + if (macvlan_passthru(port) && 1168 + !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { 1169 + struct sockaddr sa; 1170 + 1171 + sa.sa_family = port->dev->type; 1172 + memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); 1173 + dev_set_mac_address(port->dev, &sa); 1207 1174 } 1208 1175 1209 1176 kfree(port); ··· 1383 1326 port = macvlan_port_get_rtnl(lowerdev); 1384 1327 1385 1328 /* Only 1 macvlan device can be created in passthru mode */ 1386 - if (port->passthru) { 1329 + if (macvlan_passthru(port)) { 1387 1330 /* The macvlan port must be not created this time, 1388 1331 * still goto destroy_macvlan_port for readability. 1389 1332 */ ··· 1409 1352 err = -EINVAL; 1410 1353 goto destroy_macvlan_port; 1411 1354 } 1412 - port->passthru = true; 1355 + macvlan_set_passthru(port); 1413 1356 eth_hw_addr_inherit(dev, lowerdev); 1414 1357 } 1415 1358 ··· 1491 1434 if (data && data[IFLA_MACVLAN_FLAGS]) { 1492 1435 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1493 1436 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 1494 - if (vlan->port->passthru && promisc) { 1437 + if (macvlan_passthru(vlan->port) && promisc) { 1495 1438 int err; 1496 1439 1497 1440 if (flags & MACVLAN_FLAG_NOPROMISC) ··· 1654 1597 } 1655 1598 break; 1656 1599 case NETDEV_CHANGEADDR: 1657 - if (!port->passthru) 1600 + if (!macvlan_passthru(port)) 1658 1601 return NOTIFY_DONE; 1659 1602 1660 1603 vlan = list_first_entry_or_null(&port->vlans,
+1 -1
drivers/net/phy/dp83640.c
··· 908 908 if (overflow) { 909 909 pr_debug("tx timestamp queue overflow, count %d\n", overflow); 910 910 while (skb) { 911 - skb_complete_tx_timestamp(skb, NULL); 911 + kfree_skb(skb); 912 912 skb = skb_dequeue(&dp83640->tx_queue); 913 913 } 914 914 return;
+2
drivers/net/phy/micrel.c
··· 619 619 if ((regval & 0xFF) == 0xFF) { 620 620 phy_init_hw(phydev); 621 621 phydev->link = 0; 622 + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) 623 + phydev->drv->config_intr(phydev); 622 624 } 623 625 624 626 return 0;
+16
drivers/net/usb/ax88179_178a.c
··· 1722 1722 .tx_fixup = ax88179_tx_fixup, 1723 1723 }; 1724 1724 1725 + static const struct driver_info belkin_info = { 1726 + .description = "Belkin USB Ethernet Adapter", 1727 + .bind = ax88179_bind, 1728 + .unbind = ax88179_unbind, 1729 + .status = ax88179_status, 1730 + .link_reset = ax88179_link_reset, 1731 + .reset = ax88179_reset, 1732 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1733 + .rx_fixup = ax88179_rx_fixup, 1734 + .tx_fixup = ax88179_tx_fixup, 1735 + }; 1736 + 1725 1737 static const struct usb_device_id products[] = { 1726 1738 { 1727 1739 /* ASIX AX88179 10/100/1000 */ ··· 1763 1751 /* Lenovo OneLinkDock Gigabit LAN */ 1764 1752 USB_DEVICE(0x17ef, 0x304b), 1765 1753 .driver_info = (unsigned long)&lenovo_info, 1754 + }, { 1755 + /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ 1756 + USB_DEVICE(0x050d, 0x0128), 1757 + .driver_info = (unsigned long)&belkin_info, 1766 1758 }, 1767 1759 { }, 1768 1760 };
+2 -2
drivers/net/veth.c
··· 383 383 tbp = tb; 384 384 } 385 385 386 - if (tbp[IFLA_IFNAME]) { 386 + if (ifmp && tbp[IFLA_IFNAME]) { 387 387 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 388 388 name_assign_type = NET_NAME_USER; 389 389 } else { ··· 402 402 return PTR_ERR(peer); 403 403 } 404 404 405 - if (tbp[IFLA_ADDRESS] == NULL) 405 + if (!ifmp || !tbp[IFLA_ADDRESS]) 406 406 eth_hw_addr_random(peer); 407 407 408 408 if (ifmp && (dev->ifindex != 0))
+1
drivers/net/virtio_net.c
··· 1797 1797 flush_work(&vi->config_work); 1798 1798 1799 1799 netif_device_detach(vi->dev); 1800 + netif_tx_disable(vi->dev); 1800 1801 cancel_delayed_work_sync(&vi->refill); 1801 1802 1802 1803 if (netif_running(vi->dev)) {
+1
drivers/net/xen-netback/common.h
··· 199 199 unsigned long remaining_credit; 200 200 struct timer_list credit_timeout; 201 201 u64 credit_window_start; 202 + bool rate_limited; 202 203 203 204 /* Statistics */ 204 205 struct xenvif_stats stats;
+5 -1
drivers/net/xen-netback/interface.c
··· 106 106 107 107 if (work_done < budget) { 108 108 napi_complete_done(napi, work_done); 109 - xenvif_napi_schedule_or_enable_events(queue); 109 + /* If the queue is rate-limited, it shall be 110 + * rescheduled in the timer callback. 111 + */ 112 + if (likely(!queue->rate_limited)) 113 + xenvif_napi_schedule_or_enable_events(queue); 110 114 } 111 115 112 116 return work_done;
+5 -1
drivers/net/xen-netback/netback.c
··· 180 180 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ 181 181 182 182 queue->remaining_credit = min(max_credit, max_burst); 183 + queue->rate_limited = false; 183 184 } 184 185 185 186 void xenvif_tx_credit_callback(unsigned long data) ··· 687 686 msecs_to_jiffies(queue->credit_usec / 1000); 688 687 689 688 /* Timer could already be pending in rare cases. */ 690 - if (timer_pending(&queue->credit_timeout)) 689 + if (timer_pending(&queue->credit_timeout)) { 690 + queue->rate_limited = true; 691 691 return true; 692 + } 692 693 693 694 /* Passed the point where we can replenish credit? */ 694 695 if (time_after_eq64(now, next_credit)) { ··· 705 702 mod_timer(&queue->credit_timeout, 706 703 next_credit); 707 704 queue->credit_window_start = next_credit; 705 + queue->rate_limited = true; 708 706 709 707 return true; 710 708 }
+2 -5
include/net/xfrm.h
··· 1850 1850 } 1851 1851 #endif 1852 1852 1853 - #ifdef CONFIG_XFRM_OFFLOAD 1854 1853 void __net_init xfrm_dev_init(void); 1854 + 1855 + #ifdef CONFIG_XFRM_OFFLOAD 1855 1856 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); 1856 1857 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, 1857 1858 struct xfrm_user_offload *xuo); ··· 1878 1877 } 1879 1878 } 1880 1879 #else 1881 - static inline void __net_init xfrm_dev_init(void) 1882 - { 1883 - } 1884 - 1885 1880 static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 1886 1881 { 1887 1882 return 0;
+5
kernel/bpf/verifier.c
··· 989 989 if (err) 990 990 return err; 991 991 992 + if (is_pointer_value(env, insn->src_reg)) { 993 + verbose("R%d leaks addr into mem\n", insn->src_reg); 994 + return -EACCES; 995 + } 996 + 992 997 /* check whether atomic_add can read the memory */ 993 998 err = check_mem_access(env, insn->dst_reg, insn->off, 994 999 BPF_SIZE(insn->code), BPF_READ, -1);
+20 -10
net/core/dev.c
··· 4767 4767 } 4768 4768 EXPORT_SYMBOL(gro_find_complete_by_type); 4769 4769 4770 + static void napi_skb_free_stolen_head(struct sk_buff *skb) 4771 + { 4772 + skb_dst_drop(skb); 4773 + secpath_reset(skb); 4774 + kmem_cache_free(skbuff_head_cache, skb); 4775 + } 4776 + 4770 4777 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 4771 4778 { 4772 4779 switch (ret) { ··· 4787 4780 break; 4788 4781 4789 4782 case GRO_MERGED_FREE: 4790 - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { 4791 - skb_dst_drop(skb); 4792 - secpath_reset(skb); 4793 - kmem_cache_free(skbuff_head_cache, skb); 4794 - } else { 4783 + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4784 + napi_skb_free_stolen_head(skb); 4785 + else 4795 4786 __kfree_skb(skb); 4796 - } 4797 4787 break; 4798 4788 4799 4789 case GRO_HELD: ··· 4862 4858 break; 4863 4859 4864 4860 case GRO_DROP: 4865 - case GRO_MERGED_FREE: 4866 4861 napi_reuse_skb(napi, skb); 4862 + break; 4863 + 4864 + case GRO_MERGED_FREE: 4865 + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4866 + napi_skb_free_stolen_head(skb); 4867 + else 4868 + napi_reuse_skb(napi, skb); 4867 4869 break; 4868 4870 4869 4871 case GRO_MERGED: ··· 7793 7783 } else { 7794 7784 netdev_stats_to_stats64(storage, &dev->stats); 7795 7785 } 7796 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 7797 - storage->tx_dropped += atomic_long_read(&dev->tx_dropped); 7798 - storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler); 7786 + storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 7787 + storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 7788 + storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 7799 7789 return storage; 7800 7790 } 7801 7791 EXPORT_SYMBOL(dev_get_stats);
+2 -1
net/ipv4/ip_output.c
··· 964 964 csummode = CHECKSUM_PARTIAL; 965 965 966 966 cork->length += length; 967 - if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && 967 + if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || 968 + (skb && skb_is_gso(skb))) && 968 969 (sk->sk_protocol == IPPROTO_UDP) && 969 970 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 970 971 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+2
net/ipv4/tcp.c
··· 2330 2330 tcp_init_send_head(sk); 2331 2331 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2332 2332 __sk_dst_reset(sk); 2333 + dst_release(sk->sk_rx_dst); 2334 + sk->sk_rx_dst = NULL; 2333 2335 tcp_saved_syn_free(tp); 2334 2336 2335 2337 /* Clean up fastopen related fields */
+3 -2
net/ipv6/addrconf.c
··· 3369 3369 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3370 3370 struct netdev_notifier_changeupper_info *info; 3371 3371 struct inet6_dev *idev = __in6_dev_get(dev); 3372 + struct net *net = dev_net(dev); 3372 3373 int run_pending = 0; 3373 3374 int err; 3374 3375 ··· 3385 3384 case NETDEV_CHANGEMTU: 3386 3385 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ 3387 3386 if (dev->mtu < IPV6_MIN_MTU) { 3388 - addrconf_ifdown(dev, 1); 3387 + addrconf_ifdown(dev, dev != net->loopback_dev); 3389 3388 break; 3390 3389 } 3391 3390 ··· 3501 3500 * IPV6_MIN_MTU stop IPv6 on this interface. 3502 3501 */ 3503 3502 if (dev->mtu < IPV6_MIN_MTU) 3504 - addrconf_ifdown(dev, 1); 3503 + addrconf_ifdown(dev, dev != net->loopback_dev); 3505 3504 } 3506 3505 break; 3507 3506
+7 -1
net/ipv6/datagram.c
··· 250 250 */ 251 251 252 252 err = ip6_datagram_dst_update(sk, true); 253 - if (err) 253 + if (err) { 254 + /* Reset daddr and dport so that udp_v6_early_demux() 255 + * fails to find this socket 256 + */ 257 + memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 258 + inet->inet_dport = 0; 254 259 goto out; 260 + } 255 261 256 262 sk->sk_state = TCP_ESTABLISHED; 257 263 sk_set_txhash(sk);
+25
net/ipv6/esp6_offload.c
··· 30 30 #include <net/ipv6.h> 31 31 #include <linux/icmpv6.h> 32 32 33 + static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen) 34 + { 35 + int off = sizeof(struct ipv6hdr); 36 + struct ipv6_opt_hdr *exthdr; 37 + 38 + if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP)) 39 + return offsetof(struct ipv6hdr, nexthdr); 40 + 41 + while (off < nhlen) { 42 + exthdr = (void *)ipv6_hdr + off; 43 + if (exthdr->nexthdr == NEXTHDR_ESP) 44 + return off; 45 + 46 + off += ipv6_optlen(exthdr); 47 + } 48 + 49 + return 0; 50 + } 51 + 33 52 static struct sk_buff **esp6_gro_receive(struct sk_buff **head, 34 53 struct sk_buff *skb) 35 54 { ··· 57 38 struct xfrm_state *x; 58 39 __be32 seq; 59 40 __be32 spi; 41 + int nhoff; 60 42 int err; 61 43 62 44 skb_pull(skb, offset); ··· 92 72 93 73 xo->flags |= XFRM_GRO; 94 74 75 + nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset); 76 + if (!nhoff) 77 + goto out; 78 + 79 + IP6CB(skb)->nhoff = nhoff; 95 80 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; 96 81 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 97 82 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+1 -1
net/ipv6/ip6_output.c
··· 1390 1390 */ 1391 1391 1392 1392 cork->length += length; 1393 - if ((((length + fragheaderlen) > mtu) || 1393 + if ((((length + (skb ? skb->len : headersize)) > mtu) || 1394 1394 (skb && skb_is_gso(skb))) && 1395 1395 (sk->sk_protocol == IPPROTO_UDP) && 1396 1396 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
+5 -1
net/ipv6/route.c
··· 3722 3722 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 3723 3723 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 3724 3724 #endif 3725 - } else if (event == NETDEV_UNREGISTER) { 3725 + } else if (event == NETDEV_UNREGISTER && 3726 + dev->reg_state != NETREG_UNREGISTERED) { 3727 + /* NETDEV_UNREGISTER could be fired for multiple times by 3728 + * netdev_wait_allrefs(). Make sure we only call this once. 3729 + */ 3726 3730 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); 3727 3731 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3728 3732 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+1 -1
net/ipv6/sit.c
··· 305 305 * we try harder to allocate. 306 306 */ 307 307 kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ? 308 - kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : 308 + kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) : 309 309 NULL; 310 310 311 311 rcu_read_lock();
+2 -1
net/ipv6/udp.c
··· 879 879 struct sock *sk; 880 880 881 881 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 882 - if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) 882 + if (sk->sk_state == TCP_ESTABLISHED && 883 + INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) 883 884 return sk; 884 885 /* Only check first socket in chain */ 885 886 break;
+1 -1
net/ipv6/xfrm6_input.c
··· 43 43 return 1; 44 44 #endif 45 45 46 - ipv6_hdr(skb)->payload_len = htons(skb->len); 47 46 __skb_push(skb, skb->data - skb_network_header(skb)); 47 + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 48 48 49 49 if (xo && (xo->flags & XFRM_GRO)) { 50 50 skb_mac_header_rebuild(skb);
+15 -4
net/key/af_key.c
··· 1157 1157 goto out; 1158 1158 } 1159 1159 1160 + err = -ENOBUFS; 1160 1161 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; 1161 1162 if (sa->sadb_sa_auth) { 1162 1163 int keysize = 0; ··· 1169 1168 if (key) 1170 1169 keysize = (key->sadb_key_bits + 7) / 8; 1171 1170 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); 1172 - if (!x->aalg) 1171 + if (!x->aalg) { 1172 + err = -ENOMEM; 1173 1173 goto out; 1174 + } 1174 1175 strcpy(x->aalg->alg_name, a->name); 1175 1176 x->aalg->alg_key_len = 0; 1176 1177 if (key) { ··· 1191 1188 goto out; 1192 1189 } 1193 1190 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); 1194 - if (!x->calg) 1191 + if (!x->calg) { 1192 + err = -ENOMEM; 1195 1193 goto out; 1194 + } 1196 1195 strcpy(x->calg->alg_name, a->name); 1197 1196 x->props.calgo = sa->sadb_sa_encrypt; 1198 1197 } else { ··· 1208 1203 if (key) 1209 1204 keysize = (key->sadb_key_bits + 7) / 8; 1210 1205 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); 1211 - if (!x->ealg) 1206 + if (!x->ealg) { 1207 + err = -ENOMEM; 1212 1208 goto out; 1209 + } 1213 1210 strcpy(x->ealg->alg_name, a->name); 1214 1211 x->ealg->alg_key_len = 0; 1215 1212 if (key) { ··· 1256 1249 struct xfrm_encap_tmpl *natt; 1257 1250 1258 1251 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); 1259 - if (!x->encap) 1252 + if (!x->encap) { 1253 + err = -ENOMEM; 1260 1254 goto out; 1255 + } 1261 1256 1262 1257 natt = x->encap; 1263 1258 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; ··· 2764 2755 int err, err2; 2765 2756 2766 2757 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true); 2758 + if (!err) 2759 + xfrm_garbage_collect(net); 2767 2760 err2 = unicast_flush_resp(sk, hdr); 2768 2761 if (err || err2) { 2769 2762 if (err == -ESRCH) /* empty table - old silent behavior */
+2 -1
net/sched/sch_api.c
··· 1019 1019 return sch; 1020 1020 } 1021 1021 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ 1022 - ops->destroy(sch); 1022 + if (ops->destroy) 1023 + ops->destroy(sch); 1023 1024 err_out3: 1024 1025 dev_put(dev); 1025 1026 kfree((char *) sch - sch->padded);
+1 -2
net/xfrm/Makefile
··· 4 4 5 5 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ 6 6 xfrm_input.o xfrm_output.o \ 7 - xfrm_sysctl.o xfrm_replay.o 8 - obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o 7 + xfrm_sysctl.o xfrm_replay.o xfrm_device.o 9 8 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 10 9 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o 11 10 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
+2
net/xfrm/xfrm_device.c
··· 22 22 #include <net/xfrm.h> 23 23 #include <linux/notifier.h> 24 24 25 + #ifdef CONFIG_XFRM_OFFLOAD 25 26 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 26 27 { 27 28 int err; ··· 138 137 return true; 139 138 } 140 139 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); 140 + #endif 141 141 142 142 int xfrm_dev_register(struct net_device *dev) 143 143 {
-4
net/xfrm/xfrm_policy.c
··· 1006 1006 err = -ESRCH; 1007 1007 out: 1008 1008 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1009 - 1010 - if (cnt) 1011 - xfrm_garbage_collect(net); 1012 - 1013 1009 return err; 1014 1010 } 1015 1011 EXPORT_SYMBOL(xfrm_policy_flush);
+1
net/xfrm/xfrm_user.c
··· 2027 2027 return 0; 2028 2028 return err; 2029 2029 } 2030 + xfrm_garbage_collect(net); 2030 2031 2031 2032 c.data.type = type; 2032 2033 c.event = nlh->nlmsg_type;
+66
tools/testing/selftests/bpf/test_verifier.c
··· 3749 3749 .errstr = "invalid bpf_context access", 3750 3750 }, 3751 3751 { 3752 + "leak pointer into ctx 1", 3753 + .insns = { 3754 + BPF_MOV64_IMM(BPF_REG_0, 0), 3755 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3756 + offsetof(struct __sk_buff, cb[0])), 3757 + BPF_LD_MAP_FD(BPF_REG_2, 0), 3758 + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 3759 + offsetof(struct __sk_buff, cb[0])), 3760 + BPF_EXIT_INSN(), 3761 + }, 3762 + .fixup_map1 = { 2 }, 3763 + .errstr_unpriv = "R2 leaks addr into mem", 3764 + .result_unpriv = REJECT, 3765 + .result = ACCEPT, 3766 + }, 3767 + { 3768 + "leak pointer into ctx 2", 3769 + .insns = { 3770 + BPF_MOV64_IMM(BPF_REG_0, 0), 3771 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3772 + offsetof(struct __sk_buff, cb[0])), 3773 + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, 3774 + offsetof(struct __sk_buff, cb[0])), 3775 + BPF_EXIT_INSN(), 3776 + }, 3777 + .errstr_unpriv = "R10 leaks addr into mem", 3778 + .result_unpriv = REJECT, 3779 + .result = ACCEPT, 3780 + }, 3781 + { 3782 + "leak pointer into ctx 3", 3783 + .insns = { 3784 + BPF_MOV64_IMM(BPF_REG_0, 0), 3785 + BPF_LD_MAP_FD(BPF_REG_2, 0), 3786 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 3787 + offsetof(struct __sk_buff, cb[0])), 3788 + BPF_EXIT_INSN(), 3789 + }, 3790 + .fixup_map1 = { 1 }, 3791 + .errstr_unpriv = "R2 leaks addr into ctx", 3792 + .result_unpriv = REJECT, 3793 + .result = ACCEPT, 3794 + }, 3795 + { 3796 + "leak pointer into map val", 3797 + .insns = { 3798 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 3799 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3800 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3801 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3802 + BPF_LD_MAP_FD(BPF_REG_1, 0), 3803 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3804 + BPF_FUNC_map_lookup_elem), 3805 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 3806 + BPF_MOV64_IMM(BPF_REG_3, 0), 3807 + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 3808 + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 3809 + BPF_MOV64_IMM(BPF_REG_0, 0), 3810 
+ BPF_EXIT_INSN(), 3811 + }, 3812 + .fixup_map1 = { 4 }, 3813 + .errstr_unpriv = "R6 leaks addr into mem", 3814 + .result_unpriv = REJECT, 3815 + .result = ACCEPT, 3816 + }, 3817 + { 3752 3818 "helper access to map: full range", 3753 3819 .insns = { 3754 3820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),