Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix mvneta/bm dependencies, from Arnd Bergmann.

2) RX completion hw bug workaround in bnxt_en, from Michael Chan.

3) Kernel pointer leak in nf_conntrack, from Linus.

4) Hoplimit route attribute limits not enforced properly, from Paolo
Abeni.

5) qlcnic driver NULL deref fix, from Dan Carpenter.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
arm64: bpf: jit JMP_JSET_{X,K}
net/route: enforce hoplimit max value
nf_conntrack: avoid kernel pointer value leak in slab name
drivers: net: xgene: fix register offset
drivers: net: xgene: fix statistics counters race condition
drivers: net: xgene: fix ununiform latency across queues
drivers: net: xgene: fix sharing of irqs
drivers: net: xgene: fix IPv4 forward crash
xen-netback: fix extra_info handling in xenvif_tx_err()
net: mvneta: bm: fix dependencies again
bnxt_en: Add workaround to detect bad opaque in rx completion (part 2)
bnxt_en: Add workaround to detect bad opaque in rx completion (part 1)
qlcnic: potential NULL dereference in qlcnic_83xx_get_minidump_template()

Changed files: +175 -45

arch/arm64/net/bpf_jit_comp.c | +1

···
 	case BPF_JGE:
 		jmp_cond = A64_COND_CS;
 		break;
+	case BPF_JSET:
 	case BPF_JNE:
 		jmp_cond = A64_COND_NE;
 		break;

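A note on why BPF_JSET can share BPF_JNE's condition code: JSET means "branch if (dst & src) is non-zero". The rest of the patch (not visible in this hunk) emits a TST — an AND that only sets flags — for JSET where JNE emits a CMP, so both jumps are taken on the "Z flag clear" condition, i.e. A64_COND_NE. A rough interpreter-style sketch of the two semantics (illustrative only, not the JIT code):

	case BPF_JMP | BPF_JNE | BPF_K:
		if (dst != imm)		/* JIT: CMP dst, #imm ; B.NE */
			insn += off;
		break;
	case BPF_JMP | BPF_JSET | BPF_K:
		if (dst & imm)		/* JIT: TST dst, #imm ; B.NE */
			insn += off;
		break;
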
drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | +6 -5

···
 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
 				  struct xgene_cle_dbptr *dbptr, u32 *buf)
 {
+	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
 	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
 		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
···
 	.branch = {
 		{
 			/* IPV4 */
-			.valid = 0,
+			.valid = 1,
 			.next_packet_pointer = 22,
 			.jump_bw = JMP_FW,
 			.jump_rel = JMP_ABS,
···
 			.next_node = PKT_PROT_NODE,
 			.next_branch = 0,
 			.data = 0x8,
-			.mask = 0xffff
+			.mask = 0x0
 		},
 		{
 			.valid = 0,
···
 			.next_node = RSS_IPV4_TCP_NODE,
 			.next_branch = 0,
 			.data = 0x0600,
-			.mask = 0xffff
+			.mask = 0x00ff
 		},
 		{
 			/* UDP */
···
 			.next_node = RSS_IPV4_UDP_NODE,
 			.next_branch = 0,
 			.data = 0x1100,
-			.mask = 0xffff
+			.mask = 0x00ff
 		},
 		{
 			.valid = 0,
···
 		{
 			/* TCP DST Port */
 			.valid = 0,
-			.next_packet_pointer = 256,
+			.next_packet_pointer = 258,
 			.jump_bw = JMP_FW,
 			.jump_rel = JMP_ABS,
 			.operation = EQT,

drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | +2

···
 #define CLE_TYPE_POS 0
 #define CLE_TYPE_LEN 2
 
+#define CLE_DROP_POS 28
+#define CLE_DROP_LEN 1
 #define CLE_DSTQIDL_POS 25
 #define CLE_DSTQIDL_LEN 7
 #define CLE_DSTQIDH_POS 0

drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | +11 -8

···
 				       struct xgene_enet_pdata *pdata,
 				       enum xgene_enet_err_code status)
 {
-	struct rtnl_link_stats64 *stats = &pdata->stats;
-
 	switch (status) {
 	case INGRESS_CRC:
-		stats->rx_crc_errors++;
+		ring->rx_crc_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_CHECKSUM:
 	case INGRESS_CHECKSUM_COMPUTE:
-		stats->rx_errors++;
+		ring->rx_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_TRUNC_FRAME:
-		stats->rx_frame_errors++;
+		ring->rx_frame_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_PKT_LEN:
-		stats->rx_length_errors++;
+		ring->rx_length_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_PKT_UNDER:
-		stats->rx_frame_errors++;
+		ring->rx_frame_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_FIFO_OVERRUN:
-		stats->rx_fifo_errors++;
+		ring->rx_fifo_errors++;
 		break;
 	default:
 		break;

drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | +5 -3

···
 #define RINGADDRL_POS 5
 #define RINGADDRL_LEN 27
 #define RINGADDRH_POS 0
-#define RINGADDRH_LEN 6
+#define RINGADDRH_LEN 7
 #define RINGSIZE_POS 23
 #define RINGSIZE_LEN 3
 #define RINGTYPE_POS 19
···
 #define RINGMODE_POS 20
 #define RINGMODE_LEN 3
 #define RECOMTIMEOUTL_POS 28
-#define RECOMTIMEOUTL_LEN 3
+#define RECOMTIMEOUTL_LEN 4
 #define RECOMTIMEOUTH_POS 0
-#define RECOMTIMEOUTH_LEN 2
+#define RECOMTIMEOUTH_LEN 3
 #define NUMMSGSINQ_POS 1
 #define NUMMSGSINQ_LEN 16
 #define ACCEPTLERR BIT(19)
···
 #define USERINFO_LEN 32
 #define FPQNUM_POS 32
 #define FPQNUM_LEN 12
+#define ELERR_POS 46
+#define ELERR_LEN 2
 #define NV_POS 50
 #define NV_LEN 1
 #define LL_POS 51

drivers/net/ethernet/apm/xgene/xgene_enet_main.c | +55 -20

···
 
 	skb_tx_timestamp(skb);
 
-	pdata->stats.tx_packets++;
-	pdata->stats.tx_bytes += skb->len;
+	tx_ring->tx_packets++;
+	tx_ring->tx_bytes += skb->len;
 
 	pdata->ring_ops->wr_cmd(tx_ring, count);
 	return NETDEV_TX_OK;
···
 	skb = buf_pool->rx_skb[skb_index];
 
 	/* checking for error */
-	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+		 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
 	if (unlikely(status > 2)) {
 		dev_kfree_skb_any(skb);
 		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
 				       status);
-		pdata->stats.rx_dropped++;
 		ret = -EIO;
 		goto out;
 	}
···
 		xgene_enet_skip_csum(skb);
 	}
 
-	pdata->stats.rx_packets++;
-	pdata->stats.rx_bytes += datalen;
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += datalen;
 	napi_gro_receive(&rx_ring->napi, skb);
 out:
 	if (--rx_ring->nbufpool == 0) {
···
 		ring = pdata->rx_ring[i];
 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-				       IRQF_SHARED, ring->irq_name, ring);
+				       0, ring->irq_name, ring);
 		if (ret) {
 			netdev_err(ndev, "Failed to request irq %s\n",
 				   ring->irq_name);
···
 		ring = pdata->tx_ring[i]->cp_ring;
 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-				       IRQF_SHARED, ring->irq_name, ring);
+				       0, ring->irq_name, ring);
 		if (ret) {
 			netdev_err(ndev, "Failed to request irq %s\n",
 				   ring->irq_name);
···
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct rtnl_link_stats64 *stats = &pdata->stats;
+	struct xgene_enet_desc_ring *ring;
+	int i;
 
-	stats->rx_errors += stats->rx_length_errors +
-			    stats->rx_crc_errors +
-			    stats->rx_frame_errors +
-			    stats->rx_fifo_errors;
-	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			stats->tx_packets += ring->tx_packets;
+			stats->tx_bytes += ring->tx_bytes;
+		}
+	}
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			stats->rx_packets += ring->rx_packets;
+			stats->rx_bytes += ring->rx_bytes;
+			stats->rx_errors += ring->rx_length_errors +
+				ring->rx_crc_errors +
+				ring->rx_frame_errors +
+				ring->rx_fifo_errors;
+			stats->rx_dropped += ring->rx_dropped;
+		}
+	}
+	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
 
 	return storage;
 }
···
 	for (i = 0; i < max_irqs; i++) {
 		ret = platform_get_irq(pdev, i);
 		if (ret <= 0) {
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				max_irqs = i;
+				pdata->rxq_cnt = max_irqs / 2;
+				pdata->txq_cnt = max_irqs / 2;
+				pdata->cq_cnt = max_irqs / 2;
+				break;
+			}
 			dev_err(dev, "Unable to get ENET IRQ\n");
 			ret = ret ? : -ENXIO;
 			return ret;
···
 		pdata->port_ops = &xgene_xgport_ops;
 		pdata->cle_ops = &xgene_cle3in_ops;
 		pdata->rm = RM0;
-		pdata->rxq_cnt = XGENE_NUM_RX_RING;
-		pdata->txq_cnt = XGENE_NUM_TX_RING;
-		pdata->cq_cnt = XGENE_NUM_TXC_RING;
+		if (!pdata->rxq_cnt) {
+			pdata->rxq_cnt = XGENE_NUM_RX_RING;
+			pdata->txq_cnt = XGENE_NUM_TX_RING;
+			pdata->cq_cnt = XGENE_NUM_TXC_RING;
+		}
 		break;
 	}
 
 	if (pdata->enet_id == XGENE_ENET1) {
 		switch (pdata->port_id) {
 		case 0:
-			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-			pdata->eth_bufnum = START_ETH_BUFNUM_0;
-			pdata->bp_bufnum = START_BP_BUFNUM_0;
-			pdata->ring_num = START_RING_NUM_0;
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			} else {
+				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			}
 			break;
 		case 1:
 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {

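The statistics rework in the xgene hunks above fixes a counter race: previously every TX/RX ring did read-modify-write increments on the single shared pdata->stats, so concurrent NAPI contexts could lose updates. Each ring now owns plain u64 counters with exactly one writer (the ring's own context), and ndo_get_stats64() aggregates them at read time. The pattern in miniature (names as in the driver):

	/* writer side: one writer per counter, so a plain increment
	 * in the ring's own NAPI context is race-free */
	rx_ring->rx_packets++;

	/* reader side: ndo_get_stats64() sums the rings at read time */
	for (i = 0; i < pdata->rxq_cnt; i++)
		stats->rx_packets += pdata->rx_ring[i]->rx_packets;
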
drivers/net/ethernet/apm/xgene/xgene_enet_main.h | +14 -4

···
 #define XGENE_ENET_MSS 1448
 #define XGENE_MIN_ENET_FRAME_SIZE 60
 
-#define XGENE_MAX_ENET_IRQ 8
-#define XGENE_NUM_RX_RING 4
-#define XGENE_NUM_TX_RING 4
-#define XGENE_NUM_TXC_RING 4
+#define XGENE_MAX_ENET_IRQ 16
+#define XGENE_NUM_RX_RING 8
+#define XGENE_NUM_TX_RING 8
+#define XGENE_NUM_TXC_RING 8
 
 #define START_CPU_BUFNUM_0 0
 #define START_ETH_BUFNUM_0 2
···
 		struct xgene_enet_raw_desc16 *raw_desc16;
 	};
 	__le64 *exp_bufs;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
+	u64 rx_length_errors;
+	u64 rx_crc_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
 };
 
 struct xgene_mac_ops {

drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h | +1 -1

···
 #define LINK_STATUS BIT(2)
 #define LINK_UP BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR 0x05fc
 
 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;

drivers/net/ethernet/broadcom/bnxt/bnxt.c | +63

···
 	return skb;
 }
 
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+			   u32 *raw_cons, void *cmp)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct rx_cmp *rxcmp = cmp;
+	u32 tmp_raw_cons = *raw_cons;
+	u8 cmp_type, agg_bufs = 0;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+			    RX_CMP_AGG_BUFS) >>
+			   RX_CMP_AGG_BUFS_SHIFT;
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp *tpa_end = cmp;
+
+		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+			    RX_TPA_END_CMP_AGG_BUFS) >>
+			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+	}
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+			return -EBUSY;
+	}
+	*raw_cons = tmp_raw_cons;
+	return 0;
+}
+
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	if (!rxr->bnapi->in_reset) {
+		rxr->bnapi->in_reset = true;
+		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+	rxr->rx_next_cons = 0xffff;
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			   struct rx_tpa_start_cmp *tpa_start,
 			   struct rx_tpa_start_cmp_ext *tpa_start1)
···
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 	tpa_info = &rxr->rx_tpa[agg_id];
+
+	if (unlikely(cons != rxr->rx_next_cons)) {
+		bnxt_sched_reset(bp, rxr);
+		return;
+	}
 
 	prod_rx_buf->data = tpa_info->data;
 
···
 
 	rxr->rx_prod = NEXT_RX(prod);
 	cons = NEXT_RX(cons);
+	rxr->rx_next_cons = NEXT_RX(cons);
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 
 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
···
 	struct bnxt_tpa_info *tpa_info;
 	dma_addr_t mapping;
 	struct sk_buff *skb;
+
+	if (unlikely(bnapi->in_reset)) {
+		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+		if (rc < 0)
+			return ERR_PTR(-EBUSY);
+		return NULL;
+	}
 
 	tpa_info = &rxr->rx_tpa[agg_id];
 	data = tpa_info->data;
···
 	cons = rxcmp->rx_cmp_opaque;
 	rx_buf = &rxr->rx_buf_ring[cons];
 	data = rx_buf->data;
+	if (unlikely(cons != rxr->rx_next_cons)) {
+		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+		bnxt_sched_reset(bp, rxr);
+		return rc1;
+	}
 	prefetch(data);
 
 	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
···
 
 next_rx:
 	rxr->rx_prod = NEXT_RX(prod);
+	rxr->rx_next_cons = NEXT_RX(cons);
 
 next_rx_no_prod:
 	*raw_cons = tmp_raw_cons;
···
 		rxr->rx_prod = 0;
 		rxr->rx_agg_prod = 0;
 		rxr->rx_sw_agg_prod = 0;
+		rxr->rx_next_cons = 0;
 	}
 }
 }
···
 	int i;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bp->bnapi[i]->in_reset = false;
 		bnxt_enable_poll(bp->bnapi[i]);
 		napi_enable(&bp->bnapi[i]->napi);
 	}

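The two bnxt_en patches work around a hardware bug in which an RX completion can carry a bad "opaque" value. The opaque field echoes the cookie the driver wrote when it posted the buffer and should always equal the next expected consumer index, so the driver now tracks that expectation in rx_next_cons and treats a mismatch as an untrustworthy completion: the completion's aggregation buffers are drained and the ring is scheduled for reset, while the in_reset flag makes the TPA path discard further completions instead of touching recycled buffers. Condensed from the hunks above:

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);	/* drain agg buffers */
		bnxt_sched_reset(bp, rxr);			/* recover via ring reset */
	}
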
drivers/net/ethernet/broadcom/bnxt/bnxt.h | +2

···
 	u16		rx_prod;
 	u16		rx_agg_prod;
 	u16		rx_sw_agg_prod;
+	u16		rx_next_cons;
 	void __iomem	*rx_doorbell;
 	void __iomem	*rx_agg_doorbell;
 
···
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	atomic_t	poll_state;
 #endif
+	bool		in_reset;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL

drivers/net/ethernet/marvell/Kconfig | +1 -1

···
 
 config MVNETA_BM
 	tristate
-	default y if MVNETA=y && MVNETA_BM_ENABLE
+	default y if MVNETA=y && MVNETA_BM_ENABLE!=n
 	default MVNETA_BM_ENABLE
 	select HWBM
 	help

drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | +6 -2

···
 	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
 	struct pci_dev *pdev = adapter->pdev;
 	bool extended = false;
+	int ret;
 
 	prev_version = adapter->fw_version;
 	current_version = qlcnic_83xx_get_fw_version(adapter);
···
 	if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
 		extended = !qlcnic_83xx_extend_md_capab(adapter);
 
-	if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-		dev_info(&pdev->dev, "Supports FW dump capability\n");
+	ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+	if (ret)
+		return;
+
+	dev_info(&pdev->dev, "Supports FW dump capability\n");
 
 	/* Once we have minidump template with extended iSCSI dump
 	 * capability, update the minidump capture mask to 0x1f as

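The fix here is the early return: previously a failing qlcnic_fw_cmd_get_minidump_temp() only suppressed the dev_info() message, and execution carried on into code that dereferences the minidump template header, which is never populated when the fetch fails — the potential NULL dereference named in the commit title. Restated minimally:

	ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
	if (ret)
		return;		/* no template was fetched; anything past this
				 * point would dereference the missing template */
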
drivers/net/xen-netback/netback.c | +1

···
 		if (cons == end)
 			break;
 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
+		extra_count = 0; /* only the first frag can have extras */
 	} while (1);
 	queue->tx.req_cons = cons;
 }

net/ipv4/fib_semantics.c | +2

···
 			val = 65535 - 40;
 		if (type == RTAX_MTU && val > 65535 - 15)
 			val = 65535 - 15;
+		if (type == RTAX_HOPLIMIT && val > 255)
+			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
 		fi->fib_metrics[type - 1] = val;

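RTAX_HOPLIMIT feeds the IPv4 TTL / IPv6 Hop Limit header field, which is only eight bits wide, so configured values above 255 cannot be represented on the wire; both paths (this one and the IPv6 one below) now clamp at configuration time rather than truncating later. As a stand-alone illustration (hypothetical helper, not a kernel function):

	static inline unsigned int clamp_hoplimit(unsigned int val)
	{
		return val > 255 ? 255 : val;	/* TTL / Hop Limit is a u8 */
	}
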
net/ipv6/route.c | +2

···
 			} else {
 				val = nla_get_u32(nla);
 			}
+			if (type == RTAX_HOPLIMIT && val > 255)
+				val = 255;
 			if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 				goto err;
 

net/netfilter/nf_conntrack_core.c | +3 -1

···
 
 int nf_conntrack_init_net(struct net *net)
 {
+	static atomic64_t unique_id;
 	int ret = -ENOMEM;
 	int cpu;
 
···
 	if (!net->ct.stat)
 		goto err_pcpu_lists;
 
-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
+				     (u64)atomic64_inc_return(&unique_id));
 	if (!net->ct.slabname)
 		goto err_slabname;

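Why the old name leaked: slab cache names become directory names under /sys/kernel/slab/, where they are visible even without read permission on the files, and "%p" embedded the kernel address of the struct net in the name. The name only needs to be unique per network namespace, which a global monotonically increasing counter provides without encoding any address. Illustrative before/after (the address below is made up):

	/* before: /sys/kernel/slab/nf_conntrack_ffff88012a3b4c00/  <- kernel pointer
	 * after:  /sys/kernel/slab/nf_conntrack_1/                 <- opaque id
	 */
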