Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vmxnet3: Remove buf_info from device accessible structures

buf_info structures in RX & TX queues are private driver data that
do not need to be visible to the device. Although the queue
descriptor contains a physical address and length that point to these
structures, their layout is not standardized, and the device never
looks at them.

So let's allocate these structures in non-DMA-able memory, and fill
the physical address as all-ones and the length as zero in the queue
descriptor.

That should alleviate the worries raised by Martin Radev in
https://lists.osuosl.org/pipermail/intel-wired-lan/Week-of-Mon-20210104/022829.html
that a malicious vmxnet3 device could subvert SVM/TDX guarantees.

Signed-off-by: Petr Vandrovec <petr@vmware.com>
Signed-off-by: Ronak Doshi <doshir@vmware.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Ronak Doshi and committed by
Jakub Kicinski
de1da8bc 6c13d75b

+15 -33
+15 -31
drivers/net/vmxnet3/vmxnet3_drv.c
··· 451 451 tq->comp_ring.base, tq->comp_ring.basePA); 452 452 tq->comp_ring.base = NULL; 453 453 } 454 - if (tq->buf_info) { 455 - dma_free_coherent(&adapter->pdev->dev, 456 - tq->tx_ring.size * sizeof(tq->buf_info[0]), 457 - tq->buf_info, tq->buf_info_pa); 458 - tq->buf_info = NULL; 459 - } 454 + kfree(tq->buf_info); 455 + tq->buf_info = NULL; 460 456 } 461 457 462 458 ··· 501 505 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, 502 506 struct vmxnet3_adapter *adapter) 503 507 { 504 - size_t sz; 505 - 506 508 BUG_ON(tq->tx_ring.base || tq->data_ring.base || 507 509 tq->comp_ring.base || tq->buf_info); 508 510 ··· 528 534 goto err; 529 535 } 530 536 531 - sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 532 - tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, 533 - &tq->buf_info_pa, GFP_KERNEL); 537 + tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]), 538 + GFP_KERNEL, 539 + dev_to_node(&adapter->pdev->dev)); 534 540 if (!tq->buf_info) 535 541 goto err; 536 542 ··· 1731 1737 rq->comp_ring.base = NULL; 1732 1738 } 1733 1739 1734 - if (rq->buf_info[0]) { 1735 - size_t sz = sizeof(struct vmxnet3_rx_buf_info) * 1736 - (rq->rx_ring[0].size + rq->rx_ring[1].size); 1737 - dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1738 - rq->buf_info_pa); 1739 - rq->buf_info[0] = rq->buf_info[1] = NULL; 1740 - } 1740 + kfree(rq->buf_info[0]); 1741 + rq->buf_info[0] = NULL; 1742 + rq->buf_info[1] = NULL; 1741 1743 } 1742 1744 1743 1745 static void ··· 1873 1883 goto err; 1874 1884 } 1875 1885 1876 - sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1877 - rq->rx_ring[1].size); 1878 - bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1879 - GFP_KERNEL); 1886 + bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size, 1887 + sizeof(rq->buf_info[0][0]), GFP_KERNEL, 1888 + dev_to_node(&adapter->pdev->dev)); 1880 1889 if (!bi) 1881 1890 goto err; 1882 1891 ··· 2511 2522 tqc->txRingBasePA = 
cpu_to_le64(tq->tx_ring.basePA); 2512 2523 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); 2513 2524 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); 2514 - tqc->ddPA = cpu_to_le64(tq->buf_info_pa); 2525 + tqc->ddPA = cpu_to_le64(~0ULL); 2515 2526 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 2516 2527 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 2517 2528 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); 2518 2529 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 2519 - tqc->ddLen = cpu_to_le32( 2520 - sizeof(struct vmxnet3_tx_buf_info) * 2521 - tqc->txRingSize); 2530 + tqc->ddLen = cpu_to_le32(0); 2522 2531 tqc->intrIdx = tq->comp_ring.intr_idx; 2523 2532 } 2524 2533 ··· 2528 2541 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); 2529 2542 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); 2530 2543 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); 2531 - rqc->ddPA = cpu_to_le64(rq->buf_info_pa); 2544 + rqc->ddPA = cpu_to_le64(~0ULL); 2532 2545 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); 2533 2546 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); 2534 2547 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); 2535 - rqc->ddLen = cpu_to_le32( 2536 - sizeof(struct vmxnet3_rx_buf_info) * 2537 - (rqc->rxRingSize[0] + 2538 - rqc->rxRingSize[1])); 2548 + rqc->ddLen = cpu_to_le32(0); 2539 2549 rqc->intrIdx = rq->comp_ring.intr_idx; 2540 2550 if (VMXNET3_VERSION_GE_3(adapter)) { 2541 2551 rqc->rxDataRingBasePA =
-2
drivers/net/vmxnet3/vmxnet3_int.h
··· 240 240 spinlock_t tx_lock; 241 241 struct vmxnet3_cmd_ring tx_ring; 242 242 struct vmxnet3_tx_buf_info *buf_info; 243 - dma_addr_t buf_info_pa; 244 243 struct vmxnet3_tx_data_ring data_ring; 245 244 struct vmxnet3_comp_ring comp_ring; 246 245 struct Vmxnet3_TxQueueCtrl *shared; ··· 297 298 u32 qid2; /* rqID in RCD for buffer from 2nd ring */ 298 299 u32 dataRingQid; /* rqID in RCD for buffer from data ring */ 299 300 struct vmxnet3_rx_buf_info *buf_info[2]; 300 - dma_addr_t buf_info_pa; 301 301 struct Vmxnet3_RxQueueCtrl *shared; 302 302 struct vmxnet3_rq_driver_stats stats; 303 303 } __attribute__((__aligned__(SMP_CACHE_BYTES)));