Merge tag 'ntb-4.6' of git://github.com/jonmason/ntb

Pull NTB bug fixes from Jon Mason:
"NTB bug fixes for tasklet from spinning forever, link errors,
translation window setup, NULL ptr dereference, and ntb-perf errors.

Also, a modification to the driver API that makes _addr functions
optional"

* tag 'ntb-4.6' of git://github.com/jonmason/ntb:
NTB: Remove _addr functions from ntb_hw_amd
NTB: Make _addr functions optional in the API
NTB: Fix incorrect clean up routine in ntb_perf
NTB: Fix incorrect return check in ntb_perf
ntb: fix possible NULL dereference
ntb: add missing setup of translation window
ntb: stop link work when we do not have memory
ntb: stop tasklet from spinning forever during shutdown.
ntb: perf test: fix address space confusion

+79 -70
-30
drivers/ntb/hw/amd/ntb_hw_amd.c
···
         return 0;
 }

-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
-                                phys_addr_t *db_addr,
-                                resource_size_t *db_size)
-{
-        struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
-        if (db_addr)
-                *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
-        if (db_size)
-                *db_size = sizeof(u32);
-
-        return 0;
-}
-
 static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
 {
         struct amd_ntb_dev *ndev = ntb_ndev(ntb);
···
         offset = ndev->self_spad + (idx << 2);
         writel(val, mmio + AMD_SPAD_OFFSET + offset);

-        return 0;
-}
-
-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
-                                  phys_addr_t *spad_addr)
-{
-        struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
-        if (idx < 0 || idx >= ndev->spad_count)
-                return -EINVAL;
-
-        if (spad_addr)
-                *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
-                                           ndev->peer_spad + (idx << 2));
         return 0;
 }
···
         .db_clear = amd_ntb_db_clear,
         .db_set_mask = amd_ntb_db_set_mask,
         .db_clear_mask = amd_ntb_db_clear_mask,
-        .peer_db_addr = amd_ntb_peer_db_addr,
         .peer_db_set = amd_ntb_peer_db_set,
         .spad_count = amd_ntb_spad_count,
         .spad_read = amd_ntb_spad_read,
         .spad_write = amd_ntb_spad_write,
-        .peer_spad_addr = amd_ntb_peer_spad_addr,
         .peer_spad_read = amd_ntb_peer_spad_read,
         .peer_spad_write = amd_ntb_peer_spad_write,
 };
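Note: the helpers removed above returned the ioremap()ed peer_mmio mapping cast to phys_addr_t, i.e. a CPU virtual address, whereas the _addr ops are documented as returning a physical register address that another device (such as a DMA engine) could write to. With the ops now optional in the API, the driver can simply drop them. A driver that did want to provide peer_db_addr would typically report the BAR physical address instead; a hedged sketch under that assumption, using a hypothetical DEMO_DB_OFFSET in BAR 0 (this is illustrative only, not part of ntb_hw_amd):

/* Sketch only -- hypothetical driver code, not part of ntb_hw_amd. */
#include <linux/pci.h>
#include <linux/ntb.h>

#define DEMO_DB_OFFSET 0x90   /* hypothetical doorbell offset in BAR 0 */

static int demo_peer_db_addr(struct ntb_dev *ntb,
                             phys_addr_t *db_addr,
                             resource_size_t *db_size)
{
        struct pci_dev *pdev = ntb->pdev;

        /* Report the BAR's physical address plus the register offset,
         * not the ioremap()ed virtual mapping.
         */
        if (db_addr)
                *db_addr = pci_resource_start(pdev, 0) + DEMO_DB_OFFSET;
        if (db_size)
                *db_size = sizeof(u32);

        return 0;
}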
+24 -7
drivers/ntb/ntb_transport.c
···

         bool client_ready;
         bool link_is_up;
+        bool active;

         u8 qp_num;      /* Only 64 QP's are allowed. 0-63 */
         u64 qp_bit;
···
 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
 {
         qp->link_is_up = false;
+        qp->active = false;

         qp->tx_index = 0;
         qp->rx_index = 0;
···
         struct pci_dev *pdev = ndev->pdev;
         resource_size_t size;
         u32 val;
-        int rc, i, spad;
+        int rc = 0, i, spad;

         /* send the local info, in the opposite order of the way we read it */
         for (i = 0; i < nt->mw_count; i++) {
···
 out1:
         for (i = 0; i < nt->mw_count; i++)
                 ntb_free_mw(nt, i);
+
+        /* if there's an actual failure, we should just bail */
+        if (rc < 0) {
+                ntb_link_disable(ndev);
+                return;
+        }
+
 out:
         if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                 schedule_delayed_work(&nt->link_work,
···
         if (val & BIT(qp->qp_num)) {
                 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                 qp->link_is_up = true;
+                qp->active = true;

                 if (qp->event_handler)
                         qp->event_handler(qp->cb_data, qp->link_is_up);

-                tasklet_schedule(&qp->rxc_db_work);
+                if (qp->active)
+                        tasklet_schedule(&qp->rxc_db_work);
         } else if (nt->link_is_up)
                 schedule_delayed_work(&qp->link_work,
                                       msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
···

         if (i == qp->rx_max_entry) {
                 /* there is more work to do */
-                tasklet_schedule(&qp->rxc_db_work);
+                if (qp->active)
+                        tasklet_schedule(&qp->rxc_db_work);
         } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
                 /* the doorbell bit is set: clear it */
                 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
···
                  * ntb_process_rxc and clearing the doorbell bit:
                  * there might be some more work to do.
                  */
-                tasklet_schedule(&qp->rxc_db_work);
+                if (qp->active)
+                        tasklet_schedule(&qp->rxc_db_work);
         }
 }
···

         pdev = qp->ndev->pdev;

+        qp->active = false;
+
         if (qp->tx_dma_chan) {
                 struct dma_chan *chan = qp->tx_dma_chan;
                 /* Putting the dma_chan to NULL will force any new traffic to be
···
         qp_bit = BIT_ULL(qp->qp_num);

         ntb_db_set_mask(qp->ndev, qp_bit);
-        tasklet_disable(&qp->rxc_db_work);
+        tasklet_kill(&qp->rxc_db_work);

         cancel_delayed_work_sync(&qp->link_work);
···

         ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

-        tasklet_schedule(&qp->rxc_db_work);
+        if (qp->active)
+                tasklet_schedule(&qp->rxc_db_work);

         return 0;
 }
···
                 qp_num = __ffs(db_bits);
                 qp = &nt->qp_vec[qp_num];

-                tasklet_schedule(&qp->rxc_db_work);
+                if (qp->active)
+                        tasklet_schedule(&qp->rxc_db_work);

                 db_bits &= ~BIT_ULL(qp_num);
         }
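Note: the transport change above is a common teardown pattern. An active flag gates every tasklet_schedule() call, and shutdown uses tasklet_kill(), which waits for a running tasklet to finish, rather than tasklet_disable(), which leaves a still-scheduled tasklet endlessly re-queued by the softirq loop (the "spinning forever" in the shortlog). A minimal, self-contained sketch of the same idea, using hypothetical names (demo_qp, demo_rx_work) rather than the ntb_transport structures:

/* Sketch only -- hypothetical "demo" driver, not the ntb_transport code. */
#include <linux/interrupt.h>

struct demo_qp {
        struct tasklet_struct rx_work;
        bool active;                    /* set while the link is up */
};

static void demo_rx_work(unsigned long data)
{
        struct demo_qp *qp = (struct demo_qp *)data;

        /* ... drain the receive ring ... */

        /* Only re-arm ourselves while the queue is still active. */
        if (qp->active)
                tasklet_schedule(&qp->rx_work);
}

static void demo_qp_start(struct demo_qp *qp)
{
        tasklet_init(&qp->rx_work, demo_rx_work, (unsigned long)qp);
        qp->active = true;
        tasklet_schedule(&qp->rx_work);
}

static void demo_qp_stop(struct demo_qp *qp)
{
        /* Stop new scheduling first, then wait for any running instance.
         * tasklet_kill() waits; tasklet_disable() would leave a pending
         * tasklet re-raised by the softirq forever.
         */
        qp->active = false;
        tasklet_kill(&qp->rx_work);
}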
+47 -31
drivers/ntb/test/ntb_perf.c
···
         atomic_dec(&pctx->dma_sync);
 }

-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
                          char *src, size_t size)
 {
         struct perf_ctx *perf = pctx->perf;
···
         dma_cookie_t cookie;
         size_t src_off, dst_off;
         struct perf_mw *mw = &perf->mw;
-        u64 vbase, dst_vaddr;
+        void __iomem *vbase;
+        void __iomem *dst_vaddr;
         dma_addr_t dst_phys;
         int retries = 0;
···
         }

         device = chan->device;
-        src_off = (size_t)src & ~PAGE_MASK;
-        dst_off = (size_t)dst & ~PAGE_MASK;
+        src_off = (uintptr_t)src & ~PAGE_MASK;
+        dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

         if (!is_dma_copy_aligned(device, src_off, dst_off, size))
                 return -ENODEV;

-        vbase = (u64)(u64 *)mw->vbase;
-        dst_vaddr = (u64)(u64 *)dst;
+        vbase = mw->vbase;
+        dst_vaddr = dst;
         dst_phys = mw->phys_addr + (dst_vaddr - vbase);

         unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
···
         return 0;
 }

-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
+static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
                           u64 buf_size, u64 win_size, u64 total)
 {
         int chunks, total_chunks, i;
         int copied_chunks = 0;
         u64 copied = 0, result;
-        char *tmp = dst;
+        char __iomem *tmp = dst;
         u64 perf, diff_us;
         ktime_t kstart, kstop, kdiff;
···
         struct perf_ctx *perf = pctx->perf;
         struct pci_dev *pdev = perf->ntb->pdev;
         struct perf_mw *mw = &perf->mw;
-        char *dst;
+        char __iomem *dst;
         u64 win_size, buf_size, total;
         void *src;
         int rc, node, i;
···
         if (buf_size > MAX_TEST_SIZE)
                 buf_size = MAX_TEST_SIZE;

-        dst = (char *)mw->vbase;
+        dst = (char __iomem *)mw->vbase;

         atomic_inc(&perf->tsync);
         while (atomic_read(&perf->tsync) != perf->perf_threads)
···
 {
         struct perf_mw *mw = &perf->mw;
         size_t xlat_size, buf_size;
+        int rc;

         if (!size)
                 return -EINVAL;
···
         if (!mw->virt_addr) {
                 mw->xlat_size = 0;
                 mw->buf_size = 0;
+        }
+
+        rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
+        if (rc) {
+                dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
+                perf_free_mw(perf);
+                return -EIO;
         }

         return 0;
···
                 return 0;

         buf = kmalloc(64, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
         out_offset = snprintf(buf, 64, "%d\n", perf->run);
         ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
         kfree(buf);

         return ret;
+}
+
+static void threads_cleanup(struct perf_ctx *perf)
+{
+        struct pthr_ctx *pctx;
+        int i;
+
+        perf->run = false;
+        for (i = 0; i < MAX_THREADS; i++) {
+                pctx = &perf->pthr_ctx[i];
+                if (pctx->thread) {
+                        kthread_stop(pctx->thread);
+                        pctx->thread = NULL;
+                }
+        }
 }

 static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
···
         if (atomic_read(&perf->tsync) == 0)
                 perf->run = false;

-        if (perf->run) {
-                /* lets stop the threads */
-                perf->run = false;
-                for (i = 0; i < MAX_THREADS; i++) {
-                        if (perf->pthr_ctx[i].thread) {
-                                kthread_stop(perf->pthr_ctx[i].thread);
-                                perf->pthr_ctx[i].thread = NULL;
-                        } else
-                                break;
-                }
-        } else {
+        if (perf->run)
+                threads_cleanup(perf);
+        else {
                 perf->run = true;

                 if (perf->perf_threads > MAX_THREADS) {
···
                         kthread_create_on_node(ntb_perf_thread,
                                                (void *)pctx,
                                                node, "ntb_perf %d", i);
-                        if (pctx->thread)
+                        if (IS_ERR(pctx->thread)) {
+                                pctx->thread = NULL;
+                                goto err;
+                        } else
                                 wake_up_process(pctx->thread);
-                        else {
-                                perf->run = false;
-                                for (i = 0; i < MAX_THREADS; i++) {
-                                        if (pctx->thread) {
-                                                kthread_stop(pctx->thread);
-                                                pctx->thread = NULL;
-                                        }
-                                }
-                        }

                         if (perf->run == false)
                                 return -ENXIO;
···
         }

         return count;
+
+err:
+        threads_cleanup(perf);
+        return -ENXIO;
 }

 static const struct file_operations ntb_perf_debugfs_run = {
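Note: much of the ntb_perf churn above is sparse address-space annotation. The copy destination is a mapped memory window, so its pointers are __iomem and page-offset arithmetic goes through uintptr_t rather than a cast through u64; the remaining hunks add kmalloc/kthread error handling (IS_ERR on kthread_create_on_node) and the missing ntb_mw_set_trans() call. A minimal sketch of the __iomem convention, with hypothetical names (demo_mw, demo_window_phys), not the ntb_perf code itself:

/* Sketch only -- illustrates __iomem handling, not the ntb_perf code. */
#include <linux/io.h>
#include <linux/types.h>

struct demo_mw {
        phys_addr_t phys_addr;          /* bus/physical base of the window */
        void __iomem *vbase;            /* ioremap()ed CPU mapping */
};

/* Translate a pointer inside the mapped window back to a physical address
 * that can be handed to a DMA engine.  Offsets are computed between
 * __iomem pointers (or via uintptr_t), never by casting through u64.
 */
static dma_addr_t demo_window_phys(struct demo_mw *mw, void __iomem *dst)
{
        return mw->phys_addr + (dst - mw->vbase);
}

/* CPU-side writes into the window go through the MMIO copy helpers. */
static void demo_window_fill(void __iomem *dst, const void *src, size_t len)
{
        memcpy_toio(dst, src, len);
}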
+8 -2
include/linux/ntb.h
···
         /* ops->db_read_mask && */
         ops->db_set_mask &&
         ops->db_clear_mask &&
-        ops->peer_db_addr &&
+        /* ops->peer_db_addr && */
         /* ops->peer_db_read && */
         ops->peer_db_set &&
         /* ops->peer_db_clear && */
···
         ops->spad_count &&
         ops->spad_read &&
         ops->spad_write &&
-        ops->peer_spad_addr &&
+        /* ops->peer_spad_addr && */
         /* ops->peer_spad_read && */
         ops->peer_spad_write &&
         1;
···
                                    phys_addr_t *db_addr,
                                    resource_size_t *db_size)
 {
+        if (!ntb->ops->peer_db_addr)
+                return -EINVAL;
+
         return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
 }
···
 static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
                                      phys_addr_t *spad_addr)
 {
+        if (!ntb->ops->peer_spad_addr)
+                return -EINVAL;
+
         return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
 }
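Note: with peer_db_addr and peer_spad_addr now optional, a client cannot assume the peer register addresses are available and must be prepared for -EINVAL from the wrappers above. A hedged sketch of how a hypothetical client (demo_ring_peer_db is not an existing function) might fall back to the mandatory peer_db_set op:

/* Sketch only -- hypothetical client of the API shown above. */
#include <linux/ntb.h>

static int demo_ring_peer_db(struct ntb_dev *ntb, u64 db_bits)
{
        phys_addr_t db_addr;
        resource_size_t db_size;
        int rc;

        rc = ntb_peer_db_addr(ntb, &db_addr, &db_size);
        if (rc) {
                /* -EINVAL now also means the driver (e.g. ntb_hw_amd)
                 * simply has no peer_db_addr op: fall back to the
                 * mandatory peer_db_set op instead of treating this
                 * as a fatal error.
                 */
                return ntb_peer_db_set(ntb, db_bits);
        }

        /* Here db_addr/db_size could be programmed into a DMA engine so
         * the doorbell rings when a transfer completes; for brevity this
         * sketch just rings it from the CPU as well.
         */
        return ntb_peer_db_set(ntb, db_bits);
}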