Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB: Fix world-writable child interface control sysfs attributes
IB/qib: Clean up properly if qib_init() fails
IB/qib: Completion queue callback needs to be single threaded
IB/qib: Update 7322 serdes tables
IB/qib: Clear 6120 hardware error register
IB/qib: Clear eager buffer memory for each new process
IB/qib: Mask hardware error during link reset
IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
RDMA/cxgb4: Derive smac_idx from port viid
RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
RDMA/cxgb4: Don't call abort_connection() for active connect failures
RDMA/cxgb4: Use the DMA state API instead of the pci equivalents

15 files changed, 148 insertions(+), 66 deletions(-)
+7 -5
drivers/infiniband/hw/cxgb4/cm.c
···
 	goto err;
 	goto out;
 err:
-	abort_connection(ep, skb, GFP_KERNEL);
+	state_set(&ep->com, ABORTING);
+	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
 	return;
···
 			pdev, 0);
 	mtu = pdev->mtu;
 	tx_chan = cxgb4_port_chan(pdev);
-	smac_idx = tx_chan << 1;
+	smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 	step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 	txq_idx = cxgb4_port_idx(pdev) * step;
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
···
 			dst->neighbour->dev, 0);
 	mtu = dst_mtu(dst);
 	tx_chan = cxgb4_port_chan(dst->neighbour->dev);
-	smac_idx = tx_chan << 1;
+	smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 	step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 	txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
···
 			pdev, 0);
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 			ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
···
 			ep->dst->neighbour->dev, 0);
 		ep->mtu = dst_mtu(ep->dst);
 		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
+				0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 			ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
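Note: the smac_idx change above derives the source-MAC table index from the port's virtual interface ID rather than the TX channel. A minimal sketch of the new derivation, with a hypothetical helper name (the driver open-codes this at each call site, as the diff shows):

	/* Hypothetical helper, not in the driver: the low 7 bits of the
	 * VIID select the source-MAC table entry, shifted by 1 because
	 * there are two table entries per interface. */
	static inline unsigned int smac_idx_from_viid(unsigned int viid)
	{
		return (viid & 0x7F) << 1;
	}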
+23 -8
drivers/infiniband/hw/cxgb4/cq.c
···
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
 	return ret;
 }
···
 		ret = -ENOMEM;
 		goto err3;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, cq->memsize);
 
 	/* build fw_ri_res_wr */
···
 	return 0;
 err4:
 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 err3:
 	kfree(cq->sw_queue);
 err2:
···
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
 	int ret;
-	size_t memsize;
+	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
···
 	 * entries must be multiple of 16 for HW.
 	 */
 	entries = roundup(entries, 16);
-	memsize = entries * sizeof *chp->cq.queue;
+
+	/*
+	 * Make actual HW queue 2x to avoid cdix_inc overflows.
+	 */
+	hwentries = entries * 2;
+
+	/*
+	 * Make HW queue at least 64 entries so GTS updates aren't too
+	 * frequent.
+	 */
+	if (hwentries < 64)
+		hwentries = 64;
+
+	memsize = hwentries * sizeof *chp->cq.queue;
 
 	/*
 	 * memsize must be a multiple of the page size if its a user cq.
 	 */
-	if (ucontext)
+	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
-	chp->cq.size = entries;
+		hwentries = memsize / sizeof *chp->cq.queue;
+	}
+	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
···
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
-	chp->ibcq.cqe = chp->cq.size - 1;
+	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
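Note: the cq.c change above decouples the user-visible CQ depth from the hardware queue depth to avoid false GTS CIDX_INC overflows. A small sketch of the sizing rule, assuming a hypothetical standalone helper that mirrors the inline logic:

	/* Hypothetical helper mirroring the inline sizing logic above. */
	static size_t cq_hw_depth(int entries)
	{
		size_t hwentries = entries * 2;	/* 2x to avoid CIDX_INC overflow */

		if (hwentries < 64)		/* keep GTS updates infrequent */
			hwentries = 64;
		return hwentries;
	}

The cqe count reported back through ibcq.cqe stays based on the requested entries, so only the hardware sees the padded queue.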
+1 -1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
···
 
 struct c4iw_fr_page_list {
 	struct ib_fast_reg_page_list ibpl;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	dma_addr_t dma_addr;
 	struct c4iw_dev *dev;
 	int size;
+2 -2
drivers/infiniband/hw/cxgb4/mem.c
···
 	if (!c4pl)
 		return ERR_PTR(-ENOMEM);
 
-	pci_unmap_addr_set(c4pl, mapping, dma_addr);
+	dma_unmap_addr_set(c4pl, mapping, dma_addr);
 	c4pl->dma_addr = dma_addr;
 	c4pl->dev = dev;
 	c4pl->size = size;
···
 	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
 	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-			  c4pl, pci_unmap_addr(c4pl, mapping));
+			  c4pl, dma_unmap_addr(c4pl, mapping));
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
+6 -6
drivers/infiniband/hw/cxgb4/qp.c
···
 	 */
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 	kfree(wq->rq.sw_rq);
 	kfree(wq->sq.sw_sq);
···
 	if (!wq->sq.queue)
 		goto err5;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
-	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
 	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
 					  wq->rq.memsize, &(wq->rq.dma_addr),
···
 		wq->rq.queue,
 		(unsigned long long)virt_to_phys(wq->rq.queue));
 	memset(wq->rq.queue, 0, wq->rq.memsize);
-	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 
 	wq->db = rdev->lldi.db_reg;
 	wq->gts = rdev->lldi.gts_reg;
···
 err7:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 err6:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 err5:
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
+3 -3
drivers/infiniband/hw/cxgb4/t4.h
···
 struct t4_sq {
 	union t4_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swsqe *sw_sq;
 	struct t4_swsqe *oldest_read;
 	u64 udb;
···
 struct t4_rq {
 	union t4_recv_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swrqe *sw_rq;
 	u64 udb;
 	size_t memsize;
···
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_cqe *sw_queue;
 	void __iomem *gts;
 	struct c4iw_rdev *rdev;
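Note: the four cxgb4 files above all make the same mechanical conversion from the pci_unmap_addr family to the generic DMA state API. A minimal self-contained sketch of the pattern, using a hypothetical structure name:

	#include <linux/dma-mapping.h>

	struct example_queue {			/* hypothetical structure */
		void *queue;
		dma_addr_t dma_addr;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* may compile away entirely */
	};

	static void example_teardown(struct device *dev, struct example_queue *q,
				     size_t memsize)
	{
		/* the handle is recorded at allocation time ... */
		dma_unmap_addr_set(q, mapping, q->dma_addr);
		/* ... and read back when freeing */
		dma_free_coherent(dev, memsize, q->queue,
				  dma_unmap_addr(q, mapping));
	}

Unlike the PCI-specific macros, these work with any struct device, which is why the lldi.pdev->dev pointer can be passed straight through.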
+1
drivers/infiniband/hw/qib/qib.h
···
 	void __iomem *piobase;
 	/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
 	u64 __iomem *userbase;
+	void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
 	/*
 	 * points to area where PIOavail registers will be DMA'ed.
 	 * Has to be on a page of it's own, because the page will be
+24 -24
drivers/infiniband/hw/qib/qib_7322_regs.h
···
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
 #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
···
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
 #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
···
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
 #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
···
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
 
 #define QIB_7322_EXTStatus_OFFS 0xC0
 #define QIB_7322_EXTStatus_DEF 0x000000000000X000
+15 -4
drivers/infiniband/hw/qib/qib_diag.c
···
 	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
 	u32 __iomem *map = NULL;
 	u32 cnt = 0;
+	u32 tot4k, offs4k;
 
 	/* First, simplest case, offset is within the first map. */
 	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
···
 	if (dd->userbase) {
 		/* If user regs mapped, they are after send, so set limit. */
 		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-		snd_lim = dd->uregbase;
+		if (!dd->piovl15base)
+			snd_lim = dd->uregbase;
 		krb32 = (u32 __iomem *)dd->userbase;
 		if (offset >= dd->uregbase && offset < ulim) {
 			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
···
 	/* If 4k buffers exist, account for them by bumping
 	 * appropriate limit.
 	 */
+	tot4k = dd->piobcnt4k * dd->align4k;
+	offs4k = dd->piobufbase >> 32;
 	if (dd->piobcnt4k) {
-		u32 tot4k = dd->piobcnt4k * dd->align4k;
-		u32 offs4k = dd->piobufbase >> 32;
 		if (snd_bottom > offs4k)
 			snd_bottom = offs4k;
 		else {
 			/* 4k above 2k. Bump snd_lim, if needed*/
-			if (!dd->userbase)
+			if (!dd->userbase || dd->piovl15base)
 				snd_lim = offs4k + tot4k;
 		}
 	}
···
 		offset -= snd_bottom;
 		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
 		cnt = snd_lim - offset;
+	}
+
+	if (!map && offs4k && dd->piovl15base) {
+		snd_lim = offs4k + tot4k + 2 * dd->align4k;
+		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
+			map = (u32 __iomem *)dd->piovl15base +
+				((offset - (offs4k + tot4k)) / sizeof(u32));
+			cnt = snd_lim - offset;
+		}
 	}
 
 mapped:
+1 -2
drivers/infiniband/hw/qib/qib_iba6120.c
···
 	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
 	if (hwstat) {
 		/* should just have PLL, clear all set, in an case */
-		if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
-			qib_write_kreg(dd, kr_hwerrclear, hwstat);
+		qib_write_kreg(dd, kr_hwerrclear, hwstat);
 		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
 	}
+36 -7
drivers/infiniband/hw/qib/qib_iba7322.c
···
 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
···
 	HWE_AUTO_P(SDmaMemReadErr, 1),
 	HWE_AUTO_P(SDmaMemReadErr, 0),
 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
-	HWE_AUTO_P(statusValidNoEop, 1),
-	HWE_AUTO_P(statusValidNoEop, 0),
+	HWE_AUTO(statusValidNoEop),
 	HWE_AUTO(LATriggered),
 	{ .mask = 0 }
 };
···
 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
 
 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+	qib_write_kreg(dd, kr_hwerrmask,
+		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
 			    ppd->cpspec->ibcctrl_a &
 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
···
 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);
+	qib_write_kreg(dd, kr_hwerrclear,
+		       SYM_MASK(HwErrClear, statusValidNoEopClear));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
 }
 
 /*
···
 		if (ppd->port != port || !ppd->link_speed_supported)
 			continue;
 		ppd->cpspec->no_eep = val;
+		if (seth1)
+			ppd->cpspec->h1_val = h1;
 		/* now change the IBC and serdes, overriding generic */
 		init_txdds_table(ppd, 1);
 		any++;
···
 	 * the "cable info" setup here. Can be overridden
 	 * in adapter-specific routines.
 	 */
-	if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
-		if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
-			qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+	if (!(dd->flags & QIB_HAS_QSFP)) {
+		if (!IS_QMH(dd) && !IS_QME(dd))
+			qib_devinfo(dd->pcidev, "IB%u:%u: "
 				"Unknown mezzanine card type\n",
 				dd->unit, ppd->port);
 		cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
···
 	qib_set_ctxtcnt(dd);
 
 	if (qib_wc_pat) {
-		ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+		resource_size_t vl15off;
+		/*
+		 * We do not set WC on the VL15 buffers to avoid
+		 * a rare problem with unaligned writes from
+		 * interrupt-flushed store buffers, so we need
+		 * to map those separately here. We can't solve
+		 * this for the rarely used mtrr case.
+		 */
+		ret = init_chip_wc_pat(dd, 0);
 		if (ret)
+			goto bail;
+
+		/* vl15 buffers start just after the 4k buffers */
+		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+			  dd->piobcnt4k * dd->align4k;
+		dd->piovl15base = ioremap_nocache(vl15off,
+						  NUM_VL15_BUFS * dd->align4k);
+		if (!dd->piovl15base)
 			goto bail;
 	}
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
···
 	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
 	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
 	{ 0, 0, 0, 11 },	/* QME7342 backplane settings */
+	{ 0, 0, 0, 3 },	/* QMH7342 backplane settings */
+	{ 0, 0, 0, 4 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
···
 	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
 	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
 	{ 0, 0, 0, 13 },	/* QME7342 backplane settings */
+	{ 0, 0, 0, 9 },	/* QMH7342 backplane settings */
+	{ 0, 0, 0, 10 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
···
 	{ 0, 1, 12, 6 },	/* QME7342 backplane setting */
 	{ 0, 1, 12, 7 },	/* QME7342 backplane setting */
 	{ 0, 1, 12, 8 },	/* QME7342 backplane setting */
+	{ 0, 1, 0, 10 },	/* QMH7342 backplane settings */
+	{ 0, 1, 0, 12 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+20 -1
drivers/infiniband/hw/qib/qib_init.c
···
 		goto bail_dev;
 	}
 
-	qib_cq_wq = create_workqueue("qib_cq");
+	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
 		goto bail_wq;
···
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
+		flush_scheduled_work();
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
+		if (qib_mini_init)
+			goto bail;
+		if (!j) {
+			(void) qibfs_remove(dd);
+			qib_device_remove(dd);
+		}
+		if (!ret)
+			qib_unregister_ib_device(dd);
+		qib_postinit_cleanup(dd);
 		if (initfail)
 			ret = initfail;
 		goto bail;
···
 		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
 		unsigned i;
 
+		/* clear for security and sanity on each use */
+		memset(rcd->rcvegrbuf[chunk], 0, size);
+
 		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
 			dd->f_put_tid(dd, e + egroff +
 				      (u64 __iomem *)
···
 	return -ENOMEM;
 }
 
+/*
+ * Note: Changes to this routine should be mirrored
+ * for the diagnostics routine qib_remap_ioaddr32().
+ * There is also related code for VL15 buffers in qib_init_7322_variables().
+ * The teardown code that unmaps is in qib_pcie_ddcleanup()
+ */
 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
 {
 	u64 __iomem *qib_kregbase = NULL;
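Note: the switch to create_singlethread_workqueue() above serializes all CQ completion callbacks on one kernel thread. A minimal sketch of the behavior this buys, with hypothetical names (not from the driver):

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_cq_wq;	/* hypothetical */

	static void example_comp_handler(struct work_struct *work)
	{
		/* never runs concurrently with other works queued on
		 * example_cq_wq: the queue has exactly one worker thread */
	}

	static DECLARE_WORK(example_comp_work, example_comp_handler);

	static int example_init(void)
	{
		example_cq_wq = create_singlethread_workqueue("example_cq");
		return example_cq_wq ? 0 : -ENOMEM;
	}

A default multithreaded workqueue could run two queued completion handlers on different CPUs at once, which is exactly what the shortlog entry says must not happen here.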
+2
drivers/infiniband/hw/qib/qib_pcie.c
···
 	iounmap(dd->piobase);
 	if (dd->userbase)
 		iounmap(dd->userbase);
+	if (dd->piovl15base)
+		iounmap(dd->piovl15base);
 
 	pci_disable_device(dd->pcidev);
 	pci_release_regions(dd->pcidev);
+5 -1
drivers/infiniband/hw/qib/qib_tx.c
···
 		if (i < dd->piobcnt2k)
 			buf = (u32 __iomem *)(dd->pio2kbase +
 				i * dd->palign);
-		else
+		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
 			buf = (u32 __iomem *)(dd->pio4kbase +
 				(i - dd->piobcnt2k) * dd->align4k);
+		else
+			buf = (u32 __iomem *)(dd->piovl15base +
+				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
+				dd->align4k);
 		if (pbufnum)
 			*pbufnum = i;
 		dd->upd_pio_shadow = 0;
+2 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
 
 	return ret ? ret : count;
 }
-static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
+static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
 
 static ssize_t delete_child(struct device *dev,
 			    struct device_attribute *attr,
···
 	return ret ? ret : count;
 
 }
-static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
+static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
 
 int ipoib_add_pkey_attr(struct net_device *dev)
 {
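Note: S_IWUGO is write permission for user, group, and others (mode 0222), so these control attributes were world-writable; S_IWUSR (0200) restricts writes to the file owner, normally root. A minimal sketch of an owner-write-only attribute, with a hypothetical name:

	/* Hypothetical write-only sysfs attribute; 0200 == S_IWUSR. */
	static ssize_t set_example(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	{
		return count;	/* accept and ignore, for illustration */
	}
	static DEVICE_ATTR(example, S_IWUSR, NULL, set_example);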