Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/ipath: Add locking for interrupt use of ipath_pd contexts vs free

Fixes timing race resulting in panic. Not a performance sensitive path.

Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Dave Olson and committed by Roland Dreier
3d089098 1bf7724e

+49 -31
+30 -19
drivers/infiniband/hw/ipath/ipath_driver.c
··· 661 661 static void __devexit cleanup_device(struct ipath_devdata *dd) 662 662 { 663 663 int port; 664 + struct ipath_portdata **tmp; 665 + unsigned long flags; 664 666 665 667 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { 666 668 /* can't do anything more with chip; needs re-init */ ··· 744 742 745 743 /* 746 744 * free any resources still in use (usually just kernel ports) 747 - * at unload; we do for portcnt, not cfgports, because cfgports 748 - * could have changed while we were loaded. 745 + * at unload; we do for portcnt, because that's what we allocate. 746 + * We acquire lock to be really paranoid that ipath_pd isn't being 747 + * accessed from some interrupt-related code (that should not happen, 748 + * but best to be sure). 749 749 */ 750 + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); 751 + tmp = dd->ipath_pd; 752 + dd->ipath_pd = NULL; 753 + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 750 754 for (port = 0; port < dd->ipath_portcnt; port++) { 751 - struct ipath_portdata *pd = dd->ipath_pd[port]; 752 - dd->ipath_pd[port] = NULL; 755 + struct ipath_portdata *pd = tmp[port]; 756 + tmp[port] = NULL; /* debugging paranoia */ 753 757 ipath_free_pddata(dd, pd); 754 758 } 755 - kfree(dd->ipath_pd); 756 - /* 757 - * debuggability, in case some cleanup path tries to use it 758 - * after this 759 - */ 760 - dd->ipath_pd = NULL; 759 + kfree(tmp); 761 760 } 762 761 763 762 static void __devexit ipath_remove_one(struct pci_dev *pdev) ··· 2589 2586 { 2590 2587 int ret, i; 2591 2588 struct ipath_devdata *dd = ipath_lookup(unit); 2589 + unsigned long flags; 2592 2590 2593 2591 if (!dd) { 2594 2592 ret = -ENODEV; ··· 2615 2611 goto bail; 2616 2612 } 2617 2613 2614 + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); 2618 2615 if (dd->ipath_pd) 2619 2616 for (i = 1; i < dd->ipath_cfgports; i++) { 2620 - if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { 2621 - ipath_dbg("unit %u port %d is in use " 2622 - "(PID %u cmd %s), can't reset\n", 2623 - unit, i, 2624 - pid_nr(dd->ipath_pd[i]->port_pid), 2625 - dd->ipath_pd[i]->port_comm); 2626 - ret = -EBUSY; 2627 - goto bail; 2628 - } 2617 + if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) 2618 + continue; 2619 + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 2620 + ipath_dbg("unit %u port %d is in use " 2621 + "(PID %u cmd %s), can't reset\n", 2622 + unit, i, 2623 + pid_nr(dd->ipath_pd[i]->port_pid), 2624 + dd->ipath_pd[i]->port_comm); 2625 + ret = -EBUSY; 2626 + goto bail; 2629 2627 } 2628 + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 2630 2629 2631 2630 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) 2632 2631 teardown_sdma(dd); ··· 2663 2656 { 2664 2657 int i, sub, any = 0; 2665 2658 struct pid *pid; 2659 + unsigned long flags; 2666 2660 2667 2661 if (!dd->ipath_pd) 2668 2662 return 0; 2663 + 2664 + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); 2669 2665 for (i = 1; i < dd->ipath_cfgports; i++) { 2670 2666 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) 2671 2667 continue; ··· 2692 2682 any++; 2693 2683 } 2694 2684 } 2685 + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 2695 2686 return any; 2696 2687 }
+10 -11
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 2046 2046 struct ipath_filedata *fd; 2047 2047 struct ipath_portdata *pd; 2048 2048 struct ipath_devdata *dd; 2049 + unsigned long flags; 2049 2050 unsigned port; 2051 + struct pid *pid; 2050 2052 2051 2053 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", 2052 2054 (long)in->i_rdev, fp->private_data); ··· 2081 2079 mutex_unlock(&ipath_mutex); 2082 2080 goto bail; 2083 2081 } 2082 + /* early; no interrupt users after this */ 2083 + spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); 2084 2084 port = pd->port_port; 2085 - 2086 - if (pd->port_hdrqfull) { 2087 - ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2088 - "during run\n", pd->port_comm, pid_nr(pd->port_pid), 2089 - pd->port_hdrqfull); 2090 - pd->port_hdrqfull = 0; 2091 - } 2085 + dd->ipath_pd[port] = NULL; 2086 + pid = pd->port_pid; 2087 + pd->port_pid = NULL; 2088 + spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); 2092 2089 2093 2090 if (pd->port_rcvwait_to || pd->port_piowait_to 2094 2091 || pd->port_rcvnowait || pd->port_pionowait) { ··· 2144 2143 unlock_expected_tids(pd); 2145 2144 ipath_stats.sps_ports--; 2146 2145 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", 2147 - pd->port_comm, pid_nr(pd->port_pid), 2146 + pd->port_comm, pid_nr(pid), 2148 2147 dd->ipath_unit, port); 2149 2148 } 2150 2149 2151 - put_pid(pd->port_pid); 2152 - pd->port_pid = NULL; 2153 - dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ 2150 + put_pid(pid); 2154 2151 mutex_unlock(&ipath_mutex); 2155 2152 ipath_free_pddata(dd, pd); /* after releasing the mutex */ 2156 2153
+1
drivers/infiniband/hw/ipath/ipath_init_chip.c
··· 229 229 spin_lock_init(&dd->ipath_kernel_tid_lock); 230 230 spin_lock_init(&dd->ipath_user_tid_lock); 231 231 spin_lock_init(&dd->ipath_sendctrl_lock); 232 + spin_lock_init(&dd->ipath_uctxt_lock); 232 233 spin_lock_init(&dd->ipath_sdma_lock); 233 234 spin_lock_init(&dd->ipath_gpio_lock); 234 235 spin_lock_init(&dd->ipath_eep_st_lock);
+2
drivers/infiniband/hw/ipath/ipath_kernel.h
··· 477 477 spinlock_t ipath_kernel_tid_lock; 478 478 spinlock_t ipath_user_tid_lock; 479 479 spinlock_t ipath_sendctrl_lock; 480 + /* around ipath_pd and (user ports) port_cnt use (intr vs free) */ 481 + spinlock_t ipath_uctxt_lock; 480 482 481 483 /* 482 484 * IPATH_STATUS_*,
+2
drivers/infiniband/hw/ipath/ipath_keys.c
··· 132 132 * (see ipath_get_dma_mr and ipath_dma.c). 133 133 */ 134 134 if (sge->lkey == 0) { 135 + /* always a kernel port, no locking needed */ 135 136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); 136 137 137 138 if (pd->user) { ··· 212 211 * (see ipath_get_dma_mr and ipath_dma.c). 213 212 */ 214 213 if (rkey == 0) { 214 + /* always a kernel port, no locking needed */ 215 215 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); 216 216 217 217 if (pd->user) {
+2
drivers/infiniband/hw/ipath/ipath_mad.c
··· 348 348 */ 349 349 static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) 350 350 { 351 + /* always a kernel port, no locking needed */ 351 352 struct ipath_portdata *pd = dd->ipath_pd[0]; 352 353 353 354 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); ··· 731 730 int i; 732 731 int changed = 0; 733 732 733 + /* always a kernel port, no locking needed */ 734 734 pd = dd->ipath_pd[0]; 735 735 736 736 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+2 -1
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 1852 1852 } 1853 1853 1854 1854 /** 1855 - * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table 1855 + * ipath_get_pkey - return the indexed PKEY from the port PKEY table 1856 1856 * @dd: the infinipath device 1857 1857 * @index: the PKEY index 1858 1858 */ ··· 1860 1860 { 1861 1861 unsigned ret; 1862 1862 1863 + /* always a kernel port, no locking needed */ 1863 1864 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) 1864 1865 ret = 0; 1865 1866 else