IB/ipath: Make ipath_portdata work with struct pid * not pid_t

The official reason is "with the presence of pid namespaces in the
kernel using pid_t-s inside one is no longer safe."
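To make that concrete, here is a minimal illustration (not part of this patch; show_pid_ambiguity is a made-up helper, while task_pid(), pid_nr() and pid_vnr() are the real kernel primitives): with pid namespaces, the same task answers to a different number in each namespace, so only a struct pid * names it unambiguously.

    #include <linux/kernel.h>
    #include <linux/pid.h>
    #include <linux/sched.h>

    /* Illustration only: the numeric pid depends on the observer's
     * namespace, while the struct pid itself does not. */
    static void show_pid_ambiguity(struct task_struct *task)
    {
            struct pid *pid = task_pid(task);

            pr_info("pid in the initial namespace: %d\n", pid_nr(pid));
            pr_info("pid in the caller's namespace: %d\n", pid_vnr(pid));
    }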

But the reason I'm fixing this right now is the following:

About a month ago (when 2.6.25 was not yet released), there was still
one last caller of the soon-to-be-deprecated function find_pid(): the
kill_proc() function, which in turn was only used by the NFS callback
code.

During the last merge window, this last caller was finally eliminated
by some NFS patch(es), and I was about to kill kill_proc() and
find_pid() at last, but found that I was too late: kill_proc() is now
called from the ipath driver, as of commit 58411d1c ("IB/ipath: Head of
Line blocking vs forward progress of user apps").

So here's a patch that fixes this code to use struct pid * and (!)
the kill_pid routine.
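
For reference, the pattern applied throughout the patch, as a
standalone sketch (struct port_state and the port_* helpers are
invented for illustration; get_pid(), task_pid(), kill_pid(), pid_nr()
and put_pid() are the real kernel primitives):

    #include <linux/kernel.h>
    #include <linux/pid.h>
    #include <linux/sched.h>

    struct port_state {
            struct pid *port_pid;           /* was: pid_t port_pid */
    };

    static void port_open(struct port_state *ps)
    {
            /* take a counted reference instead of copying a number */
            ps->port_pid = get_pid(task_pid(current));
    }

    static void port_signal(struct port_state *ps, int sig)
    {
            /* kill_pid() replaces the deprecated kill_proc();
             * pid_nr() recovers a number, but only for printing */
            pr_info("sending signal %d to PID %u\n",
                    sig, pid_nr(ps->port_pid));
            kill_pid(ps->port_pid, sig, 1);
    }

    static void port_close(struct port_state *ps)
    {
            put_pid(ps->port_pid);          /* drop the reference */
            ps->port_pid = NULL;
    }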

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

3 files changed, 23 insertions(+), 18 deletions(-)

drivers/infiniband/hw/ipath/ipath_driver.c (+10 -8)
···
                         ipath_dbg("unit %u port %d is in use "
                                   "(PID %u cmd %s), can't reset\n",
                                   unit, i,
-                                  dd->ipath_pd[i]->port_pid,
+                                  pid_nr(dd->ipath_pd[i]->port_pid),
                                   dd->ipath_pd[i]->port_comm);
                         ret = -EBUSY;
                         goto bail;
···
 static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 {
         int i, sub, any = 0;
-        pid_t pid;
+        struct pid *pid;
 
         if (!dd->ipath_pd)
                 return 0;
         for (i = 1; i < dd->ipath_cfgports; i++) {
-                if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
-                    !dd->ipath_pd[i]->port_pid)
+                if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
                         continue;
                 pid = dd->ipath_pd[i]->port_pid;
+                if (!pid)
+                        continue;
+
                 dev_info(&dd->pcidev->dev, "context %d in use "
                          "(PID %u), sending signal %d\n",
-                         i, pid, sig);
-                kill_proc(pid, sig, 1);
+                         i, pid_nr(pid), sig);
+                kill_pid(pid, sig, 1);
                 any++;
                 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
                         pid = dd->ipath_pd[i]->port_subpid[sub];
···
                                 continue;
                         dev_info(&dd->pcidev->dev, "sub-context "
                                  "%d:%d in use (PID %u), sending "
-                                 "signal %d\n", i, sub, pid, sig);
-                        kill_proc(pid, sig, 1);
+                                 "signal %d\n", i, sub, pid_nr(pid), sig);
+                        kill_pid(pid, sig, 1);
                         any++;
                 }
         }

drivers/infiniband/hw/ipath/ipath_file_ops.c (+11 -8)
···
                         p = dd->ipath_pageshadow[porttid + tid];
                         dd->ipath_pageshadow[porttid + tid] = NULL;
                         ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-                                   pd->port_pid, tid);
+                                   pid_nr(pd->port_pid), tid);
                         dd->ipath_f_put_tid(dd, &tidbase[tid],
                                             RCVHQ_RCV_TYPE_EXPECTED,
                                             dd->ipath_tidinvalid);
···
                    port);
         pd->port_cnt = 1;
         port_fp(fp) = pd;
-        pd->port_pid = current->pid;
+        pd->port_pid = get_pid(task_pid(current));
         strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
         ipath_stats.sps_ports++;
         ret = 0;
···
                 }
                 port_fp(fp) = pd;
                 subport_fp(fp) = pd->port_cnt++;
-                pd->port_subpid[subport_fp(fp)] = current->pid;
+                pd->port_subpid[subport_fp(fp)] =
+                        get_pid(task_pid(current));
                 tidcursor_fp(fp) = 0;
                 pd->active_slaves |= 1 << subport_fp(fp);
                 ipath_cdbg(PROC,
                            "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
                            current->comm, current->pid,
                            subport_fp(fp),
-                           pd->port_comm, pd->port_pid,
+                           pd->port_comm, pid_nr(pd->port_pid),
                            dd->ipath_unit, pd->port_port);
                 ret = 1;
                 goto done;
···
                  * the slave(s) don't wait for receive data forever.
                  */
                 pd->active_slaves &= ~(1 << fd->subport);
-                pd->port_subpid[fd->subport] = 0;
+                put_pid(pd->port_subpid[fd->subport]);
+                pd->port_subpid[fd->subport] = NULL;
                 mutex_unlock(&ipath_mutex);
                 goto bail;
         }
···
 
         if (pd->port_hdrqfull) {
                 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
-                           "during run\n", pd->port_comm, pd->port_pid,
+                           "during run\n", pd->port_comm, pid_nr(pd->port_pid),
                            pd->port_hdrqfull);
                 pd->port_hdrqfull = 0;
         }
···
                 unlock_expected_tids(pd);
                 ipath_stats.sps_ports--;
                 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                           pd->port_comm, pd->port_pid,
+                           pd->port_comm, pid_nr(pd->port_pid),
                            dd->ipath_unit, port);
         }
 
-        pd->port_pid = 0;
+        put_pid(pd->port_pid);
+        pd->port_pid = NULL;
         dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
         mutex_unlock(&ipath_mutex);
         ipath_free_pddata(dd, pd); /* after releasing the mutex */

drivers/infiniband/hw/ipath/ipath_kernel.h (+2 -2)
···
         /* saved total number of polled urgent packets for poll edge trigger */
         u32 port_urgent_poll;
         /* pid of process using this port */
-        pid_t port_pid;
-        pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
+        struct pid *port_pid;
+        struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
         /* same size as task_struct .comm[] */
         char port_comm[16];
         /* pkeys set by this use of this port */