IB/ipath: Make ipath_portdata work with struct pid * not pid_t
The official reason is "with the presence of pid namespaces in the
kernel using pid_t-s inside one is no longer safe."
But the reason I fix this right now is the following:
About a month ago (when 2.6.25 was not yet released) there was still
one last caller of the soon-to-be-deprecated function find_pid() - the
kill_proc() function, which in turn was only used by NFS callback
code.
During the last merge window, this last caller was finally eliminated
by some NFS patch(es), and I was about to finally kill this kill_proc()
and find_pid(), but found that I was too late: kill_proc is now
called from the ipath driver since commit 58411d1c
("IB/ipath: Head of
Line blocking vs forward progress of user apps").
So here's a patch that fixes this code to use struct pid * and (!)
the kill_pid routine.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent
74116f580b
commit
40d97692fb
3 changed files with 23 additions and 18 deletions
|
@ -2616,7 +2616,7 @@ int ipath_reset_device(int unit)
|
|||
ipath_dbg("unit %u port %d is in use "
|
||||
"(PID %u cmd %s), can't reset\n",
|
||||
unit, i,
|
||||
dd->ipath_pd[i]->port_pid,
|
||||
pid_nr(dd->ipath_pd[i]->port_pid),
|
||||
dd->ipath_pd[i]->port_comm);
|
||||
ret = -EBUSY;
|
||||
goto bail;
|
||||
|
@ -2654,19 +2654,21 @@ int ipath_reset_device(int unit)
|
|||
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
|
||||
{
|
||||
int i, sub, any = 0;
|
||||
pid_t pid;
|
||||
struct pid *pid;
|
||||
|
||||
if (!dd->ipath_pd)
|
||||
return 0;
|
||||
for (i = 1; i < dd->ipath_cfgports; i++) {
|
||||
if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
|
||||
!dd->ipath_pd[i]->port_pid)
|
||||
if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
|
||||
continue;
|
||||
pid = dd->ipath_pd[i]->port_pid;
|
||||
if (!pid)
|
||||
continue;
|
||||
|
||||
dev_info(&dd->pcidev->dev, "context %d in use "
|
||||
"(PID %u), sending signal %d\n",
|
||||
i, pid, sig);
|
||||
kill_proc(pid, sig, 1);
|
||||
i, pid_nr(pid), sig);
|
||||
kill_pid(pid, sig, 1);
|
||||
any++;
|
||||
for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
|
||||
pid = dd->ipath_pd[i]->port_subpid[sub];
|
||||
|
@ -2674,8 +2676,8 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
|
|||
continue;
|
||||
dev_info(&dd->pcidev->dev, "sub-context "
|
||||
"%d:%d in use (PID %u), sending "
|
||||
"signal %d\n", i, sub, pid, sig);
|
||||
kill_proc(pid, sig, 1);
|
||||
"signal %d\n", i, sub, pid_nr(pid), sig);
|
||||
kill_pid(pid, sig, 1);
|
||||
any++;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -555,7 +555,7 @@ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
|
|||
p = dd->ipath_pageshadow[porttid + tid];
|
||||
dd->ipath_pageshadow[porttid + tid] = NULL;
|
||||
ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
|
||||
pd->port_pid, tid);
|
||||
pid_nr(pd->port_pid), tid);
|
||||
dd->ipath_f_put_tid(dd, &tidbase[tid],
|
||||
RCVHQ_RCV_TYPE_EXPECTED,
|
||||
dd->ipath_tidinvalid);
|
||||
|
@ -1609,7 +1609,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
|
|||
port);
|
||||
pd->port_cnt = 1;
|
||||
port_fp(fp) = pd;
|
||||
pd->port_pid = current->pid;
|
||||
pd->port_pid = get_pid(task_pid(current));
|
||||
strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
|
||||
ipath_stats.sps_ports++;
|
||||
ret = 0;
|
||||
|
@ -1793,14 +1793,15 @@ static int find_shared_port(struct file *fp,
|
|||
}
|
||||
port_fp(fp) = pd;
|
||||
subport_fp(fp) = pd->port_cnt++;
|
||||
pd->port_subpid[subport_fp(fp)] = current->pid;
|
||||
pd->port_subpid[subport_fp(fp)] =
|
||||
get_pid(task_pid(current));
|
||||
tidcursor_fp(fp) = 0;
|
||||
pd->active_slaves |= 1 << subport_fp(fp);
|
||||
ipath_cdbg(PROC,
|
||||
"%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
|
||||
current->comm, current->pid,
|
||||
subport_fp(fp),
|
||||
pd->port_comm, pd->port_pid,
|
||||
pd->port_comm, pid_nr(pd->port_pid),
|
||||
dd->ipath_unit, pd->port_port);
|
||||
ret = 1;
|
||||
goto done;
|
||||
|
@ -2066,7 +2067,8 @@ static int ipath_close(struct inode *in, struct file *fp)
|
|||
* the slave(s) don't wait for receive data forever.
|
||||
*/
|
||||
pd->active_slaves &= ~(1 << fd->subport);
|
||||
pd->port_subpid[fd->subport] = 0;
|
||||
put_pid(pd->port_subpid[fd->subport]);
|
||||
pd->port_subpid[fd->subport] = NULL;
|
||||
mutex_unlock(&ipath_mutex);
|
||||
goto bail;
|
||||
}
|
||||
|
@ -2074,7 +2076,7 @@ static int ipath_close(struct inode *in, struct file *fp)
|
|||
|
||||
if (pd->port_hdrqfull) {
|
||||
ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
|
||||
"during run\n", pd->port_comm, pd->port_pid,
|
||||
"during run\n", pd->port_comm, pid_nr(pd->port_pid),
|
||||
pd->port_hdrqfull);
|
||||
pd->port_hdrqfull = 0;
|
||||
}
|
||||
|
@ -2134,11 +2136,12 @@ static int ipath_close(struct inode *in, struct file *fp)
|
|||
unlock_expected_tids(pd);
|
||||
ipath_stats.sps_ports--;
|
||||
ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
|
||||
pd->port_comm, pd->port_pid,
|
||||
pd->port_comm, pid_nr(pd->port_pid),
|
||||
dd->ipath_unit, port);
|
||||
}
|
||||
|
||||
pd->port_pid = 0;
|
||||
put_pid(pd->port_pid);
|
||||
pd->port_pid = NULL;
|
||||
dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
|
||||
mutex_unlock(&ipath_mutex);
|
||||
ipath_free_pddata(dd, pd); /* after releasing the mutex */
|
||||
|
|
|
@ -159,8 +159,8 @@ struct ipath_portdata {
|
|||
/* saved total number of polled urgent packets for poll edge trigger */
|
||||
u32 port_urgent_poll;
|
||||
/* pid of process using this port */
|
||||
pid_t port_pid;
|
||||
pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
|
||||
struct pid *port_pid;
|
||||
struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
|
||||
/* same size as task_struct .comm[] */
|
||||
char port_comm[16];
|
||||
/* pkeys set by this use of this port */
|
||||
|
|
Loading…
Reference in a new issue