ptrace: optimize exit_ptrace() for the likely case
exit_ptrace() takes tasklist_lock unconditionally. We need this lock to
avoid the race with ptrace_traceme(); it acts as a barrier.

Change its caller, forget_original_parent(), to call exit_ptrace() under
tasklist_lock, and change exit_ptrace() to drop and reacquire this lock
if needed.

This allows us to add the fastpath list_empty(ptraced) check. In the
likely no-tracees case exit_ptrace() just returns and we avoid the
lock() + unlock() sequence entirely.

"Zhang, Yanmin" <yanmin_zhang@linux.intel.com> suggested adding this
check, and he reports that the change yields about an 11% improvement in
some tests.

Suggested-and-tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 13d7e3a2db
commit c7e49c1488

2 changed files with 14 additions and 5 deletions
kernel/exit.c

@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
         struct task_struct *p, *n, *reaper;
         LIST_HEAD(dead_children);
 
-        exit_ptrace(father);
-
         write_lock_irq(&tasklist_lock);
+        /*
+         * Note that exit_ptrace() and find_new_reaper() might
+         * drop tasklist_lock and reacquire it.
+         */
+        exit_ptrace(father);
         reaper = find_new_reaper(father);
 
         list_for_each_entry_safe(p, n, &father->children, sibling) {
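For orientation, here is a userspace sketch of the caller-side shape after
this hunk: forget_original_parent() now takes tasklist_lock first and calls
exit_ptrace() while holding it, accepting that the callee may transiently
drop the lock. The pthread mutex and all *_sketch names below are
hypothetical stand-ins for the kernel primitives, not the kernel API.

#include <pthread.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stubs standing in for the kernel functions; a fuller sketch of the
 * reworked exit_ptrace() body follows the second hunk below. */
static void exit_ptrace_sketch(void) { /* may drop/retake the lock */ }
static void find_new_reaper_sketch(void) { }

static void forget_original_parent_sketch(void)
{
        pthread_mutex_lock(&tasklist_lock);
        exit_ptrace_sketch();      /* can transiently release the lock */
        find_new_reaper_sketch();
        /* ... reparent children, collect the dead ones ... */
        pthread_mutex_unlock(&tasklist_lock);
}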
kernel/ptrace.c

@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
 {
         struct task_struct *p, *n;
         LIST_HEAD(ptrace_dead);
 
-        write_lock_irq(&tasklist_lock);
+        if (likely(list_empty(&tracer->ptraced)))
+                return;
+
         list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                 if (__ptrace_detach(tracer, p))
                         list_add(&p->ptrace_entry, &ptrace_dead);
         }
-        write_unlock_irq(&tasklist_lock);
 
+        write_unlock_irq(&tasklist_lock);
         BUG_ON(!list_empty(&tracer->ptraced));
 
         list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                 list_del_init(&p->ptrace_entry);
                 release_task(p);
         }
+
+        write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
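Along the same lines, a minimal userspace sketch of the reworked
exit_ptrace() contract, again with hypothetical stand-ins (struct tracee,
a plain singly linked list, free() in place of release_task()): entered
with the lock held, it returns straight away on the likely-empty fastpath
and otherwise drops the lock only around the expensive reaping work before
retaking it.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;

struct tracee {
        struct tracee *next;
};

/* Called with tasklist_lock held; returns with it held, but may drop
 * and retake it in between -- the contract the new comment documents. */
static void exit_ptrace_sketch(struct tracee **ptraced)
{
        struct tracee *dead = NULL;

        /* Fastpath: the caller already holds the lock, so no tracee
         * can attach behind our back; an empty list means no work. */
        if (*ptraced == NULL)
                return;

        /* Unlink every tracee while still holding the lock. */
        while (*ptraced) {
                struct tracee *p = *ptraced;
                *ptraced = p->next;
                p->next = dead;
                dead = p;
        }

        /* Drop the lock for the expensive reaping work... */
        pthread_mutex_unlock(&tasklist_lock);
        while (dead) {
                struct tracee *p = dead;
                dead = p->next;
                free(p);        /* stands in for release_task(p) */
        }
        /* ...and retake it so the caller's unlock still balances. */
        pthread_mutex_lock(&tasklist_lock);
}

The fastpath check is safe precisely because the caller now holds the
lock: no new tracee can attach concurrently, so an untraced exit pays a
single list_empty() test instead of a write_lock_irq()/write_unlock_irq()
round trip, which is where the reported ~11% improvement comes from.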