[PATCH] kprobes-changed-from-using-spinlock-to-mutex fix
Based on some feedback from Oleg Nesterov, I have made a few changes to the previously posted patch.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit f709b12234
parent 49a2a1b83b

3 changed files with 20 additions and 19 deletions
@@ -35,7 +35,6 @@
 #include <asm/kdebug.h>
 #include <asm/sstep.h>
 
-static DECLARE_MUTEX(kprobe_mutex);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -54,9 +53,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	/* insn must be on a special executable page on ppc64 */
 	if (!ret) {
-		down(&kprobe_mutex);
 		p->ainsn.insn = get_insn_slot();
-		up(&kprobe_mutex);
 		if (!p->ainsn.insn)
 			ret = -ENOMEM;
 	}
@@ -43,7 +43,7 @@
 #include <asm/kdebug.h>
 
 void jprobe_return_end(void);
-void __kprobes arch_copy_kprobe(struct kprobe *p);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -180,7 +180,7 @@ static inline s32 *is_riprel(u8 *insn)
 	return NULL;
 }
 
-void __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	s32 *ripdisp;
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -431,7 +431,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
 	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kcalloc(1, sizeof(struct kprobe), GFP_KERNEL);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
@@ -491,7 +491,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	struct module *mod;
-	struct kprobe *old_p, *cleanup_p;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
 
 	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
@@ -499,22 +500,25 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		up(&kprobe_mutex);
 		return;
 	}
-
-	if ((old_p->pre_handler == aggr_pre_handler) &&
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
 	    (p->list.next == &old_p->list) &&
-	    (p->list.prev == &old_p->list)) {
-		/* Only one element in the aggregate list */
+	    (p->list.prev == &old_p->list))) {
+		/* Only probe on the hash list */
 		arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = old_p;
-	} else if (old_p == p) {
-		/* Only one kprobe element in the hash list */
-		arch_disarm_kprobe(p);
-		hlist_del_rcu(&p->hlist);
-		cleanup_p = p;
+		cleanup_p = 1;
 	} else {
 		list_del_rcu(&p->list);
-		cleanup_p = NULL;
+		cleanup_p = 0;
 	}
 
 	up(&kprobe_mutex);
@@ -524,7 +528,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		module_put(mod);
 
 	if (cleanup_p) {
-		if (cleanup_p->pre_handler == aggr_pre_handler) {
+		if (p != old_p) {
 			list_del_rcu(&p->list);
 			kfree(old_p);
 		}
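The kernel/kprobes.c hunks are the heart of the fix: unregister_kprobe() now validates that p actually belongs to old_p's aggregate list before unlinking anything, and cleanup_p becomes a plain int flag rather than a kprobe pointer. As a rough reading aid for those hunks, here is a self-contained user-space sketch of that decision structure; struct probe, struct aggr_head, do_unregister() and the list helpers are simplified stand-ins invented for this example (no RCU, no hash list, no module refcounting), not kernel APIs.

/*
 * Illustrative user-space sketch, not kernel code: mirrors the decision
 * structure of the patched unregister_kprobe() above.  First verify that
 * p really sits on old_p's list, then use an int flag instead of a
 * pointer to record whether the aggregate head must be torn down too.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct probe { struct list_head list; const char *name; };

/* stand-in for the aggregate kprobe that anchors sibling probes */
struct aggr_head { struct list_head list; };

/*
 * Returns -1 if p was never on old_p's list, 1 if the aggregate head must
 * be cleaned up as well (p was its only member), 0 if only p was unlinked.
 */
static int do_unregister(struct aggr_head *old_p, struct probe *p)
{
	struct list_head *pos;
	int cleanup_p;

	/* new in this fix: validate p before touching any list */
	for (pos = old_p->list.next; pos != &old_p->list; pos = pos->next)
		if (pos == &p->list)
			goto valid_p;
	return -1;

valid_p:
	if (p->list.next == &old_p->list && p->list.prev == &old_p->list) {
		/* only probe on the list: unlink the head too */
		list_del(&old_p->list);	/* plays the role of hlist_del_rcu() */
		cleanup_p = 1;
	} else {
		list_del(&p->list);
		cleanup_p = 0;
	}
	return cleanup_p;
}

int main(void)
{
	struct aggr_head agg = { { &agg.list, &agg.list } };
	struct probe a = { .name = "a" }, b = { .name = "b" };

	list_add_tail(&a.list, &agg.list);
	list_add_tail(&b.list, &agg.list);

	printf("unregister %s -> %d\n", a.name, do_unregister(&agg, &a)); /* 0 */
	printf("unregister %s -> %d\n", b.name, do_unregister(&agg, &b)); /* 1 */
	return 0;
}

Built with any C compiler, this prints 0 for the first removal and 1 for the second, matching the two cases the patch distinguishes: unlink just p, or tear down the aggregate as well.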