kprobes: allow specifying a custom allocator for insn caches
The current two insn slot caches both use module_alloc/module_free to allocate and free insn slot cache pages. For s390 this is not sufficient, since insn slots must be allocated either within the vmalloc module area or within DMA memory. Therefore add a mechanism which allows specifying a custom allocator for a custom insn slot cache. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
c802d64a35
commit
af96397de8
2 changed files with 20 additions and 2 deletions
|
@ -268,6 +268,8 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
|
|||
|
||||
struct kprobe_insn_cache {
|
||||
struct mutex mutex;
|
||||
void *(*alloc)(void); /* allocate insn page */
|
||||
void (*free)(void *); /* free insn page */
|
||||
struct list_head pages; /* list of kprobe_insn_page */
|
||||
size_t insn_size; /* size of instruction slot */
|
||||
int nr_garbage;
|
||||
|
|
|
@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
|
|||
struct kprobe_insn_page {
|
||||
struct list_head list;
|
||||
kprobe_opcode_t *insns; /* Page of instruction slots */
|
||||
struct kprobe_insn_cache *cache;
|
||||
int nused;
|
||||
int ngarbage;
|
||||
char slot_used[];
|
||||
|
@ -132,8 +133,20 @@ enum kprobe_slot_state {
|
|||
SLOT_USED = 2,
|
||||
};
|
||||
|
||||
/*
 * Default .alloc callback for a kprobe_insn_cache: allocate one page of
 * insn slots with module_alloc(), so the page resides where loaded
 * module images do (required so x86_64 can correctly handle
 * %rip-relative fixups — see the comment in __get_insn_slot()).
 * Returns NULL on allocation failure.
 */
static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}
|
||||
|
||||
static void free_insn_page(void *page)
|
||||
{
|
||||
module_free(NULL, page);
|
||||
}
|
||||
|
||||
struct kprobe_insn_cache kprobe_insn_slots = {
|
||||
.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
|
||||
.alloc = alloc_insn_page,
|
||||
.free = free_insn_page,
|
||||
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
|
||||
.insn_size = MAX_INSN_SIZE,
|
||||
.nr_garbage = 0,
|
||||
|
@ -182,7 +195,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
|
|||
* kernel image and loaded module images reside. This is required
|
||||
* so x86_64 can correctly handle the %rip-relative fixups.
|
||||
*/
|
||||
kip->insns = module_alloc(PAGE_SIZE);
|
||||
kip->insns = c->alloc();
|
||||
if (!kip->insns) {
|
||||
kfree(kip);
|
||||
goto out;
|
||||
|
@ -192,6 +205,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
|
|||
kip->slot_used[0] = SLOT_USED;
|
||||
kip->nused = 1;
|
||||
kip->ngarbage = 0;
|
||||
kip->cache = c;
|
||||
list_add(&kip->list, &c->pages);
|
||||
slot = kip->insns;
|
||||
out:
|
||||
|
@ -213,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
|
|||
*/
|
||||
if (!list_is_singular(&kip->list)) {
|
||||
list_del(&kip->list);
|
||||
module_free(NULL, kip->insns);
|
||||
kip->cache->free(kip->insns);
|
||||
kfree(kip);
|
||||
}
|
||||
return 1;
|
||||
|
@ -274,6 +288,8 @@ void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
|
|||
/* For optimized_kprobe buffer */
|
||||
struct kprobe_insn_cache kprobe_optinsn_slots = {
|
||||
.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
|
||||
.alloc = alloc_insn_page,
|
||||
.free = free_insn_page,
|
||||
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
|
||||
/* .insn_size is initialized later */
|
||||
.nr_garbage = 0,
|
||||
|
|
Loading…
Reference in a new issue