09e56a2d07
Without these, exit functions cannot be stack-traced, so to speak. This implies that module unloads that perform allocations (don't laugh) will cause noisy warnings on the console when kmemleak is enabled, as it presumes that all code's call chains are traceable. Similarly, BUGs and WARN_ONs will give additional console spam.

Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
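For context: module_frob_arch_sections() and register_unwind_tables() below rely on a small per-module bookkeeping array, mod->arch.map, indexed by ARM_SEC_* constants. Those definitions live in arch/arm/include/asm/module.h rather than in this file; the sketch below shows roughly what this version of the code assumes they look like, and is illustrative rather than a copy of that header.

/* Rough sketch of the companion definitions assumed from asm/module.h. */
enum {
	ARM_SEC_INIT,		/* .init.text    + .ARM.exidx.init.text */
	ARM_SEC_DEVINIT,	/* .devinit.text + .ARM.exidx.devinit.text */
	ARM_SEC_CORE,		/* .text         + .ARM.exidx */
	ARM_SEC_EXIT,		/* .exit.text    + .ARM.exidx.exit.text */
	ARM_SEC_DEVEXIT,	/* .devexit.text + .ARM.exidx.devexit.text */
	ARM_SEC_MAX,
};

struct arm_unwind_mapping {
	Elf_Shdr *unw_sec;		/* .ARM.exidx* section for this text section */
	Elf_Shdr *sec_text;		/* the text section itself */
	struct unwind_table *unwind;	/* handle returned by unwind_table_add() */
};

struct mod_arch_specific {
	struct arm_unwind_mapping map[ARM_SEC_MAX];
};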
/*
 *  linux/arch/arm/kernel/module.c
 *
 *  Copyright (C) 2002 Russell King.
 *  Modified for nommu by Hyok S. Choi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Module allocation method suggested by Andi Kleen.
 */
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/unwind.h>

#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and
 * some other stuff to work without any indirect relocations.
 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
#define MODULES_VADDR	(((unsigned long)_etext + ~PGDIR_MASK) & PGDIR_MASK)
#endif

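/*
 * Module memory is carved out of the dedicated module area
 * (MODULES_VADDR..MODULES_END) so that PC-relative branches from modules
 * can typically reach the kernel text, and it is mapped executable.
 * Without an MMU there is no separate module area, so a plain vmalloc()
 * is sufficient.
 */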
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
	if (!area)
		return NULL;

	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
}
#else /* CONFIG_MMU */
void *module_alloc(unsigned long size)
{
	return size == 0 ? NULL : vmalloc(size);
}
#endif /* !CONFIG_MMU */

void module_free(struct module *module, void *region)
{
	vfree(region);
}

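/*
 * Scan the section headers and remember, per ARM_SEC_* class, which text
 * section goes with which .ARM.exidx unwind index section.  The pairs
 * recorded here are turned into unwind tables later, in
 * module_finalize()/register_unwind_tables().
 */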
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
	Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
	struct arm_unwind_mapping *maps = mod->arch.map;

	for (s = sechdrs; s < sechdrs_end; s++) {
		char const *secname = secstrings + s->sh_name;

		if (strcmp(".ARM.exidx.init.text", secname) == 0)
			maps[ARM_SEC_INIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].unw_sec = s;
		else if (strcmp(".ARM.exidx", secname) == 0)
			maps[ARM_SEC_CORE].unw_sec = s;
		else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].unw_sec = s;
		else if (strcmp(".init.text", secname) == 0)
			maps[ARM_SEC_INIT].sec_text = s;
		else if (strcmp(".devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].sec_text = s;
		else if (strcmp(".text", secname) == 0)
			maps[ARM_SEC_CORE].sec_text = s;
		else if (strcmp(".exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].sec_text = s;
		else if (strcmp(".devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].sec_text = s;
	}
#endif
	return 0;
}

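/*
 * Resolve the Elf32_Rel entries of one relocation section.  Each entry
 * names a symbol and an offset into the section being relocated
 * (relsec->sh_info); the relocation type selects how the symbol value is
 * folded into the word or instruction at that location.
 */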
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relindex, struct module *module)
{
	Elf32_Shdr *symsec = sechdrs + symindex;
	Elf32_Shdr *relsec = sechdrs + relindex;
	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
	Elf32_Rel *rel = (void *)relsec->sh_addr;
	unsigned int i;

	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
		unsigned long loc;
		Elf32_Sym *sym;
		s32 offset;
#ifdef CONFIG_THUMB2_KERNEL
		u32 upper, lower, sign, j1, j2;
#endif

		offset = ELF32_R_SYM(rel->r_info);
		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
			printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
				module->name, relindex, i);
			return -ENOEXEC;
		}

		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;

		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
			printk(KERN_ERR "%s: out of bounds relocation, "
				"section %d reloc %d offset %d size %d\n",
				module->name, relindex, i, rel->r_offset,
				dstsec->sh_size);
			return -ENOEXEC;
		}

		loc = dstsec->sh_addr + rel->r_offset;

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_NONE:
			/* ignore */
			break;

		case R_ARM_ABS32:
			*(u32 *)loc += sym->st_value;
			break;

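		/*
		 * 24-bit PC-relative branch (ARM B/BL): the instruction
		 * holds a signed word offset in its low 24 bits.  Decode
		 * it, add the distance to the symbol, check that the
		 * result is word-aligned and within the +/-32MB branch
		 * range, and re-encode it.
		 */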
		case R_ARM_PC24:
		case R_ARM_CALL:
		case R_ARM_JUMP24:
			offset = (*(u32 *)loc & 0x00ffffff) << 2;
			if (offset & 0x02000000)
				offset -= 0x04000000;

			offset += sym->st_value - loc;
			if (offset & 3 ||
			    offset <= (s32)0xfe000000 ||
			    offset >= (s32)0x02000000) {
				printk(KERN_ERR
				       "%s: relocation out of range, section "
				       "%d reloc %d sym '%s'\n", module->name,
				       relindex, i, strtab + sym->st_name);
				return -ENOEXEC;
			}

			offset >>= 2;

			*(u32 *)loc &= 0xff000000;
			*(u32 *)loc |= offset & 0x00ffffff;
			break;

		case R_ARM_V4BX:
			/* Preserve Rm and the condition code. Alter
			 * other bits to re-code instruction as
			 * MOV PC,Rm.
			 */
			*(u32 *)loc &= 0xf000000f;
			*(u32 *)loc |= 0x01a0f000;
			break;

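		/*
		 * 31-bit place-relative relocation, as used by the
		 * .ARM.exidx unwind index entries: store the low 31 bits
		 * of (symbol + addend - place).
		 */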
		case R_ARM_PREL31:
			offset = *(u32 *)loc + sym->st_value - loc;
			*(u32 *)loc = offset & 0x7fffffff;
			break;

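		/*
		 * MOVW/MOVT in ARM state encode a 16-bit immediate as
		 * imm4:imm12 (instruction bits [19:16] and [11:0]).
		 * MOVW gets the low half of the symbol address, MOVT the
		 * high half; the existing immediate acts as the addend.
		 */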
		case R_ARM_MOVW_ABS_NC:
		case R_ARM_MOVT_ABS:
			offset = *(u32 *)loc;
			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
			offset = (offset ^ 0x8000) - 0x8000;

			offset += sym->st_value;
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
				offset >>= 16;

			*(u32 *)loc &= 0xfff0f000;
			*(u32 *)loc |= ((offset & 0xf000) << 4) |
				       (offset & 0x0fff);
			break;

#ifdef CONFIG_THUMB2_KERNEL
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			upper = *(u16 *)loc;
			lower = *(u16 *)(loc + 2);

			/*
			 * 25 bit signed address range (Thumb-2 BL and B.W
			 * instructions):
			 *   S:I1:I2:imm10:imm11:0
			 * where:
			 *   S = upper[10] = offset[24]
			 *   I1 = ~(J1 ^ S) = offset[23]
			 *   I2 = ~(J2 ^ S) = offset[22]
			 *   imm10 = upper[9:0] = offset[21:12]
			 *   imm11 = lower[10:0] = offset[11:1]
			 *   J1 = lower[13]
			 *   J2 = lower[11]
			 */
			sign = (upper >> 10) & 1;
			j1 = (lower >> 13) & 1;
			j2 = (lower >> 11) & 1;
			offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
				 ((~(j2 ^ sign) & 1) << 22) |
				 ((upper & 0x03ff) << 12) |
				 ((lower & 0x07ff) << 1);
			if (offset & 0x01000000)
				offset -= 0x02000000;
			offset += sym->st_value - loc;

			/* only Thumb addresses allowed (no interworking) */
			if (!(offset & 1) ||
			    offset <= (s32)0xff000000 ||
			    offset >= (s32)0x01000000) {
				printk(KERN_ERR
				       "%s: relocation out of range, section "
				       "%d reloc %d sym '%s'\n", module->name,
				       relindex, i, strtab + sym->st_name);
				return -ENOEXEC;
			}

			sign = (offset >> 24) & 1;
			j1 = sign ^ (~(offset >> 23) & 1);
			j2 = sign ^ (~(offset >> 22) & 1);
			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
					    ((offset >> 12) & 0x03ff));
			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
						  (j1 << 13) | (j2 << 11) |
						  ((offset >> 1) & 0x07ff));
			break;

		case R_ARM_THM_MOVW_ABS_NC:
		case R_ARM_THM_MOVT_ABS:
			upper = *(u16 *)loc;
			lower = *(u16 *)(loc + 2);

			/*
			 * MOVT/MOVW instructions encoding in Thumb-2:
			 *
			 * i = upper[10]
			 * imm4 = upper[3:0]
			 * imm3 = lower[14:12]
			 * imm8 = lower[7:0]
			 *
			 * imm16 = imm4:i:imm3:imm8
			 */
			offset = ((upper & 0x000f) << 12) |
				 ((upper & 0x0400) << 1) |
				 ((lower & 0x7000) >> 4) | (lower & 0x00ff);
			offset = (offset ^ 0x8000) - 0x8000;
			offset += sym->st_value;

			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
				offset >>= 16;

			*(u16 *)loc = (u16)((upper & 0xfbf0) |
					    ((offset & 0xf000) >> 12) |
					    ((offset & 0x0800) >> 1));
			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
						  ((offset & 0x0700) << 4) |
						  (offset & 0x00ff));
			break;
#endif

		default:
			printk(KERN_ERR "%s: unknown relocation: %u\n",
			       module->name, ELF32_R_TYPE(rel->r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

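/*
 * ELF for the ARM architecture uses REL-style relocations (the addend
 * lives in the word being relocated), so RELA sections are not expected
 * here and are rejected.
 */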
int
apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
		   unsigned int symindex, unsigned int relsec, struct module *module)
{
	printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
	       module->name);
	return -ENOEXEC;
}

#ifdef CONFIG_ARM_UNWIND
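/*
 * Register an unwind table for every text/.ARM.exidx pair recorded by
 * module_frob_arch_sections(), including the exit and devexit sections,
 * so that backtraces through any of the module's code keep working for
 * as long as the module is loaded.
 */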
static void register_unwind_tables(struct module *mod)
{
	int i;
	for (i = 0; i < ARM_SEC_MAX; ++i) {
		struct arm_unwind_mapping *map = &mod->arch.map[i];
		if (map->unw_sec && map->sec_text)
			map->unwind = unwind_table_add(map->unw_sec->sh_addr,
						       map->unw_sec->sh_size,
						       map->sec_text->sh_addr,
						       map->sec_text->sh_size);
	}
}

static void unregister_unwind_tables(struct module *mod)
{
	int i = ARM_SEC_MAX;
	while (--i >= 0)
		unwind_table_del(mod->arch.map[i].unwind);
}
#else
static inline void register_unwind_tables(struct module *mod) { }
static inline void unregister_unwind_tables(struct module *mod) { }
#endif

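/*
 * module_finalize() runs once the module has been relocated, and is the
 * point at which the collected unwind tables are registered;
 * module_arch_cleanup() drops them again when the module is unloaded.
 */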
int
module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		struct module *module)
{
	register_unwind_tables(module);
	return 0;
}

void
module_arch_cleanup(struct module *mod)
{
	unregister_unwind_tables(mod);
}