arch/tile: fix two bugs in the backtracer code
The first is that we were using an incorrect hand-rolled variant of __kernel_text_address() which didn't handle module PCs; we now just use the standard API. The second is that we weren't accounting for the three-level page table when we were trying to pre-verify the addresses on the 64-bit TILE-Gx processor; we now do that correctly.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
This commit is contained in:
parent
6c4d112688
commit
3cebbafd28
1 changed file with 19 additions and 9 deletions
|
@ -44,13 +44,6 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
|
||||||
return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
|
return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Is address in the specified kernel code? */
|
|
||||||
static int in_kernel_text(VirtualAddress address)
|
|
||||||
{
|
|
||||||
return (address >= MEM_SV_INTRPT &&
|
|
||||||
address < MEM_SV_INTRPT + HPAGE_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Is address valid for reading? */
|
/* Is address valid for reading? */
|
||||||
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
|
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
|
||||||
{
|
{
|
||||||
|
@ -63,6 +56,23 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
|
||||||
if (l1_pgtable == NULL)
|
if (l1_pgtable == NULL)
|
||||||
return 0; /* can't read user space in other tasks */
|
return 0; /* can't read user space in other tasks */
|
||||||
|
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
/* Find the real l1_pgtable by looking in the l0_pgtable. */
|
||||||
|
pte = l1_pgtable[HV_L0_INDEX(address)];
|
||||||
|
if (!hv_pte_get_present(pte))
|
||||||
|
return 0;
|
||||||
|
pfn = hv_pte_get_pfn(pte);
|
||||||
|
if (pte_huge(pte)) {
|
||||||
|
if (!pfn_valid(pfn)) {
|
||||||
|
pr_err("L0 huge page has bad pfn %#lx\n", pfn);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
|
||||||
|
}
|
||||||
|
page = pfn_to_page(pfn);
|
||||||
|
BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */
|
||||||
|
l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
|
||||||
|
#endif
|
||||||
pte = l1_pgtable[HV_L1_INDEX(address)];
|
pte = l1_pgtable[HV_L1_INDEX(address)];
|
||||||
if (!hv_pte_get_present(pte))
|
if (!hv_pte_get_present(pte))
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -92,7 +102,7 @@ static bool read_memory_func(void *result, VirtualAddress address,
|
||||||
{
|
{
|
||||||
int retval;
|
int retval;
|
||||||
struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
|
struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
|
||||||
if (in_kernel_text(address)) {
|
if (__kernel_text_address(address)) {
|
||||||
/* OK to read kernel code. */
|
/* OK to read kernel code. */
|
||||||
} else if (address >= PAGE_OFFSET) {
|
} else if (address >= PAGE_OFFSET) {
|
||||||
/* We only tolerate kernel-space reads of this task's stack */
|
/* We only tolerate kernel-space reads of this task's stack */
|
||||||
|
@ -132,7 +142,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (EX1_PL(p->ex1) == KERNEL_PL &&
|
if (EX1_PL(p->ex1) == KERNEL_PL &&
|
||||||
in_kernel_text(p->pc) &&
|
__kernel_text_address(p->pc) &&
|
||||||
in_kernel_stack(kbt, p->sp) &&
|
in_kernel_stack(kbt, p->sp) &&
|
||||||
p->sp >= sp) {
|
p->sp >= sp) {
|
||||||
if (kbt->verbose)
|
if (kbt->verbose)
|
||||||
|
|
Loading…
Reference in a new issue