bb817bef3b
Wire up the existing arm64 support for SMBIOS tables (aka DMI) for ARM as well, by moving the arm64 init code to drivers/firmware/efi/arm-runtime.c (which is shared between ARM and arm64), and adding an asm/dmi.h header to ARM that defines the mapping routines for the firmware tables. This allows userspace to access these tables to discover system information exposed by the firmware. It also sets the hardware name used in crash dumps, e.g.: Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = ed3c0000 [00000000] *pgd=bf1f3835 Internal error: Oops: 817 [#1] SMP THUMB2 Modules linked in: CPU: 0 PID: 759 Comm: bash Not tainted 4.10.0-09601-g0e8f38792120-dirty #112 Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 ^^^ NOTE: This does *NOT* enable or encourage the use of DMI quirks, i.e., the practice of identifying the platform via DMI to decide whether certain workarounds for buggy hardware and/or firmware need to be enabled. This would require the DMI subsystem to be enabled much earlier than we do on ARM, which is non-trivial. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Acked-by: Russell King <rmk+kernel@armlinux.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Matt Fleming <matt@codeblueprint.co.uk> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20170602135207.21708-14-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
126 lines
3.7 KiB
C
126 lines
3.7 KiB
C
/*
|
|
* Extensible Firmware Interface
|
|
*
|
|
* Based on Extensible Firmware Interface Specification version 2.4
|
|
*
|
|
* Copyright (C) 2013, 2014 Linaro Ltd.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
*/
|
|
|
|
#include <linux/efi.h>
|
|
#include <linux/init.h>
|
|
|
|
#include <asm/efi.h>
|
|
|
|
/*
|
|
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
|
|
* executable, everything else can be mapped with the XN bits
|
|
* set. Also take the new (optional) RO/XP bits into account.
|
|
*/
|
|
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
|
|
{
|
|
u64 attr = md->attribute;
|
|
u32 type = md->type;
|
|
|
|
if (type == EFI_MEMORY_MAPPED_IO)
|
|
return PROT_DEVICE_nGnRE;
|
|
|
|
if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
|
|
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
|
|
/*
|
|
* If the region is not aligned to the page size of the OS, we
|
|
* can not use strict permissions, since that would also affect
|
|
* the mapping attributes of the adjacent regions.
|
|
*/
|
|
return pgprot_val(PAGE_KERNEL_EXEC);
|
|
|
|
/* R-- */
|
|
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
|
|
(EFI_MEMORY_XP | EFI_MEMORY_RO))
|
|
return pgprot_val(PAGE_KERNEL_RO);
|
|
|
|
/* R-X */
|
|
if (attr & EFI_MEMORY_RO)
|
|
return pgprot_val(PAGE_KERNEL_ROX);
|
|
|
|
/* RW- */
|
|
if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
|
|
return pgprot_val(PAGE_KERNEL);
|
|
|
|
/* RWX */
|
|
return pgprot_val(PAGE_KERNEL_EXEC);
|
|
}
|
|
|
|
/*
 * Filled in by the EFI stub before the kernel proper starts, so it must
 * live in .data rather than .bss — otherwise it would be wiped when the
 * BSS is zeroed during early boot.
 */
struct screen_info screen_info __section(.data);
|
|
|
/*
 * Map a single UEFI runtime memory region into @mm at the virtual
 * address recorded in the descriptor. Returns 0 (no failure paths).
 */
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool use_pages;

	/*
	 * Code and data regions may later have their permissions
	 * tightened page by page (see efi_set_mapping_permissions()),
	 * so they must be mapped down to pages from the start.
	 */
	use_pages = md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_RUNTIME_SERVICES_DATA;

	if (!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT) ||
	    !PAGE_ALIGNED(md->phys_addr)) {
		/*
		 * If the end address of this region is not aligned to page
		 * size, the mapping is rounded up, and may end up sharing a
		 * page frame with the next UEFI memory region. If we create
		 * a block entry now, we may need to split it again when
		 * mapping the next region, and support for that is going to
		 * be removed from the MMU routines. So avoid block mappings
		 * altogether in that case.
		 */
		use_pages = true;
	}

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), use_pages);
	return 0;
}
|
|
|
|
/*
 * apply_to_page_range() callback: tighten the permissions of one PTE
 * according to the RO/XP attribute bits of the region descriptor passed
 * via @data. Always returns 0 so the walk continues.
 */
static int __init set_permissions(pte_t *ptep, pgtable_t token,
				  unsigned long addr, void *data)
{
	efi_memory_desc_t *desc = data;
	pte_t entry = *ptep;

	if (desc->attribute & EFI_MEMORY_RO)
		entry = set_pte_bit(entry, __pgprot(PTE_RDONLY));
	if (desc->attribute & EFI_MEMORY_XP)
		entry = set_pte_bit(entry, __pgprot(PTE_PXN));

	set_pte(ptep, entry);
	return 0;
}
|
|
|
|
/*
 * Walk an already-mapped runtime code/data region and apply the RO/XP
 * permission bits from its descriptor to every PTE. Returns the result
 * of apply_to_page_range() (0 on success).
 */
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	/* Only code and data regions may have their permissions tightened. */
	BUG_ON(!(md->type == EFI_RUNTIME_SERVICES_CODE ||
		 md->type == EFI_RUNTIME_SERVICES_DATA));

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
|
|
|
|
/*
|
|
* UpdateCapsule() depends on the system being shutdown via
|
|
* ResetSystem().
|
|
*/
|
|
bool efi_poweroff_required(void)
|
|
{
|
|
return efi_enabled(EFI_RUNTIME_SERVICES);
|
|
}
|