ACPI / APEI: Switch estatus pool to use vmalloc memory

[ Upstream commit 0ac234be1a9497498e57d958f4251f5257b116b4 ]

The ghes code is careful to parse and round firmware's advertised
memory requirements for CPER records, up to a maximum of 64K.
However, when ghes_estatus_pool_expand() does its work, it splits
the requested size into PAGE_SIZE granules.

This means if firmware generates 5K of CPER records, and correctly
describes this in the table, __process_error() will silently fail as it
is unable to allocate more than PAGE_SIZE.
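
For illustration only (not part of the patch): allocations from the estatus
pool go through gen_pool_alloc(), every chunk the old loop adds is exactly one
page, and genalloc never satisfies a request that spans chunks. A minimal
sketch, with a hypothetical helper name:

/*
 * Illustrative sketch -- estatus_alloc_demo() is hypothetical.  With the
 * old page-granule pool, gen_pool_alloc() cannot return a span crossing
 * chunk boundaries, so any len > PAGE_SIZE fails even though the pool's
 * total size would be sufficient.
 */
static void *estatus_alloc_demo(struct gen_pool *pool, size_t len)
{
	unsigned long addr = gen_pool_alloc(pool, len); /* 0 for len > PAGE_SIZE */

	return addr ? (void *)addr : NULL;
}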

Switch the estatus pool to vmalloc() memory. On x86, vmalloc() memory
may fault and be fixed up by vmalloc_fault(). To prevent this, call
vmalloc_sync_all() before an NMI handler could discover the memory.
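
As context (a sketch, not part of the patch): the pool is also consumed from
NMI context, so the first write into a freshly vmalloc()ed chunk must not rely
on the lazy vmalloc_fault() fixup. The consumer below is hypothetical:

/*
 * Illustrative sketch -- estatus_nmi_copy_demo() is a hypothetical
 * NMI-context consumer.  Without the vmalloc_sync_all() added by this
 * patch, the first touch of a new chunk here could fault on x86 and
 * depend on vmalloc_fault() running in NMI context.
 */
static void *estatus_nmi_copy_demo(struct gen_pool *pool,
				   const void *record, size_t len)
{
	unsigned long addr = gen_pool_alloc(pool, len);

	if (!addr)
		return NULL;

	memcpy((void *)addr, record, len); /* may be the first touch of the chunk */
	return (void *)addr;
}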

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Authored by James Morse on 2019-01-29 18:48:39 +00:00, committed by Greg Kroah-Hartman
parent 07575de062
commit 81ae6e6bfa

@@ -170,40 +170,40 @@ static int ghes_estatus_pool_init(void)
 	return 0;
 }
 
-static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+static void ghes_estatus_pool_free_chunk(struct gen_pool *pool,
 					      struct gen_pool_chunk *chunk,
 					      void *data)
 {
-	free_page(chunk->start_addr);
+	vfree((void *)chunk->start_addr);
 }
 
 static void ghes_estatus_pool_exit(void)
 {
 	gen_pool_for_each_chunk(ghes_estatus_pool,
-				ghes_estatus_pool_free_chunk_page, NULL);
+				ghes_estatus_pool_free_chunk, NULL);
 	gen_pool_destroy(ghes_estatus_pool);
 }
 
 static int ghes_estatus_pool_expand(unsigned long len)
 {
-	unsigned long i, pages, size, addr;
-	int ret;
+	unsigned long size, addr;
 
 	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
 	size = gen_pool_size(ghes_estatus_pool);
 	if (size >= ghes_estatus_pool_size_request)
 		return 0;
-	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
-	for (i = 0; i < pages; i++) {
-		addr = __get_free_page(GFP_KERNEL);
-		if (!addr)
-			return -ENOMEM;
-		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
-		if (ret)
-			return ret;
-	}
 
-	return 0;
+	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
+	if (!addr)
+		return -ENOMEM;
+
+	/*
+	 * New allocation must be visible in all pgd before it can be found by
+	 * an NMI allocating from the pool.
+	 */
+	vmalloc_sync_all();
+
+	return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
 }
 
 static int map_gen_v2(struct ghes *ghes)