s390/mm: Fix memory hotplug for unaligned standby memory
Commit 27356f54c8
("mm/hotplug: verify hotplug memory range")
introduced a check that makes add_memory() only accept section size
aligned memory.
Therefore on z/VM systems, where standby memory is not aligned, no
standby memory is registered at all.
Example:
#cp def store 3504M standby 2336M
00: CP Q V STORE
00: STORAGE = 3504M MAX = 6G INC = 8M STANDBY = 2336M RESERVED = 0
For this setup the following error message is printed:
Section-unaligned hotplug range: start 0xdb000000, size 0x92000000
So fix this and register aligned memory in "sclp_cmd.c". This means
that for the corner cases where the standby memory is not aligned we
lose some memory.
In order to inform the user about the potential loss of standby memory,
we add a new message for each added standby block and print how
much of the standby memory is usable, for example:
sclp_cmd.4336b4: Standby memory at 0x50000000 (256M of 256M usable)
sclp_cmd.4336b4: Standby memory at 0xb0000000 (256M of 256M usable)
sclp_cmd.4336b4: Standby memory at 0xdb000000 (2048M of 2336M usable)
We also ensure that a potential memory block that contains both "assigned"
and "standby" memory cannot be set offline.
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent
0546231057
commit
8741ce6d11
1 changed files with 46 additions and 2 deletions
|
@ -315,10 +315,29 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
|
|||
rc |= sclp_assign_storage(incr->rn);
|
||||
else
|
||||
sclp_unassign_storage(incr->rn);
|
||||
if (rc == 0)
|
||||
incr->standby = online ? 0 : 1;
|
||||
}
|
||||
return rc ? -EIO : 0;
|
||||
}
|
||||
|
||||
static bool contains_standby_increment(unsigned long start, unsigned long end)
|
||||
{
|
||||
struct memory_increment *incr;
|
||||
unsigned long istart;
|
||||
|
||||
list_for_each_entry(incr, &sclp_mem_list, list) {
|
||||
istart = rn2addr(incr->rn);
|
||||
if (end - 1 < istart)
|
||||
continue;
|
||||
if (start > istart + sclp_rzm - 1)
|
||||
continue;
|
||||
if (incr->standby)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int sclp_mem_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
|
@ -334,8 +353,16 @@ static int sclp_mem_notifier(struct notifier_block *nb,
|
|||
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
|
||||
sclp_attach_storage(id);
|
||||
switch (action) {
|
||||
case MEM_ONLINE:
|
||||
case MEM_GOING_OFFLINE:
|
||||
/*
|
||||
* We do not allow to set memory blocks offline that contain
|
||||
* standby memory. This is done to simplify the "memory online"
|
||||
* case.
|
||||
*/
|
||||
if (contains_standby_increment(start, start + size))
|
||||
rc = -EPERM;
|
||||
break;
|
||||
case MEM_ONLINE:
|
||||
case MEM_CANCEL_OFFLINE:
|
||||
break;
|
||||
case MEM_GOING_ONLINE:
|
||||
|
@ -361,6 +388,21 @@ static struct notifier_block sclp_mem_nb = {
|
|||
.notifier_call = sclp_mem_notifier,
|
||||
};
|
||||
|
||||
/*
 * Align a standby memory region to the memory block size required by
 * add_memory(): round the start up and the end down to block boundaries.
 * Reports how much of the region remains usable after alignment; the
 * caller skips the region entirely when the resulting size is zero.
 */
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size)
{
	unsigned long long start_align, size_align, alignment;

	alignment = memory_block_size_bytes();
	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment);
	/*
	 * If the region lies entirely inside one memory block without
	 * touching a block boundary, rounddown(end) is below roundup(start)
	 * and the unsigned subtraction would wrap to a huge value.  Clamp
	 * to zero so the caller's "if (size)" check skips the region.
	 */
	size_align = size_align > start_align ? size_align - start_align : 0;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
|
||||
|
||||
static void __init add_memory_merged(u16 rn)
|
||||
{
|
||||
static u16 first_rn, num;
|
||||
|
@ -382,7 +424,9 @@ static void __init add_memory_merged(u16 rn)
|
|||
goto skip_add;
|
||||
if (memory_end_set && (start + size > memory_end))
|
||||
size = memory_end - start;
|
||||
add_memory(0, start, size);
|
||||
align_to_block_size(&start, &size);
|
||||
if (size)
|
||||
add_memory(0, start, size);
|
||||
skip_add:
|
||||
first_rn = rn;
|
||||
num = 1;
|
||||
|
|
Loading…
Reference in a new issue