s390/kdump: fix nosmt kernel parameter
It turned out that SIGP set-multi-threading can only be done once. Therefore, after switching to sclp.mtid_prev in the dump case, switching to a different MT level again fails. As a symptom, specifying the "nosmt" parameter currently fails for the kdump kernel and the kernel starts with multi-threading enabled.

Fix this by issuing a diag 308 subcode 1 call after collecting the CPU states for the dump. Also enhance the diag308_reset() function so that it can be used with lowcore protection enabled and a prefix register != 0. After the reset it is possible to switch the MT level again.

The reset has to be done very early in order not to kill the already initialized console. Therefore, instead of kmalloc(), the corresponding memblock functions have to be used. To avoid copying the sclp CPU code into sclp_early, we now use the simple sigp loop method for CPU detection.

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 1d1858d244
commit 1592a8e456

7 changed files with 64 additions and 61 deletions
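In short, the reworked smp_save_dump_cpus() in the smp.c hunks below detects the CPUs of the crashed system with two plain SIGP SENSE passes instead of asking SCLP: the first pass counts the operational CPU addresses, the second pass allocates one save area per address from memblock and stores the register state, and only afterwards the diag 308 subcode 1 reset and the switch back to MT level 0 are done. The following is a condensed sketch of that flow (simplified; error handling and the zfcp dump special case are omitted, so it is not the literal patch code):

void __init smp_save_dump_cpus(void)
{
        int addr, cpu, max_cpu_addr;

        if (is_kdump_kernel())
                return;         /* previous system already stored the states */
        /* Switch to the MT level the crashed system was running with. */
        pcpu_set_smt(sclp.mtid_prev);
        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;

        /* Pass 1: count operational CPU addresses via SIGP SENSE. */
        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++)
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) !=
                    SIGP_CC_NOT_OPERATIONAL)
                        cpu += 1;

        /* Pass 2: allocate save areas from memblock and store the states. */
        dump_save_areas.areas = (void *) memblock_alloc(sizeof(void *) * cpu, 8);
        dump_save_areas.count = cpu;
        /* ... second SIGP SENSE loop calling __smp_store_cpu_state() ... */

        diag308_reset();        /* allows a new SIGP set-multi-threading */
        pcpu_set_smt(0);        /* "nosmt" can now take effect in this kernel */
}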
@@ -94,7 +94,6 @@ struct dump_save_areas {
 };
 
 extern struct dump_save_areas dump_save_areas;
-struct save_area_ext *dump_save_area_create(int cpu);
 
 extern void do_reipl(void);
 extern void do_halt(void);

@@ -29,6 +29,7 @@ extern void smp_call_ipl_cpu(void (*func)(void *), void *);
 extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
+extern void smp_save_dump_cpus(void);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
 extern void smp_cpu_set_polarization(int cpu, int val);

@@ -78,15 +78,20 @@ s390_base_pgm_handler_fn:
 #
 # Calls diag 308 subcode 1 and continues execution
 #
-# The following conditions must be ensured before calling this function:
-# * Prefix register = 0
-# * Lowcore protection is disabled
-#
 ENTRY(diag308_reset)
         larl    %r4,.Lctlregs           # Save control registers
         stctg   %c0,%c15,0(%r4)
+        lg      %r2,0(%r4)              # Disable lowcore protection
+        nilh    %r2,0xefff
+        larl    %r4,.Lctlreg0
+        stg     %r2,0(%r4)
+        lctlg   %c0,%c0,0(%r4)
         larl    %r4,.Lfpctl             # Floating point control register
         stfpc   0(%r4)
+        larl    %r4,.Lprefix            # Save prefix register
+        stpx    0(%r4)
+        larl    %r4,.Lprefix_zero       # Set prefix register to 0
+        spx     0(%r4)
         larl    %r4,.Lcontinue_psw      # Save PSW flags
         epsw    %r2,%r3
         stm     %r2,%r3,0(%r4)

@@ -106,6 +111,8 @@ ENTRY(diag308_reset)
         lctlg   %c0,%c15,0(%r4)
         larl    %r4,.Lfpctl             # Restore floating point ctl register
         lfpc    0(%r4)
+        larl    %r4,.Lprefix            # Restore prefix register
+        spx     0(%r4)
         larl    %r4,.Lcontinue_psw      # Restore PSW flags
         lpswe   0(%r4)
 .Lcontinue:

@@ -122,10 +129,16 @@ ENTRY(diag308_reset)
 
         .section .bss
         .align 8
+.Lctlreg0:
+        .quad   0
 .Lctlregs:
         .rept   16
         .quad   0
         .endr
 .Lfpctl:
         .long   0
+.Lprefix:
+        .long   0
+.Lprefix_zero:
+        .long   0
         .previous
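Because diag308_reset() now saves and restores control register 0, the prefix register, and the PSW flags itself (and temporarily disables lowcore protection and zeroes the prefix on its own), it can be called directly from C in the dump path without any special preparation by the caller. A minimal sketch of such a call site; the extern declaration and the wrapper name are shown only for illustration, the real caller is smp_save_dump_cpus() further down:

extern void diag308_reset(void);        /* implemented in the assembly above */

static void __init finish_dump_cpu_collection(void)     /* illustrative name */
{
        /* ... all CPU states of the crashed system have been saved ... */
        diag308_reset();        /* subcode 1 reset; MT level can be changed again */
        pcpu_set_smt(0);
}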

@@ -44,31 +44,6 @@ static struct memblock_type oldmem_type = {
 
 struct dump_save_areas dump_save_areas;
 
-/*
- * Allocate and add a save area for a CPU
- */
-struct save_area_ext *dump_save_area_create(int cpu)
-{
-        struct save_area_ext **save_areas, *save_area;
-
-        save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
-        if (!save_area)
-                return NULL;
-        if (cpu + 1 > dump_save_areas.count) {
-                dump_save_areas.count = cpu + 1;
-                save_areas = krealloc(dump_save_areas.areas,
-                                      dump_save_areas.count * sizeof(void *),
-                                      GFP_KERNEL | __GFP_ZERO);
-                if (!save_areas) {
-                        kfree(save_area);
-                        return NULL;
-                }
-                dump_save_areas.areas = save_areas;
-        }
-        dump_save_areas.areas[cpu] = save_area;
-        return save_area;
-}
-
 /*
  * Return physical address for virtual address
  */

@@ -868,6 +868,11 @@ void __init setup_arch(char **cmdline_p)
 
         check_initrd();
         reserve_crashkernel();
+        /*
+         * Be aware that smp_save_dump_cpus() triggers a system reset.
+         * Therefore CPU and device initialization should be done afterwards.
+         */
+        smp_save_dump_cpus();
 
         setup_resources();
         setup_vmcoreinfo();

@@ -31,6 +31,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/crash_dump.h>
+#include <linux/memblock.h>
 #include <asm/asm-offsets.h>
 #include <asm/switch_to.h>
 #include <asm/facility.h>

@@ -531,15 +532,12 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
-static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
+static void __smp_store_cpu_state(struct save_area_ext *sa_ext, u16 address,
+                                  int is_boot_cpu)
 {
-        void *lc = pcpu_devices[0].lowcore;
-        struct save_area_ext *sa_ext;
+        void *lc = (void *)(unsigned long) store_prefix();
         unsigned long vx_sa;
 
-        sa_ext = dump_save_area_create(cpu);
-        if (!sa_ext)
-                panic("could not allocate memory for save area\n");
         if (is_boot_cpu) {
                 /* Copy the registers of the boot CPU. */
                 copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),

@@ -554,12 +552,12 @@ static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
         if (!MACHINE_HAS_VX)
                 return;
         /* Get the VX registers */
-        vx_sa = __get_free_page(GFP_KERNEL);
+        vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
         if (!vx_sa)
                 panic("could not allocate memory for VX save area\n");
         __pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
         memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
-        free_page(vx_sa);
+        memblock_free(vx_sa, PAGE_SIZE);
 }
 
 /*
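smp_save_dump_cpus() now runs very early from setup_arch(), before the slab allocator is available, so __get_free_page()/kfree() cannot be used and the patch switches to the memblock boot allocator. At this stage memblock_alloc() hands back a physical address that the kernel can also access directly (the patch itself relies on this when it memcpy's from the buffer), which is why the buffer is kept in an unsigned long and passed straight to SIGP. A minimal sketch of this early allocation pattern (function name illustrative):

static void __init early_vx_buffer_example(void)        /* illustrative name */
{
        unsigned long buf;

        buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);     /* size, alignment */
        if (!buf)
                panic("could not allocate early buffer\n");
        /* ... let SIGP store additional status into the buffer, copy it out ... */
        memblock_free(buf, PAGE_SIZE);                  /* give it back to memblock */
}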

@@ -589,10 +587,11 @@ static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
  * old system. The ELF sections are picked up by the crash_dump code
  * via elfcorehdr_addr.
  */
-static void __init smp_store_cpu_states(struct sclp_core_info *info)
+void __init smp_save_dump_cpus(void)
 {
-        unsigned int cpu, address, i, j;
-        int is_boot_cpu;
+        int addr, cpu, boot_cpu_addr, max_cpu_addr;
+        struct save_area_ext *sa_ext;
+        bool is_boot_cpu;
 
         if (is_kdump_kernel())
                 /* Previous system stored the CPU states. Nothing to do. */

@@ -602,22 +601,34 @@
                 return;
         /* Set multi-threading state to the previous system. */
         pcpu_set_smt(sclp.mtid_prev);
-        /* Collect CPU states. */
-        cpu = 0;
-        for (i = 0; i < info->configured; i++) {
-                /* Skip CPUs with different CPU type. */
-                if (sclp.has_core_type && info->core[i].type != boot_core_type)
+        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
+        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
+                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+                    SIGP_CC_NOT_OPERATIONAL)
                         continue;
-                for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
-                        address = (info->core[i].core_id << smp_cpu_mt_shift) + j;
-                        is_boot_cpu = (address == pcpu_devices[0].address);
-                        if (is_boot_cpu && !OLDMEM_BASE)
-                                /* Skip boot CPU for standard zfcp dump. */
-                                continue;
-                        /* Get state for this CPu. */
-                        __smp_store_cpu_state(cpu, address, is_boot_cpu);
-                }
+                cpu += 1;
         }
+        dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
+        dump_save_areas.count = cpu;
+        boot_cpu_addr = stap();
+        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
+                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+                    SIGP_CC_NOT_OPERATIONAL)
+                        continue;
+                sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
+                dump_save_areas.areas[cpu] = sa_ext;
+                if (!sa_ext)
+                        panic("could not allocate memory for save area\n");
+                is_boot_cpu = (addr == boot_cpu_addr);
+                cpu += 1;
+                if (is_boot_cpu && !OLDMEM_BASE)
+                        /* Skip boot CPU for standard zfcp dump. */
+                        continue;
+                /* Get state for this CPU. */
+                __smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
+        }
+        diag308_reset();
+        pcpu_set_smt(0);
 }
 
 int smp_store_status(int cpu)
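The scan bound SCLP_MAX_CORES << sclp.mtid_prev follows from how CPU addresses are built when multi-threading is active: the address encodes the core id in the upper bits and the thread id in the lower bits. A short illustration of that layout for mtid_prev = 1, i.e. two threads per core (helper name and comments are illustrative only):

/* Illustration only: CPU address layout with sclp.mtid_prev = 1 (2 threads/core). */
static inline u16 example_cpu_address(u16 core_id, u16 thread_id)
{
        return (core_id << 1) | thread_id;      /* core 1, thread 1 -> address 3 */
}
/*
 * With up to SCLP_MAX_CORES cores the highest possible address stays below
 * SCLP_MAX_CORES << sclp.mtid_prev, so the two SIGP SENSE loops above probe
 * addresses up to that bound.
 */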

@@ -637,6 +648,10 @@ int smp_store_status(int cpu)
         return 0;
 }
 
+#else
+void smp_save_dump_cpus(void)
+{
+}
 #endif /* CONFIG_CRASH_DUMP */
 
 void smp_cpu_set_polarization(int cpu, int val)

@@ -735,11 +750,6 @@ static void __init smp_detect_cpus(void)
                 panic("Could not find boot CPU type");
         }
 
-#ifdef CONFIG_CRASH_DUMP
-        /* Collect CPU state of previous system */
-        smp_store_cpu_states(info);
-#endif
-
         /* Set multi-threading state for the current system */
         mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
         mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;

@@ -154,7 +154,7 @@ static int __init init_cpu_info(enum arch_id arch)
 
         /* get info for boot cpu from lowcore, stored in the HSA */
 
-        sa_ext = dump_save_area_create(0);
+        sa_ext = dump_save_areas.areas[0];
         if (!sa_ext)
                 return -ENOMEM;
         if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,