s390/kvm: remove explicit -EFAULT return code checking on guest access

Let's change to the paradigm that every return code from guest memory
access functions that is not zero translates to -EFAULT and do not
explicitly compare.
Explicitly comparing the return value with -EFAULT has already shown to
be a bit fragile. In addition this is closer to the handling of
copy_to/from_user functions, which imho is in general a good idea.

Also shorten the return code handling in interrupt.c a bit.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
Heiko Carstens 2013-03-05 13:14:43 +01:00 committed by Marcelo Tosatti
parent 59a1fa2d80
commit dc5008b9bf
3 changed files with 74 additions and 177 deletions

View file

@ -45,7 +45,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
do { do {
rc = get_guest_u64(vcpu, useraddr, rc = get_guest_u64(vcpu, useraddr,
&vcpu->arch.sie_block->gcr[reg]); &vcpu->arch.sie_block->gcr[reg]);
if (rc == -EFAULT) { if (rc) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break; break;
} }
@ -79,7 +79,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
reg = reg1; reg = reg1;
do { do {
rc = get_guest_u32(vcpu, useraddr, &val); rc = get_guest_u32(vcpu, useraddr, &val);
if (rc == -EFAULT) { if (rc) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break; break;
} }

View file

@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti) struct kvm_s390_interrupt_info *inti)
{ {
const unsigned short table[] = { 2, 4, 4, 6 }; const unsigned short table[] = { 2, 4, 4, 6 };
int rc, exception = 0; int rc = 0;
switch (inti->type) { switch (inti->type) {
case KVM_S390_INT_EMERGENCY: case KVM_S390_INT_EMERGENCY:
@ -188,74 +188,38 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_emergency_signal++; vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->emerg.code, 0); inti->emerg.code, 0);
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201); rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
if (rc == -EFAULT) rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
exception = 1; rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
if (rc == -EFAULT) __LC_EXT_NEW_PSW, sizeof(psw_t));
exception = 1;
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_INT_EXTERNAL_CALL: case KVM_S390_INT_EXTERNAL_CALL:
VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
vcpu->stat.deliver_external_call++; vcpu->stat.deliver_external_call++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->extcall.code, 0); inti->extcall.code, 0);
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
if (rc == -EFAULT) rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
exception = 1; rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
if (rc == -EFAULT) __LC_EXT_NEW_PSW, sizeof(psw_t));
exception = 1;
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_INT_SERVICE: case KVM_S390_INT_SERVICE:
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
inti->ext.ext_params); inti->ext.ext_params);
vcpu->stat.deliver_service_signal++; vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->ext.ext_params, 0); inti->ext.ext_params, 0);
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401); rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
if (rc == -EFAULT) rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
exception = 1; &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, __LC_EXT_NEW_PSW, sizeof(psw_t));
&vcpu->arch.sie_block->gpsw, sizeof(psw_t)); rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_INT_VIRTIO: case KVM_S390_INT_VIRTIO:
VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
inti->ext.ext_params, inti->ext.ext_params2); inti->ext.ext_params, inti->ext.ext_params2);
@ -263,34 +227,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->ext.ext_params, inti->ext.ext_params,
inti->ext.ext_params2); inti->ext.ext_params2);
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
if (rc == -EFAULT) rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
exception = 1; rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
if (rc == -EFAULT) __LC_EXT_NEW_PSW, sizeof(psw_t));
exception = 1; rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
rc |= put_guest_u64(vcpu, __LC_EXT_PARAMS2,
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, inti->ext.ext_params2);
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
if (rc == -EFAULT)
exception = 1;
rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
inti->ext.ext_params2);
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_SIGP_STOP: case KVM_S390_SIGP_STOP:
VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
vcpu->stat.deliver_stop_signal++; vcpu->stat.deliver_stop_signal++;
@ -313,18 +259,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_restart_signal++; vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
0, 0); 0, 0);
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, rc = copy_to_guest(vcpu,
restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); offsetof(struct _lowcore, restart_old_psw),
if (rc == -EFAULT) &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
exception = 1; rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
offsetof(struct _lowcore, restart_psw),
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
break; break;
case KVM_S390_PROGRAM_INT: case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
inti->pgm.code, inti->pgm.code,
@ -332,24 +274,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_program_int++; vcpu->stat.deliver_program_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->pgm.code, 0); inti->pgm.code, 0);
rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code); rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
if (rc == -EFAULT) rc |= put_guest_u16(vcpu, __LC_PGM_ILC,
exception = 1; table[vcpu->arch.sie_block->ipa >> 14]);
rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
rc = put_guest_u16(vcpu, __LC_PGM_ILC, &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
table[vcpu->arch.sie_block->ipa >> 14]); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
if (rc == -EFAULT) __LC_PGM_NEW_PSW, sizeof(psw_t));
exception = 1;
rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_PGM_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_MCHK: case KVM_S390_MCHK:
@ -358,24 +289,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
inti->mchk.cr14, inti->mchk.cr14,
inti->mchk.mcic); inti->mchk.mcic);
rc = kvm_s390_vcpu_store_status(vcpu, rc = kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_PREFIXED); KVM_S390_STORE_STATUS_PREFIXED);
if (rc == -EFAULT) rc |= put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
exception = 1; rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic); rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
if (rc == -EFAULT) __LC_MCK_NEW_PSW, sizeof(psw_t));
exception = 1;
rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_MCK_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
break; break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@ -388,67 +308,44 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
vcpu->stat.deliver_io_int++; vcpu->stat.deliver_io_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
param0, param1); param0, param1);
rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID, rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
inti->io.subchannel_id); inti->io.subchannel_id);
if (rc == -EFAULT) rc |= put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
exception = 1; inti->io.subchannel_nr);
rc |= put_guest_u32(vcpu, __LC_IO_INT_PARM,
rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR, inti->io.io_int_parm);
inti->io.subchannel_nr); rc |= put_guest_u32(vcpu, __LC_IO_INT_WORD,
if (rc == -EFAULT) inti->io.io_int_word);
exception = 1; rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = put_guest_u32(vcpu, __LC_IO_INT_PARM, rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
inti->io.io_int_parm); __LC_IO_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
inti->io.io_int_word);
if (rc == -EFAULT)
exception = 1;
rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_IO_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
break; break;
} }
default: default:
BUG(); BUG();
} }
if (exception) { if (rc) {
printk("kvm: The guest lowcore is not mapped during interrupt " printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n"); "delivery, killing userspace\n");
do_exit(SIGKILL); do_exit(SIGKILL);
} }
} }
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{ {
int rc, exception = 0; int rc;
if (psw_extint_disabled(vcpu)) if (psw_extint_disabled(vcpu))
return 0; return 0;
if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
return 0; return 0;
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
if (rc == -EFAULT) rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
exception = 1; &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t)); __LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT) if (rc) {
exception = 1;
rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
if (exception) {
printk("kvm: The guest lowcore is not mapped during interrupt " printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n"); "delivery, killing userspace\n");
do_exit(SIGKILL); do_exit(SIGKILL);

View file

@ -108,7 +108,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
} }
rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id); rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
if (rc == -EFAULT) { if (rc) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out; goto out;
} }
@ -230,7 +230,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list)); &facility_list, sizeof(facility_list));
if (rc == -EFAULT) if (rc)
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
else { else {
VCPU_EVENT(vcpu, 5, "store facility list value %x", VCPU_EVENT(vcpu, 5, "store facility list value %x",
@ -348,7 +348,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
} }
rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data); rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
if (rc == -EFAULT) { if (rc) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out; goto out;
} }