KVM: PPC: Book3S HV: Translate kvmhv_commence_exit to C
This replaces the assembler code for kvmhv_commence_exit() with C code
in book3s_hv_builtin.c. It also moves the IPI sending code that was in
book3s_hv_rm_xics.c into a new kvmhv_rm_send_ipi() function so it can
be used by kvmhv_commence_exit() as well as icp_rm_set_vcpu_irq().

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
commit eddb60fb14
parent 6af27c847a

4 changed files with 75 additions and 68 deletions
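The core of the translation is the lock-free update of vcore->entry_exit_map: the hand-coded lwarx/stwcx. reservation loop in the deleted assembly becomes a cmpxchg() loop in C (see the new kvmhv_commence_exit() below). Here is a minimal userspace sketch of that pattern, using GCC's __atomic builtins as a stand-in for the kernel's cmpxchg(); entry_exit_map and commence_exit() are local illustrations, not the kernel symbols:

#include <stdint.h>
#include <stdio.h>

/* Low 8 bits: threads that entered the guest; 0xff00 bits: threads exiting. */
static uint32_t entry_exit_map;

/* Returns 1 if this thread was the first to commence an exit. */
static int commence_exit(int ptid)
{
        uint32_t me = 0x100u << ptid;
        uint32_t ee = __atomic_load_n(&entry_exit_map, __ATOMIC_RELAXED);

        /* On failure the builtin refreshes ee with the current value,
           so we just retry until our bit lands. */
        while (!__atomic_compare_exchange_n(&entry_exit_map, &ee, ee | me,
                                            0, __ATOMIC_SEQ_CST,
                                            __ATOMIC_RELAXED))
                ;
        return (ee >> 8) == 0;          /* no exit bits set before ours */
}

int main(void)
{
        entry_exit_map = 0x3;           /* threads 0 and 1 entered */
        printf("thread 0 first? %d\n", commence_exit(0));       /* 1 */
        printf("thread 1 first? %d\n", commence_exit(1));       /* 0 */
        return 0;
}

On POWER, cmpxchg() compiles down to essentially the lwarx/or/stwcx./bne sequence that the deleted assembly spelled out by hand.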

arch/powerpc/include/asm/kvm_book3s_64.h
@@ -438,6 +438,8 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
 
 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
 
+extern void kvmhv_rm_send_ipi(int cpu);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */

arch/powerpc/kvm/book3s_hv_builtin.c
@@ -22,6 +22,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/archrandom.h>
+#include <asm/xics.h>
 
 #define KVM_CMA_CHUNK_ORDER     18
@@ -184,3 +185,65 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
 
         return H_HARDWARE;
 }
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+        __asm__ __volatile__("stbcix %0,0,%1"
+                : : "r" (val), "r" (paddr) : "memory");
+}
+
+/*
+ * Send an interrupt to another CPU.
+ * This can only be called in real mode.
+ * The caller needs to include any barrier needed to order writes
+ * to memory vs. the IPI/message.
+ */
+void kvmhv_rm_send_ipi(int cpu)
+{
+        unsigned long xics_phys;
+
+        /* Poke the target */
+        xics_phys = paca[cpu].kvm_hstate.xics_phys;
+        rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+/*
+ * The following functions are called from the assembly code
+ * in book3s_hv_rmhandlers.S.
+ */
+static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
+{
+        int cpu = vc->pcpu;
+
+        /* Order setting of exit map vs. msgsnd/IPI */
+        smp_mb();
+        for (; active; active >>= 1, ++cpu)
+                if (active & 1)
+                        kvmhv_rm_send_ipi(cpu);
+}
+
+void kvmhv_commence_exit(int trap)
+{
+        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+        int ptid = local_paca->kvm_hstate.ptid;
+        int me, ee;
+
+        /* Set our bit in the threads-exiting-guest map in the 0xff00
+           bits of vcore->entry_exit_map */
+        me = 0x100 << ptid;
+        do {
+                ee = vc->entry_exit_map;
+        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
+
+        /* Are we the first here? */
+        if ((ee >> 8) != 0)
+                return;
+
+        /*
+         * Trigger the other threads in this vcore to exit the guest.
+         * If this is a hypervisor decrementer interrupt then they
+         * will be already on their way out of the guest.
+         */
+        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
+                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+}
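For reference, kvmhv_interrupt_vcore() above shifts through the active-thread mask and pokes each hardware thread whose bit is set. A self-contained sketch of that walk, with send_ipi() as a hypothetical stand-in for kvmhv_rm_send_ipi(), which needs real-mode access to the XICS:

#include <stdio.h>

static void send_ipi(int cpu)           /* stand-in for kvmhv_rm_send_ipi() */
{
        printf("IPI -> cpu %d\n", cpu);
}

static void interrupt_vcore(int pcpu, int active)
{
        int cpu = pcpu;

        /* The kernel does smp_mb() here to order the exit-map
           update vs. the IPIs. */
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        send_ipi(cpu);
}

int main(void)
{
        interrupt_vcore(4, 0x5);        /* pokes cpus 4 and 6 */
        return 0;
}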

arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -26,12 +26,6 @@
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                         u32 new_irq);
 
-static inline void rm_writeb(unsigned long paddr, u8 val)
-{
-        __asm__ __volatile__("sync; stbcix %0,0,%1"
-                : : "r" (val), "r" (paddr) : "memory");
-}
-
 /* -- ICS routines -- */
 static void ics_rm_check_resend(struct kvmppc_xics *xics,
                         struct kvmppc_ics *ics, struct kvmppc_icp *icp)
@@ -60,7 +54,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                         struct kvm_vcpu *this_vcpu)
 {
         struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
-        unsigned long xics_phys;
         int cpu;
 
         /* Mark the target VCPU as having an interrupt pending */
@@ -83,9 +76,8 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
         /* In SMT cpu will always point to thread 0, we adjust it */
         cpu += vcpu->arch.ptid;
 
-        /* Not too hard, then poke the target */
-        xics_phys = paca[cpu].kvm_hstate.xics_phys;
-        rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+        smp_mb();
+        kvmhv_rm_send_ipi(cpu);
 }
 
 static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
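Note how the ordering responsibility moves to the caller: the old rm_writeb() in this file had a leading sync in its stbcix sequence, while the new shared kvmhv_rm_send_ipi() has none, so icp_rm_set_vcpu_irq() now issues smp_mb() itself before sending. A userspace sketch of that contract (pending, send_ipi() and set_vcpu_irq() are hypothetical stand-ins, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;              /* stands in for the ICP state */

static void send_ipi(int cpu)           /* no barrier, like kvmhv_rm_send_ipi() */
{
        printf("poke cpu %d\n", cpu);
}

static void set_vcpu_irq(int cpu)
{
        atomic_store_explicit(&pending, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        send_ipi(cpu);
}

int main(void)
{
        set_vcpu_irq(3);
        return 0;
}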

arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -264,7 +264,11 @@ kvm_novcpu_exit:
         addi    r3, r4, VCPU_TB_RMEXIT
         bl      kvmhv_accumulate_time
 #endif
-13:     bl      kvmhv_commence_exit
+13:     mr      r3, r12
+        stw     r12, 112-4(r1)
+        bl      kvmhv_commence_exit
+        nop
+        lwz     r12, 112-4(r1)
         b       kvmhv_switch_to_host
 
 /*
@@ -1161,6 +1165,9 @@ mc_cont:
 
         /* Increment exit count, poke other threads to exit */
         bl      kvmhv_commence_exit
+        nop
+        ld      r9, HSTATE_KVM_VCPU(r13)
+        lwz     r12, VCPU_TRAP(r9)
 
         /* Save guest CTRL register, set runlatch to 1 */
         mfspr   r6,SPRN_CTRLF
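Both call sites above follow the PPC64 ELF ABI: the trap number is copied from r12 into r3, the first-argument register, and because the C function may clobber the volatile registers, r12 (and, at the mc_cont site, the vcpu pointer in r9) is saved or reloaded around the call. In C terms the assembly is simply invoking:

/* New C implementation in book3s_hv_builtin.c; the asm call sites
   marshal the trap number into r3 per the PPC64 ELF ABI. */
void kvmhv_commence_exit(int trap);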
@@ -1614,63 +1621,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
         mtlr    r0
         blr
 
-kvmhv_commence_exit:            /* r12 = trap, r13 = paca, doesn't trash r9 */
-        mflr    r0
-        std     r0, PPC_LR_STKOFF(r1)
-        stdu    r1, -PPC_MIN_STKFRM(r1)
-
-        /* Set our bit in the threads-exiting-guest map in the 0xff00
-           bits of vcore->entry_exit_map */
-        ld      r5, HSTATE_KVM_VCORE(r13)
-        lbz     r4, HSTATE_PTID(r13)
-        li      r7, 0x100
-        sld     r7, r7, r4
-        addi    r6, r5, VCORE_ENTRY_EXIT
-41:     lwarx   r3, 0, r6
-        or      r0, r3, r7
-        stwcx.  r0, 0, r6
-        bne     41b
-        isync           /* order stwcx. vs. reading napping_threads */
-
-        /*
-         * At this point we have an interrupt that we have to pass
-         * up to the kernel or qemu; we can't handle it in real mode.
-         * Thus we have to do a partition switch, so we have to
-         * collect the other threads, if we are the first thread
-         * to take an interrupt.  To do this, we send a message or
-         * IPI to all the threads that have their bit set in the entry
-         * map in vcore->entry_exit_map (other than ourselves).
-         * However, we don't need to bother if this is an HDEC
-         * interrupt, since the other threads will already be on their
-         * way here in that case.
-         */
-        cmpwi   r3,0x100        /* Are we the first here? */
-        bge     43f
-        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-        beq     43f
-
-        srwi    r0,r7,8
-        andc.   r3,r3,r0        /* no sense IPI'ing ourselves */
-        beq     43f
-        /* Order entry/exit update vs. IPIs */
-        sync
-        mulli   r4,r4,PACA_SIZE /* get paca for thread 0 */
-        subf    r6,r4,r13
-42:     andi.   r0,r3,1
-        beq     44f
-        ld      r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
-        li      r0,IPI_PRIORITY
-        li      r7,XICS_MFRR
-        stbcix  r0,r7,r8                /* trigger the IPI */
-44:     srdi.   r3,r3,1
-        addi    r6,r6,PACA_SIZE
-        bne     42b
-
-43:     ld      r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
-        addi    r1, r1, PPC_MIN_STKFRM
-        mtlr    r0
-        blr
-
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
  * If it is an HPTE not found fault that is due to the guest accessing