author    Alexander Graf <agraf@suse.de>    2014-04-24 13:46:24 +0200
committer Alexander Graf <agraf@suse.de>    2014-05-30 14:26:21 +0200
commit    5deb8e7ad8ac7e3fcdfa042acff617f461b361c2 (patch)
tree      ccfc251ffbadfa4297aefcfe17dee807eba7ce73    /arch/powerpc/kvm/book3s.c
parent    2743103f91e20d4c4d4f1a8d00821289c4c6ff62 (diff)
KVM: PPC: Make shared struct aka magic page guest endian
The shared (magic) page is a data structure that contains often used supervisor privileged SPRs accessible via memory to the user to reduce the number of exits we have to take to read/write them.

When we actually share this structure with the guest we have to maintain it in guest endianness, because some of the patch tricks only work with native endian load/store operations.

Since we only share the structure with either host or guest in little endian on book3s_64 pr mode, we don't have to worry about booke or book3s hv.

For booke, the shared struct stays big endian. For book3s_64 hv we maintain the struct in host native endian, since it never gets shared with the guest.

For book3s_64 pr we introduce a variable that tells us which endianness the shared struct is in and route every access to it through helper inline functions that evaluate this variable.

Signed-off-by: Alexander Graf <agraf@suse.de>
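The accessors this diff switches to (kvmppc_get_msr(), kvmppc_set_srr0(), ...) live in the KVM PPC headers rather than in this file. A minimal sketch of the pattern the commit message describes, where the per-vcpu endianness flag, config symbols and wrapper bodies are an illustration of the idea rather than the exact header contents:

/*
 * Illustrative sketch: route every shared-page access through a helper
 * that checks which endianness the shared struct is kept in.
 * Field and symbol names here are assumptions for the example.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Book3S_64 PR: the guest may run the shared page little endian. */
	return vcpu->arch.shared_big_endian;
#else
	/* BookE and Book3S HV: the shared struct stays big endian. */
	return true;
#endif
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu)
{
	/* Convert from the stored (guest) endianness to host endianness. */
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->msr);
	else
		return le64_to_cpu(vcpu->arch.shared->msr);
}

static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	/* Store in the shared struct's endianness; a plain field write,
	 * without the extra side effects of the full kvmppc_set_msr(). */
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}

The same get/set pair is repeated (in practice via a macro) for every shared-struct field touched below: srr0/srr1, sprg0-7, dar, dsisr, int_pending and critical.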
Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--    arch/powerpc/kvm/book3s.c    72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 94e597e6f15c..81abc5cef0fe 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
if (is_kvmppc_hv_enabled(vcpu->kvm))
return;
if (pending_now)
- vcpu->arch.shared->int_pending = 1;
+ kvmppc_set_int_pending(vcpu, 1);
else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
+ kvmppc_set_int_pending(vcpu, 0);
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
@@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
if (is_kvmppc_hv_enabled(vcpu->kvm))
return false;
- crit_raw = vcpu->arch.shared->critical;
+ crit_raw = kvmppc_get_critical(vcpu);
crit_r1 = kvmppc_get_gpr(vcpu, 1);
/* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
crit_raw &= 0xffffffff;
crit_r1 &= 0xffffffff;
}
@@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
/* Critical section when crit == r1 */
crit = (crit_raw == crit_r1);
/* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+ crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
return crit;
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
- vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
- vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
+ kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+ kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -225,12 +225,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
- deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+ deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
- deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+ deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -343,7 +343,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
{
ulong mp_pa = vcpu->arch.magic_page_pa;
- if (!(vcpu->arch.shared->msr & MSR_SF))
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF))
mp_pa = (uint32_t)mp_pa;
/* Magic page override */
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
bool iswrite, struct kvmppc_pte *pte)
{
- int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
+ int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
int r;
if (relocated) {
@@ -498,18 +498,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->ctr = kvmppc_get_ctr(vcpu);
regs->lr = kvmppc_get_lr(vcpu);
regs->xer = kvmppc_get_xer(vcpu);
- regs->msr = vcpu->arch.shared->msr;
- regs->srr0 = vcpu->arch.shared->srr0;
- regs->srr1 = vcpu->arch.shared->srr1;
+ regs->msr = kvmppc_get_msr(vcpu);
+ regs->srr0 = kvmppc_get_srr0(vcpu);
+ regs->srr1 = kvmppc_get_srr1(vcpu);
regs->pid = vcpu->arch.pid;
- regs->sprg0 = vcpu->arch.shared->sprg0;
- regs->sprg1 = vcpu->arch.shared->sprg1;
- regs->sprg2 = vcpu->arch.shared->sprg2;
- regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.shared->sprg4;
- regs->sprg5 = vcpu->arch.shared->sprg5;
- regs->sprg6 = vcpu->arch.shared->sprg6;
- regs->sprg7 = vcpu->arch.shared->sprg7;
+ regs->sprg0 = kvmppc_get_sprg0(vcpu);
+ regs->sprg1 = kvmppc_get_sprg1(vcpu);
+ regs->sprg2 = kvmppc_get_sprg2(vcpu);
+ regs->sprg3 = kvmppc_get_sprg3(vcpu);
+ regs->sprg4 = kvmppc_get_sprg4(vcpu);
+ regs->sprg5 = kvmppc_get_sprg5(vcpu);
+ regs->sprg6 = kvmppc_get_sprg6(vcpu);
+ regs->sprg7 = kvmppc_get_sprg7(vcpu);
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -527,16 +527,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvmppc_set_lr(vcpu, regs->lr);
kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
- vcpu->arch.shared->srr0 = regs->srr0;
- vcpu->arch.shared->srr1 = regs->srr1;
- vcpu->arch.shared->sprg0 = regs->sprg0;
- vcpu->arch.shared->sprg1 = regs->sprg1;
- vcpu->arch.shared->sprg2 = regs->sprg2;
- vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.shared->sprg4 = regs->sprg4;
- vcpu->arch.shared->sprg5 = regs->sprg5;
- vcpu->arch.shared->sprg6 = regs->sprg6;
- vcpu->arch.shared->sprg7 = regs->sprg7;
+ kvmppc_set_srr0(vcpu, regs->srr0);
+ kvmppc_set_srr1(vcpu, regs->srr1);
+ kvmppc_set_sprg0(vcpu, regs->sprg0);
+ kvmppc_set_sprg1(vcpu, regs->sprg1);
+ kvmppc_set_sprg2(vcpu, regs->sprg2);
+ kvmppc_set_sprg3(vcpu, regs->sprg3);
+ kvmppc_set_sprg4(vcpu, regs->sprg4);
+ kvmppc_set_sprg5(vcpu, regs->sprg5);
+ kvmppc_set_sprg6(vcpu, regs->sprg6);
+ kvmppc_set_sprg7(vcpu, regs->sprg7);
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -570,10 +570,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = 0;
switch (reg->id) {
case KVM_REG_PPC_DAR:
- val = get_reg_val(reg->id, vcpu->arch.shared->dar);
+ val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
- val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
+ val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
@@ -660,10 +660,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = 0;
switch (reg->id) {
case KVM_REG_PPC_DAR:
- vcpu->arch.shared->dar = set_reg_val(reg->id, val);
+ kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_DSISR:
- vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
+ kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;