Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h   | 10 ++++++++++
-rw-r--r--	arch/arm/kvm/mmu.c               | 27 +++++++++++++++++++++++++--
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h | 33 +++++++++++++++++++++++++++++++++
-rw-r--r--	arch/arm64/kvm/hyp-init.S        | 25 +++++++++++++++++++++++++
4 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index bf0fe99e8ca9..018760675b56 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -270,6 +270,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return false;
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+				       pgd_t *hyp_pgd,
+				       pgd_t *merged_hyp_pgd,
+				       unsigned long hyp_idmap_start) { }
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 42a24d6b003b..69c2b4ce6160 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -35,6 +35,7 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static pgd_t *boot_hyp_pgd;
 static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
 static unsigned long hyp_idmap_start;
@@ -434,6 +435,11 @@ void free_hyp_pgds(void)
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
 	}
+	if (merged_hyp_pgd) {
+		clear_page(merged_hyp_pgd);
+		free_page((unsigned long)merged_hyp_pgd);
+		merged_hyp_pgd = NULL;
+	}
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
@@ -1473,12 +1479,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 
 phys_addr_t kvm_mmu_get_httbr(void)
 {
-	return virt_to_phys(hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(hyp_pgd);
 }
 
 phys_addr_t kvm_mmu_get_boot_httbr(void)
 {
-	return virt_to_phys(boot_hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(boot_hyp_pgd);
 }
 
 phys_addr_t kvm_get_idmap_vector(void)
@@ -1521,6 +1533,17 @@ int kvm_mmu_init(void)
 		goto out;
 	}
 
+	if (__kvm_cpu_uses_extended_idmap()) {
+		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!merged_hyp_pgd) {
+			kvm_err("Failed to allocate extra HYP pgd\n");
+			goto out;
+		}
+		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+				    hyp_idmap_start);
+		return 0;
+	}
+
 	/* Map the very same page at the trampoline VA */
 	err = __create_hyp_mappings(boot_hyp_pgd, TRAMPOLINE_VA,
 				    TRAMPOLINE_VA + PAGE_SIZE,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b5373142..edfe6864bc28 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -68,6 +68,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
 
 #define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
 
@@ -305,5 +307,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return __cpu_uses_extended_idmap();
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+				       pgd_t *hyp_pgd,
+				       pgd_t *merged_hyp_pgd,
+				       unsigned long hyp_idmap_start)
+{
+	int idmap_idx;
+
+	/*
+	 * Use the first entry to access the HYP mappings. It is
+	 * guaranteed to be free, otherwise we wouldn't use an
+	 * extended idmap.
+	 */
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
+	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+
+	/*
+	 * Create another extended level entry that points to the boot HYP map,
+	 * which contains an ID mapping of the HYP init code. We essentially
+	 * merge the boot and runtime HYP maps by doing so, but they don't
+	 * overlap anyway, so this is fine.
+	 */
+	idmap_idx = hyp_idmap_start >> VA_BITS;
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
+	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index c3191168a994..178ba2248a98 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -20,6 +20,7 @@
 #include <asm/assembler.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/pgtable-hwdef.h>
 
 	.text
 	.pushsection	.hyp.idmap.text, "ax"
@@ -65,6 +66,25 @@ __do_hyp_init:
 	and	x4, x4, x5
 	ldr	x5, =TCR_EL2_FLAGS
 	orr	x4, x4, x5
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+	/*
+	 * If we are running with VA_BITS < 48, we may be running with an extra
+	 * level of translation in the ID map. This is only the case if system
+	 * RAM is out of range for the currently configured page size and number
+	 * of translation levels, in which case we will also need the extra
+	 * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
+	 *
+	 * However, at EL2, there is only one TTBR register, and we can't switch
+	 * between translation tables *and* update TCR_EL2.T0SZ at the same
+	 * time. Bottom line: we need the extra level in *both* our translation
+	 * tables.
+	 *
+	 * So use the same T0SZ value we use for the ID map.
+	 */
+	ldr_l	x5, idmap_t0sz
+	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+#endif
 	msr	tcr_el2, x4
 
 	ldr	x4, =VTCR_EL2_FLAGS
@@ -91,6 +111,10 @@ __do_hyp_init:
 	msr	sctlr_el2, x4
 	isb
 
+	/* Skip the trampoline dance if we merged the boot and runtime PGDs */
+	cmp	x0, x1
+	b.eq	merged
+
 	/* MMU is now enabled. Get ready for the trampoline dance */
 	ldr	x4, =TRAMPOLINE_VA
 	adr	x5, target
@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */
 	tlbi	alle2
 	dsb	sy
 
+merged:
 	/* Set the stack and new vectors */
 	kern_hyp_va	x2
 	mov	sp, x2
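Note on the index arithmetic in __kvm_extend_hypmap(): the runtime HYP mappings go into entry 0 of the merged pgd, while the idmap entry is selected by hyp_idmap_start >> VA_BITS, i.e. the extra-level slot covering the idmap's own physical address. The standalone sketch below (not part of the patch) illustrates this with hypothetical numbers: a 39-bit VA configuration whose HYP init code sits above the 1 << 39 boundary, which is exactly the situation that triggers the extended idmap.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical: 39-bit VA configuration, for illustration only. */
	#define VA_BITS	39

	int main(void)
	{
		/* Hypothetical physical address of the HYP idmap text,
		 * above the 1 << 39 boundary. */
		uint64_t hyp_idmap_start = 0x8080000000ULL;

		/* The idmap lands in the extra-level slot covering its own
		 * physical address; entry 0 holds the runtime HYP mappings. */
		unsigned int idmap_idx = hyp_idmap_start >> VA_BITS;

		printf("runtime HYP mappings: entry 0, idmap: entry %u\n",
		       idmap_idx);	/* prints "idmap: entry 1" */
		return 0;
	}

If the idmap were reachable through entry 0, the extended idmap would not be in use at all, so the two entries can never collide; that invariant is what the VM_BUG_ON() checks in the patch assert.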
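Likewise, the bfi in hyp-init.S copies the kernel's idmap_t0sz into the T0SZ field of TCR_EL2, so EL2 gets the same input-address range as the kernel ID map. A hedged C rendering of that bitfield insert follows; the offset and width match the architectural TCR layout (T0SZ in bits [5:0]), but the flag value and the 48-bit range are placeholder assumptions.

	#include <stdint.h>
	#include <stdio.h>

	#define TCR_T0SZ_OFFSET	0	/* T0SZ occupies bits [5:0] */
	#define TCR_TxSZ_WIDTH	6

	/* C equivalent of "bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH":
	 * insert the low 'width' bits of 'val' into 'reg' at bit 'off'. */
	static uint64_t bfi(uint64_t reg, uint64_t val, unsigned int off,
			    unsigned int width)
	{
		uint64_t mask = ((1ULL << width) - 1) << off;

		return (reg & ~mask) | ((val << off) & mask);
	}

	int main(void)
	{
		uint64_t tcr_el2 = 0;		/* TCR_EL2_FLAGS omitted here */
		uint64_t idmap_t0sz = 64 - 48;	/* T0SZ for a 48-bit idmap range */

		tcr_el2 = bfi(tcr_el2, idmap_t0sz,
			      TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH);
		printf("TCR_EL2.T0SZ = %llu\n",
		       (unsigned long long)(tcr_el2 & 0x3f));	/* prints 16 */
		return 0;
	}

Because EL2 has a single TTBR, the T0SZ update cannot be deferred to a table switch, which is why the patch applies the idmap's T0SZ to TCR_EL2 unconditionally whenever VA_BITS < 48.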