From 37a34ac1d4775aafbc73b9db53c7daebbbc67e6a Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 22 Sep 2014 15:52:48 +0100 Subject: arm: kvm: fix CPU hotplug On some platforms with no power management capabilities, the hotplug implementation is allowed to return from a smp_ops.cpu_die() call as a function return. Upon a CPU onlining event, the KVM CPU notifier tries to reinstall the hyp stub, which fails on platforms where no reset took place following a hotplug event, with the message: CPU1: smp_ops.cpu_die() returned, trying to resuscitate CPU1: Booted secondary processor Kernel panic - not syncing: unexpected prefetch abort in Hyp mode at: 0x80409540 unexpected data abort in Hyp mode at: 0x80401fe8 unexpected HVC/SVC trap in Hyp mode at: 0x805c6170 since KVM code is trying to reinstall the stub on a system where it is already configured. To prevent this issue, this patch adds a check in the KVM hotplug notifier that detects if the HYP stub really needs re-installing when a CPU is onlined and skips the installation call if the stub is already in place, which means that the CPU has not been reset. Signed-off-by: Vladimir Murzin Acked-by: Lorenzo Pieralisi Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/arm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 779605122f32..45e5f67e63b1 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -808,7 +808,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self, switch (action) { case CPU_STARTING: case CPU_STARTING_FROZEN: - cpu_init_hyp_mode(NULL); + if (__hyp_get_vectors() == hyp_default_vectors) + cpu_init_hyp_mode(NULL); break; } -- cgit v1.2.3 From e9e8578b6cdfba0259e99e79f0cb80c55c99b3c2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 17 Sep 2014 14:56:16 -0700 Subject: arm/arm64: KVM: use __GFP_ZERO not memset() to get zeroed pages Pass __GFP_ZERO to __get_free_pages() instead of calling memset() explicitly. Acked-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index eea03069161b..8f0c7a4c49fc 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -528,11 +528,10 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) return -EINVAL; } - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); + pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); if (!pgd) return -ENOMEM; - memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; -- cgit v1.2.3 From 37b544087ef3f65ca68465ba39291a07195dac26 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 17 Sep 2014 14:56:17 -0700 Subject: arm/arm64: KVM: fix potential NULL dereference in user_mem_abort() Handle the potential NULL return value of find_vma_intersection() before dereferencing it.
Acked-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 8f0c7a4c49fc..bb2e110e77c0 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -776,6 +776,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* Let's check if we will get back a huge page backed by hugetlbfs */ down_read(&current->mm->mmap_sem); vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + up_read(&current->mm->mmap_sem); + return -EFAULT; + } + if (is_vm_hugetlb_page(vma)) { hugetlb = true; gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; -- cgit v1.2.3 From c40f2f8ff833eddc02cb599ef6e5a162223449ba Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 17 Sep 2014 14:56:18 -0700 Subject: arm/arm64: KVM: add 'writable' parameter to kvm_phys_addr_ioremap Add support for read-only MMIO passthrough mappings by adding a 'writable' parameter to kvm_phys_addr_ioremap. For the moment, mappings will be read-write even if 'writable' is false, but once the definition of PAGE_S2_DEVICE gets changed, those mappings will be created read-only. Acked-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index bb2e110e77c0..a7eabd1c4287 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -674,7 +674,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, * @size: The size of the mapping */ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, - phys_addr_t pa, unsigned long size) + phys_addr_t pa, unsigned long size, bool writable) { phys_addr_t addr, end; int ret = 0; @@ -687,6 +687,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); + if (writable) + kvm_set_s2pte_writable(&pte); + ret = mmu_topup_memory_cache(&cache, 2, 2); if (ret) goto out; -- cgit v1.2.3 From 8eef91239e57d2e932e7470879c9a504d5494ebb Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 10 Oct 2014 17:00:32 +0200 Subject: arm/arm64: KVM: map MMIO regions at creation time There is really no point in faulting in memory regions page by page if they are not backed by demand paged system RAM but by a linear passthrough mapping of a host MMIO region. So instead, detect such regions at setup time and install the mappings for the backing all at once.
Acked-by: Marc Zyngier Reviewed-by: Christoffer Dall Signed-off-by: Ard Biesheuvel Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 69 insertions(+), 8 deletions(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index a7eabd1c4287..6038027ab1d6 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1134,13 +1134,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, enum kvm_mr_change change) { - gpa_t gpa = old->base_gfn << PAGE_SHIFT; - phys_addr_t size = old->npages << PAGE_SHIFT; - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - spin_lock(&kvm->mmu_lock); - unmap_stage2_range(kvm, gpa, size); - spin_unlock(&kvm->mmu_lock); - } } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -1148,7 +1141,69 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { - return 0; + hva_t hva = mem->userspace_addr; + hva_t reg_end = hva + mem->memory_size; + bool writable = !(mem->flags & KVM_MEM_READONLY); + int ret = 0; + + if (change != KVM_MR_CREATE && change != KVM_MR_MOVE) + return 0; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we can map + * any of them right now. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Mapping a read-only VMA is only allowed if the + * memory region is configured as read-only. + */ + if (writable && !(vma->vm_flags & VM_WRITE)) { + ret = -EPERM; + break; + } + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (vma->vm_flags & VM_PFNMAP) { + gpa_t gpa = mem->guest_phys_addr + + (vm_start - mem->userspace_addr); + phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + + vm_start - vma->vm_start; + + ret = kvm_phys_addr_ioremap(kvm, gpa, pa, + vm_end - vm_start, + writable); + if (ret) + break; + } + hva = vm_end; + } while (hva < reg_end); + + if (ret) { + spin_lock(&kvm->mmu_lock); + unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); + spin_unlock(&kvm->mmu_lock); + } + return ret; } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, @@ -1173,4 +1228,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { + gpa_t gpa = slot->base_gfn << PAGE_SHIFT; + phys_addr_t size = slot->npages << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); + unmap_stage2_range(kvm, gpa, size); + spin_unlock(&kvm->mmu_lock); } -- cgit v1.2.3 From 38f791a4e499792eeb2a3c0572dd5133511c5bbb Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 10 Oct 2014 12:14:28 +0200 Subject: arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2 This patch adds the necessary support for all host kernel PGSIZE and VA_SPACE configuration options for both EL2 and the Stage-2 page tables. 
However, for 40bit and 42bit PARange systems, the architecture mandates that VTCR_EL2.SL0 is at most 1, resulting in fewer levels of stage-2 page tables than levels of host kernel page tables. At the same time, for systems with a PARange > 42bit, we limit the IPA range by always setting VTCR_EL2.T0SZ to 24. To solve the situation with different levels of page tables for Stage-2 translation than the host kernel page tables, we allocate a dummy PGD with pointers to our actual initial level Stage-2 page table, in order for us to reuse the kernel pgtable manipulation primitives. Reproducing all these in KVM does not look pretty and unnecessarily complicates the 32-bit side. Systems with a PARange < 40bits are not yet supported. [ I have reworked this patch from its original form submitted by Jungseok to take the architecture constraints into consideration. There were too many changes from the original patch for me to preserve the authorship. Thanks to Catalin Marinas for his help in figuring out a good solution to this challenge. I have also fixed various bugs and missing error code handling from the original patch. - Christoffer ] Reviewed-by: Catalin Marinas Acked-by: Marc Zyngier Signed-off-by: Jungseok Lee Signed-off-by: Christoffer Dall --- arch/arm/kvm/arm.c | 2 +- arch/arm/kvm/mmu.c | 133 ++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 107 insertions(+), 28 deletions(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 45e5f67e63b1..9e193c8a959e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -409,7 +409,7 @@ static void update_vttbr(struct kvm *kvm) kvm_next_vmid++; /* update vttbr to be used with the new vmid */ - pgd_phys = virt_to_phys(kvm->arch.pgd); + pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; kvm->arch.vttbr = pgd_phys | vmid; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 6038027ab1d6..ee142edcca84 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -42,7 +42,7 @@ static unsigned long hyp_idmap_start; static unsigned long hyp_idmap_end; static phys_addr_t hyp_idmap_vector; -#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) +#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) @@ -134,7 +134,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, } } while (pte++, addr += PAGE_SIZE, addr != end); - if (kvm_pte_table_empty(start_pte)) + if (kvm_pte_table_empty(kvm, start_pte)) clear_pmd_entry(kvm, pmd, start_addr); } @@ -158,7 +158,7 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud, } } while (pmd++, addr = next, addr != end); - if (kvm_pmd_table_empty(start_pmd)) + if (kvm_pmd_table_empty(kvm, start_pmd)) clear_pud_entry(kvm, pud, start_addr); } @@ -182,7 +182,7 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd, } } while (pud++, addr = next, addr != end); - if (kvm_pud_table_empty(start_pud)) + if (kvm_pud_table_empty(kvm, start_pud)) clear_pgd_entry(kvm, pgd, start_addr); } @@ -306,7 +306,7 @@ void free_boot_hyp_pgd(void) if (boot_hyp_pgd) { unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); - free_pages((unsigned long)boot_hyp_pgd, pgd_order); + free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); boot_hyp_pgd = NULL; } @@ -343,7 +343,7 @@ void free_hyp_pgds(void) for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr
+= PGDIR_SIZE) unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); - free_pages((unsigned long)hyp_pgd, pgd_order); + free_pages((unsigned long)hyp_pgd, hyp_pgd_order); hyp_pgd = NULL; } @@ -401,13 +401,46 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, return 0; } +static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start, + unsigned long end, unsigned long pfn, + pgprot_t prot) +{ + pud_t *pud; + pmd_t *pmd; + unsigned long addr, next; + int ret; + + addr = start; + do { + pud = pud_offset(pgd, addr); + + if (pud_none_or_clear_bad(pud)) { + pmd = pmd_alloc_one(NULL, addr); + if (!pmd) { + kvm_err("Cannot allocate Hyp pmd\n"); + return -ENOMEM; + } + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + kvm_flush_dcache_to_poc(pud, sizeof(*pud)); + } + + next = pud_addr_end(addr, end); + ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); + if (ret) + return ret; + pfn += (next - addr) >> PAGE_SHIFT; + } while (addr = next, addr != end); + + return 0; +} + static int __create_hyp_mappings(pgd_t *pgdp, unsigned long start, unsigned long end, unsigned long pfn, pgprot_t prot) { pgd_t *pgd; pud_t *pud; - pmd_t *pmd; unsigned long addr, next; int err = 0; @@ -416,22 +449,21 @@ static int __create_hyp_mappings(pgd_t *pgdp, end = PAGE_ALIGN(end); do { pgd = pgdp + pgd_index(addr); - pud = pud_offset(pgd, addr); - if (pud_none_or_clear_bad(pud)) { - pmd = pmd_alloc_one(NULL, addr); - if (!pmd) { - kvm_err("Cannot allocate Hyp pmd\n"); + if (pgd_none(*pgd)) { + pud = pud_alloc_one(NULL, addr); + if (!pud) { + kvm_err("Cannot allocate Hyp pud\n"); err = -ENOMEM; goto out; } - pud_populate(NULL, pud, pmd); - get_page(virt_to_page(pud)); - kvm_flush_dcache_to_poc(pud, sizeof(*pud)); + pgd_populate(NULL, pgd, pud); + get_page(virt_to_page(pgd)); + kvm_flush_dcache_to_poc(pgd, sizeof(*pgd)); } next = pgd_addr_end(addr, end); - err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); + err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); if (err) goto out; pfn += (next - addr) >> PAGE_SHIFT; @@ -521,6 +553,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) */ int kvm_alloc_stage2_pgd(struct kvm *kvm) { + int ret; pgd_t *pgd; if (kvm->arch.pgd != NULL) { @@ -528,14 +561,38 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) return -EINVAL; } - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + if (KVM_PREALLOC_LEVEL > 0) { + /* + * Allocate fake pgd for the page table manipulation macros to + * work. This is not used by the hardware and we have no + * alignment requirement for this allocation. + */ + pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), + GFP_KERNEL | __GFP_ZERO); + } else { + /* + * Allocate actual first-level Stage-2 page table used by the + * hardware for Stage-2 page table walks. 
+ */ + pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + } + if (!pgd) return -ENOMEM; + ret = kvm_prealloc_hwpgd(kvm, pgd); + if (ret) + goto out_err; + kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; - return 0; +out_err: + if (KVM_PREALLOC_LEVEL > 0) + kfree(pgd); + else + free_pages((unsigned long)pgd, S2_PGD_ORDER); + return ret; } /** @@ -571,19 +628,39 @@ void kvm_free_stage2_pgd(struct kvm *kvm) return; unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); + kvm_free_hwpgd(kvm); + if (KVM_PREALLOC_LEVEL > 0) + kfree(kvm->arch.pgd); + else + free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); kvm->arch.pgd = NULL; } -static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, phys_addr_t addr) { pgd_t *pgd; pud_t *pud; - pmd_t *pmd; pgd = kvm->arch.pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); + if (WARN_ON(pgd_none(*pgd))) { + if (!cache) + return NULL; + pud = mmu_memory_cache_alloc(cache); + pgd_populate(NULL, pgd, pud); + get_page(virt_to_page(pgd)); + } + + return pud_offset(pgd, addr); +} + +static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr) +{ + pud_t *pud; + pmd_t *pmd; + + pud = stage2_get_pud(kvm, cache, addr); if (pud_none(*pud)) { if (!cache) return NULL; @@ -629,7 +706,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, pmd_t *pmd; pte_t *pte, old_pte; - /* Create stage-2 page table mapping - Level 1 */ + /* Create stage-2 page table mapping - Levels 0 and 1 */ pmd = stage2_get_pmd(kvm, cache, addr); if (!pmd) { /* @@ -690,7 +767,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, if (writable) kvm_set_s2pte_writable(&pte); - ret = mmu_topup_memory_cache(&cache, 2, 2); + ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, + KVM_NR_MEM_OBJS); if (ret) goto out; spin_lock(&kvm->mmu_lock); @@ -805,7 +883,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, up_read(&current->mm->mmap_sem); /* We need minimum second+third level pages */ - ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); + ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, + KVM_NR_MEM_OBJS); if (ret) return ret; @@ -1080,8 +1159,8 @@ int kvm_mmu_init(void) (unsigned long)phys_base); } - hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); - boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); + hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); + boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); if (!hyp_pgd || !boot_hyp_pgd) { kvm_err("Hyp mode PGD not allocated\n"); -- cgit v1.2.3 From c3058d5da2222629bc2223c488a4512b59bb4baf Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 10 Oct 2014 12:14:29 +0200 Subject: arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE When creating or moving a memslot, make sure the IPA space is within the addressable range of the guest. Otherwise, user space can create too large a memslot and KVM would try to access potentially unallocated page table entries when inserting entries in the Stage-2 page tables.
Acked-by: Catalin Marinas Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ee142edcca84..8c40829d4a52 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1003,6 +1003,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) goto out_unlock; } + /* Userspace should not be able to register out-of-bounds IPAs */ + VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); + ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); if (ret == 0) ret = 1; @@ -1228,6 +1231,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (change != KVM_MR_CREATE && change != KVM_MR_MOVE) return 0; + /* + * Prevent userspace from creating a memory region outside of the IPA + * space addressable by the KVM guest IPA space. + */ + if (memslot->base_gfn + memslot->npages >= + (KVM_PHYS_SIZE >> PAGE_SHIFT)) + return -EFAULT; + /* * A memory region could potentially cover multiple VMAs, and any holes * between them, so iterate over all of them to find out if we can map -- cgit v1.2.3 From 3d08c629244257473450a8ba17cb8184b91e68f8 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Tue, 14 Oct 2014 15:02:15 +0100 Subject: arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort Commit: b886576 ARM: KVM: user_mem_abort: support stage 2 MMIO page mapping introduced some code in user_mem_abort that failed to compile if STRICT_MM_TYPECHECKS was enabled. This patch fixes up the failing comparison. Signed-off-by: Steve Capper Reviewed-by: Kim Phillips Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 8c40829d4a52..57a403a5c22b 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -930,7 +930,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, - mem_type == PAGE_S2_DEVICE); + pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); } -- cgit v1.2.3 From 2df36a5dd6792870bef48f63bfca42055ea5b79c Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 28 Sep 2014 16:04:26 +0200 Subject: arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs The EISR and ELRSR registers are 32-bit registers on GICv2, and we store these as an array of two such registers on the vgic vcpu struct. However, we access them as a single 64-bit value or as a bitmap pointer in the generic vgic code, which breaks BE support. Instead, store them as u64 values on the vgic structure and do the word-swapping in the assembly code, which already handles the byte order for BE systems.
Tested-by: Victor Kamensky Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/interrupts_head.S | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/kvm') diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 98c8c5b9a87f..14d488388480 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -433,10 +433,17 @@ ARM_BE8(rev r10, r10 ) str r3, [r11, #VGIC_V2_CPU_HCR] str r4, [r11, #VGIC_V2_CPU_VMCR] str r5, [r11, #VGIC_V2_CPU_MISR] +#ifdef CONFIG_CPU_ENDIAN_BE8 + str r6, [r11, #(VGIC_V2_CPU_EISR + 4)] + str r7, [r11, #VGIC_V2_CPU_EISR] + str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)] + str r9, [r11, #VGIC_V2_CPU_ELRSR] +#else str r6, [r11, #VGIC_V2_CPU_EISR] str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] str r8, [r11, #VGIC_V2_CPU_ELRSR] str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] +#endif str r10, [r11, #VGIC_V2_CPU_APR] /* Clear GICH_HCR */ -- cgit v1.2.3
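
The BE fix in the last patch works because the generic vgic code reads each EISR/ELRSR pair as a single u64, whose low 32-bit word sits at byte offset 0 on a little-endian host but at byte offset 4 on a big-endian one; the CONFIG_CPU_ENDIAN_BE8 branch therefore stores the two GIC words at swapped offsets so the 64-bit view comes out the same either way. The following stand-alone C sketch only illustrates that layout difference; it is user-space demonstration code, not part of any patch above, and its variable names are invented for the example.

/* Illustration only: how a u64 built from two 32-bit GIC words is laid out. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t eisr0 = 0x11111111;	/* stand-in for the first (low) GIC word */
	uint32_t eisr1 = 0x22222222;	/* stand-in for the second (high) GIC word */
	uint64_t packed = ((uint64_t)eisr1 << 32) | eisr0; /* 64-bit view used by generic code */
	uint32_t word_at_0, word_at_4;
	unsigned char bytes[sizeof(packed)];

	memcpy(bytes, &packed, sizeof(packed));
	memcpy(&word_at_0, bytes + 0, sizeof(word_at_0));
	memcpy(&word_at_4, bytes + 4, sizeof(word_at_4));

	/*
	 * On a little-endian host eisr0 lands at offset 0; on a big-endian
	 * host it lands at offset 4, which is why the BE8 assembly path
	 * stores the first GIC word at VGIC_V2_CPU_EISR + 4 instead of
	 * VGIC_V2_CPU_EISR.
	 */
	printf("word at offset 0: 0x%08x\n", word_at_0);
	printf("word at offset 4: 0x%08x\n", word_at_4);
	return 0;
}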