author    | Anup Patel <apatel@ventanamicro.com> | 2025-06-18 17:05:29 +0530
committer | Anup Patel <anup@brainfault.org>     | 2025-07-28 22:27:25 +0530
commit    | f035b44b518c300d51d36057867d615a30d43cb8 (patch)
tree      | 6594a2517f5e45e4b9da66e346fa4427121b0aca /arch/riscv/kvm/mmu.c
parent    | 4ecbd3eb5b1ba41db8f39d9cd4d20440e88482fa (diff)
RISC-V: KVM: Introduce struct kvm_gstage_mapping
Introduce struct kvm_gstage_mapping, which represents a g-stage
mapping at a particular g-stage page table level. Also, update
kvm_riscv_gstage_map() to return the g-stage mapping upon
success.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Tested-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Nutty Liu <liujingqi@lanxincomputing.com>
Link: https://lore.kernel.org/r/20250618113532.471448-10-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
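The definition of the new type is not part of this file's diff, but from the way it is used below (map->addr, map->pte, map->level, and the memset() of out_map), the structure introduced elsewhere in this series presumably looks roughly like the sketch here; field order and exact types are assumptions inferred from the diff, not quoted from the patch:

struct kvm_gstage_mapping {
	gpa_t addr;	/* guest physical address covered by the mapping */
	pte_t pte;	/* g-stage PTE installed (or to be installed) for addr */
	u32 level;	/* page table level of the mapping, 0 = smallest page size */
};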
Diffstat (limited to 'arch/riscv/kvm/mmu.c')
-rw-r--r-- | arch/riscv/kvm/mmu.c | 58
1 files changed, 33 insertions, 25 deletions
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index c1a3eb076df3..806614b3e46d 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -135,18 +135,18 @@ static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
 }
 
-static int gstage_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_memory_cache *pcache,
-			  gpa_t addr, const pte_t *new_pte)
+static int gstage_set_pte(struct kvm *kvm,
+			  struct kvm_mmu_memory_cache *pcache,
+			  const struct kvm_gstage_mapping *map)
 {
 	u32 current_level = gstage_pgd_levels - 1;
 	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
-	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+	pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 
-	if (current_level < level)
+	if (current_level < map->level)
 		return -EINVAL;
 
-	while (current_level != level) {
+	while (current_level != map->level) {
 		if (gstage_pte_leaf(ptep))
 			return -EEXIST;
 
@@ -165,13 +165,13 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
 		}
 
 		current_level--;
-		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+		ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 	}
 
-	if (pte_val(*ptep) != pte_val(*new_pte)) {
-		set_pte(ptep, *new_pte);
+	if (pte_val(*ptep) != pte_val(map->pte)) {
+		set_pte(ptep, map->pte);
 		if (gstage_pte_leaf(ptep))
-			gstage_remote_tlb_flush(kvm, current_level, addr);
+			gstage_remote_tlb_flush(kvm, current_level, map->addr);
 	}
 
 	return 0;
@@ -181,14 +181,16 @@ static int gstage_map_page(struct kvm *kvm,
 			   struct kvm_mmu_memory_cache *pcache,
 			   gpa_t gpa, phys_addr_t hpa,
 			   unsigned long page_size,
-			   bool page_rdonly, bool page_exec)
+			   bool page_rdonly, bool page_exec,
+			   struct kvm_gstage_mapping *out_map)
 {
-	int ret;
-	u32 level = 0;
-	pte_t new_pte;
 	pgprot_t prot;
+	int ret;
 
-	ret = gstage_page_size_to_level(page_size, &level);
+	out_map->addr = gpa;
+	out_map->level = 0;
+
+	ret = gstage_page_size_to_level(page_size, &out_map->level);
 	if (ret)
 		return ret;
 
@@ -216,10 +218,10 @@ static int gstage_map_page(struct kvm *kvm,
 		else
 			prot = PAGE_WRITE;
 	}
-	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
-	new_pte = pte_mkdirty(new_pte);
+	out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+	out_map->pte = pte_mkdirty(out_map->pte);
 
-	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
+	return gstage_set_pte(kvm, pcache, out_map);
 }
 
 enum gstage_op {
@@ -352,7 +354,6 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic)
 {
-	pte_t pte;
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
@@ -360,22 +361,25 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
 		.gfp_zero = __GFP_ZERO,
 	};
+	struct kvm_gstage_mapping map;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.addr = addr;
+		map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.level = 0;
 
 		if (!writable)
-			pte = pte_wrprotect(pte);
+			map.pte = pte_wrprotect(map.pte);
 
 		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
 		if (ret)
 			goto out;
 
 		spin_lock(&kvm->mmu_lock);
-		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+		ret = gstage_set_pte(kvm, &pcache, &map);
 		spin_unlock(&kvm->mmu_lock);
 		if (ret)
 			goto out;
@@ -593,7 +597,8 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write)
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map)
 {
 	int ret;
 	kvm_pfn_t hfn;
@@ -608,6 +613,9 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	unsigned long vma_pagesize, mmu_seq;
 	struct page *page;
 
+	/* Setup initial state of output mapping */
+	memset(out_map, 0, sizeof(*out_map));
+
 	/* We need minimum second+third level pages */
 	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
 	if (ret) {
@@ -677,10 +685,10 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	if (writable) {
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, false, true);
+				      vma_pagesize, false, true, out_map);
 	} else {
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, true, true);
+				      vma_pagesize, true, true, out_map);
 	}
 
 	if (ret)
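For illustration only, a hypothetical caller could consume the mapping that the updated kvm_riscv_gstage_map() reports via its out_map parameter roughly as sketched below; the local variable names and the surrounding fault-handling context are assumptions, not part of this patch:

	struct kvm_gstage_mapping map;
	int ret;

	/* Map the faulting GPA and receive the installed mapping back. */
	ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &map);
	if (!ret) {
		/*
		 * On success, map.addr, map.pte and map.level describe the
		 * g-stage PTE that was installed; for example, map.level can
		 * be used to derive the mapping size for later TLB handling.
		 */
	}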