author     Vincent Donnefort <vdonnefort@google.com>   2025-03-13 11:40:36 +0000
committer  Oliver Upton <oliver.upton@linux.dev>       2025-03-14 00:56:29 -0700
commit     cf2d228da9a8140bd7227ca9093947d20e8ea39d (patch)
tree       5682ae9705fabf94b95aa2028aa31a4957e66454
parent     0ad2507d5d93f39619fc42372c347d6006b64319 (diff)
KVM: arm64: Add flags to kvm_hyp_memcache
Add flags to kvm_hyp_memcache and propagate the latter to the allocation
and free callbacks. This will later allow memory to be accounted for based
on the memcache configuration.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250313114038.1502357-2-vdonnefort@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
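For context, a minimal sketch of how the new flags field could be consumed once the callbacks receive the memcache itself instead of NULL. The KVM_HYP_MC_NO_ACCOUNT flag and its effect on the GFP mask are illustrative assumptions only; this patch does not define any flag values, it merely plumbs the memcache pointer through.

/*
 * Illustrative sketch only: the flag below is hypothetical and not part of
 * this patch.
 */
#define KVM_HYP_MC_NO_ACCOUNT	BIT(0)	/* hypothetical: opt out of GFP accounting */

static void *hyp_mc_alloc_fn(void *opaque)
{
	struct kvm_hyp_memcache *mc = opaque;
	/* Pick the GFP mask based on how this memcache was configured. */
	gfp_t gfp = (mc->flags & KVM_HYP_MC_NO_ACCOUNT) ? GFP_KERNEL
							: GFP_KERNEL_ACCOUNT;

	return (void *)__get_free_page(gfp);
}

Passing mc itself as the opaque argument is what makes this kind of per-memcache policy possible; with NULL, the callbacks had no way to see the configuration.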
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  1
-rw-r--r--  arch/arm64/kvm/mmu.c               8
2 files changed, 5 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3a7ec98ef123..12691ae23d4c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -86,6 +86,7 @@ struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
	struct pkvm_mapping *mapping; /* only used from EL1 */
+	unsigned long flags;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1f55b0c7b11d..6107a3c8ccf6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1086,12 +1086,12 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
	}
}

-static void hyp_mc_free_fn(void *addr, void *unused)
+static void hyp_mc_free_fn(void *addr, void *mc)
{
	free_page((unsigned long)addr);
}

-static void *hyp_mc_alloc_fn(void *unused)
+static void *hyp_mc_alloc_fn(void *mc)
{
	return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
}
@@ -1102,7 +1102,7 @@ void free_hyp_memcache(struct kvm_hyp_memcache *mc)
		return;

	kfree(mc->mapping);
-	__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
+	__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, mc);
}

int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
@@ -1117,7 +1117,7 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
	}

	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
-				    kvm_host_pa, NULL);
+				    kvm_host_pa, mc);
}

/**
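A hedged usage sketch of the resulting interface, assuming the hypothetical flag from the sketch above: a caller configures mc->flags before topping up, and both __topup_hyp_memcache() and __free_hyp_memcache() now hand that same memcache back to the callbacks. The example_topup() wrapper is purely illustrative.

static int example_topup(void)
{
	struct kvm_hyp_memcache mc = {};
	int ret;

	mc.flags = KVM_HYP_MC_NO_ACCOUNT;	/* hypothetical flag, see sketch above */
	ret = topup_hyp_memcache(&mc, 4);	/* alloc callback can now read mc.flags */
	if (ret)
		return ret;

	/* ... hand the pages to the hypervisor ... */

	free_hyp_memcache(&mc);			/* free callback sees the same flags */
	return 0;
}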