author    Marc Zyngier <maz@kernel.org>    2025-05-23 10:56:25 +0100
committer Marc Zyngier <maz@kernel.org>    2025-05-23 10:56:25 +0100
commit    a90e0017541dbd2e6c1e40e501efefc938590390 (patch)
tree      44864ec88eebdf9b80821efc657479eecf0ba11e
parent    67bd641517b04dc5f360ece4dc5f1cf3c2449022 (diff)
parent    c353fde17d8f7e710884532da50393cd22ddbfbe (diff)
Merge branch kvm-arm64/pkvm-np-thp-6.16 into kvmarm-master/next
* kvm-arm64/pkvm-np-thp-6.16: (21 commits)
  : .
  : Large mapping support for non-protected pKVM guests, courtesy of
  : Vincent Donnefort. From the cover letter:
  :
  : "This series adds support for stage-2 huge mappings (PMD_SIZE) to pKVM
  : np-guests, that is installing PMD-level mappings in the stage-2,
  : whenever the stage-1 is backed by either Hugetlbfs or THPs."
  : .
  KVM: arm64: np-guest CMOs with PMD_SIZE fixmap
  KVM: arm64: Stage-2 huge mappings for np-guests
  KVM: arm64: Add a range to pkvm_mappings
  KVM: arm64: Convert pkvm_mappings to interval tree
  KVM: arm64: Add a range to __pkvm_host_test_clear_young_guest()
  KVM: arm64: Add a range to __pkvm_host_wrprotect_guest()
  KVM: arm64: Add a range to __pkvm_host_unshare_guest()
  KVM: arm64: Add a range to __pkvm_host_share_guest()
  KVM: arm64: Introduce for_each_hyp_page
  KVM: arm64: Handle huge mappings for np-guest CMOs
  KVM: arm64: Extend pKVM selftest for np-guests
  KVM: arm64: Selftest for pKVM transitions
  KVM: arm64: Don't WARN from __pkvm_host_share_guest()
  KVM: arm64: Add .hyp.data section
  KVM: arm64: Unconditionally cross check hyp state
  KVM: arm64: Defer EL2 stage-1 mapping on share
  KVM: arm64: Move hyp state to hyp_vmemmap
  KVM: arm64: Introduce {get,set}_host_state() helpers
  KVM: arm64: Use 0b11 for encoding PKVM_NOPAGE
  KVM: arm64: Fix pKVM page-tracking comments
  ...

Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--  arch/arm64/include/asm/kvm_host.h              |  12
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h           |   7
-rw-r--r--  arch/arm64/include/asm/kvm_pkvm.h              |   8
-rw-r--r--  arch/arm64/include/asm/sections.h              |   1
-rw-r--r--  arch/arm64/kernel/image-vars.h                 |   2
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S                |  18
-rw-r--r--  arch/arm64/kvm/arm.c                           |   7
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/mem_protect.h  |  14
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/memory.h       |  58
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/mm.h           |   4
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c             |  20
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp.lds.S              |   2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c          | 508
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mm.c                   |  97
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/pkvm.c                 |  47
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/setup.c                |  27
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c                   |   6
-rw-r--r--  arch/arm64/kvm/mmu.c                           |   6
-rw-r--r--  arch/arm64/kvm/pkvm.c                          | 131
19 files changed, 730 insertions, 245 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bea8ae1b1b02..73b7762b0e7d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -971,20 +971,22 @@ struct kvm_vcpu_arch {
#define vcpu_sve_zcr_elx(vcpu) \
(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
-#define vcpu_sve_state_size(vcpu) ({ \
+#define sve_state_size_from_vl(sve_max_vl) ({ \
size_t __size_ret; \
- unsigned int __vcpu_vq; \
+ unsigned int __vq; \
\
- if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
__size_ret = 0; \
} else { \
- __vcpu_vq = vcpu_sve_max_vq(vcpu); \
- __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ __vq = sve_vq_from_vl(sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vq); \
} \
\
__size_ret; \
})
+#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
+
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 6b9d274052c7..2888b5d03757 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -59,6 +59,11 @@ typedef u64 kvm_pte_t;
#define KVM_PHYS_INVALID (-1ULL)
+#define KVM_PTE_TYPE BIT(1)
+#define KVM_PTE_TYPE_BLOCK 0
+#define KVM_PTE_TYPE_PAGE 1
+#define KVM_PTE_TYPE_TABLE 1
+
#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
@@ -413,7 +418,7 @@ static inline bool kvm_pgtable_walk_lock_held(void)
*/
struct kvm_pgtable {
union {
- struct rb_root pkvm_mappings;
+ struct rb_root_cached pkvm_mappings;
struct {
u32 ia_bits;
s8 start_level;
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index abd693ce5b93..ea58282f59bb 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -135,6 +135,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}
+#ifdef CONFIG_NVHE_EL2_DEBUG
+static inline unsigned long pkvm_selftest_pages(void) { return 32; }
+#else
+static inline unsigned long pkvm_selftest_pages(void) { return 0; }
+#endif
+
#define KVM_FFA_MBOX_NR_PAGES 1
static inline unsigned long hyp_ffa_proxy_pages(void)
@@ -167,6 +173,8 @@ struct pkvm_mapping {
struct rb_node node;
u64 gfn;
u64 pfn;
+ u64 nr_pages;
+ u64 __subtree_last; /* Internal member for interval tree */
};
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 40971ac1303f..51b0d594239e 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,6 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
+extern char __hyp_data_start[], __hyp_data_end[];
extern char __hyp_rodata_start[], __hyp_rodata_end[];
extern char __hyp_reloc_begin[], __hyp_reloc_end[];
extern char __hyp_bss_start[], __hyp_bss_end[];
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 2004b4f41ade..798405ec48a3 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -127,6 +127,8 @@ KVM_NVHE_ALIAS(__hyp_text_start);
KVM_NVHE_ALIAS(__hyp_text_end);
KVM_NVHE_ALIAS(__hyp_bss_start);
KVM_NVHE_ALIAS(__hyp_bss_end);
+KVM_NVHE_ALIAS(__hyp_data_start);
+KVM_NVHE_ALIAS(__hyp_data_end);
KVM_NVHE_ALIAS(__hyp_rodata_start);
KVM_NVHE_ALIAS(__hyp_rodata_end);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index e73326bd3ff7..7c770053f072 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@
*(__kvm_ex_table) \
__stop___kvm_ex_table = .;
-#define HYPERVISOR_DATA_SECTIONS \
+#define HYPERVISOR_RODATA_SECTIONS \
HYP_SECTION_NAME(.rodata) : { \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_start = .; \
@@ -23,6 +23,15 @@
__hyp_rodata_end = .; \
}
+#define HYPERVISOR_DATA_SECTION \
+ HYP_SECTION_NAME(.data) : { \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_data_start = .; \
+ *(HYP_SECTION_NAME(.data)) \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_data_end = .; \
+ }
+
#define HYPERVISOR_PERCPU_SECTION \
. = ALIGN(PAGE_SIZE); \
HYP_SECTION_NAME(.data..percpu) : { \
@@ -51,7 +60,8 @@
#define SBSS_ALIGN PAGE_SIZE
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
-#define HYPERVISOR_DATA_SECTIONS
+#define HYPERVISOR_RODATA_SECTIONS
+#define HYPERVISOR_DATA_SECTION
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#define SBSS_ALIGN 0
@@ -190,7 +200,7 @@ SECTIONS
/* everything from this point to __init_begin will be marked RO NX */
RO_DATA(PAGE_SIZE)
- HYPERVISOR_DATA_SECTIONS
+ HYPERVISOR_RODATA_SECTIONS
.got : { *(.got) }
/*
@@ -295,6 +305,8 @@ SECTIONS
_sdata = .;
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+ HYPERVISOR_DATA_SECTION
+
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 19ca57def629..6c42682bc66f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2604,6 +2604,13 @@ static int __init init_hyp_mode(void)
goto out_err;
}
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
+ kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
+ if (err) {
+ kvm_err("Cannot map .hyp.data section\n");
+ goto out_err;
+ }
+
err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
if (err) {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index ea0a704da9b8..5f9d56754e39 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -39,12 +39,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
enum kvm_pgtable_prot prot);
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
-int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
-int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm);
+int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm);
int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
bool addr_is_memory(phys_addr_t phys);
@@ -67,4 +67,10 @@ static __always_inline void __load_host_stage2(void)
else
write_sysreg(0, vttbr_el2);
}
+
+#ifdef CONFIG_NVHE_EL2_DEBUG
+void pkvm_ownership_selftest(void *base);
+#else
+static inline void pkvm_ownership_selftest(void *base) { }
+#endif
#endif /* __KVM_NVHE_MEM_PROTECT__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 34233d586060..dee1a406b0c2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -8,23 +8,30 @@
#include <linux/types.h>
/*
- * Bits 0-1 are reserved to track the memory ownership state of each page:
- * 00: The page is owned exclusively by the page-table owner.
- * 01: The page is owned by the page-table owner, but is shared
- * with another entity.
- * 10: The page is shared with, but not owned by the page-table owner.
- * 11: Reserved for future use (lending).
+ * Bits 0-1 are used to encode the memory ownership state of each page from the
+ * point of view of a pKVM "component" (host, hyp, guest, ... see enum
+ * pkvm_component_id):
+ * 00: The page is owned and exclusively accessible by the component;
+ * 01: The page is owned and accessible by the component, but is also
+ * accessible by another component;
+ * 10: The page is accessible but not owned by the component;
+ * The storage of this state depends on the component: either in the
+ * hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
*/
enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = BIT(0),
PKVM_PAGE_SHARED_BORROWED = BIT(1),
- __PKVM_PAGE_RESERVED = BIT(0) | BIT(1),
- /* Meta-states which aren't encoded directly in the PTE's SW bits */
- PKVM_NOPAGE = BIT(2),
+ /*
+ * 'Meta-states' are not stored directly in PTE SW bits for guest
+ * states, but inferred from the context (e.g. invalid PTE entries).
+ * For the host and hyp, meta-states are stored directly in the
+ * struct hyp_page.
+ */
+ PKVM_NOPAGE = BIT(0) | BIT(1),
};
-#define PKVM_PAGE_META_STATES_MASK (~__PKVM_PAGE_RESERVED)
+#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
@@ -44,8 +51,15 @@ struct hyp_page {
u16 refcount;
u8 order;
- /* Host (non-meta) state. Guarded by the host stage-2 lock. */
- enum pkvm_page_state host_state : 8;
+ /* Host state. Guarded by the host stage-2 lock. */
+ unsigned __host_state : 4;
+
+ /*
+ * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
+ * the complement so that the initial 0 in __hyp_state_comp (due to the
+ * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
+ */
+ unsigned __hyp_state_comp : 4;
u32 host_share_guest_count;
};
@@ -82,6 +96,26 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
+static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
+{
+ return p->__host_state;
+}
+
+static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
+{
+ p->__host_state = state;
+}
+
+static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
+{
+ return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+}
+
+static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
+{
+ p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+}
+
/*
* Refcounting for 'struct hyp_page'.
* hyp_pool::lock must be held if atomic access to the refcount is required.
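
[Note on the hunks above: the "Move hyp state to hyp_vmemmap" change stores the hyp state complemented so that the zero-initialised vmemmap decodes as PKVM_NOPAGE (0b11) rather than PKVM_PAGE_OWNED (0b00). A minimal standalone sketch of that round-trip, with the kernel types stubbed out for illustration — not the in-tree code:]

#include <assert.h>

enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0,
	PKVM_PAGE_SHARED_OWNED		= 1 << 0,
	PKVM_PAGE_SHARED_BORROWED	= 1 << 1,
	PKVM_NOPAGE			= (1 << 0) | (1 << 1),
};

#define PKVM_PAGE_STATE_MASK	((1 << 0) | (1 << 1))

/* Illustrative stand-in for struct hyp_page. */
struct hyp_page_stub { unsigned hyp_state_comp : 4; };

int main(void)
{
	struct hyp_page_stub p = { 0 };	/* the hyp_vmemmap starts off zeroed */

	/* A zero-filled entry must decode to PKVM_NOPAGE... */
	assert((p.hyp_state_comp ^ PKVM_PAGE_STATE_MASK) == PKVM_NOPAGE);

	/* ...and storing the complement round-trips any real state. */
	p.hyp_state_comp = PKVM_PAGE_OWNED ^ PKVM_PAGE_STATE_MASK;
	assert((p.hyp_state_comp ^ PKVM_PAGE_STATE_MASK) == PKVM_PAGE_OWNED);
	return 0;
}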
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 230e4f2527de..6e83ce35c2f2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -13,9 +13,11 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
-int hyp_create_pcpu_fixmap(void);
+int hyp_create_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys);
void hyp_fixmap_unmap(void);
+void *hyp_fixblock_map(phys_addr_t phys, size_t *size);
+void hyp_fixblock_unmap(void);
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2c37680d954c..8e8848de4d47 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -123,10 +123,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
- hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
- /* Limit guest vector length to the maximum supported by the host. */
- hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
-
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
@@ -249,7 +245,8 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
- DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
struct pkvm_hyp_vcpu *hyp_vcpu;
int ret = -EINVAL;
@@ -264,7 +261,7 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
if (ret)
goto out;
- ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+ ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot);
out:
cpu_reg(host_ctxt, 1) = ret;
}
@@ -273,6 +270,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -283,7 +281,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
if (!hyp_vm)
goto out;
- ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
+ ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
@@ -312,6 +310,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -322,7 +321,7 @@ static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt
if (!hyp_vm)
goto out;
- ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm);
+ ret = __pkvm_host_wrprotect_guest(gfn, nr_pages, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
@@ -332,7 +331,8 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho
{
DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
- DECLARE_REG(bool, mkold, host_ctxt, 3);
+ DECLARE_REG(u64, nr_pages, host_ctxt, 3);
+ DECLARE_REG(bool, mkold, host_ctxt, 4);
struct pkvm_hyp_vm *hyp_vm;
int ret = -EINVAL;
@@ -343,7 +343,7 @@ static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *ho
if (!hyp_vm)
goto out;
- ret = __pkvm_host_test_clear_young_guest(gfn, mkold, hyp_vm);
+ ret = __pkvm_host_test_clear_young_guest(gfn, nr_pages, mkold, hyp_vm);
put_pkvm_hyp_vm(hyp_vm);
out:
cpu_reg(host_ctxt, 1) = ret;
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index f4562f417d3f..d724f6d69302 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -25,5 +25,7 @@ SECTIONS {
BEGIN_HYP_SECTION(.data..percpu)
PERCPU_INPUT(L1_CACHE_BYTES)
END_HYP_SECTION
+
HYP_SECTION(.bss)
+ HYP_SECTION(.data)
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2a5284f749b4..e359cd24ecf0 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -60,6 +60,11 @@ static void hyp_unlock_component(void)
hyp_spin_unlock(&pkvm_pgd_lock);
}
+#define for_each_hyp_page(__p, __st, __sz) \
+ for (struct hyp_page *__p = hyp_phys_to_page(__st), \
+ *__e = __p + ((__sz) >> PAGE_SHIFT); \
+ __p < __e; __p++)
+
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -161,12 +166,6 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
return 0;
}
-static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
- enum kvm_pgtable_prot prot)
-{
- return true;
-}
-
static void *guest_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
@@ -217,16 +216,42 @@ static void guest_s2_put_page(void *addr)
hyp_put_page(&current_vm->pool, addr);
}
+static void __apply_guest_page(void *va, size_t size,
+ void (*func)(void *addr, size_t size))
+{
+ size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
+ va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
+
+ while (size) {
+ size_t map_size = PAGE_SIZE;
+ void *map;
+
+ if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
+ map = hyp_fixblock_map(__hyp_pa(va), &map_size);
+ else
+ map = hyp_fixmap_map(__hyp_pa(va));
+
+ func(map, map_size);
+
+ if (map_size == PMD_SIZE)
+ hyp_fixblock_unmap();
+ else
+ hyp_fixmap_unmap();
+
+ size -= map_size;
+ va += map_size;
+ }
+}
+
static void clean_dcache_guest_page(void *va, size_t size)
{
- __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ __apply_guest_page(va, size, __clean_dcache_guest_page);
}
static void invalidate_icache_guest_page(void *va, size_t size)
{
- __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ __apply_guest_page(va, size, __invalidate_icache_guest_page);
}
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
@@ -255,8 +280,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
};
guest_lock_component(vm);
- ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
- guest_stage2_force_pte_cb);
+ ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
guest_unlock_component(vm);
if (ret)
return ret;
@@ -467,7 +491,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
return -EAGAIN;
if (pte) {
- WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE);
+ WARN_ON(addr_is_memory(addr) &&
+ get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
return -EPERM;
}
@@ -493,10 +518,8 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
- phys_addr_t end = addr + size;
-
- for (; addr < end; addr += PAGE_SIZE)
- hyp_phys_to_page(addr)->host_state = state;
+ for_each_hyp_page(page, addr, size)
+ set_host_state(page, state);
}
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
@@ -618,16 +641,16 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
static int __host_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- u64 end = addr + size;
int ret;
- ret = check_range_allowed_memory(addr, end);
+ ret = check_range_allowed_memory(addr, addr + size);
if (ret)
return ret;
hyp_assert_lock_held(&host_mmu.lock);
- for (; addr < end; addr += PAGE_SIZE) {
- if (hyp_phys_to_page(addr)->host_state != state)
+
+ for_each_hyp_page(page, addr, size) {
+ if (get_host_state(page) != state)
return -EPERM;
}
@@ -637,7 +660,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) {
+ if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
if (ret)
@@ -649,24 +672,20 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return 0;
}
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
+static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- if (!kvm_pte_valid(pte))
- return PKVM_NOPAGE;
-
- return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ for_each_hyp_page(page, phys, size)
+ set_hyp_state(page, state);
}
-static int __hyp_check_page_state_range(u64 addr, u64 size,
- enum pkvm_page_state state)
+static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- struct check_walk_data d = {
- .desired = state,
- .get_page_state = hyp_get_page_state,
- };
+ for_each_hyp_page(page, phys, size) {
+ if (get_hyp_state(page) != state)
+ return -EPERM;
+ }
- hyp_assert_lock_held(&pkvm_pgd_lock);
- return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+ return 0;
}
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
@@ -677,10 +696,9 @@ static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}
-static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
u64 size, enum pkvm_page_state state)
{
- struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
struct check_walk_data d = {
.desired = state,
.get_page_state = guest_get_page_state,
@@ -693,8 +711,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
- void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
u64 size = PAGE_SIZE;
int ret;
@@ -704,14 +720,11 @@ int __pkvm_host_share_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
unlock:
@@ -734,7 +747,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
if (hyp_page_count((void *)virt)) {
@@ -742,7 +755,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
goto unlock;
}
- WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
unlock:
@@ -757,7 +770,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
int ret;
host_lock_component();
@@ -766,14 +778,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
unlock:
@@ -793,15 +803,14 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
host_lock_component();
hyp_lock_component();
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ if (ret)
+ goto unlock;
+ ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
- if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
- if (ret)
- goto unlock;
- }
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
@@ -816,24 +825,30 @@ int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ u64 phys = __hyp_pa(start);
u64 size = end - start;
+ struct hyp_page *p;
int ret;
host_lock_component();
hyp_lock_component();
- ret = __host_check_page_state_range(__hyp_pa(start), size,
- PKVM_PAGE_SHARED_OWNED);
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(start, size,
- PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_inc(hyp_virt_to_page(cur));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ p = hyp_virt_to_page(cur);
+ hyp_page_ref_inc(p);
+ if (p->refcount == 1)
+ WARN_ON(pkvm_create_mappings_locked((void *)cur,
+ (void *)cur + PAGE_SIZE,
+ PAGE_HYP));
+ }
unlock:
hyp_unlock_component();
@@ -846,12 +861,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ struct hyp_page *p;
host_lock_component();
hyp_lock_component();
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_dec(hyp_virt_to_page(cur));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ p = hyp_virt_to_page(cur);
+ if (p->refcount == 1)
+ WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
+ hyp_page_ref_dec(p);
+ }
hyp_unlock_component();
host_unlock_component();
@@ -887,49 +907,84 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
return ret;
}
-int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
+{
+ size_t block_size;
+
+ if (nr_pages == 1) {
+ *size = PAGE_SIZE;
+ return 0;
+ }
+
+ /* We solely support second to last level huge mapping */
+ block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);
+
+ if (nr_pages != block_size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(phys | ipa, block_size))
+ return -EINVAL;
+
+ *size = block_size;
+ return 0;
+}
+
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
enum kvm_pgtable_prot prot)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 phys = hyp_pfn_to_phys(pfn);
u64 ipa = hyp_pfn_to_phys(gfn);
- struct hyp_page *page;
+ u64 size;
int ret;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
- ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+ ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ ret = check_range_allowed_memory(phys, phys + size);
if (ret)
return ret;
host_lock_component();
guest_lock_component(vm);
- ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+ ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
if (ret)
goto unlock;
- page = hyp_phys_to_page(phys);
- switch (page->host_state) {
- case PKVM_PAGE_OWNED:
- WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
- break;
- case PKVM_PAGE_SHARED_OWNED:
- if (page->host_share_guest_count)
- break;
- /* Only host to np-guest multi-sharing is tolerated */
- WARN_ON(1);
- fallthrough;
- default:
- ret = -EPERM;
- goto unlock;
+ for_each_hyp_page(page, phys, size) {
+ switch (get_host_state(page)) {
+ case PKVM_PAGE_OWNED:
+ continue;
+ case PKVM_PAGE_SHARED_OWNED:
+ if (page->host_share_guest_count == U32_MAX) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Only host to np-guest multi-sharing is tolerated */
+ if (page->host_share_guest_count)
+ continue;
+
+ fallthrough;
+ default:
+ ret = -EPERM;
+ goto unlock;
+ }
}
- WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+ for_each_hyp_page(page, phys, size) {
+ set_host_state(page, PKVM_PAGE_SHARED_OWNED);
+ page->host_share_guest_count++;
+ }
+
+ WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
&vcpu->vcpu.arch.pkvm_memcache, 0));
- page->host_share_guest_count++;
unlock:
guest_unlock_component(vm);
@@ -938,10 +993,9 @@ unlock:
return ret;
}
-static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
+static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
{
enum pkvm_page_state state;
- struct hyp_page *page;
kvm_pte_t pte;
u64 phys;
s8 level;
@@ -952,7 +1006,7 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
return ret;
if (!kvm_pte_valid(pte))
return -ENOENT;
- if (level != KVM_PGTABLE_LAST_LEVEL)
+ if (kvm_granule_size(level) != size)
return -E2BIG;
state = guest_get_page_state(pte, ipa);
@@ -960,43 +1014,49 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
return -EPERM;
phys = kvm_pte_to_phys(pte);
- ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+ ret = check_range_allowed_memory(phys, phys + size);
if (WARN_ON(ret))
return ret;
- page = hyp_phys_to_page(phys);
- if (page->host_state != PKVM_PAGE_SHARED_OWNED)
- return -EPERM;
- if (WARN_ON(!page->host_share_guest_count))
- return -EINVAL;
+ for_each_hyp_page(page, phys, size) {
+ if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
+ return -EPERM;
+ if (WARN_ON(!page->host_share_guest_count))
+ return -EINVAL;
+ }
*__phys = phys;
return 0;
}
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
- struct hyp_page *page;
- u64 phys;
+ u64 size, phys;
int ret;
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
host_lock_component();
guest_lock_component(vm);
- ret = __check_host_shared_guest(vm, &phys, ipa);
+ ret = __check_host_shared_guest(vm, &phys, ipa, size);
if (ret)
goto unlock;
- ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+ ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
if (ret)
goto unlock;
- page = hyp_phys_to_page(phys);
- page->host_share_guest_count--;
- if (!page->host_share_guest_count)
- WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
+ for_each_hyp_page(page, phys, size) {
+ /* __check_host_shared_guest() protects against underflow */
+ page->host_share_guest_count--;
+ if (!page->host_share_guest_count)
+ set_host_state(page, PKVM_PAGE_OWNED);
+ }
unlock:
guest_unlock_component(vm);
@@ -1005,7 +1065,7 @@ unlock:
return ret;
}
-static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
+static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size)
{
u64 phys;
int ret;
@@ -1016,7 +1076,7 @@ static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
host_lock_component();
guest_lock_component(vm);
- ret = __check_host_shared_guest(vm, &phys, ipa);
+ ret = __check_host_shared_guest(vm, &phys, ipa, size);
guest_unlock_component(vm);
host_unlock_component();
@@ -1036,7 +1096,7 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
- assert_host_shared_guest(vm, ipa);
+ assert_host_shared_guest(vm, ipa, PAGE_SIZE);
guest_lock_component(vm);
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
@@ -1044,33 +1104,41 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
return ret;
}
-int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
- u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 size, ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ assert_host_shared_guest(vm, ipa, size);
guest_lock_component(vm);
- ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
+ ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size);
guest_unlock_component(vm);
return ret;
}
-int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm)
+int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm)
{
- u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 size, ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+ if (ret)
+ return ret;
+
+ assert_host_shared_guest(vm, ipa, size);
guest_lock_component(vm);
- ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
+ ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold);
guest_unlock_component(vm);
return ret;
@@ -1084,10 +1152,210 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
- assert_host_shared_guest(vm, ipa);
+ assert_host_shared_guest(vm, ipa, PAGE_SIZE);
guest_lock_component(vm);
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
guest_unlock_component(vm);
return 0;
}
+
+#ifdef CONFIG_NVHE_EL2_DEBUG
+struct pkvm_expected_state {
+ enum pkvm_page_state host;
+ enum pkvm_page_state hyp;
+ enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */
+};
+
+static struct pkvm_expected_state selftest_state;
+static struct hyp_page *selftest_page;
+
+static struct pkvm_hyp_vm selftest_vm = {
+ .kvm = {
+ .arch = {
+ .mmu = {
+ .arch = &selftest_vm.kvm.arch,
+ .pgt = &selftest_vm.pgt,
+ },
+ },
+ },
+};
+
+static struct pkvm_hyp_vcpu selftest_vcpu = {
+ .vcpu = {
+ .arch = {
+ .hw_mmu = &selftest_vm.kvm.arch.mmu,
+ },
+ .kvm = &selftest_vm.kvm,
+ },
+};
+
+static void init_selftest_vm(void *virt)
+{
+ struct hyp_page *p = hyp_virt_to_page(virt);
+ int i;
+
+ selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+ WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt));
+
+ for (i = 0; i < pkvm_selftest_pages(); i++) {
+ if (p[i].refcount)
+ continue;
+ p[i].refcount = 1;
+ hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i]));
+ }
+}
+
+static u64 selftest_ipa(void)
+{
+ return BIT(selftest_vm.pgt.ia_bits - 1);
+}
+
+static void assert_page_state(void)
+{
+ void *virt = hyp_page_to_virt(selftest_page);
+ u64 size = PAGE_SIZE << selftest_page->order;
+ struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
+ u64 phys = hyp_virt_to_phys(virt);
+ u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
+ struct pkvm_hyp_vm *vm;
+
+ vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+
+ host_lock_component();
+ WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
+ host_unlock_component();
+
+ hyp_lock_component();
+ WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp));
+ hyp_unlock_component();
+
+ guest_lock_component(&selftest_vm);
+ WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
+ WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
+ guest_unlock_component(&selftest_vm);
+}
+
+#define assert_transition_res(res, fn, ...) \
+ do { \
+ WARN_ON(fn(__VA_ARGS__) != res); \
+ assert_page_state(); \
+ } while (0)
+
+void pkvm_ownership_selftest(void *base)
+{
+ enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX;
+ void *virt = hyp_alloc_pages(&host_s2_pool, 0);
+ struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
+ struct pkvm_hyp_vm *vm = &selftest_vm;
+ u64 phys, size, pfn, gfn;
+
+ WARN_ON(!virt);
+ selftest_page = hyp_virt_to_page(virt);
+ selftest_page->refcount = 0;
+ init_selftest_vm(base);
+
+ size = PAGE_SIZE << selftest_page->order;
+ phys = hyp_virt_to_phys(virt);
+ pfn = hyp_phys_to_pfn(phys);
+ gfn = hyp_phys_to_pfn(selftest_ipa());
+
+ selftest_state.host = PKVM_NOPAGE;
+ selftest_state.hyp = PKVM_PAGE_OWNED;
+ selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE;
+ assert_page_state();
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
+ assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
+ hyp_unpin_shared_mem(virt, virt + size);
+ WARN_ON(hyp_page_count(virt) != 1);
+ assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ hyp_unpin_shared_mem(virt, virt + size);
+ assert_page_state();
+ WARN_ON(hyp_page_count(virt));
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_hyp, pfn);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.host = PKVM_PAGE_OWNED;
+ selftest_state.hyp = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
+
+ selftest_state.host = PKVM_PAGE_SHARED_OWNED;
+ selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
+ assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
+ assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
+ assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
+ assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
+
+ selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
+ assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
+ WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);
+
+ selftest_state.guest[0] = PKVM_NOPAGE;
+ assert_transition_res(0, __pkvm_host_unshare_guest, gfn, 1, vm);
+
+ selftest_state.guest[1] = PKVM_NOPAGE;
+ selftest_state.host = PKVM_PAGE_OWNED;
+ assert_transition_res(0, __pkvm_host_unshare_guest, gfn + 1, 1, vm);
+
+ selftest_state.host = PKVM_NOPAGE;
+ selftest_state.hyp = PKVM_PAGE_OWNED;
+ assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1);
+
+ selftest_page->refcount = 1;
+ hyp_put_page(&host_s2_pool, virt);
+}
+#endif
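
[Note on the hunks above: __pkvm_host_share_guest() and friends now take an nr_pages argument, but __guest_check_transition_size() admits only two shapes — a single page, or exactly one block at the second-to-last level (PMD, i.e. 2MiB with 4K pages), with both the physical and the guest address block-aligned. A sketch of that policy under the 4K-page assumption; the in-tree helper derives the block size from kvm_granule_size() instead of hard-coding it:]

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PMD_SIZE	(1ULL << (PAGE_SHIFT + 9))	/* 2MiB */

static bool transition_size_ok(uint64_t phys, uint64_t ipa, uint64_t nr_pages)
{
	if (nr_pages == 1)
		return true;	/* single page: always accepted */

	/* Otherwise only a whole second-to-last-level block is accepted. */
	if (nr_pages != (PMD_SIZE >> PAGE_SHIFT))
		return false;

	/* Both addresses must be aligned to the block size. */
	return ((phys | ipa) & (PMD_SIZE - 1)) == 0;
}

[So, e.g., transition_size_ok(0x80200000, 0x40000000, 512) passes, while 511 pages, or a 512-page range starting at an unaligned 0x80201000, is rejected — the real helper returns -EINVAL in those cases.]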
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index f41c7440b34b..ae8391baebc3 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -229,9 +229,8 @@ int hyp_map_vectors(void)
return 0;
}
-void *hyp_fixmap_map(phys_addr_t phys)
+static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
{
- struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
kvm_pte_t pte, *ptep = slot->ptep;
pte = *ptep;
@@ -243,10 +242,21 @@ void *hyp_fixmap_map(phys_addr_t phys)
return (void *)slot->addr;
}
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+ return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys);
+}
+
static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
kvm_pte_t *ptep = slot->ptep;
u64 addr = slot->addr;
+ u32 level;
+
+ if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE)
+ level = KVM_PGTABLE_LAST_LEVEL;
+ else
+ level = KVM_PGTABLE_LAST_LEVEL - 1; /* create_fixblock() guarantees PMD level */
WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
@@ -260,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
*/
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL);
+ __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
dsb(ish);
isb();
}
@@ -273,9 +283,9 @@ void hyp_fixmap_unmap(void)
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
- struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
+ struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg;
- if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL)
+ if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level))
return -EINVAL;
slot->addr = ctx->addr;
@@ -296,13 +306,84 @@ static int create_fixmap_slot(u64 addr, u64 cpu)
struct kvm_pgtable_walker walker = {
.cb = __create_fixmap_slot_cb,
.flags = KVM_PGTABLE_WALK_LEAF,
- .arg = (void *)cpu,
+ .arg = per_cpu_ptr(&fixmap_slots, cpu),
};
return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}
-int hyp_create_pcpu_fixmap(void)
+#if PAGE_SHIFT < 16
+#define HAS_FIXBLOCK
+static struct hyp_fixmap_slot hyp_fixblock_slot;
+static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock);
+#endif
+
+static int create_fixblock(void)
+{
+#ifdef HAS_FIXBLOCK
+ struct kvm_pgtable_walker walker = {
+ .cb = __create_fixmap_slot_cb,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = &hyp_fixblock_slot,
+ };
+ unsigned long addr;
+ phys_addr_t phys;
+ int ret, i;
+
+ /* Find a RAM phys address, PMD aligned */
+ for (i = 0; i < hyp_memblock_nr; i++) {
+ phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
+ if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
+ break;
+ }
+
+ if (i >= hyp_memblock_nr)
+ return -EINVAL;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+ addr = ALIGN(__io_map_base, PMD_SIZE);
+ ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
+ if (ret)
+ goto unlock;
+
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
+ if (ret)
+ goto unlock;
+
+ ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);
+
+unlock:
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+void *hyp_fixblock_map(phys_addr_t phys, size_t *size)
+{
+#ifdef HAS_FIXBLOCK
+ *size = PMD_SIZE;
+ hyp_spin_lock(&hyp_fixblock_lock);
+ return fixmap_map_slot(&hyp_fixblock_slot, phys);
+#else
+ *size = PAGE_SIZE;
+ return hyp_fixmap_map(phys);
+#endif
+}
+
+void hyp_fixblock_unmap(void)
+{
+#ifdef HAS_FIXBLOCK
+ fixmap_clear_slot(&hyp_fixblock_slot);
+ hyp_spin_unlock(&hyp_fixblock_lock);
+#else
+ hyp_fixmap_unmap();
+#endif
+}
+
+int hyp_create_fixmap(void)
{
unsigned long addr, i;
int ret;
@@ -322,7 +403,7 @@ int hyp_create_pcpu_fixmap(void)
return ret;
}
- return 0;
+ return create_fixblock();
}
int hyp_create_idmap(u32 hyp_va_bits)
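
[Note on the hunks above: hyp_fixblock_map() adds a single PMD-sized mapping window, guarded by hyp_fixblock_lock, alongside the per-CPU page-sized fixmap slots; __apply_guest_page() in mem_protect.c then picks between the two per chunk. A counting sketch of that chunking policy, assuming 4K pages and a page-aligned va/size (which the real caller guarantees); names here are illustrative, not the hyp API:]

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(PAGE_SIZE << 9)	/* 2MiB */
#define IS_ALIGNED_TO(x, a)	(((x) & ((a) - 1)) == 0)

/* How many map/unmap round-trips does a CMO over [va, va + size) take? */
static unsigned long nr_fixmap_ops(uintptr_t va, size_t size)
{
	unsigned long ops = 0;

	while (size) {
		size_t map_size = PAGE_SIZE;

		/* Prefer the single 2MiB fixblock slot when legal... */
		if (IS_ALIGNED_TO(va, PMD_SIZE) && size >= PMD_SIZE)
			map_size = PMD_SIZE;
		/* ...else fall back to the per-CPU page fixmap slot. */

		size -= map_size;
		va += map_size;
		ops++;
	}
	return ops;
}

[For a PMD-aligned 2MiB range this is one map/TLBI/unmap round-trip instead of 512, which is the point of the "np-guest CMOs with PMD_SIZE fixmap" patch.]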
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 5a335a51deca..338505cb0171 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -372,6 +372,18 @@ static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}
+static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ void *sve_state;
+
+ if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
+ return;
+
+ sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+ hyp_unpin_shared_mem(sve_state,
+ sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
+}
+
static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
unsigned int nr_vcpus)
{
@@ -384,6 +396,7 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
continue;
unpin_host_vcpu(hyp_vcpu->host_vcpu);
+ unpin_host_sve_state(hyp_vcpu);
}
}
@@ -398,12 +411,40 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
pkvm_init_features_from_host(hyp_vm, host_kvm);
}
-static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+ unsigned int sve_max_vl;
+ size_t sve_state_size;
+ void *sve_state;
+ int ret = 0;
- if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
+ if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+ return 0;
+ }
+
+ /* Limit guest vector length to the maximum supported by the host. */
+ sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
+ sve_state_size = sve_state_size_from_vl(sve_max_vl);
+ sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));
+
+ if (!sve_state || !sve_state_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
+ if (ret)
+ goto err;
+
+ vcpu->arch.sve_state = sve_state;
+ vcpu->arch.sve_max_vl = sve_max_vl;
+
+ return 0;
+err:
+ clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
+ return ret;
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -432,7 +473,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
if (ret)
goto done;
- pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+ ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
if (ret)
unpin_host_vcpu(host_vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index d62bcb5634a2..a48d3f5a5afb 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -28,6 +28,7 @@ static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
+static void *selftest_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -38,6 +39,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
hyp_early_alloc_init(virt, size);
+ nr_pages = pkvm_selftest_pages();
+ selftest_base = hyp_early_alloc_contig(nr_pages);
+ if (nr_pages && !selftest_base)
+ return -ENOMEM;
+
nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
vmemmap_base = hyp_early_alloc_contig(nr_pages);
if (!vmemmap_base)
@@ -119,6 +125,10 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
+ ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
+ if (ret)
+ return ret;
+
ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
if (ret)
return ret;
@@ -180,6 +190,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
enum pkvm_page_state state;
+ struct hyp_page *page;
phys_addr_t phys;
if (!kvm_pte_valid(ctx->old))
@@ -192,19 +203,25 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (!addr_is_memory(phys))
return -EINVAL;
+ page = hyp_phys_to_page(phys);
+
/*
* Adjust the host stage-2 mappings to match the ownership attributes
- * configured in the hypervisor stage-1.
+ * configured in the hypervisor stage-1, and make sure to propagate them
+ * to the hyp_vmemmap state.
*/
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
+ set_hyp_state(page, PKVM_PAGE_OWNED);
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
- hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED;
+ set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
+ set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
- hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED;
+ set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
+ set_host_state(page, PKVM_PAGE_SHARED_OWNED);
break;
default:
return -EINVAL;
@@ -295,7 +312,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
- ret = hyp_create_pcpu_fixmap();
+ ret = hyp_create_fixmap();
if (ret)
goto out;
@@ -304,6 +321,8 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
+
+ pkvm_ownership_selftest(selftest_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index df5cc74a7dd0..c351b4abd5db 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -11,12 +11,6 @@
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
-
-#define KVM_PTE_TYPE BIT(1)
-#define KVM_PTE_TYPE_BLOCK 0
-#define KVM_PTE_TYPE_PAGE 1
-#define KVM_PTE_TYPE_TABLE 1
-
struct kvm_pgtable_walk_data {
struct kvm_pgtable_walker *walker;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 754f2fe0cc67..e445db2cb4a4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1304,6 +1304,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
if (map_size == PAGE_SIZE)
return true;
+ /* pKVM only supports PMD_SIZE huge-mappings */
+ if (is_protected_kvm_enabled() && map_size != PMD_SIZE)
+ return false;
+
size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
@@ -1537,7 +1541,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* logging_active is guaranteed to never be true for VM_PFNMAP
* memslots.
*/
- if (logging_active || is_protected_kvm_enabled()) {
+ if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
} else {
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 0f89157d31fd..793089ba23c5 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -5,6 +5,7 @@
*/
#include <linux/init.h>
+#include <linux/interval_tree_generic.h>
#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
@@ -79,6 +80,7 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += host_s2_pgtable_pages();
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+ hyp_mem_pages += pkvm_selftest_pages();
hyp_mem_pages += hyp_ffa_proxy_pages();
/*
@@ -262,6 +264,7 @@ static int __init finalize_pkvm(void)
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+ kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
kmemleak_free_part(__hyp_rodata_start, __hyp_rodata_end - __hyp_rodata_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
@@ -273,80 +276,68 @@ static int __init finalize_pkvm(void)
}
device_initcall_sync(finalize_pkvm);
-static int cmp_mappings(struct rb_node *node, const struct rb_node *parent)
+static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
{
- struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node);
- struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node);
-
- if (a->gfn < b->gfn)
- return -1;
- if (a->gfn > b->gfn)
- return 1;
- return 0;
+ return m->gfn * PAGE_SIZE;
}
-static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn)
+static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
{
- struct rb_node *node = root->rb_node, *prev = NULL;
- struct pkvm_mapping *mapping;
-
- while (node) {
- mapping = rb_entry(node, struct pkvm_mapping, node);
- if (mapping->gfn == gfn)
- return node;
- prev = node;
- node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right;
- }
-
- return prev;
+ return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
}
+INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
+ __pkvm_mapping_start, __pkvm_mapping_end, static,
+ pkvm_mapping);
+
/*
- * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing
- * of __map inline.
+ * __tmp is updated to iter_first(pkvm_mappings) *before* entering the body of the loop to allow
+ * freeing of __map inline.
*/
#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map) \
- for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings, \
- ((__start) >> PAGE_SHIFT)); \
+ for (struct pkvm_mapping *__tmp = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings, \
+ __start, __end - 1); \
__tmp && ({ \
- __map = rb_entry(__tmp, struct pkvm_mapping, node); \
- __tmp = rb_next(__tmp); \
+ __map = __tmp; \
+ __tmp = pkvm_mapping_iter_next(__map, __start, __end - 1); \
true; \
}); \
- ) \
- if (__map->gfn < ((__start) >> PAGE_SHIFT)) \
- continue; \
- else if (__map->gfn >= ((__end) >> PAGE_SHIFT)) \
- break; \
- else
+ )
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops)
{
- pgt->pkvm_mappings = RB_ROOT;
+ pgt->pkvm_mappings = RB_ROOT_CACHED;
pgt->mmu = mmu;
return 0;
}
-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end)
{
struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
pkvm_handle_t handle = kvm->arch.pkvm.handle;
struct pkvm_mapping *mapping;
- struct rb_node *node;
+ int ret;
if (!handle)
- return;
+ return 0;
- node = rb_first(&pgt->pkvm_mappings);
- while (node) {
- mapping = rb_entry(node, struct pkvm_mapping, node);
- kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
- node = rb_next(node);
- rb_erase(&mapping->node, &pgt->pkvm_mappings);
+ for_each_mapping_in_range_safe(pgt, start, end, mapping) {
+ ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
+ mapping->nr_pages);
+ if (WARN_ON(ret))
+ return ret;
+ pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
kfree(mapping);
}
+
+ return 0;
+}
+
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+ __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
}
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -360,42 +351,46 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 pfn = phys >> PAGE_SHIFT;
int ret;
- if (size != PAGE_SIZE)
+ if (size != PAGE_SIZE && size != PMD_SIZE)
return -EINVAL;
lockdep_assert_held_write(&kvm->mmu_lock);
- ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
- if (ret) {
- /* Is the gfn already mapped due to a racing vCPU? */
- if (ret == -EPERM)
+
+ /*
+ * Calling stage2_map() on top of existing mappings is either happening because of a race
+ * with another vCPU, or because we're changing between page and block mappings. As per
+ * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
+ */
+ mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
+ if (mapping) {
+ if (size == (mapping->nr_pages * PAGE_SIZE))
return -EAGAIN;
+
+ /* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
+ ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+ if (ret)
+ return ret;
+ mapping = NULL;
}
+ ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
+ if (WARN_ON(ret))
+ return ret;
+
swap(mapping, cache->mapping);
mapping->gfn = gfn;
mapping->pfn = pfn;
- WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings));
+ mapping->nr_pages = size / PAGE_SIZE;
+ pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
return ret;
}
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
- struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
- pkvm_handle_t handle = kvm->arch.pkvm.handle;
- struct pkvm_mapping *mapping;
- int ret = 0;
+ lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock);
- lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
- ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
- if (WARN_ON(ret))
- break;
- rb_erase(&mapping->node, &pgt->pkvm_mappings);
- kfree(mapping);
- }
-
- return ret;
+ return __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
}
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
@@ -407,7 +402,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
- ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn);
+ ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
+ mapping->nr_pages);
if (WARN_ON(ret))
break;
}
@@ -422,7 +418,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
- __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+ __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
+ PAGE_SIZE * mapping->nr_pages);
return 0;
}
@@ -437,7 +434,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64
lockdep_assert_held(&kvm->mmu_lock);
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
- mkold);
+ mapping->nr_pages, mkold);
return young;
}
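
[Note on the hunks above: pkvm_mappings is now an interval tree keyed on byte addresses, with closed-interval endpoints — __pkvm_mapping_start()/__pkvm_mapping_end() return the first and last byte of a mapping. Worked endpoint arithmetic for one PMD block, assuming 4K pages; the gfn value is purely illustrative:]

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	uint64_t gfn = 0x80000, nr_pages = 512;	/* one 2MiB block */
	uint64_t start = gfn << PAGE_SHIFT;
	uint64_t last  = ((gfn + nr_pages) << PAGE_SHIFT) - 1;

	/* Prints [0x80000000, 0x801fffff]: the closed interval that
	 * pkvm_mapping_iter_first(root, addr, addr + size - 1) matches
	 * against, so any overlap -- larger or smaller -- is found. */
	printf("[%#llx, %#llx]\n",
	       (unsigned long long)start, (unsigned long long)last);
	return 0;
}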