-rw-r--r--	arch/x86/kvm/mmu/spte.c	28	+++++++++-------------------
1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index e5af69a8f101..09ce93c4916a 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -220,29 +220,20 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
 
 		/*
-		 * When overwriting an existing leaf SPTE, and the old SPTE was
-		 * writable, skip trying to unsync shadow pages as any relevant
-		 * shadow pages must already be unsync, i.e. the hash lookup is
-		 * unnecessary (and expensive).
-		 *
-		 * The same reasoning applies to dirty page/folio accounting;
-		 * KVM marked the folio dirty when the old SPTE was created,
-		 * thus there's no need to mark the folio dirty again.
-		 *
-		 * Note, both cases rely on KVM not changing PFNs without first
-		 * zapping the old SPTE, which is guaranteed by both the shadow
-		 * MMU and the TDP MMU.
-		 */
-		if (is_last_spte(old_spte, level) && is_writable_pte(old_spte))
-			goto out;
-
-		/*
 		 * Unsync shadow pages that are reachable by the new, writable
 		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
 		 * e.g. it's write-tracked (upper-level SPs) or has one or more
 		 * shadow pages and unsync'ing pages is not allowed.
+		 *
+		 * When overwriting an existing leaf SPTE, and the old SPTE was
+		 * writable, skip trying to unsync shadow pages as any relevant
+		 * shadow pages must already be unsync, i.e. the hash lookup is
+		 * unnecessary (and expensive).  Note, this relies on KVM not
+		 * changing PFNs without first zapping the old SPTE, which is
+		 * guaranteed by both the shadow MMU and the TDP MMU.
 		 */
-		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch)) {
+		if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
+		    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch)) {
 			wrprot = true;
 			pte_access &= ~ACC_WRITE_MASK;
 			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
@@ -252,7 +243,6 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (pte_access & ACC_WRITE_MASK)
 		spte |= spte_shadow_dirty_mask(spte);
 
-out:
 	if (prefetch && !synchronizing)
 		spte = mark_spte_for_access_track(spte);
 
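
For readers following the logic: the change folds the old goto-based early exit into the if-condition guarding mmu_try_to_unsync_pages(), which is a pure De Morgan transformation of !(is_last_spte() && is_writable_pte()). Below is a minimal standalone sketch (not kernel code; the helper names old_flow_tries_unsync() and new_flow_tries_unsync() are hypothetical stand-ins for the two control flows) that exhaustively checks the two versions make the same unsync decision.

/* Standalone sketch of the control-flow equivalence; compile with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

/* Old flow: a writable leaf old SPTE takes "goto out", skipping the lookup. */
static bool old_flow_tries_unsync(bool last, bool writable)
{
	if (last && writable)
		return false;	/* goto out; unsync lookup skipped */
	return true;		/* mmu_try_to_unsync_pages() would be called */
}

/* New flow: the skip is folded into the if-condition via De Morgan's law. */
static bool new_flow_tries_unsync(bool last, bool writable)
{
	return !last || !writable;
}

int main(void)
{
	/* Exhaustively confirm both flows agree for all four input combinations. */
	for (int last = 0; last <= 1; last++) {
		for (int writable = 0; writable <= 1; writable++) {
			bool o = old_flow_tries_unsync(last, writable);
			bool n = new_flow_tries_unsync(last, writable);
			printf("last=%d writable=%d old=%d new=%d %s\n",
			       last, writable, o, n, o == n ? "OK" : "MISMATCH");
		}
	}
	return 0;
}

Note the one behavioral difference the commit does intend: dirty-bit accounting (the second hunk's removed "out:" label) is no longer skipped for writable leaf overwrites, since the dirty mask is now applied unconditionally when pte_access retains ACC_WRITE_MASK.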