author     Sean Christopherson <seanjc@google.com>    2024-10-10 19:10:36 -0700
committer  Sean Christopherson <seanjc@google.com>    2024-10-30 14:46:46 -0700
commit     b7ed46b201a41a2f63bc104a66f33c65e1b44fbf (patch)
tree       fc7c5ea38bdda5c52fde56ef14783d4a3e3612f7
parent     0387d79e24d6cd816ea600f91607bd27c680a897 (diff)
KVM: x86/mmu: Don't force flush if SPTE update clears Accessed bit
Don't force a TLB flush if mmu_spte_update() clears the Accessed bit, as
access tracking tolerates false negatives, as evidenced by the mmu_notifier
hooks that explicitly test and age SPTEs without doing a TLB flush.

In practice, this is very nearly a nop.  spte_write_protect() and
spte_clear_dirty() never clear the Accessed bit.  make_spte() always sets
the Accessed bit for !prefetch scenarios.  FNAME(sync_spte) only sets SPTE
if the protection bits are changing, i.e. if a flush will be needed
regardless of the Accessed bits.  And FNAME(pte_prefetch) sets SPTE if and
only if the old SPTE is !PRESENT.

That leaves kvm_arch_async_page_ready() as the one path that will generate
a !ACCESSED SPTE *and* overwrite a PRESENT SPTE.  And that's very arguably
a bug, as clobbering a valid SPTE in that case is nonsensical.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Link: https://lore.kernel.org/r/20241011021051.1557902-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--  arch/x86/kvm/mmu/mmu.c  30
1 file changed, 9 insertions(+), 21 deletions(-)
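
To make the behavior change concrete, here is a minimal, self-contained
userspace model of the new flush decision.  The bit positions and the
needs_flush() helper are invented for illustration and are not the real
SPTE layout; only the boolean logic mirrors the patched mmu_spte_update():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit layout; stand-ins for the real SPTE bits and helpers. */
#define SPTE_PRESENT		(UINT64_C(1) << 0)
#define SPTE_ACCESSED		(UINT64_C(1) << 1)
#define SPTE_DIRTY		(UINT64_C(1) << 2)
#define SPTE_MMU_WRITABLE	(UINT64_C(1) << 3)

/* Mirrors the patched logic: clearing Accessed alone no longer flushes. */
static bool needs_flush(uint64_t old_spte, uint64_t new_spte)
{
	if (!(old_spte & SPTE_PRESENT))
		return false;

	return ((old_spte & SPTE_MMU_WRITABLE) && !(new_spte & SPTE_MMU_WRITABLE)) ||
	       ((old_spte & SPTE_DIRTY) && !(new_spte & SPTE_DIRTY));
}

int main(void)
{
	uint64_t spte = SPTE_PRESENT | SPTE_ACCESSED | SPTE_DIRTY | SPTE_MMU_WRITABLE;

	/* Prints flush=0: Accessed-only clears tolerate false negatives. */
	printf("clear Accessed: flush=%d\n", needs_flush(spte, spte & ~SPTE_ACCESSED));
	/* Prints flush=1: Dirty clears must flush, e.g. for D-bit based PML. */
	printf("clear Dirty:    flush=%d\n", needs_flush(spte, spte & ~SPTE_DIRTY));
	return 0;
}

Before this patch, the first case would also have reported a flush,
forcing an unnecessary remote TLB flush on every Accessed-bit clear.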
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d4bb9d60dc59..067b6bb7bfca 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -521,36 +521,24 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
 * not whether or not SPTEs were modified, i.e. only the write-tracking case
 * needs to flush at the time the SPTEs is modified, before dropping mmu_lock.
 *
+ * Remote TLBs also need to be flushed if the Dirty bit is cleared, as false
+ * negatives are not acceptable, e.g. if KVM is using D-bit based PML on VMX.
+ *
+ * Don't flush if the Accessed bit is cleared, as access tracking tolerates
+ * false negatives, and the one path that does care about TLB flushes,
+ * kvm_mmu_notifier_clear_flush_young(), uses mmu_spte_update_no_track().
+ *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
-	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

-	/*
-	 * For the spte updated out of mmu-lock is safe, since
-	 * we always atomically update it, see the comments in
-	 * spte_has_volatile_bits().
-	 */
-	if (is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte))
-		flush = true;
-
-	/*
-	 * Flush TLB when accessed/dirty states are changed in the page tables,
-	 * to guarantee consistency between TLB and page tables.
-	 */
-
-	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte))
-		flush = true;
-
-	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte))
-		flush = true;
-
-	return flush;
+	return (is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte)) ||
+	       (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte));
}

/*
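
For context on why false negatives are tolerable, below is a hedged sketch
of an aging pass in the style of the mmu_notifier hooks the changelog
mentions.  sketch_age_spte() is a hypothetical name and the body is
simplified (it ignores access-tracked SPTEs, for one); is_accessed_spte(),
shadow_accessed_mask, mmu_spte_get_lockless() and mmu_spte_update_no_track()
are real KVM identifiers, but this is illustrative only, not kernel code:

/*
 * Hypothetical aging sketch: clear the Accessed bit via the no-track path
 * and deliberately skip the TLB flush.  A stale TLB entry only means a
 * later access might not re-set the bit, i.e. the page can look idle when
 * it isn't, which access tracking explicitly tolerates.
 */
static bool sketch_age_spte(u64 *sptep)
{
	u64 old_spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(old_spte))
		return false;

	/* No kvm_flush_remote_tlbs() here: a false negative is acceptable. */
	mmu_spte_update_no_track(sptep, old_spte & ~shadow_accessed_mask);
	return true;
}

As the new comment notes, the one path that does want a flush after
clearing the Accessed bit, kvm_mmu_notifier_clear_flush_young(), already
goes through mmu_spte_update_no_track() and handles the flush itself.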