author     Sean Christopherson <seanjc@google.com>    2024-10-10 19:10:44 -0700
committer  Sean Christopherson <seanjc@google.com>    2024-10-30 14:46:47 -0700
commit     7971801b5618e37d7173f615300f640ab590ba6d
tree       d9b2ed62a6d112c7f45f96fffef186a9a496edd3
parent     53510b912518bd15dba9632ee8e539168166af3c

KVM: x86/mmu: Use Accessed bit even when _hardware_ A/D bits are disabled
Use the Accessed bit in SPTEs even when A/D bits are disabled in hardware,
i.e. propagate accessed information to SPTE.Accessed even when KVM is
doing manual tracking by making SPTEs not-present. In addition to
eliminating a small amount of code in is_accessed_spte(), this also paves
the way for preserving Accessed information when a SPTE is zapped in
response to a mmu_notifier PROTECTION event, e.g. if a SPTE is zapped
because NUMA balancing kicks in.
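
To make the new invariant concrete, here is a minimal userspace model (not
the kernel code; PRESENT and ACCESSED are invented stand-ins for
shadow_present_mask and shadow_accessed_mask, and make_spte()'s
"synchronizing" case is omitted) of why a single bit test now suffices:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  /* Invented bit positions, standing in for the real shadow_*_mask values. */
  #define PRESENT   (1ull << 0)
  #define ACCESSED  (1ull << 8)

  /* make_spte() now sets Accessed unconditionally (unless prefetching),
   * whether or not hardware A/D bits are enabled... */
  static uint64_t model_make_spte(bool prefetch)
  {
          uint64_t spte = PRESENT;

          if (!prefetch)
                  spte |= ACCESSED;
          return spte;
  }

  /* ...so is_accessed_spte() collapses to a single bit test. */
  static bool model_is_accessed(uint64_t spte)
  {
          return spte & ACCESSED;
  }

  int main(void)
  {
          assert(model_is_accessed(model_make_spte(false)));
          assert(!model_is_accessed(model_make_spte(true)));
          return 0;
  }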
Note, EPT is the only flavor of paging in which A/D bits are conditionally
enabled, and the Accessed (and Dirty) bit is software-available when A/D
bits are disabled.
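
For reference, the EPT Accessed and Dirty flags live at bits 8 and 9 of EPT
paging-structure entries and are ignored by hardware when the EPTP's
enable-A/D-bits control is clear, which is what makes them software-available.
KVM's existing mask definitions (arch/x86/include/asm/vmx.h) encode those
positions:

  /* Ignored by hardware, and thus software-available, when EPT A/D is off. */
  #define VMX_EPT_ACCESS_BIT  (1ull << 8)
  #define VMX_EPT_DIRTY_BIT   (1ull << 9)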
Note #2, there are currently no concrete plans to preserve Accessed
information. Explorations on that front were the initial catalyst, but
the cleanup is the motivation for the actual commit.
Link: https://lore.kernel.org/r/20241011021051.1557902-13-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
 arch/x86/kvm/mmu/mmu.c  |  3 ++-
 arch/x86/kvm/mmu/spte.c |  4 ++--
 arch/x86/kvm/mmu/spte.h | 11 +----------
 3 files changed, 5 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 80b5a45ac317..36247b8d7476 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3493,7 +3493,8 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * enabled, the SPTE can't be an access-tracked SPTE.
 		 */
 		if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
-			new_spte = restore_acc_track_spte(new_spte);
+			new_spte = restore_acc_track_spte(new_spte) |
+				   shadow_accessed_mask;
 
 		/*
 		 * To keep things simple, only SPTEs that are MMU-writable can
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 54d8c9b76050..617479efd127 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -175,7 +175,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
 	spte |= shadow_present_mask;
 	if (!prefetch || synchronizing)
-		spte |= spte_shadow_accessed_mask(spte);
+		spte |= shadow_accessed_mask;
 
 	/*
 	 * For simplicity, enforce the NX huge page mitigation even if not
@@ -346,7 +346,7 @@ u64 mark_spte_for_access_track(u64 spte)
 	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
 		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
 
-	spte &= ~shadow_acc_track_mask;
+	spte &= ~(shadow_acc_track_mask | shadow_accessed_mask);
 
 	return spte;
 }
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 908cb672c4e1..c8dc75337c8b 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -316,12 +316,6 @@ static inline bool spte_ad_need_write_protect(u64 spte)
 	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED;
 }
 
-static inline u64 spte_shadow_accessed_mask(u64 spte)
-{
-	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
-	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
-}
-
 static inline u64 spte_shadow_dirty_mask(u64 spte)
 {
 	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
@@ -355,10 +349,7 @@ static inline kvm_pfn_t spte_to_pfn(u64 pte)
 
 static inline bool is_accessed_spte(u64 spte)
 {
-	u64 accessed_mask = spte_shadow_accessed_mask(spte);
-
-	return accessed_mask ? spte & accessed_mask
-			     : !is_access_track_spte(spte);
+	return spte & shadow_accessed_mask;
 }
 
 static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
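
A note on the fast_page_fault() hunk above: restore_acc_track_spte() only
shifts the stashed R/W/X bits back into place, and mark_spte_for_access_track()
now strips the Accessed bit as well, so the restore path has to OR
shadow_accessed_mask back in explicitly. A compact userspace model of that
stash/restore dance (invented bit layout, standing in for the real
SHADOW_ACC_TRACK_SAVED_BITS_MASK/SHIFT scheme):

  #include <assert.h>
  #include <stdint.h>

  /* Invented layout: R/W/X in bits 0-2, Accessed in bit 8, and a
   * software-available stash area for the saved R/W/X bits at bit 54. */
  #define RWX_MASK     (7ull << 0)
  #define ACCESSED     (1ull << 8)
  #define SAVED_SHIFT  54
  #define SAVED_MASK   (RWX_MASK << SAVED_SHIFT)

  static uint64_t mark_for_access_track(uint64_t spte)
  {
          /* Stash R/W/X high, then clear R/W/X (not-present) and Accessed. */
          spte |= (spte & RWX_MASK) << SAVED_SHIFT;
          return spte & ~(RWX_MASK | ACCESSED);
  }

  static uint64_t restore_acc_track(uint64_t spte)
  {
          /* Move the stashed R/W/X bits back; deliberately no Accessed here. */
          uint64_t saved = (spte & SAVED_MASK) >> SAVED_SHIFT;

          return (spte & ~SAVED_MASK) | saved;
  }

  int main(void)
  {
          uint64_t spte = RWX_MASK | ACCESSED;

          spte = mark_for_access_track(spte);
          assert(!(spte & (RWX_MASK | ACCESSED)));  /* untouched while tracked */

          /* The caller must set Accessed, as fast_page_fault() now does. */
          spte = restore_acc_track(spte) | ACCESSED;
          assert((spte & (RWX_MASK | ACCESSED)) == (RWX_MASK | ACCESSED));
          return 0;
  }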