author     Ankit Agrawal <ankita@nvidia.com>        2025-07-05 07:17:13 +0000
committer  Oliver Upton <oliver.upton@linux.dev>    2025-07-07 07:50:20 -0700
commit     216887f79d9878d9cb169729fea3bb16c56db465
tree       cf96722801cd97c433e1d03a1045e3445cd23e94
parent     8cc9dc1ae4fb075e29e2d5cf2386761d3497049a
KVM: arm64: Assume non-PFNMAP/MIXEDMAP VMAs can be mapped cacheable
Despite its name, kvm_is_device_pfn() is actually used to determine if a
given PFN has a kernel mapping that can be used to perform cache
maintenance, as it calls pfn_is_map_memory() internally.
Expand the helper into its single callsite and further condition the
check on the VMA having either VM_PFNMAP or VM_MIXEDMAP set. VMAs that
set neither of these flags must always contain Normal, struct page
backed memory with valid aliases in the kernel address space.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Donald Dutile <ddutile@redhat.com>
Signed-off-by: Ankit Agrawal <ankita@nvidia.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250705071717.5062-3-ankita@nvidia.com
[ Oliver: fixed typos, refined changelog ]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
-rw-r--r--  arch/arm64/kvm/mmu.c  10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1601ab9527d4..5fe24f30999d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -193,11 +193,6 @@ int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
 	return 0;
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_is_map_memory(pfn);
-}
-
 static void *stage2_memcache_zalloc_page(void *arg)
 {
 	struct kvm_mmu_memory_cache *mc = arg;
@@ -1492,6 +1487,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
 	struct page *page;
+	vm_flags_t vm_flags;
 	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT |
 					    KVM_PGTABLE_WALK_SHARED;
 
 	if (fault_is_perm)
@@ -1619,6 +1615,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
 
+	vm_flags = vma->vm_flags;
+
 	/* Don't use the VMA after the unlock -- it may have vanished */
 	vma = NULL;
 
@@ -1642,7 +1640,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
-	if (kvm_is_device_pfn(pfn)) {
+	if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
 		/*
 		 * If the page was identified as device early by looking at
 		 * the VMA flags, vma_pagesize is already representing the
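
To make the behavioural change concrete, here is a small self-contained userspace sketch of the old check versus the new one. This is an illustration, not kernel code: pfn_is_map_memory() is stubbed, and the PFN and flag values in main() are invented; the VM_* bit values mirror those in <linux/mm.h>, and only the two condition functions correspond to the logic in the diff above.

/*
 * Illustrative sketch only -- compiles and runs in userspace.
 * pfn_is_map_memory() is a stub standing in for the kernel's
 * memblock-backed helper of the same name.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_PFNMAP   0x00000400UL	/* bit values as in <linux/mm.h> */
#define VM_MIXEDMAP 0x10000000UL

/* Stub: in the kernel, true means the PFN has a kernel-space alias. */
static bool pfn_is_map_memory(unsigned long pfn)
{
	return pfn < 0x100000;	/* pretend: low PFNs are kernel-mapped RAM */
}

/*
 * Old behaviour (kvm_is_device_pfn): any PFN without a kernel mapping
 * was treated as device, regardless of the VMA it was faulted through.
 */
static bool old_is_device(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);
}

/*
 * New behaviour: only PFNMAP/MIXEDMAP VMAs are checked against
 * pfn_is_map_memory(). A VMA with neither flag must contain Normal,
 * struct-page-backed memory with a valid kernel alias, so it can
 * always be mapped cacheable.
 */
static bool new_is_device(unsigned long vm_flags, unsigned long pfn)
{
	return (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) &&
	       !pfn_is_map_memory(pfn);
}

int main(void)
{
	/* Invented scenario: a high PFN faulted through an ordinary VMA. */
	unsigned long pfn = 0x200000, flags = 0;

	printf("old: device=%d, new: device=%d\n",
	       old_is_device(pfn), new_is_device(flags, pfn));
	return 0;
}

Note also why the patch snapshots vma->vm_flags into the local vm_flags: the check runs after the VMA pointer has been cleared ("Don't use the VMA after the unlock -- it may have vanished"), so the flags must be captured while the VMA is still safe to dereference.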