 arch/x86/include/asm/kvm_host.h |  4 ++--
 arch/x86/kvm/mmu/mmu.c          | 23 ++++++++++++++++++++++-
 arch/x86/kvm/x86.c              |  5 ++++-
 3 files changed, 28 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdb..17529e5d717c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1344,7 +1344,7 @@ struct kvm_arch {
 	bool has_private_mem;
 	bool has_protected_state;
 	bool pre_fault_allowed;
-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	struct hlist_head *mmu_page_hash;
 	struct list_head active_mmu_pages;
 	/*
 	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
@@ -2007,7 +2007,7 @@ void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4e06e2e89a8f..2b521f74bacf 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3882,6 +3882,18 @@ out_unlock:
 	return r;
 }
 
+static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
+{
+	struct hlist_head *h;
+
+	h = kvcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
+	if (!h)
+		return -ENOMEM;
+
+	kvm->arch.mmu_page_hash = h;
+	return 0;
+}
+
 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
@@ -6682,13 +6694,19 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 	kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
+	int r;
+
 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
+	r = kvm_mmu_alloc_page_hash(kvm);
+	if (r)
+		return r;
+
 	if (tdp_mmu_enabled)
 		kvm_mmu_init_tdp_mmu(kvm);
 
@@ -6699,6 +6717,7 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 
 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+	return 0;
 }
 
 static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6710,6 +6729,8 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm)
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
 {
+	kvfree(kvm->arch.mmu_page_hash);
+
 	if (tdp_mmu_enabled)
 		kvm_mmu_uninit_tdp_mmu(kvm);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b58a74c1722d..d9339ad1474e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12789,7 +12789,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (ret)
 		goto out;
 
-	kvm_mmu_init_vm(kvm);
+	ret = kvm_mmu_init_vm(kvm);
+	if (ret)
+		goto out_cleanup_page_track;
 
 	ret = kvm_x86_call(vm_init)(kvm);
 	if (ret)
@@ -12842,6 +12844,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 out_uninit_mmu:
 	kvm_mmu_uninit_vm(kvm);
+out_cleanup_page_track:
 	kvm_page_track_cleanup(kvm);
 out:
 	return ret;
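
Note: the pattern the diff adopts -- allocate the hashed page list at VM creation, fail kvm_arch_init_vm() if the allocation fails, and free the table at VM teardown -- can be sketched outside the kernel roughly as below. This is a minimal userspace sketch under stated assumptions, not kernel code: calloc()/free() stand in for kvcalloc(..., GFP_KERNEL_ACCOUNT)/kvfree(), and the names NUM_MMU_PAGES_HASH, struct vm, vm_alloc_page_hash(), vm_init() and vm_uninit() are illustrative, not the real KVM layout.

/* Standalone sketch of the allocate/init/teardown pattern above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_MMU_PAGES_HASH 4096		/* stand-in for KVM_NUM_MMU_PAGES */

struct hlist_head { void *first; };	/* simplified stand-in */

struct vm {
	struct hlist_head *mmu_page_hash;	/* was a fixed in-struct array */
};

/* Mirrors kvm_mmu_alloc_page_hash(): allocate a zeroed bucket array. */
static int vm_alloc_page_hash(struct vm *vm)
{
	struct hlist_head *h;

	h = calloc(NUM_MMU_PAGES_HASH, sizeof(*h));
	if (!h)
		return -ENOMEM;

	vm->mmu_page_hash = h;
	return 0;
}

/* Mirrors the kvm_mmu_init_vm() change: propagate the error to the caller. */
static int vm_init(struct vm *vm)
{
	int r = vm_alloc_page_hash(vm);

	if (r)
		return r;
	/* ... remaining MMU init would go here ... */
	return 0;
}

/* Mirrors kvm_mmu_uninit_vm(): free the table on teardown. */
static void vm_uninit(struct vm *vm)
{
	free(vm->mmu_page_hash);
	vm->mmu_page_hash = NULL;
}

int main(void)
{
	struct vm vm = { 0 };

	if (vm_init(&vm)) {
		fprintf(stderr, "VM init failed\n");
		return 1;
	}
	vm_uninit(&vm);
	return 0;
}

The two properties the patch relies on carry over to the sketch: the table is zero-initialized (kvcalloc/calloc) so every hash bucket starts out empty, and a failed allocation unwinds VM creation instead of leaving a NULL hash table behind.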
