| author    | Andre Przywara <andre.przywara@amd.com>  | 2010-12-21 11:12:07 +0100 |
|-----------|------------------------------------------|---------------------------|
| committer | Avi Kivity <avi@redhat.com>              | 2011-01-12 11:31:07 +0200 |
| commit    | dc25e89e07d5ef31c476117d2c76b34dbb22196c |                           |
| tree      | 9fef452c4bd0704b9d74512a9a58dc5d5b742d53 |                           |
| parent    | df4f3108562dc6f6ae6648f2698df7f4c9acf52d |                           |
KVM: SVM: copy instruction bytes from VMCB
In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
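
As a stand-alone illustration of the idea (this is not KVM code; the names `fetch_cache`, `decode_ctxt`, `decode_init`, `fetch_byte` and `guest_fetch` are made up for this sketch), the program below pre-seeds an emulator-style fetch cache with the instruction bytes the hardware already reported, so decoding never has to go back to guest memory:

```c
/*
 * Sketch of the optimization in this patch: if the hardware hands us the
 * faulting instruction bytes, seed the decoder's fetch cache with them and
 * skip the expensive walk back into guest memory. Illustrative names only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FETCH_MAX 15              /* x86 instructions are at most 15 bytes */

struct fetch_cache {
	uint8_t  data[FETCH_MAX];
	uint64_t start;           /* guest RIP the cached bytes begin at */
	uint64_t end;             /* first byte NOT present in the cache */
};

struct decode_ctxt {
	uint64_t rip;
	struct fetch_cache fetch;
	int guest_fetches;        /* counts simulated expensive fetches */
};

/* Placeholder for the costly path: translate RIP and read guest memory. */
static uint8_t guest_fetch(struct decode_ctxt *ctxt, uint64_t rip)
{
	(void)rip;
	ctxt->guest_fetches++;
	return 0x90;              /* pretend guest memory holds NOPs */
}

/* Mirrors the patch: pre-seed the cache when instruction bytes were given. */
static void decode_init(struct decode_ctxt *ctxt, uint64_t rip,
			const uint8_t *insn, int insn_len)
{
	ctxt->rip = rip;
	ctxt->fetch.start = rip;
	ctxt->fetch.end = rip + insn_len;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
}

/* Fetch one byte at 'rip', preferring the cache over guest memory. */
static uint8_t fetch_byte(struct decode_ctxt *ctxt, uint64_t rip)
{
	if (rip >= ctxt->fetch.start && rip < ctxt->fetch.end)
		return ctxt->fetch.data[rip - ctxt->fetch.start];
	return guest_fetch(ctxt, rip);
}

int main(void)
{
	/* Bytes as the hardware might report them: mov eax, [rbx] */
	const uint8_t vmcb_bytes[] = { 0x8b, 0x03 };
	struct decode_ctxt seeded = { 0 }, empty = { 0 };

	decode_init(&seeded, 0x1000, vmcb_bytes, sizeof(vmcb_bytes));
	decode_init(&empty, 0x1000, NULL, 0);

	for (uint64_t rip = 0x1000; rip < 0x1002; rip++) {
		fetch_byte(&seeded, rip);
		fetch_byte(&empty, rip);
	}

	printf("guest fetches with VMCB bytes:    %d\n", seeded.guest_fetches);
	printf("guest fetches without VMCB bytes: %d\n", empty.guest_fetches);
	return 0;
}
```

Decoding the two-byte instruction costs the seeded context zero simulated guest fetches, while the empty context pays one per byte; that per-byte guest fetch is what the patch avoids on the #PF/NPF intercept path.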
| File | Lines changed |
|------|---------------|
| arch/x86/include/asm/kvm_emulate.h | 2 |
| arch/x86/include/asm/kvm_host.h | 9 |
| arch/x86/include/asm/svm.h | 4 |
| arch/x86/kvm/emulate.c | 7 |
| arch/x86/kvm/mmu.c | 5 |
| arch/x86/kvm/svm.c | 4 |
| arch/x86/kvm/vmx.c | 4 |
| arch/x86/kvm/x86.c | 6 |

8 files changed, 26 insertions, 15 deletions
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index bf70ecea3974..8e37deb1eb38 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,7 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif

-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index de00b6026b76..6268f6ce6434 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -634,13 +634,13 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);

 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 			int emulation_type)
 {
-	return x86_emulate_instruction(vcpu, 0, emulation_type);
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
 }

 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
@@ -721,7 +721,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

 void kvm_enable_tdp(void);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f0ffb8184089..f2b83bc7d784 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -83,7 +83,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 clean;
 	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };


diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 36534ecaf596..caf966781d25 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2610,7 +2610,7 @@ done:
 }

 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2621,7 +2621,10 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	struct operand memop = { .type = OP_NONE };

 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

 	switch (mode) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01c5a104031f..ea6063d9242e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3330,7 +3330,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 	}
 }

-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
@@ -3348,7 +3349,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	if (r)
 		goto out;

-	er = x86_emulate_instruction(vcpu, cr2, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);

 	switch (er) {
 	case EMULATE_DONE:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a04c01e324b4..af4b911a8bed 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1527,7 +1527,9 @@ static int pf_interception(struct vcpu_svm *svm)
 		trace_kvm_page_fault(fault_address, error_code);
 		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
 			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+			svm->vmcb->control.insn_bytes,
+			svm->vmcb->control.insn_len);
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		svm->apf_reason = 0;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f3c60fb8d95e..736f83955ce9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3055,7 +3055,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)

 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}

 	if (vmx->rmode.vm86_active &&
@@ -3502,7 +3502,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)

 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }

 static u64 ept_rsvd_mask(u64 spte, int level)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a6fcb76196b7..7ad9cda8ff36 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4365,7 +4365,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)

 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			    unsigned long cr2,
-			    int emulation_type)
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4386,7 +4388,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,

 		vcpu->arch.emulate_ctxt.have_exception = false;
 		vcpu->arch.emulate_ctxt.perm_ok = false;

-		r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 		if (r == X86EMUL_PROPAGATE_FAULT)
 			goto done;
