-rw-r--r--   arch/x86/kvm/svm/svm.c |  2 +-
-rw-r--r--   arch/x86/kvm/x86.h     | 12 ++++++------
2 files changed, 7 insertions, 7 deletions
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 19794a8ea28c..70183d2271b5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1802,7 +1802,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (!npt_enabled) {
 		hcr0 |= X86_CR0_PG | X86_CR0_WP;
 
-		if (old_paging != !!is_paging(vcpu))
+		if (old_paging != is_paging(vcpu))
 			svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	}
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6d68c6772105..577b82358529 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -171,19 +171,19 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline int is_pae(struct kvm_vcpu *vcpu)
+static inline bool is_pae(struct kvm_vcpu *vcpu)
 {
-	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
 }
 
-static inline int is_pse(struct kvm_vcpu *vcpu)
+static inline bool is_pse(struct kvm_vcpu *vcpu)
 {
-	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
 }
 
-static inline int is_paging(struct kvm_vcpu *vcpu)
+static inline bool is_paging(struct kvm_vcpu *vcpu)
 {
-	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
+	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
 }
 
 static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
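For context on the svm.c hunk: an int-returning bit-test helper hands back the raw masked value, so comparing it against a bool at a call site needs a `!!` normalization, whereas a bool-returning helper is already 0 or 1. A minimal standalone sketch of that difference (hypothetical helper names and a simplified CR4_PAE constant, not the actual KVM helpers):

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_PAE (1UL << 5)	/* PAE enable bit, as in the real CR4 layout */

/* Old-style helper: returns the raw masked value (0 or 0x20 here). */
static int is_pae_int(unsigned long cr4)
{
	return cr4 & X86_CR4_PAE;
}

/* New-style helper: returns a normalized true/false. */
static bool is_pae_bool(unsigned long cr4)
{
	return cr4 & X86_CR4_PAE;
}

int main(void)
{
	unsigned long cr4 = X86_CR4_PAE;
	bool old_pae = true;

	/* Raw int result is 0x20, so a direct bool comparison misfires without "!!". */
	printf("int helper, no !!:   %s\n",
	       old_pae != is_pae_int(cr4) ? "spurious mismatch" : "match");
	printf("int helper, with !!: %s\n",
	       old_pae != !!is_pae_int(cr4) ? "spurious mismatch" : "match");

	/* bool result is already 0 or 1, so no normalization is needed. */
	printf("bool helper:         %s\n",
	       old_pae != is_pae_bool(cr4) ? "spurious mismatch" : "match");
	return 0;
}

The first comparison reports a spurious mismatch because the bool 1 is compared against the raw mask 0x20; with a bool return type that cannot happen, which is what makes the `!!` at the svm.c call site clearly unnecessary once is_paging() returns bool.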
