path: root/arch/x86/include/asm/virtext.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in this case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
		if (msg)
			*msg = "can't execute cpuid_8000000a";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}
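
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a caller probing for SVM support could report the failure reason via
 * the 'msg' argument, assuming a pr_err()-style error path:
 *
 *	const char *msg;
 *
 *	if (!cpu_has_svm(&msg)) {
 *		pr_err("SVM is not usable: %s\n", msg);
 *		return -EOPNOTSUPP;
 *	}
 */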


/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
		 * aren't blocked, e.g. if a fatal error occurred between CLGI
		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
		 * context between reading EFER and executing STGI.  In that
		 * case, GIF must already be set, otherwise the NMI would have
		 * been blocked, so just eat the fault.
		 */
		asm_volatile_goto("1: stgi\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  ::: "memory" : fault);
fault:
		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
	}
}
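
/*
 * Illustrative sketch (an assumption, not code from this file): a shutdown
 * or emergency-reboot path would typically pair the two helpers, checking
 * cpu_has_svm() before disabling SVM on the current CPU:
 *
 *	if (cpu_has_svm(NULL))
 *		cpu_svm_disable();
 */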

#endif /* _ASM_X86_VIRTEX_H */