summaryrefslogtreecommitdiff
path: root/arch/arm64/kvm/hyp/nvhe/debug-sr.c
blob: 2f4a4f5036bb54dbefc24abb0388ddea3e1f3ab2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/debug-sr.h>

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

/*
 * Save and disable the host's Statistical Profiling Extension (SPE)
 * state before entering the guest.
 *
 * @pmscr_el1: out-parameter receiving the host's PMSCR_EL1 value, or 0
 *             when the profiling buffer was not enabled (nothing to
 *             restore later).
 */
static void __debug_save_spe(u64 *pmscr_el1)
{
	u64 limitr;

	/* Default to "nothing saved" so an early return is harmless */
	*pmscr_el1 = 0;

	/*
	 * This CPU is known to implement SPE and the feature is owned by
	 * the host; only do the save/disable dance if the profiling
	 * buffer is actually enabled.
	 */
	limitr = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(limitr & BIT(PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Stash the control register, then stop data generation */
	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
	write_sysreg_el1(0, SYS_PMSCR);
	isb();

	/* Flush any profiling data still buffered out to memory */
	psb_csync();
}

/*
 * Re-enable SPE data generation for the host with the value saved by
 * __debug_save_spe(). A zero @pmscr_el1 means nothing was saved.
 */
static void __debug_restore_spe(u64 pmscr_el1)
{
	/* Profiling wasn't enabled on entry: nothing to put back */
	if (!pmscr_el1)
		return;

	/* Synchronise against the freshly installed host page tables */
	isb();

	/* Turn profiling data generation back on */
	write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}

/* Swap TRFCR_EL1: stash the current value in @saved_trfcr, install @new_trfcr */
static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
{
	u64 old_trfcr = read_sysreg_el1(SYS_TRFCR);

	*saved_trfcr = old_trfcr;
	write_sysreg_el1(new_trfcr, SYS_TRFCR);
}

/*
 * Does the trace buffer need draining before handing over to the guest?
 *
 * Under protected KVM, when a TRBE is present, read the enable bit
 * straight from the hardware rather than relying on the host's
 * TRBE_ENABLED flag; otherwise the flag alone decides.
 */
static bool __trace_needs_drain(void)
{
	if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE))
		return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E;

	return host_data_test_flag(TRBE_ENABLED);
}

static bool __trace_needs_switch(void)
{
	return host_data_test_flag(TRBE_ENABLED) ||
	       host_data_test_flag(EL1_TRACING_CONFIGURED);
}

/* Install the guest's TRFCR value, saving the host's for the way back */
static void __trace_switch_to_guest(void)
{
	/*
	 * Tracing in the guest isn't supported while TRBE is in use, so
	 * force the guest-time TRFCR value to 0 (tracing disabled).
	 */
	if (host_data_test_flag(TRBE_ENABLED))
		*host_data_ptr(trfcr_while_in_guest) = 0;

	__trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
			  *host_data_ptr(trfcr_while_in_guest));

	/* Drain any host trace data still in flight before guest entry */
	if (__trace_needs_drain()) {
		isb();
		tsb_csync();
	}
}

/* Save the guest-time TRFCR value and reinstate the host's saved one */
static void __trace_switch_to_host(void)
{
	__trace_do_switch(host_data_ptr(trfcr_while_in_guest),
			  *host_data_ptr(host_debug_state.trfcr_el1));
}

/*
 * Quiesce the host's self-hosted debug buffers (SPE, trace) ahead of
 * running the guest. @vcpu is currently unused here.
 */
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/* SPE first: stop and drain the host's profiling stream */
	if (host_data_test_flag(HAS_SPE))
		__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));

	/* Then swap the trace filter controls over to the guest's */
	if (__trace_needs_switch())
		__trace_switch_to_guest();
}

/* nVHE guest entry: nothing beyond the common debug register switch */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_guest_common(vcpu);
}

/*
 * Undo __debug_save_host_buffers_nvhe(): bring the host's SPE and trace
 * state back after the guest has run. @vcpu is currently unused here.
 */
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/* Re-enable SPE profiling with the value saved on entry */
	if (host_data_test_flag(HAS_SPE))
		__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));

	/* Put the host's TRFCR back in place */
	if (__trace_needs_switch())
		__trace_switch_to_host();
}

/* nVHE guest exit: nothing beyond the common debug register switch */
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_host_common(vcpu);
}