Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/enlighten.c      | 17
-rw-r--r--  arch/x86/xen/enlighten_pv.c   | 63
-rw-r--r--  arch/x86/xen/enlighten_pvh.c  | 19
-rw-r--r--  arch/x86/xen/mmu_pv.c         |  4
-rw-r--r--  arch/x86/xen/multicalls.c     | 26
-rw-r--r--  arch/x86/xen/pmu.c            | 73
-rw-r--r--  arch/x86/xen/setup.c          |  3
-rw-r--r--  arch/x86/xen/smp_pv.c         |  1
-rw-r--r--  arch/x86/xen/suspend.c        |  7
-rw-r--r--  arch/x86/xen/xen-asm.S        |  4
-rw-r--r--  arch/x86/xen/xen-ops.h        |  8
11 files changed, 99 insertions, 126 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 43dcd8c7badc..53282dc7d5ac 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
  */
 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
 
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
+
 static __ref void xen_get_vendor(void)
 {
 	init_cpu_devs();
@@ -100,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
 	void (*func)(void);
 
 	/*
-	 * Xen is supported only on CPUs with CPUID, so testing for
-	 * X86_FEATURE_CPUID is a test for early_cpu_init() having been
-	 * run.
-	 *
 	 * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
 	 * dependency chain: it is being called via the xen_hypercall static
 	 * call when running as a PVH or HVM guest. Hypercalls need to be
@@ -115,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
 	 */
 	instrumentation_begin();
 
-	if (!boot_cpu_has(X86_FEATURE_CPUID))
-		xen_get_vendor();
+	xen_get_vendor();
 
 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
@@ -466,6 +464,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
 			xen_free_unpopulated_pages(1, &pg);
 		}
 
+		/*
+		 * Account for the region being in the physmap but unpopulated.
+		 * The value in xen_released_pages is used by the balloon
+		 * driver to know how much of the physmap is unpopulated and
+		 * set an accurate initial memory target.
+		 */
+		xen_released_pages += xen_extra_mem[i].n_pfns;
 		/* Zero so region is not also added to the balloon driver. */
 		xen_extra_mem[i].n_pfns = 0;
 	}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 846b5737d320..26bbaf4b7330 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -49,7 +49,7 @@
 #include <xen/hvc-console.h>
 #include <xen/acpi.h>
 
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
 #include <asm/paravirt.h>
 #include <asm/apic.h>
 #include <asm/page.h>
@@ -61,6 +61,7 @@
 #include <asm/processor.h>
 #include <asm/proto.h>
 #include <asm/msr-index.h>
+#include <asm/msr.h>
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
@@ -1086,15 +1087,15 @@ static void xen_write_cr4(unsigned long cr4)
 	native_write_cr4(cr4);
 }
 
-static u64 xen_do_read_msr(unsigned int msr, int *err)
+static u64 xen_do_read_msr(u32 msr, int *err)
 {
 	u64 val = 0;	/* Avoid uninitialized value for safe variant. */
 
-	if (pmu_msr_read(msr, &val, err))
+	if (pmu_msr_chk_emulated(msr, &val, true))
 		return val;
 
 	if (err)
-		val = native_read_msr_safe(msr, err);
+		*err = native_read_msr_safe(msr, &val);
 	else
 		val = native_read_msr(msr);
 
@@ -1110,17 +1111,9 @@ static u64 xen_do_read_msr(unsigned int msr, int *err)
 	return val;
 }
 
-static void set_seg(unsigned int which, unsigned int low, unsigned int high,
-		    int *err)
+static void set_seg(u32 which, u64 base)
 {
-	u64 base = ((u64)high << 32) | low;
-
-	if (HYPERVISOR_set_segment_base(which, base) == 0)
-		return;
-
-	if (err)
-		*err = -EIO;
-	else
+	if (HYPERVISOR_set_segment_base(which, base))
 		WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base);
 }
 
@@ -1129,20 +1122,19 @@ static void set_seg(unsigned int which, unsigned int low, unsigned int high,
  * With err == NULL write_msr() semantics are selected.
  * Supplying an err pointer requires err to be pre-initialized with 0.
  */
-static void xen_do_write_msr(unsigned int msr, unsigned int low,
-			     unsigned int high, int *err)
+static void xen_do_write_msr(u32 msr, u64 val, int *err)
 {
 	switch (msr) {
 	case MSR_FS_BASE:
-		set_seg(SEGBASE_FS, low, high, err);
+		set_seg(SEGBASE_FS, val);
 		break;
 
 	case MSR_KERNEL_GS_BASE:
-		set_seg(SEGBASE_GS_USER, low, high, err);
+		set_seg(SEGBASE_GS_USER, val);
 		break;
 
 	case MSR_GS_BASE:
-		set_seg(SEGBASE_GS_KERNEL, low, high, err);
+		set_seg(SEGBASE_GS_KERNEL, val);
 		break;
 
 	case MSR_STAR:
@@ -1158,42 +1150,45 @@ static void xen_do_write_msr(unsigned int msr, unsigned int low,
 		break;
 
 	default:
-		if (!pmu_msr_write(msr, low, high, err)) {
-			if (err)
-				*err = native_write_msr_safe(msr, low, high);
-			else
-				native_write_msr(msr, low, high);
-		}
+		if (pmu_msr_chk_emulated(msr, &val, false))
+			return;
+
+		if (err)
+			*err = native_write_msr_safe(msr, val);
+		else
+			native_write_msr(msr, val);
 	}
 }
 
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
+static int xen_read_msr_safe(u32 msr, u64 *val)
 {
-	return xen_do_read_msr(msr, err);
+	int err = 0;
+
+	*val = xen_do_read_msr(msr, &err);
+	return err;
 }
 
-static int xen_write_msr_safe(unsigned int msr, unsigned int low,
-			      unsigned int high)
+static int xen_write_msr_safe(u32 msr, u64 val)
 {
 	int err = 0;
 
-	xen_do_write_msr(msr, low, high, &err);
+	xen_do_write_msr(msr, val, &err);
 
 	return err;
 }
 
-static u64 xen_read_msr(unsigned int msr)
+static u64 xen_read_msr(u32 msr)
 {
-	int err;
+	int err = 0;
 
 	return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
 }
 
-static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+static void xen_write_msr(u32 msr, u64 val)
 {
 	int err;
 
-	xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL);
+	xen_do_write_msr(msr, val, xen_msr_safe ? &err : NULL);
 }
 
 /* This is called once we have the cpu_possible_mask */
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0e3d930bcb89..9d25d9373945 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/acpi.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
 #include <linux/export.h>
 #include <linux/mm.h>
 
@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
 {
 	pvh_reserve_extra_memory();
 
-	if (xen_initial_domain())
+	if (xen_initial_domain()) {
 		xen_add_preferred_consoles();
+
+		/*
+		 * Disable usage of CPU idle and frequency drivers: when
+		 * running as hardware domain the exposed native ACPI tables
+		 * causes idle and/or frequency drivers to attach and
+		 * malfunction. It's Xen the entity that controls the idle and
+		 * frequency states.
+		 *
+		 * For unprivileged domains the exposed ACPI tables are
+		 * fabricated and don't contain such data.
+		 */
+		disable_cpuidle();
+		disable_cpufreq();
+		WARN_ON(xen_set_default_idle());
+	}
 }
 
 void __init xen_pvh_init(struct boot_params *boot_params)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 38971c6dcd4b..2a4a8deaf612 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -578,7 +578,6 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 	xen_mc_issue(XEN_LAZY_MMU);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 5
 __visible p4dval_t xen_p4d_val(p4d_t p4d)
 {
 	return pte_mfn_to_pfn(p4d.p4d);
@@ -592,7 +591,6 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
 	return native_make_p4d(p4d);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
-#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
 
 static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
 		void (*func)(struct mm_struct *mm, struct page *,
@@ -2222,10 +2220,8 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
 
-#if CONFIG_PGTABLE_LEVELS >= 5
 	.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
 	.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
-#endif
 
 	.enter_mmap = xen_enter_mmap,
 	.exit_mmap = xen_exit_mmap,
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 10c660fae8b3..7237d56a9d3f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,14 +54,20 @@ struct mc_debug_data {
 
 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
 static struct mc_debug_data mc_debug_data_early __initdata;
-static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
-	&mc_debug_data_early;
 static struct mc_debug_data __percpu *mc_debug_data_ptr;
 DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
 
 static struct static_key mc_debug __ro_after_init;
 static bool mc_debug_enabled __initdata;
 
+static struct mc_debug_data * __ref get_mc_debug(void)
+{
+	if (!mc_debug_data_ptr)
+		return &mc_debug_data_early;
+
+	return this_cpu_ptr(mc_debug_data_ptr);
+}
+
 static int __init xen_parse_mc_debug(char *arg)
 {
 	mc_debug_enabled = true;
@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
 }
 early_param("xen_mc_debug", xen_parse_mc_debug);
 
-void mc_percpu_init(unsigned int cpu)
-{
-	per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
-}
-
 static int __init mc_debug_enable(void)
 {
 	unsigned long flags;
+	struct mc_debug_data __percpu *mcdb;
 
 	if (!mc_debug_enabled)
 		return 0;
 
-	mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
-	if (!mc_debug_data_ptr) {
+	mcdb = alloc_percpu(struct mc_debug_data);
+	if (!mcdb) {
 		pr_err("xen_mc_debug inactive\n");
 		static_key_slow_dec(&mc_debug);
 		return -ENOMEM;
@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
 	/* Be careful when switching to percpu debug data. */
 	local_irq_save(flags);
 	xen_mc_flush();
-	mc_percpu_init(0);
+	mc_debug_data_ptr = mcdb;
 	local_irq_restore(flags);
 
 	pr_info("xen_mc_debug active\n");
@@ -155,7 +157,7 @@ void xen_mc_flush(void)
 	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
 
 	if (static_key_false(&mc_debug)) {
-		mcdb = __this_cpu_read(mc_debug_data);
+		mcdb = get_mc_debug();
 		memcpy(mcdb->entries, b->entries,
 		       b->mcidx * sizeof(struct multicall_entry));
 	}
@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)
 
 	ret.mc = &b->entries[b->mcidx];
 	if (static_key_false(&mc_debug)) {
-		struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
+		struct mc_debug_data *mcdb = get_mc_debug();
 
 		mcdb->caller[b->mcidx] = __builtin_return_address(0);
 		mcdb->argsz[b->mcidx] = args;
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index f06987b0efc3..8f89ce0b67e3 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -2,6 +2,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 
+#include <asm/msr.h>
 #include <asm/xen/hypercall.h>
 #include <xen/xen.h>
 #include <xen/page.h>
@@ -128,7 +129,7 @@ static inline uint32_t get_fam15h_addr(u32 addr)
 	return addr;
 }
 
-static inline bool is_amd_pmu_msr(unsigned int msr)
+static bool is_amd_pmu_msr(u32 msr)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -194,8 +195,7 @@ static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
 	}
 }
 
-static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
-				  int index, bool is_read)
+static bool xen_intel_pmu_emulate(u32 msr, u64 *val, int type, int index, bool is_read)
 {
 	uint64_t *reg = NULL;
 	struct xen_pmu_intel_ctxt *ctxt;
@@ -257,7 +257,7 @@ static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
 	return false;
 }
 
-static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
+static bool xen_amd_pmu_emulate(u32 msr, u64 *val, bool is_read)
 {
 	uint64_t *reg = NULL;
 	int i, off = 0;
@@ -298,55 +298,20 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 	return false;
 }
 
-static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
-				 bool *emul)
+bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read)
 {
 	int type, index = 0;
 
 	if (is_amd_pmu_msr(msr))
-		*emul = xen_amd_pmu_emulate(msr, val, is_read);
-	else if (is_intel_pmu_msr(msr, &type, &index))
-		*emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
-	else
-		return false;
-
-	return true;
-}
-
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
-{
-	bool emulated;
+		return xen_amd_pmu_emulate(msr, val, is_read);
 
-	if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
-		return false;
+	if (is_intel_pmu_msr(msr, &type, &index))
+		return xen_intel_pmu_emulate(msr, val, type, index, is_read);
 
-	if (!emulated) {
-		*val = err ? native_read_msr_safe(msr, err)
-			   : native_read_msr(msr);
-	}
-
-	return true;
-}
-
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
-{
-	uint64_t val = ((uint64_t)high << 32) | low;
-	bool emulated;
-
-	if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
-		return false;
-
-	if (!emulated) {
-		if (err)
-			*err = native_write_msr_safe(msr, low, high);
-		else
-			native_write_msr(msr, low, high);
-	}
-
-	return true;
+	return false;
 }
 
-static unsigned long long xen_amd_read_pmc(int counter)
+static u64 xen_amd_read_pmc(int counter)
 {
 	struct xen_pmu_amd_ctxt *ctxt;
 	uint64_t *counter_regs;
@@ -354,11 +319,12 @@ static unsigned long long xen_amd_read_pmc(int counter)
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
 	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
+		u32 msr;
+		u64 val;
 
 		msr = amd_counters_base + (counter * amd_msr_step);
-		return native_read_msr_safe(msr, &err);
+		native_read_msr_safe(msr, &val);
+		return val;
 	}
 
 	ctxt = &xenpmu_data->pmu.c.amd;
@@ -366,7 +332,7 @@ static unsigned long long xen_amd_read_pmc(int counter)
 	return counter_regs[counter];
 }
 
-static unsigned long long xen_intel_read_pmc(int counter)
+static u64 xen_intel_read_pmc(int counter)
 {
 	struct xen_pmu_intel_ctxt *ctxt;
 	uint64_t *fixed_counters;
@@ -375,15 +341,16 @@ static unsigned long long xen_intel_read_pmc(int counter)
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
 	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
+		u32 msr;
+		u64 val;
 
 		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
 			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
 		else
 			msr = MSR_IA32_PERFCTR0 + counter;
 
-		return native_read_msr_safe(msr, &err);
+		native_read_msr_safe(msr, &val);
+		return val;
 	}
 
 	ctxt = &xenpmu_data->pmu.c.intel;
@@ -396,7 +363,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 	return arch_cntr_pair[counter].counter;
 }
 
-unsigned long long xen_read_pmc(int counter)
+u64 xen_read_pmc(int counter)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return xen_amd_read_pmc(counter);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c3db71d96c43..3823e52aef52 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,9 +37,6 @@
 
 #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
 
-/* Number of pages released from the initial allocation. */
-unsigned long xen_released_pages;
-
 /* Memory map would allow PCI passthrough. */
 bool xen_pv_pci_possible;
 
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 688ff59318ae..9bb8ff8bff30 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
 		return rc;
 
 	xen_pmu_init(cpu);
-	mc_percpu_init(cpu);
 
 	/*
 	 * Why is this a BUG? If the hypercall fails then everything can be
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 77a6ea1c60e4..ba2f17e64321 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -13,6 +13,7 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/fixmap.h>
+#include <asm/msr.h>
 
 #include "xen-ops.h"
 
@@ -39,7 +40,7 @@ void xen_arch_post_suspend(int cancelled)
 static void xen_vcpu_notify_restore(void *data)
 {
 	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
-		wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+		wrmsrq(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
 
 	/* Boot processor notified via generic timekeeping_resume() */
 	if (smp_processor_id() == 0)
@@ -55,9 +56,9 @@ static void xen_vcpu_notify_suspend(void *data)
 	tick_suspend_local();
 
 	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
-		rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+		rdmsrq(MSR_IA32_SPEC_CTRL, tmp);
 		this_cpu_write(spec_ctrl, tmp);
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		wrmsrq(MSR_IA32_SPEC_CTRL, 0);
 	}
 }
 
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 109af12f7647..461bb1526502 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -226,9 +226,7 @@ SYM_CODE_END(xen_early_idt_handler_array)
 	push %rax
 	mov $__HYPERVISOR_iret, %eax
 	syscall		/* Do the IRET. */
-#ifdef CONFIG_MITIGATION_SLS
-	int3
-#endif
+	ud2		/* The SYSCALL should never return. */
 .endm
 
 SYM_CODE_START(xen_iret)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 63c13a2ccf55..090349baec09 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
  */
 struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
 
-/* Do percpu data initialization for multicalls. */
-void mc_percpu_init(unsigned int cpu);
-
 extern bool is_xen_pmu;
 
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
@@ -274,10 +271,9 @@ void xen_pmu_finish(int cpu);
 static inline void xen_pmu_init(int cpu) {}
 static inline void xen_pmu_finish(int cpu) {}
 #endif
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read);
 int pmu_apic_update(uint32_t reg);
-unsigned long long xen_read_pmc(int counter);
+u64 xen_read_pmc(int counter);
 
 #ifdef CONFIG_SMP
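Most of the churn in enlighten_pv.c, pmu.c and xen-ops.h above follows the reworked x86 MSR accessor interface: values travel as a single u64 rather than a low/high pair (so the ((u64)high << 32) | low folding disappears from the Xen callees), and the safe read variant now returns the error code while the value comes back through a pointer. A small stand-alone C sketch of the two calling conventions follows; the helper names and the dummy MSR value are made up for illustration and are not the kernel's API.

#include <stdint.h>
#include <stdio.h>

/* Old convention: error comes back via an out-parameter, writes take 32-bit halves. */
static uint64_t msr_read_old(uint32_t msr, int *err)
{
	(void)msr;
	*err = 0;
	return 0x1122334455667788ULL;			/* stand-in for the RDMSR result */
}

static void msr_write_old(uint32_t msr, uint32_t low, uint32_t high)
{
	uint64_t val = ((uint64_t)high << 32) | low;	/* callee re-folds the halves */
	(void)msr;
	(void)val;
}

/* New convention: full u64 in and out, error code as the return value. */
static int msr_read_new(uint32_t msr, uint64_t *val)
{
	(void)msr;
	*val = 0x1122334455667788ULL;			/* stand-in for the RDMSR result */
	return 0;
}

static int msr_write_new(uint32_t msr, uint64_t val)
{
	(void)msr;
	(void)val;
	return 0;
}

int main(void)
{
	uint64_t val;
	int err;

	/* Old style: the caller splits the value and passes an error pointer. */
	val = msr_read_old(0x10, &err);
	msr_write_old(0x10, (uint32_t)val, (uint32_t)(val >> 32));

	/* New style: the caller passes the whole value and checks the return code. */
	err = msr_read_new(0x10, &val);
	err |= msr_write_new(0x10, val);

	printf("val=%#llx err=%d\n", (unsigned long long)val, err);
	return err;
}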
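The multicalls.c hunks replace the per-CPU mc_debug_data pointer that had to be seeded on every CPU via mc_percpu_init() with a single get_mc_debug() accessor that falls back to the static early buffer until the per-CPU allocation has been published. A plain user-space sketch of that fallback pattern (the names below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct dbg_buf {
	unsigned long entries;
};

static struct dbg_buf early_buf;	/* always available, used before allocation */
static struct dbg_buf *dynamic_buf;	/* NULL until the real buffer exists */

/* Mirror of the accessor idea: fall back to the early buffer when needed. */
static struct dbg_buf *get_dbg(void)
{
	if (!dynamic_buf)
		return &early_buf;

	return dynamic_buf;
}

int main(void)
{
	get_dbg()->entries++;		/* early use lands in the static buffer */

	dynamic_buf = calloc(1, sizeof(*dynamic_buf));
	if (!dynamic_buf)
		return 1;		/* on failure, callers keep using early_buf */

	get_dbg()->entries++;		/* later use lands in the allocated buffer */

	printf("early=%lu dynamic=%lu\n", early_buf.entries, dynamic_buf->entries);
	free(dynamic_buf);
	return 0;
}

This is also why the smp_pv.c hunk can drop the mc_percpu_init() call from the CPU bring-up path: nothing per-CPU has to be initialized before the accessor is usable.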