author    | Brian Gerst <brgerst@gmail.com> | 2025-03-14 11:12:14 -0400
committer | Ingo Molnar <mingo@kernel.org>  | 2025-03-19 11:18:58 +0100
commit    | 1ab7b5ed44ba9bce581e225f40219b793bc779d6
tree      | 95f68bc3ff60be56e27d1dc37899a92c08f1a57d
parent    | 4476e7f81467b2c1b0aea7383e657181a3f9e652
x86/xen: Move Xen upcall handler
Move the Xen PV event channel upcall handler from the common entry code (arch/x86/entry/common.c) to the Xen-specific arch/x86/xen/enlighten_pv.c.
No functional changes.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
-rw-r--r-- | arch/x86/entry/common.c     | 72
-rw-r--r-- | arch/x86/xen/enlighten_pv.c | 69
2 files changed, 69 insertions(+), 72 deletions(-)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3514bf2978ee..ce4d88eda693 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -21,11 +21,6 @@
 #include <linux/uaccess.h>
 #include <linux/init.h>
 
-#ifdef CONFIG_XEN_PV
-#include <xen/xen-ops.h>
-#include <xen/events.h>
-#endif
-
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/traps.h>
@@ -455,70 +450,3 @@ SYSCALL_DEFINE0(ni_syscall)
 {
 	return -ENOSYS;
 }
-
-#ifdef CONFIG_XEN_PV
-#ifndef CONFIG_PREEMPTION
-/*
- * Some hypercalls issued by the toolstack can take many 10s of
- * seconds. Allow tasks running hypercalls via the privcmd driver to
- * be voluntarily preempted even if full kernel preemption is
- * disabled.
- *
- * Such preemptible hypercalls are bracketed by
- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
- * calls.
- */
-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
-
-/*
- * In case of scheduling the flag must be cleared and restored after
- * returning from schedule as the task might move to a different CPU.
- */
-static __always_inline bool get_and_clear_inhcall(void)
-{
-	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
-
-	__this_cpu_write(xen_in_preemptible_hcall, false);
-	return inhcall;
-}
-
-static __always_inline void restore_inhcall(bool inhcall)
-{
-	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
-}
-#else
-static __always_inline bool get_and_clear_inhcall(void) { return false; }
-static __always_inline void restore_inhcall(bool inhcall) { }
-#endif
-
-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	inc_irq_stat(irq_hv_callback_count);
-
-	xen_evtchn_do_upcall();
-
-	set_irq_regs(old_regs);
-}
-
-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
-	irqentry_state_t state = irqentry_enter(regs);
-	bool inhcall;
-
-	instrumentation_begin();
-	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
-
-	inhcall = get_and_clear_inhcall();
-	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
-		irqentry_exit_cond_resched();
-		instrumentation_end();
-		restore_inhcall(inhcall);
-	} else {
-		instrumentation_end();
-		irqentry_exit(regs, state);
-	}
-}
-#endif /* CONFIG_XEN_PV */
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5e57835e999d..dcc2041f8e61 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -73,6 +73,7 @@
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/cpu.h>
+#include <asm/irq_stack.h>
 #ifdef CONFIG_X86_IOPL_IOPERM
 #include <asm/io_bitmap.h>
 #endif
@@ -94,6 +95,44 @@ void *xen_initial_gdt;
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
 static int xen_cpu_dead_pv(unsigned int cpu);
 
+#ifndef CONFIG_PREEMPTION
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+/*
+ * In case of scheduling the flag must be cleared and restored after
+ * returning from schedule as the task might move to a different CPU.
+ */
+static __always_inline bool get_and_clear_inhcall(void)
+{
+	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+
+	__this_cpu_write(xen_in_preemptible_hcall, false);
+	return inhcall;
+}
+
+static __always_inline void restore_inhcall(bool inhcall)
+{
+	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
+}
+
+#else
+
+static __always_inline bool get_and_clear_inhcall(void) { return false; }
+static __always_inline void restore_inhcall(bool inhcall) { }
+
+#endif
+
 struct tls_descs {
 	struct desc_struct desc[3];
 };
@@ -687,6 +726,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
 }
 #endif
 
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	inc_irq_stat(irq_hv_callback_count);
+
+	xen_evtchn_do_upcall();
+
+	set_irq_regs(old_regs);
+}
+
+__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+	irqentry_state_t state = irqentry_enter(regs);
+	bool inhcall;
+
+	instrumentation_begin();
+	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+
+	inhcall = get_and_clear_inhcall();
+	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+		irqentry_exit_cond_resched();
+		instrumentation_end();
+		restore_inhcall(inhcall);
+	} else {
+		instrumentation_end();
+		irqentry_exit(regs, state);
+	}
+}
+
 struct trap_array_entry {
 	void (*orig)(void);
 	void (*xen)(void);
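For context on the flag the moved handler consumes: a preemptible hypercall is bracketed by xen_preemptible_hcall_begin()/xen_preemptible_hcall_end(), which set and clear the per-CPU xen_in_preemptible_hcall flag that get_and_clear_inhcall() reads. The sketch below shows that pairing; it is modeled on the helpers in include/xen/xen-ops.h and the privcmd ioctl path in drivers/xen/privcmd.c, and issue_toolstack_hypercall() is a hypothetical wrapper, not a real kernel function.

/*
 * Illustrative sketch, not verbatim kernel source.  Shows how the
 * per-CPU xen_in_preemptible_hcall flag read by get_and_clear_inhcall()
 * above gets set on the hypercall side.  Modeled on
 * include/xen/xen-ops.h and drivers/xen/privcmd.c;
 * issue_toolstack_hypercall() is hypothetical.
 */
#include <xen/xen-ops.h>	/* xen_preemptible_hcall_begin()/end() */
#include <asm/xen/hypercall.h>	/* privcmd_call() */

static long issue_toolstack_hypercall(unsigned int op, unsigned long arg)
{
	long ret;

	/*
	 * Mark this CPU as running a preemptible hypercall.  If an
	 * event channel upcall interrupts the (potentially tens of
	 * seconds long) hypercall, xen_pv_evtchn_do_upcall() sees the
	 * flag via get_and_clear_inhcall() and may reschedule through
	 * irqentry_exit_cond_resched() even on !CONFIG_PREEMPTION
	 * kernels.
	 */
	xen_preemptible_hcall_begin();
	ret = privcmd_call(op, arg, 0, 0, 0, 0);
	xen_preemptible_hcall_end();

	return ret;
}

The clear-then-restore dance in get_and_clear_inhcall()/restore_inhcall() exists because the flag is per-CPU, not per-task: the handler clears the flag before calling irqentry_exit_cond_resched(), and after scheduling the task may resume on a different CPU, where restore_inhcall() writes the flag back.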