Diffstat (limited to 'arch/riscv/include/asm/processor.h')
-rw-r--r-- | arch/riscv/include/asm/processor.h | 31 ++++++++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 5f56eb9d114a..24d3af4d3807 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -13,6 +13,9 @@
 #include <vdso/processor.h>
 
 #include <asm/ptrace.h>
+#include <asm/insn-def.h>
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
 
 #define arch_get_mmap_end(addr, len, flags)			\
 ({								\
@@ -52,7 +55,6 @@
 #endif
 
 #ifndef __ASSEMBLY__
-#include <linux/cpumask.h>
 
 struct task_struct;
 struct pt_regs;
@@ -79,6 +81,10 @@ struct pt_regs;
 *    Thus, the task does not own preempt_v. Any use of Vector will have to
 *    save preempt_v, if dirty, and fallback to non-preemptible kernel-mode
 *    Vector.
+ *  - bit 29: The thread voluntarily calls schedule() while holding an active
+ *    preempt_v. All preempt_v context should be dropped in such case because
+ *    V-regs are caller-saved. Only sstatus.VS=ON is persisted across a
+ *    schedule() call.
 *  - bit 30: The in-kernel preempt_v context is saved, and requires to be
 *    restored when returning to the context that owns the preempt_v.
 *  - bit 31: The in-kernel preempt_v context is dirty, as signaled by the
@@ -93,6 +99,7 @@ struct pt_regs;
 #define RISCV_PREEMPT_V			0x00000100
 #define RISCV_PREEMPT_V_DIRTY		0x80000000
 #define RISCV_PREEMPT_V_NEED_RESTORE	0x40000000
+#define RISCV_PREEMPT_V_IN_SCHEDULE	0x20000000
 
 /* CPU-specific state of a task */
 struct thread_struct {
@@ -103,6 +110,7 @@ struct thread_struct {
 	struct __riscv_d_ext_state fstate;
 	unsigned long bad_cause;
 	unsigned long envcfg;
+	unsigned long sum;
 	u32 riscv_v_flags;
 	u32 vstate_ctrl;
 	struct __riscv_v_ext_state vstate;
@@ -136,6 +144,27 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)
 
+#define PREFETCH_ASM(x)						\
+	ALTERNATIVE(__nops(1), PREFETCH_R(x, 0), 0,		\
+		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)
+
+#define PREFETCHW_ASM(x)					\
+	ALTERNATIVE(__nops(1), PREFETCH_W(x, 0), 0,		\
+		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)
+
+#ifdef CONFIG_RISCV_ISA_ZICBOP
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *x)
+{
+	__asm__ __volatile__(PREFETCH_ASM(%0) : : "r" (x) : "memory");
+}
+
+#define ARCH_HAS_PREFETCHW
+static inline void prefetchw(const void *x)
+{
+	__asm__ __volatile__(PREFETCHW_ASM(%0) : : "r" (x) : "memory");
+}
+#endif /* CONFIG_RISCV_ISA_ZICBOP */
 
 /* Do necessary setup to start up a newly executed thread. */
 extern void start_thread(struct pt_regs *regs,
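
For readers unfamiliar with the new helpers: once ARCH_HAS_PREFETCH and ARCH_HAS_PREFETCHW are defined, the generic prefetch()/prefetchw() wrappers in <linux/prefetch.h> resolve to these Zicbop-backed implementations instead of the no-op defaults. The sketch below is illustrative only and not part of the patch; struct item and sum_items() are hypothetical, and it assumes a kernel built with CONFIG_RISCV_ISA_ZICBOP=y.

#include <linux/list.h>
#include <linux/prefetch.h>

/* Hypothetical list node used only for this illustration. */
struct item {
	struct list_head node;
	unsigned long payload;
};

/*
 * Walk a list, hinting the next node into cache while the current
 * one is still being processed.  On cores that do not report the
 * Zicbop extension, the ALTERNATIVE in PREFETCH_ASM leaves a single
 * nop in place of prefetch.r, so the hint degrades to (almost) free.
 */
static unsigned long sum_items(struct list_head *head)
{
	struct item *it;
	unsigned long sum = 0;

	list_for_each_entry(it, head, node) {
		prefetch(it->node.next);	/* read hint: prefetch.r */
		sum += it->payload;
	}
	return sum;
}

prefetchw() is used the same way but emits prefetch.w, telling the core the line is about to be written, which can help before read-modify-write sequences on a contended cacheline.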