Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/Kbuild | 1
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h | 19
-rw-r--r--  arch/riscv/include/asm/arch_hweight.h | 6
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h | 2
-rw-r--r--  arch/riscv/include/asm/asm.h | 1
-rw-r--r--  arch/riscv/include/asm/bitops.h | 28
-rw-r--r--  arch/riscv/include/asm/bugs.h | 22
-rw-r--r--  arch/riscv/include/asm/cacheflush.h | 15
-rw-r--r--  arch/riscv/include/asm/checksum.h | 3
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 40
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 9
-rw-r--r--  arch/riscv/include/asm/csr.h | 15
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 3
-rw-r--r--  arch/riscv/include/asm/ftrace.h | 50
-rw-r--r--  arch/riscv/include/asm/futex.h | 4
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 3
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 5
-rw-r--r--  arch/riscv/include/asm/hwprobe.h | 5
-rw-r--r--  arch/riscv/include/asm/insn-def.h | 3
-rw-r--r--  arch/riscv/include/asm/io.h | 4
-rw-r--r--  arch/riscv/include/asm/kgdb.h | 9
-rw-r--r--  arch/riscv/include/asm/kvm_aia.h | 3
-rw-r--r--  arch/riscv/include/asm/kvm_host.h | 24
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi.h | 4
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_vector.h | 6
-rw-r--r--  arch/riscv/include/asm/page.h | 27
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 68
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 2
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 55
-rw-r--r--  arch/riscv/include/asm/ptrace.h | 18
-rw-r--r--  arch/riscv/include/asm/runtime-const.h | 268
-rw-r--r--  arch/riscv/include/asm/suspend.h | 4
-rw-r--r--  arch/riscv/include/asm/switch_to.h | 2
-rw-r--r--  arch/riscv/include/asm/syscall.h | 26
-rw-r--r--  arch/riscv/include/asm/tlb.h | 18
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 3
-rw-r--r--  arch/riscv/include/asm/vdso.h | 2
-rw-r--r--  arch/riscv/include/asm/vdso/arch_data.h (renamed from arch/riscv/include/asm/vdso/time_data.h) | 8
-rw-r--r--  arch/riscv/include/asm/vdso/gettimeofday.h | 14
-rw-r--r--  arch/riscv/include/asm/vdso/vsyscall.h | 9
-rw-r--r--  arch/riscv/include/asm/vector.h | 222
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/thead.h | 47
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h | 19
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h | 37
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h | 1
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h | 12
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 9
-rw-r--r--  arch/riscv/include/uapi/asm/vendor/thead.h | 3
48 files changed, 869 insertions, 289 deletions
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index de13d5a234f8..bd5fc9403295 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -4,6 +4,7 @@ syscall-y += syscall_table_64.h
generic-y += early_ioremap.h
generic-y += flat.h
+generic-y += fprobe.h
generic-y += kvm_para.h
generic-y += mmzone.h
generic-y += mcs_spinlock.h
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 721ec275ce57..231d777d936c 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -115,24 +115,19 @@
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, ...) \
- ALTERNATIVE_CFG old_c
-
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
- old_c "\n"
+#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
+#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#endif /* __ASSEMBLY__ */
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
-#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
index 613769b9cdc9..0e7cdbbec8ef 100644
--- a/arch/riscv/include/asm/arch_hweight.h
+++ b/arch/riscv/include/asm/arch_hweight.h
@@ -19,7 +19,7 @@
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
-#ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -50,7 +50,7 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#if BITS_PER_LONG == 64
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
-# ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -64,7 +64,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
return w;
legacy:
-# endif
+#endif
return __sw_hweight64(w);
}
#else /* BITS_PER_LONG == 64 */
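
These hunks only tighten the guard to require CONFIG_TOOLCHAIN_HAS_ZBB alongside CONFIG_RISCV_ISA_ZBB (the ".option arch,+zbb" directives need assembler support). The surrounding pattern is an asm-goto ALTERNATIVE that boot-time patching turns from "j legacy" into "nop" on Zbb harts. A condensed sketch of that shape, assuming the kernel's ALTERNATIVE machinery and the __sw_hweight32 fallback (the helper name is illustrative; rv64 shown, rv32 would use cpop instead of cpopw):

	static __always_inline unsigned int hweight32_sketch(unsigned int w)
	{
	#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
		/* Patched to "nop" at boot when the hart implements Zbb. */
		asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, RISCV_ISA_EXT_ZBB, 1)
			 : : : : legacy);

		asm(".option push\n"
		    ".option arch,+zbb\n"
		    "cpopw %0, %0\n"		/* population count of the 32-bit value */
		    ".option pop\n"
		    : "+r" (w));
		return w;

	legacy:
	#endif
		return __sw_hweight32(w);	/* generic software fallback */
	}
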
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index cd627ec289f1..bfc8ea5f9319 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -52,6 +52,8 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
+asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs);
+asmlinkage void ret_from_fork_user(struct pt_regs *regs);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 776354895b81..a8a2af6dfe9d 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -27,6 +27,7 @@
#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
+#define SRLI __REG_SEL(srliw, srli)
#if __SIZEOF_POINTER__ == 8
#ifdef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index fae152ea0508..d59310f74c2b 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -15,7 +15,7 @@
#include <asm/barrier.h>
#include <asm/bitsperlong.h>
-#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
+#if !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
@@ -175,7 +175,7 @@ legacy:
variable_fls(x_); \
})
-#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+#endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE) */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
@@ -226,9 +226,9 @@ legacy:
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation may be reordered on other architectures than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
@@ -238,9 +238,9 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to clear
* @addr: Address to count from
*
- * This operation can be reordered on other architectures other than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
arch_clear_bit_unlock(nr, addr);
}
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
diff --git a/arch/riscv/include/asm/bugs.h b/arch/riscv/include/asm/bugs.h
new file mode 100644
index 000000000000..17ca0a947730
--- /dev/null
+++ b/arch/riscv/include/asm/bugs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for riscv vulnerabilities.
+ *
+ * Copyright (C) 2024 Rivos Inc.
+ */
+
+#ifndef __ASM_BUGS_H
+#define __ASM_BUGS_H
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ UNAFFECTED,
+ MITIGATED,
+ VULNERABLE,
+};
+
+void ghostwrite_set_vulnerable(void);
+bool ghostwrite_enable_mitigation(void);
+enum mitigation_state ghostwrite_get_state(void);
+
+#endif /* __ASM_BUGS_H */
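
The comment above the enum stresses that ordering is important; a plausible reason (an assumption, not stated in the patch) is that callers can compare states numerically, with UNAFFECTED < MITIGATED < VULNERABLE. A hypothetical helper built on that ordering:

	/* Hypothetical: pick the worst of two states, relying purely on enum order. */
	static enum mitigation_state worst_state(enum mitigation_state a,
						 enum mitigation_state b)
	{
		return a > b ? a : b;
	}
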
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8de73f91bfa3..b59ffeb668d6 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
flush_dcache_folio(page_folio(page));
}
-/*
- * RISC-V doesn't have an instruction to flush parts of the instruction cache,
- * so instead we just flush the whole thing.
- */
-#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_page(vma, pg, addr, len) \
do { \
if (vma->vm_flags & VM_EXEC) \
@@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ flush_icache_all();
+}
+
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
void riscv_init_cbo_blocksizes(void);
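
The flush_icache_range() macro becomes a self-defining static inline, placed after the flush_icache_all() declarations it expands to. The "#define flush_icache_range flush_icache_range" idiom keeps the asm-generic fallback out of the way while giving callers a real, type-checked function. Roughly how the generic-header side of that idiom looks (a sketch of the asm-generic convention, not part of this patch):

	/* Generic-header side (sketch): provide a no-op fallback only when the
	 * architecture has not defined flush_icache_range to its own name. */
	#ifndef flush_icache_range
	static inline void flush_icache_range(unsigned long start, unsigned long end)
	{
	}
	#endif
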
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index 88e6f1499e88..da378856f1d5 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -49,8 +49,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
* ZBB only saves three instructions on 32-bit and five on 64-bit so not
* worth checking if supported without Alternatives.
*/
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
- IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
unsigned long fold_temp;
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 4cadc56220fe..2ec119eb147b 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -231,7 +231,7 @@
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
- __ret, __ptr, (long), __old, __new); \
+ __ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
@@ -365,16 +365,48 @@ static __always_inline void __cmpwait(volatile void *ptr,
{
unsigned long tmp;
+ u32 *__ptr32b;
+ ulong __s, __val, __mask;
+
asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
0, RISCV_ISA_EXT_ZAWRS, 1)
: : : : no_zawrs);
switch (size) {
case 1:
- fallthrough;
+ __ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+ __s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
+ __val = val << __s;
+ __mask = 0xff << __s;
+
+ asm volatile(
+ " lr.w %0, %1\n"
+ " and %0, %0, %3\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(__ptr32b))
+ : "r" (__val), "r" (__mask)
+ : "memory");
+ break;
case 2:
- /* RISC-V doesn't have lr instructions on byte and half-word. */
- goto no_zawrs;
+ __ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+ __s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
+ __val = val << __s;
+ __mask = 0xffff << __s;
+
+ asm volatile(
+ " lr.w %0, %1\n"
+ " and %0, %0, %3\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(__ptr32b))
+ : "r" (__val), "r" (__mask)
+ : "memory");
+ break;
case 4:
asm volatile(
" lr.w %0, %1\n"
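
The new 1-byte and 2-byte __cmpwait cases emulate sub-word lr on top of lr.w: the pointer is rounded down to its containing 32-bit word, and the low address bits provide a shift and mask that isolate the sub-word (the halfword case only needs bit 1, since halfwords are naturally aligned). A stand-alone sketch of the same address math for the byte case (illustrative only, not kernel code):

	#include <stdint.h>

	/* Mirrors the lr.w/and/xor sequence above: returns nonzero once the byte
	 * at ptr no longer holds val, i.e. it is no longer worth entering wrs.nto. */
	static int byte_changed(const volatile uint8_t *ptr, uint8_t val)
	{
		const volatile uint32_t *word =
			(const volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
		unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;	/* 0, 8, 16 or 24 (little-endian) */
		uint32_t mask = 0xffu << shift;				/* selects the byte inside the word */
		uint32_t want = (uint32_t)val << shift;			/* expected value, shifted into place */

		return ((*word & mask) ^ want) != 0;
	}
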
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 4bd054c54c21..f56b409361fb 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -34,6 +34,8 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
+extern u32 thead_vlenb_of;
+
void __init riscv_user_isa_enable(void);
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
@@ -54,6 +56,9 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
ARRAY_SIZE(_bundled_exts), NULL)
+#define __RISCV_ISA_EXT_BUNDLE_VALIDATE(_name, _bundled_exts, _validate) \
+ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
+ ARRAY_SIZE(_bundled_exts), _validate)
/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
@@ -61,7 +66,7 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-bool check_unaligned_access_emulated_all_cpus(void);
+bool __init check_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
@@ -74,7 +79,7 @@ static inline bool unaligned_ctl_available(void)
}
#endif
-bool check_vector_unaligned_access_emulated_all_cpus(void);
+bool __init check_vector_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
DECLARE_PER_CPU(long, vector_misaligned_access);
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 37bdea65bbd8..6fed42e37705 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -30,6 +30,12 @@
#define SR_VS_CLEAN _AC(0x00000400, UL)
#define SR_VS_DIRTY _AC(0x00000600, UL)
+#define SR_VS_THEAD _AC(0x01800000, UL) /* xtheadvector Status */
+#define SR_VS_OFF_THEAD _AC(0x00000000, UL)
+#define SR_VS_INITIAL_THEAD _AC(0x00800000, UL)
+#define SR_VS_CLEAN_THEAD _AC(0x01000000, UL)
+#define SR_VS_DIRTY_THEAD _AC(0x01800000, UL)
+
#define SR_XS _AC(0x00018000, UL) /* Extension Status */
#define SR_XS_OFF _AC(0x00000000, UL)
#define SR_XS_INITIAL _AC(0x00008000, UL)
@@ -315,6 +321,15 @@
#define CSR_STIMECMP 0x14D
#define CSR_STIMECMPH 0x15D
+/* xtheadvector symbolic CSR names */
+#define CSR_VXSAT 0x9
+#define CSR_VXRM 0xa
+
+/* xtheadvector CSR masks */
+#define CSR_VXRM_MASK 3
+#define CSR_VXRM_SHIFT 1
+#define CSR_VXSAT_MASK 1
+
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 7c8a71a526a3..6e426ed7919a 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -25,7 +25,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_GHOSTWRITE 2
+#define ERRATA_THEAD_NUMBER 3
#endif
#ifdef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 3d66437a1029..d627f63ee289 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -79,7 +79,6 @@ struct dyn_arch_ftrace {
#define AUIPC_RA (0x00000097)
#define JALR_T0 (0x000282e7)
#define AUIPC_T0 (0x00000297)
-#define NOP4 (0x00000013)
#define to_jalr_t0(offset) \
(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
@@ -92,7 +91,7 @@ struct dyn_arch_ftrace {
#define make_call_t0(caller, callee, call) \
do { \
unsigned int offset = \
- (unsigned long) callee - (unsigned long) caller; \
+ (unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_t0(offset); \
call[1] = to_jalr_t0(offset); \
} while (0)
@@ -108,7 +107,7 @@ do { \
#define make_call_ra(caller, callee, call) \
do { \
unsigned int offset = \
- (unsigned long) callee - (unsigned long) caller; \
+ (unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_ra(offset); \
call[1] = to_jalr_ra(offset); \
} while (0)
@@ -168,6 +167,11 @@ static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct
return arch_ftrace_regs(fregs)->sp;
}
+static __always_inline unsigned long ftrace_regs_get_frame_pointer(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->s0;
+}
+
static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs,
unsigned int n)
{
@@ -181,6 +185,11 @@ static __always_inline unsigned long ftrace_regs_get_return_value(const struct f
return arch_ftrace_regs(fregs)->a0;
}
+static __always_inline unsigned long ftrace_regs_get_return_address(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->ra;
+}
+
static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs,
unsigned long ret)
{
@@ -192,6 +201,20 @@ static __always_inline void ftrace_override_function_with_return(struct ftrace_r
arch_ftrace_regs(fregs)->epc = arch_ftrace_regs(fregs)->ra;
}
+static __always_inline struct pt_regs *
+ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs);
+
+ memcpy(&regs->a_regs, afregs->args, sizeof(afregs->args));
+ regs->epc = afregs->epc;
+ regs->ra = afregs->ra;
+ regs->sp = afregs->sp;
+ regs->s0 = afregs->s0;
+ regs->t1 = afregs->t1;
+ return regs;
+}
+
int ftrace_regs_query_register_offset(const char *name);
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -208,25 +231,4 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsi
#endif /* CONFIG_DYNAMIC_FTRACE */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- unsigned long a1;
- unsigned long a0;
- unsigned long s0;
- unsigned long ra;
-};
-
-static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->a0;
-}
-
-static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->s0;
-}
-#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
-#endif
-
#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index fc8130f995c1..90c86b115e00 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -85,7 +85,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__enable_user_access();
__asm__ __volatile__ (
- "1: lr.w.aqrl %[v],%[u] \n"
+ "1: lr.w %[v],%[u] \n"
" bne %[v],%z[ov],3f \n"
"2: sc.w.aqrl %[t],%z[nv],%[u] \n"
" bnez %[t],1b \n"
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
- : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();
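
Two things change in the inline asm above: the exclusive load drops its .aqrl ordering suffix, and oldval is passed through (long)(int). The cast matters because lr.w on rv64 sign-extends the loaded 32-bit word into the register, so a zero-extended u32 with bit 31 set would never compare equal; the same reasoning presumably applies to the (long)(int)(long) cast added in the cmpxchg.h hunk earlier. A minimal stand-alone illustration (not kernel code, LP64 assumed):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t oldval = 0x80000000u;

		long loaded = (int32_t)oldval;		/* what lr.w leaves in a register */
		long zext   = (long)oldval;		/* 0x0000000080000000 - mismatches */
		long sext   = (long)(int32_t)oldval;	/* 0xffffffff80000000 - matches  */

		printf("%d %d\n", loaded == zext, loaded == sext);	/* prints: 0 1 */
		return 0;
	}
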
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index faf3624d8057..446126497768 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 869da082252a..e3cbf203cdde 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -100,6 +100,11 @@
#define RISCV_ISA_EXT_ZICCRSE 91
#define RISCV_ISA_EXT_SVADE 92
#define RISCV_ISA_EXT_SVADU 93
+#define RISCV_ISA_EXT_ZFBFMIN 94
+#define RISCV_ISA_EXT_ZVFBFMIN 95
+#define RISCV_ISA_EXT_ZVFBFWMA 96
+#define RISCV_ISA_EXT_ZAAMO 97
+#define RISCV_ISA_EXT_ZALRSC 98
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 1ce1df6d0ff3..1f690fea0e03 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _ASM_HWPROBE_H
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 10
+#define RISCV_HWPROBE_MAX_KEY 12
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
@@ -21,6 +21,7 @@ static inline bool hwprobe_key_is_bitmask(__s64 key)
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
case RISCV_HWPROBE_KEY_IMA_EXT_0:
case RISCV_HWPROBE_KEY_CPUPERF_0:
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
return true;
}
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 9a913010cdd9..71060a2f838e 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -199,5 +199,8 @@
#define RISCV_PAUSE ".4byte 0x100000f"
#define ZAWRS_WRS_NTO ".4byte 0x00d00073"
#define ZAWRS_WRS_STO ".4byte 0x01d00073"
+#define RISCV_NOP4 ".4byte 0x00000013"
+
+#define RISCV_INSN_NOP4 _AC(0x00000013, U)
#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1c5c641075d2..a0e51840b9db 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -136,8 +136,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#include <asm-generic/io.h>
#ifdef CONFIG_MMU
-#define arch_memremap_wb(addr, size) \
- ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#define arch_memremap_wb(addr, size, flags) \
+ ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL)))
#endif
#endif /* _ASM_RISCV_IO_H */
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
index 46677daf708b..cc11c4544cff 100644
--- a/arch/riscv/include/asm/kgdb.h
+++ b/arch/riscv/include/asm/kgdb.h
@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
+void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
-static inline void arch_kgdb_breakpoint(void)
-{
- asm(".global kgdb_compiled_break\n"
- ".option norvc\n"
- "kgdb_compiled_break: ebreak\n"
- ".option rvc\n");
-}
-
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 1f37b600ca47..3b643b9efc07 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
/* CPU AIA CSR context of Guest VCPU */
struct kvm_vcpu_aia_csr guest_csr;
- /* CPU AIA CSR context upon Guest VCPU reset */
- struct kvm_vcpu_aia_csr guest_reset_csr;
-
/* Guest physical address of IMSIC for this VCPU */
gpa_t imsic_addr;
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 35eab6e0f4ae..85cfebc32e4c 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -87,6 +87,11 @@ struct kvm_vcpu_stat {
u64 csr_exit_kernel;
u64 signal_exits;
u64 exits;
+ u64 instr_illegal_exits;
+ u64 load_misaligned_exits;
+ u64 store_misaligned_exits;
+ u64 load_access_exits;
+ u64 store_access_exits;
};
struct kvm_arch_memory_slot {
@@ -114,6 +119,9 @@ struct kvm_arch {
/* AIA Guest/VM context */
struct kvm_aia aia;
+
+ /* KVM_CAP_RISCV_MP_STATE_RESET */
+ bool mp_state_reset;
};
struct kvm_cpu_trap {
@@ -188,6 +196,12 @@ struct kvm_vcpu_smstateen_csr {
unsigned long sstateen0;
};
+struct kvm_vcpu_reset_state {
+ spinlock_t lock;
+ unsigned long pc;
+ unsigned long a1;
+};
+
struct kvm_vcpu_arch {
/* VCPU ran at least once */
bool ran_atleast_once;
@@ -222,12 +236,8 @@ struct kvm_vcpu_arch {
/* CPU Smstateen CSR context of Guest VCPU */
struct kvm_vcpu_smstateen_csr smstateen_csr;
- /* CPU context upon Guest VCPU reset */
- struct kvm_cpu_context guest_reset_context;
- spinlock_t reset_cntx_lock;
-
- /* CPU CSR context upon Guest VCPU reset */
- struct kvm_vcpu_csr guest_reset_csr;
+ /* CPU reset state of Guest VCPU */
+ struct kvm_vcpu_reset_state reset_state;
/*
* VCPU interrupts
@@ -296,8 +306,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index b96705258cf9..439ab2b3534f 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -55,6 +55,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
u32 type, u64 flags);
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+ unsigned long pc, unsigned long a1);
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
@@ -85,6 +88,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
index 27f5bccdd8b0..57a798a4cb0d 100644
--- a/arch/riscv/include/asm/kvm_vcpu_vector.h
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -33,8 +33,7 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
unsigned long *isa);
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
#else
@@ -62,8 +61,7 @@ static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cn
{
}
-static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *cntx)
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
{
return 0;
}
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 125f5ecd9565..572a141ddecd 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -24,21 +24,22 @@
* When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary).
*/
-#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
-#define PAGE_OFFSET kernel_map.page_offset
-#else
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-#endif
-/*
- * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
- * define the PAGE_OFFSET value for SV48 and SV39.
- */
+#ifdef CONFIG_64BIT
+#define PAGE_OFFSET_L5 _AC(0xff60000000000000, UL)
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL)
+#ifdef CONFIG_XIP_KERNEL
+#define PAGE_OFFSET PAGE_OFFSET_L3
#else
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#define PAGE_OFFSET kernel_map.page_offset
+#endif /* CONFIG_XIP_KERNEL */
+#else
+#define PAGE_OFFSET _AC(0xc0000000, UL)
#endif /* CONFIG_64BIT */
+#else
+#define PAGE_OFFSET ((unsigned long)phys_ram_base)
+#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
@@ -95,14 +96,9 @@ typedef struct page *pgtable_t;
#define MIN_MEMBLOCK_ADDR 0
#endif
-#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
-#else
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-#endif /* CONFIG_MMU */
struct kernel_mapping {
- unsigned long page_offset;
unsigned long virt_addr;
unsigned long virt_offset;
uintptr_t phys_addr;
@@ -116,6 +112,7 @@ struct kernel_mapping {
uintptr_t xiprom;
uintptr_t xiprom_sz;
#else
+ unsigned long page_offset;
unsigned long va_kernel_pa_offset;
#endif
};
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index f52264304f77..770ce18a7328 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -12,18 +12,9 @@
#include <asm/tlb.h>
#ifdef CONFIG_MMU
-#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
-static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
-{
- if (riscv_use_sbi_for_rfence())
- tlb_remove_ptdesc(tlb, pt);
- else
- tlb_remove_page_ptdesc(tlb, pt);
-}
-
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -88,15 +79,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
}
}
-#define pud_alloc_one pud_alloc_one
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l4_enabled)
- return __pud_alloc_one(mm, addr);
-
- return NULL;
-}
-
#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
@@ -107,46 +89,15 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
- if (pgtable_l4_enabled) {
- struct ptdesc *ptdesc = virt_to_ptdesc(pud);
-
- pagetable_pud_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
- }
-}
-
-#define p4d_alloc_one p4d_alloc_one
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l5_enabled) {
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
- }
-
- return NULL;
-}
-
-static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
-#define p4d_free p4d_free
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (pgtable_l5_enabled)
- __p4d_free(mm, p4d);
+ if (pgtable_l4_enabled)
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr)
{
if (pgtable_l5_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -161,9 +112,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd = __pgd_alloc(mm, 0);
if (likely(pgd != NULL)) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */
sync_kernel_mappings(pgd);
}
@@ -175,10 +125,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
-
- pagetable_pmd_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -186,10 +133,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- struct ptdesc *ptdesc = page_ptdesc(pte);
-
- pagetable_pte_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 0897dd99ab8d..188fadc1c21f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -262,8 +262,6 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
return __page_val_to_pfn(pmd_val(pmd));
}
-#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 050fdc49b5ad..f19240fd018e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -12,7 +12,11 @@
#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
-#define KERNEL_LINK_ADDR PAGE_OFFSET
+#ifdef CONFIG_RELOCATABLE
+#define KERNEL_LINK_ADDR UL(0)
+#else
+#define KERNEL_LINK_ADDR _AC(CONFIG_PHYS_RAM_BASE, UL)
+#endif
#define KERN_VIRT_SIZE (UL(-1))
#else
@@ -339,7 +343,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+#define pte_pgprot pte_pgprot
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
static inline int pte_present(pte_t pte)
{
@@ -674,6 +684,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
@@ -699,6 +714,18 @@ static inline unsigned long pud_pfn(pud_t pud)
return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ return pte_pgprot(pmd_pte(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ return pte_pgprot(pud_pte(pud));
+}
+
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
@@ -768,6 +795,30 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pte_special(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pte_special(pud_pte(pud));
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pte_pud(pte_mkspecial(pud_pte(pud)));
+}
+#endif
+
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index b5b0adcc85c1..2910231977cb 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -23,14 +23,16 @@ struct pt_regs {
unsigned long t2;
unsigned long s0;
unsigned long s1;
- unsigned long a0;
- unsigned long a1;
- unsigned long a2;
- unsigned long a3;
- unsigned long a4;
- unsigned long a5;
- unsigned long a6;
- unsigned long a7;
+ struct_group(a_regs,
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ );
unsigned long s2;
unsigned long s3;
unsigned long s4;
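
Wrapping a0..a7 in struct_group(a_regs, ...) gives the block a single addressable name, which the new ftrace_partial_regs() above uses to memcpy all eight argument registers at once, without changing how the individual fields are accessed. Roughly how the macro expands (a sketch of the include/linux/stddef.h behaviour, surrounding fields abbreviated):

	#include <string.h>

	/* struct_group() expands to an anonymous union holding the members twice:
	 * once anonymously so p->a0 keeps working, and once as a tagged struct so
	 * &p->a_regs / sizeof(p->a_regs) span the whole group. */
	struct pt_regs_sketch {
		unsigned long s1;
		union {
			struct {
				unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
			};
			struct {
				unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
			} a_regs;
		};
		unsigned long s2;
	};

	/* One memcpy now moves all eight argument registers, as ftrace_partial_regs() does. */
	static void copy_args(struct pt_regs_sketch *dst, const unsigned long src[8])
	{
		memcpy(&dst->a_regs, src, sizeof(dst->a_regs));
	}
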
diff --git a/arch/riscv/include/asm/runtime-const.h b/arch/riscv/include/asm/runtime-const.h
new file mode 100644
index 000000000000..451fd76b8811
--- /dev/null
+++ b/arch/riscv/include/asm/runtime-const.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_RUNTIME_CONST_H
+#define _ASM_RISCV_RUNTIME_CONST_H
+
+#include <asm/asm.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/insn-def.h>
+#include <linux/memory.h>
+#include <asm/text-patching.h>
+
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_32BIT
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret; \
+ asm_inline(".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "addi %[__ret],%[__ret],-0x211\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret)); \
+ __ret; \
+})
+#else
+/*
+ * Loading 64-bit constants into a register from immediates is a non-trivial
+ * task on riscv64. To get it somewhat performant, load 32 bits into two
+ * different registers and then combine the results.
+ *
+ * If the processor supports the Zbkb extension, we can combine the final
+ * "slli,slli,srli,add" into the single "pack" instruction. If the processor
+ * doesn't support Zbkb but does support the Zbb extension, we can
+ * combine the final "slli,srli,add" into one instruction "add.uw".
+ */
+#define RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "lui %[__tmp],0x1234\n\t" \
+ "addiw %[__ret],%[__ret],-0x211\n\t" \
+ "addiw %[__tmp],%[__tmp],0x567\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_BASE \
+ "slli %[__tmp],%[__tmp],32\n\t" \
+ "slli %[__ret],%[__ret],32\n\t" \
+ "srli %[__ret],%[__ret],32\n\t" \
+ "add %[__ret],%[__ret],%[__tmp]\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_ZBA \
+ ".option push\n\t" \
+ ".option arch,+zba\n\t" \
+ ".option norvc\n\t" \
+ "slli %[__tmp],%[__tmp],32\n\t" \
+ "add.uw %[__ret],%[__ret],%[__tmp]\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".option pop\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_ZBKB \
+ ".option push\n\t" \
+ ".option arch,+zbkb\n\t" \
+ ".option norvc\n\t" \
+ "pack %[__ret],%[__ret],%[__tmp]\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".option pop\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+
+#if defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA) \
+ && defined(CONFIG_RISCV_ISA_ZBKB)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE_2( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBA, \
+ 0, RISCV_ISA_EXT_ZBA, 1, \
+ RISCV_RUNTIME_CONST_64_ZBKB, \
+ 0, RISCV_ISA_EXT_ZBKB, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#elif defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBA, \
+ 0, RISCV_ISA_EXT_ZBA, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#elif defined(CONFIG_RISCV_ISA_ZBKB)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBKB, \
+ 0, RISCV_ISA_EXT_ZBKB, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#else
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ RISCV_RUNTIME_CONST_64_BASE \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#endif
+#endif
+
+#define runtime_const_shift_right_32(val, sym) \
+({ \
+ u32 __ret; \
+ asm_inline(".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ SRLI " %[__ret],%[__val],12\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret) \
+ : [__val] "r" (val)); \
+ __ret; \
+})
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+static inline void __runtime_fixup_caches(void *where, unsigned int insns)
+{
+ /* On riscv there are currently only cache-wide flushes so va is ignored. */
+ __always_unused uintptr_t va = (uintptr_t)where;
+
+ flush_icache_range(va, va + 4 * insns);
+}
+
+/*
+ * The 32-bit immediate is stored in a lui+addi pairing.
+ * lui holds the upper 20 bits of the immediate in the first 20 bits of the instruction.
+ * addi holds the lower 12 bits of the immediate in the first 12 bits of the instruction.
+ */
+static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, unsigned int val)
+{
+ unsigned int lower_immediate, upper_immediate;
+ u32 lui_insn, addi_insn, addi_insn_mask;
+ __le32 lui_res, addi_res;
+
+ /* Mask out upper 12 bit of addi */
+ addi_insn_mask = 0x000fffff;
+
+ lui_insn = (u32)le16_to_cpu(lui_parcel[0]) | (u32)le16_to_cpu(lui_parcel[1]) << 16;
+ addi_insn = (u32)le16_to_cpu(addi_parcel[0]) | (u32)le16_to_cpu(addi_parcel[1]) << 16;
+
+ lower_immediate = sign_extend32(val, 11);
+ upper_immediate = (val - lower_immediate);
+
+ if (upper_immediate & 0xfffff000) {
+ /* replace upper 20 bits of lui with upper immediate */
+ lui_insn &= 0x00000fff;
+ lui_insn |= upper_immediate & 0xfffff000;
+ } else {
+ /* replace lui with nop if immediate is small enough to fit in addi */
+ lui_insn = RISCV_INSN_NOP4;
+ /*
+ * lui is being skipped, so do a load instead of an add. A load
+ * is performed by adding with the x0 register. Setting rs to
+ * zero with the following mask will accomplish this goal.
+ */
+ addi_insn_mask &= 0x07fff;
+ }
+
+ if (lower_immediate & 0x00000fff) {
+ /* replace upper 12 bits of addi with lower 12 bits of val */
+ addi_insn &= addi_insn_mask;
+ addi_insn |= (lower_immediate & 0x00000fff) << 20;
+ } else {
+ /* replace addi with nop if lower_immediate is empty */
+ addi_insn = RISCV_INSN_NOP4;
+ }
+
+ addi_res = cpu_to_le32(addi_insn);
+ lui_res = cpu_to_le32(lui_insn);
+ mutex_lock(&text_mutex);
+ patch_insn_write(addi_parcel, &addi_res, sizeof(addi_res));
+ patch_insn_write(lui_parcel, &lui_res, sizeof(lui_res));
+ mutex_unlock(&text_mutex);
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+#ifdef CONFIG_32BIT
+ __runtime_fixup_32(where, where + 4, val);
+ __runtime_fixup_caches(where, 2);
+#else
+ __runtime_fixup_32(where, where + 8, val);
+ __runtime_fixup_32(where + 4, where + 12, val >> 32);
+ __runtime_fixup_caches(where, 4);
+#endif
+}
+
+/*
+ * Replace the least significant 5 bits of the srli/srliw immediate that is
+ * located at bits 20-24
+ */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ __le16 *parcel = where;
+ __le32 res;
+ u32 insn;
+
+ insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
+
+ insn &= 0xfe0fffff;
+ insn |= (val & 0b11111) << 20;
+
+ res = cpu_to_le32(insn);
+ mutex_lock(&text_mutex);
+ patch_text_nosync(where, &res, sizeof(insn));
+ mutex_unlock(&text_mutex);
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif /* _ASM_RISCV_RUNTIME_CONST_H */
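
__runtime_fixup_32() above splits the target value exactly the way a lui/addi pair needs it: the low 12 bits are sign-extended for the addi, and the remainder (whose low 12 bits are then zero) goes into the lui, so lui + addi reproduces the value. The 32-bit placeholder in the preamble follows the same split (0x89abd000 - 0x211 = 0x89abcdef). A small stand-alone check of that arithmetic (illustrative only; sext12 mirrors sign_extend32(val, 11)):

	#include <stdint.h>
	#include <stdio.h>

	/* Sign-extend the low 12 bits, like sign_extend32(val, 11). */
	static int32_t sext12(uint32_t v)
	{
		return (int32_t)(v << 20) >> 20;
	}

	int main(void)
	{
		uint32_t val = 0x89abcdef;		/* the preamble's placeholder constant */
		int32_t  lo  = sext12(val);		/* -0x211: becomes the addi immediate   */
		uint32_t hi  = val - (uint32_t)lo;	/* 0x89abd000: low 12 bits are now zero */

		/* lui writes hi into bits 31:12, addi adds the signed low part back. */
		printf("0x%08x\n", hi + (uint32_t)lo);	/* prints 0x89abcdef */
		return 0;
	}
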
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 4ffb022b097f..dc5782b5fbad 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -18,6 +18,10 @@ struct suspend_context {
unsigned long ie;
#ifdef CONFIG_MMU
unsigned long satp;
+ unsigned long stimecmp;
+#if __riscv_xlen < 64
+ unsigned long stimecmph;
+#endif
#endif
};
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 94e33216b2d9..0e71eb82f920 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -117,7 +117,7 @@ do { \
__set_prev_cpu(__prev->thread); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
- if (has_vector()) \
+ if (has_vector() || has_xtheadvector()) \
__switch_to_vector(__prev, __next); \
if (switch_to_should_flush_icache(__next)) \
local_flush_icache_all(); \
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 121fff429dce..34313387f977 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -30,6 +30,13 @@ static inline int syscall_get_nr(struct task_struct *task,
return regs->a7;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->a7 = nr;
+}
+
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
@@ -62,8 +69,23 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
- args++;
- memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+ args[1] = regs->a1;
+ args[2] = regs->a2;
+ args[3] = regs->a3;
+ args[4] = regs->a4;
+ args[5] = regs->a5;
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ regs->a1 = args[1];
+ regs->a2 = args[2];
+ regs->a3 = args[3];
+ regs->a4 = args[4];
+ regs->a5 = args[5];
}
static inline int syscall_get_arch(struct task_struct *task)
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1f6c38420d8e..50b63b5c15bd 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,24 +10,6 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
-#ifdef CONFIG_MMU
-#include <linux/swap.h>
-
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
-{
- free_page_and_swap_cache(table);
-}
-
-#endif /* CONFIG_MMU */
-
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 72e559934952..ce0dd0fed764 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -60,8 +60,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr);
+ struct mm_struct *mm, unsigned long start, unsigned long end);
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index f891478829a5..c130d8100232 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -14,7 +14,7 @@
*/
#ifdef CONFIG_MMU
-#define __VVAR_PAGES 2
+#define __VDSO_PAGES 4
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
diff --git a/arch/riscv/include/asm/vdso/time_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index dfa65228999b..da57a3786f7a 100644
--- a/arch/riscv/include/asm/vdso/time_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __RISCV_ASM_VDSO_TIME_DATA_H
-#define __RISCV_ASM_VDSO_TIME_DATA_H
+#ifndef __RISCV_ASM_VDSO_ARCH_DATA_H
+#define __RISCV_ASM_VDSO_ARCH_DATA_H
#include <linux/types.h>
#include <vdso/datapage.h>
#include <asm/hwprobe.h>
-struct arch_vdso_time_data {
+struct vdso_arch_data {
/* Stash static answers to the hwprobe queries when all CPUs are selected. */
__u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
@@ -14,4 +14,4 @@ struct arch_vdso_time_data {
__u8 homogeneous_cpus;
};
-#endif /* __RISCV_ASM_VDSO_TIME_DATA_H */
+#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
index ba3283cf7acc..29164f84f93c 100644
--- a/arch/riscv/include/asm/vdso/gettimeofday.h
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -69,7 +69,7 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
/*
* The purpose of csr_read(CSR_TIME) is to trap the system into
@@ -79,18 +79,6 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return csr_read(CSR_TIME);
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
-}
-#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
index e8a9c4b53c0c..1140b54b4bc8 100644
--- a/arch/riscv/include/asm/vdso/vsyscall.h
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -6,15 +6,6 @@
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-
-static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-
-#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c7c023afbacd..e8a83f55be2b 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -18,6 +18,27 @@
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>
+#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
+
+#define __riscv_v_vstate_or(_val, TYPE) ({ \
+ typeof(_val) _res = _val; \
+ if (has_xtheadvector()) \
+ _res = (_res & ~SR_VS_THEAD) | SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = (_res & ~SR_VS) | SR_VS_##TYPE; \
+ _res; \
+})
+
+#define __riscv_v_vstate_check(_val, TYPE) ({ \
+ bool _res; \
+ if (has_xtheadvector()) \
+ _res = ((_val) & SR_VS_THEAD) == SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = ((_val) & SR_VS) == SR_VS_##TYPE; \
+ _res; \
+})
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
@@ -41,39 +62,62 @@ static __always_inline bool has_vector(void)
return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}
+static __always_inline bool has_xtheadvector_no_alternatives(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_isa_vendor_extension_available(THEAD_VENDOR_ID, XTHEADVECTOR);
+ else
+ return false;
+}
+
+static __always_inline bool has_xtheadvector(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_has_vendor_extension_unlikely(THEAD_VENDOR_ID,
+ RISCV_ISA_VENDOR_EXT_XTHEADVECTOR);
+ else
+ return false;
+}
+
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
+ regs->status = __riscv_v_vstate_or(regs->status, CLEAN);
}
static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+ regs->status = __riscv_v_vstate_or(regs->status, DIRTY);
}
static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
+ regs->status = __riscv_v_vstate_or(regs->status, OFF);
}
static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
+ regs->status = __riscv_v_vstate_or(regs->status, INITIAL);
}
static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
- return (regs->status & SR_VS) != 0;
+ return !__riscv_v_vstate_check(regs->status, OFF);
}
static __always_inline void riscv_v_enable(void)
{
- csr_set(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_set(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_set(CSR_SSTATUS, SR_VS);
}
static __always_inline void riscv_v_disable(void)
{
- csr_clear(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_clear(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_clear(CSR_SSTATUS, SR_VS);
}
static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
@@ -82,10 +126,36 @@ static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
"csrr %0, " __stringify(CSR_VSTART) "\n\t"
"csrr %1, " __stringify(CSR_VTYPE) "\n\t"
"csrr %2, " __stringify(CSR_VL) "\n\t"
- "csrr %3, " __stringify(CSR_VCSR) "\n\t"
- "csrr %4, " __stringify(CSR_VLENB) "\n\t"
: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
- "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
+ "=r" (dest->vcsr) : :);
+
+ if (has_xtheadvector()) {
+ unsigned long status;
+
+ /*
+ * CSR_VCSR is defined as
+ * [2:1] - vxrm[1:0]
+ * [0] - vxsat
+ * The earlier vector spec implemented by T-Head uses separate
+ * registers for the same bit-elements, so just combine those
+ * into the existing output field.
+ *
+ * Additionally T-Head cores need FS to be enabled when accessing
+ * the VXRM and VXSAT CSRs, otherwise ending in illegal instructions.
+ * Though the cores do not implement the VXRM and VXSAT fields in the
+ * FCSR CSR that vector-0.7.1 specifies.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+ dest->vcsr = csr_read(CSR_VXSAT) | csr_read(CSR_VXRM) << CSR_VXRM_SHIFT;
+
+ dest->vlenb = riscv_v_vsize / 32;
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ dest->vcsr = csr_read(CSR_VCSR);
+ dest->vlenb = csr_read(CSR_VLENB);
+ }
}
static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
@@ -96,9 +166,25 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src
"vsetvl x0, %2, %1\n\t"
".option pop\n\t"
"csrw " __stringify(CSR_VSTART) ", %0\n\t"
- "csrw " __stringify(CSR_VCSR) ", %3\n\t"
- : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
- "r" (src->vcsr) :);
+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl));
+
+ if (has_xtheadvector()) {
+ unsigned long status;
+
+ /*
+ * Similar to __vstate_csr_save above, restore values for the
+ * separate VXRM and VXSAT CSRs from the vcsr variable.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+
+ csr_write(CSR_VXRM, (src->vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK);
+ csr_write(CSR_VXSAT, src->vcsr & CSR_VXSAT_MASK);
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ csr_write(CSR_VCSR, src->vcsr);
+ }
}
static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
@@ -108,19 +194,33 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
riscv_v_enable();
__vstate_csr_save(save_to);
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vse8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
riscv_v_disable();
}
@@ -130,19 +230,33 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
unsigned long vl;
riscv_v_enable();
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vle8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vle8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
__vstate_csr_restore(restore_from);
riscv_v_disable();
}
@@ -152,33 +266,41 @@ static inline void __riscv_v_vstate_discard(void)
unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
riscv_v_enable();
+ if (has_xtheadvector())
+ asm volatile (THEAD_VSETVLI_T4X0E8M8D1 : : : "t4");
+ else
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ ".option pop\n\t": "=&r" (vl));
+
asm volatile (
".option push\n\t"
".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
"vmv.v.i v0, -1\n\t"
"vmv.v.i v8, -1\n\t"
"vmv.v.i v16, -1\n\t"
"vmv.v.i v24, -1\n\t"
"vsetvl %0, x0, %1\n\t"
".option pop\n\t"
- : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ : "=&r" (vl) : "r" (vtype_inval));
+
riscv_v_disable();
}
static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_OFF)
- return;
-
- __riscv_v_vstate_discard();
- __riscv_v_vstate_dirty(regs);
+ if (riscv_v_vstate_query(regs)) {
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+ }
}
static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_DIRTY) {
+ if (__riscv_v_vstate_check(regs->status, DIRTY)) {
__riscv_v_vstate_save(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -187,7 +309,7 @@ static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
__riscv_v_vstate_restore(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -196,7 +318,7 @@ static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_set_restore(struct task_struct *task,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
riscv_v_vstate_on(regs);
}
@@ -270,6 +392,8 @@ struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
+static __always_inline bool has_xtheadvector_no_alternatives(void) { return false; }
+static __always_inline bool has_xtheadvector(void) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
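For reference, a minimal sketch of the vcsr packing that the save/restore
comments above describe: with xtheadvector the separate VXRM and VXSAT CSRs
are folded into the single vcsr field using the [2:1] vxrm / [0] vxsat
layout. The shift/mask constants below are illustrative stand-ins for the
definitions in asm/csr.h; the helpers are not part of the patch.

/* Illustrative only, not part of the patch. */
#define CSR_VXRM_SHIFT	1
#define CSR_VXRM_MASK	0x3
#define CSR_VXSAT_MASK	0x1

/* Combine the two T-Head CSR values into one VCSR-shaped word. */
static inline unsigned long thead_pack_vcsr(unsigned long vxrm, unsigned long vxsat)
{
	return (vxsat & CSR_VXSAT_MASK) | ((vxrm & CSR_VXRM_MASK) << CSR_VXRM_SHIFT);
}

/* Split a VCSR-shaped word back into the two T-Head CSR values. */
static inline void thead_unpack_vcsr(unsigned long vcsr,
				     unsigned long *vxrm, unsigned long *vxsat)
{
	*vxrm  = (vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK;
	*vxsat = vcsr & CSR_VXSAT_MASK;
}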
diff --git a/arch/riscv/include/asm/vendor_extensions/thead.h b/arch/riscv/include/asm/vendor_extensions/thead.h
new file mode 100644
index 000000000000..e85c75b3b340
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+
+#include <asm/vendor_extensions.h>
+
+#include <linux/types.h>
+
+/*
+ * Extension keys must be strictly less than RISCV_ISA_VENDOR_EXT_MAX.
+ */
+#define RISCV_ISA_VENDOR_EXT_XTHEADVECTOR 0
+
+extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead;
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void disable_xtheadvector(void);
+#else
+static inline void disable_xtheadvector(void) { }
+#endif
+
+/* Extension specific helpers */
+
+/*
+ * Vector 0.7.1, as used for example on T-Head Xuantie cores, uses an older
+ * encoding for vsetvli (ta, ma vs. d1), so provide the raw encoding for
+ * vsetvli t4, x0, e8, m8, d1
+ */
+#define THEAD_VSETVLI_T4X0E8M8D1 ".long 0x00307ed7\n\t"
+
+/*
+ * While in theory the vector-0.7.1 vsb.v and vlb.v result in the same
+ * encoding as the standard vse8.v and vle8.v, compilers seem to optimize
+ * the call, resulting in a different encoding that uses a value for the
+ * "mop" field which is not part of vector-0.7.1.
+ * So encode specific variants for vstate_save and vstate_restore.
+ */
+#define THEAD_VSB_V_V0T0 ".long 0x02028027\n\t"
+#define THEAD_VSB_V_V8T0 ".long 0x02028427\n\t"
+#define THEAD_VSB_V_V16T0 ".long 0x02028827\n\t"
+#define THEAD_VSB_V_V24T0 ".long 0x02028c27\n\t"
+#define THEAD_VLB_V_V0T0 ".long 0x12028007\n\t"
+#define THEAD_VLB_V_V8T0 ".long 0x12028407\n\t"
+#define THEAD_VLB_V_V16T0 ".long 0x12028807\n\t"
+#define THEAD_VLB_V_V24T0 ".long 0x12028c07\n\t"
+
+#endif
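As a sanity check on the raw encodings above, a sketch of how the
THEAD_VSETVLI_T4X0E8M8D1 word decomposes, assuming the usual vsetvli
instruction format (opcode 0x57, funct3 0b111, rd in bits [11:7], rs1 in
[19:15], vtype immediate in [31:20]) and the vector-0.7.1 vtype layout
(vlmul in [1:0], vsew in [4:2], vediv in [6:5]). THEAD_VSETVLI_ENC is an
illustrative helper, not part of the patch.

#define THEAD_VSETVLI_ENC(rd, rs1, vtype) \
	(0x57u | (0x7u << 12) | ((unsigned int)(rd) << 7) | \
	 ((unsigned int)(rs1) << 15) | ((unsigned int)(vtype) << 20))

/*
 * t4 is x29, rs1 is x0, and e8/m8/d1 gives vsew=0, vlmul=3, vediv=0,
 * i.e. vtype = 0x3, so:
 *
 *   THEAD_VSETVLI_ENC(29, 0, 0x3) == 0x00307ed7
 *
 * which matches THEAD_VSETVLI_T4X0E8M8D1 above.
 */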
diff --git a/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
new file mode 100644
index 000000000000..65a9c5612466
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+
+#include <linux/cpumask.h>
+
+#include <uapi/asm/hwprobe.h>
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus);
+#else
+static inline void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ pair->value = 0;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
new file mode 100644
index 000000000000..6b9293e984a9
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Rivos, Inc
+ */
+
+#ifndef _ASM_RISCV_SYS_HWPROBE_H
+#define _ASM_RISCV_SYS_HWPROBE_H
+
+#include <asm/cpufeature.h>
+
+#define VENDOR_EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_VENDOR_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ } while (false)
+
+/*
+ * Loop through the given cpus and record extensions that 1) any of them
+ * has, and 2) any of them doesn't have.
+ *
+ * _extension_checks is an arbitrary C block to set the values of pair->value
+ * and missing. It should be filled with VENDOR_EXT_KEY expressions.
+ */
+#define VENDOR_EXTENSION_SUPPORTED(pair, cpus, per_hart_vendor_bitmap, _extension_checks) \
+ do { \
+ int cpu; \
+ u64 missing = 0; \
+ for_each_cpu(cpu, (cpus)) { \
+ struct riscv_isavendorinfo *isainfo = &(per_hart_vendor_bitmap)[cpu]; \
+ _extension_checks \
+ } \
+ (pair)->value &= ~missing; \
+ } while (false)
+
+#endif /* _ASM_RISCV_SYS_HWPROBE_H */
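A minimal sketch of how a vendor hwprobe handler is expected to combine
VENDOR_EXTENSION_SUPPORTED and VENDOR_EXT_KEY, mirroring the T-Head handler
declared in thead_hwprobe.h. The per_hart_isa_bitmap member name and the
exact set of includes are assumptions about the surrounding vendor-extension
code, not something this header itself mandates.

#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/thead.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <asm/vendor_extensions/vendor_hwprobe.h>

#include <linux/cpumask.h>
#include <uapi/asm/vendor/thead.h>

void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair,
				    const struct cpumask *cpus)
{
	VENDOR_EXTENSION_SUPPORTED(pair, cpus,
				   riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap, {
		VENDOR_EXT_KEY(XTHEADVECTOR);
	});
}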
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 2f2bb0c84f9a..a5150cdf34d8 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -6,6 +6,7 @@
#define ASM_VENDOR_LIST_H
#define ANDES_VENDOR_ID 0x31e
+#define MICROCHIP_VENDOR_ID 0x029
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 3af142b99f77..3c2fce939673 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _UAPI_ASM_HWPROBE_H
@@ -73,6 +73,14 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47)
#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48)
#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49)
+#define RISCV_HWPROBE_EXT_ZICNTR (1ULL << 50)
+#define RISCV_HWPROBE_EXT_ZIHPM (1ULL << 51)
+#define RISCV_HWPROBE_EXT_ZFBFMIN (1ULL << 52)
+#define RISCV_HWPROBE_EXT_ZVFBFMIN (1ULL << 53)
+#define RISCV_HWPROBE_EXT_ZVFBFWMA (1ULL << 54)
+#define RISCV_HWPROBE_EXT_ZICBOM (1ULL << 55)
+#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56)
+#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -94,6 +102,8 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
+#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11
+#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 3482c9a73d1b..5f59fd226cc5 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -179,6 +179,11 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SSNPM,
KVM_RISCV_ISA_EXT_SVADE,
KVM_RISCV_ISA_EXT_SVADU,
+ KVM_RISCV_ISA_EXT_SVVPTC,
+ KVM_RISCV_ISA_EXT_ZABHA,
+ KVM_RISCV_ISA_EXT_ZICCRSE,
+ KVM_RISCV_ISA_EXT_ZAAMO,
+ KVM_RISCV_ISA_EXT_ZALRSC,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -198,6 +203,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
+ KVM_RISCV_SBI_EXT_SUSP,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -211,9 +217,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24
diff --git a/arch/riscv/include/uapi/asm/vendor/thead.h b/arch/riscv/include/uapi/asm/vendor/thead.h
new file mode 100644
index 000000000000..43790ebe5faf
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/vendor/thead.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR (1 << 0)
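
Taken together with the RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 key added to
uapi/asm/hwprobe.h above, userspace can test for xtheadvector on all online
harts with an ordinary hwprobe call. A minimal sketch, assuming the usual
riscv_hwprobe calling convention (pairs, pair_count, cpusetsize, cpus, flags)
and glibc's syscall(2) wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#include <asm/hwprobe.h>
#include <asm/vendor/thead.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0,
	};

	/* cpusetsize/cpus of 0/NULL means "all online harts" */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	/* the kernel sets key to -1 if it does not recognize the key */
	if (pair.key >= 0 && (pair.value & RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR))
		printf("xtheadvector supported on all harts\n");

	return 0;
}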