Diffstat (limited to 'arch/riscv/include/asm/uaccess.h')
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 218
1 file changed, 163 insertions(+), 55 deletions(-)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index fee56b0c8058..d472da4450e6 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -62,6 +62,19 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigne
__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
/*
+ * This is the smallest unsigned integer type that can fit a value
+ * (up to 'long long')
+ */
+#define __inttype(x) __typeof__( \
+ __typefits(x, char, \
+ __typefits(x, short, \
+ __typefits(x, int, \
+ __typefits(x, long, 0ULL)))))
+
+#define __typefits(x, type, not) \
+ __builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
+
+/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
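[The __typefits()/__inttype() pair added above resolves, at compile time, to the narrowest unsigned type at least as wide as x; the nested __builtin_choose_expr calls in macro arguments expand fine because recursion protection only applies to a macro inside its own replacement list. A minimal userspace sketch, with the two macros copied from the hunk and invented _Static_asserts showing what the chain picks:

#define __inttype(x) __typeof__( \
	__typefits(x, char, \
	  __typefits(x, short, \
	    __typefits(x, int, \
	      __typefits(x, long, 0ULL)))))

#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)

/* illustrative checks, not part of the patch */
_Static_assert(sizeof(__inttype((char)0)) == sizeof(unsigned char), "");
_Static_assert(sizeof(__inttype((short)0)) == sizeof(unsigned short), "");
_Static_assert(sizeof(__inttype(0)) == sizeof(unsigned int), "");
_Static_assert(sizeof(__inttype(0LL)) == sizeof(unsigned long long), "");

int main(void) { return 0; }
]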
@@ -83,27 +96,58 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigne
* call.
*/
-#define __get_user_asm(insn, x, ptr, err) \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_user_asm(insn, x, ptr, label) \
+ asm_goto_output( \
+ "1:\n" \
+ " " insn " %0, %1\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0) \
+ : "=&r" (x) \
+ : "m" (*(ptr)) : : label)
+#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+#define __get_user_asm(insn, x, ptr, label) \
do { \
- __typeof__(x) __x; \
+ long __gua_err = 0; \
__asm__ __volatile__ ( \
"1:\n" \
" " insn " %1, %2\n" \
"2:\n" \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
- : "+r" (err), "=&r" (__x) \
+ : "+r" (__gua_err), "=&r" (x) \
: "m" (*(ptr))); \
- (x) = __x; \
+ if (__gua_err) \
+ goto label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
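[The asm_goto_output() form above ties an output register to a branch target, so the success path needs no error-flag test at all; only the exception-table entry can actually divert a faulting load to the label. A riscv64 userspace sketch of the same shape, assuming a toolchain with asm-goto-with-outputs (GCC 11+/recent Clang); without an extable the label is never taken:

#include <stdio.h>

static long load_word(const int *p)
{
	int x;

	asm goto(
		"1:\n"
		"	lw %0, %1\n"
		/* the kernel emits _ASM_EXTABLE_UACCESS_ERR(1b, %l2, %0) here */
		: "=&r" (x)
		: "m" (*p)
		: : fault);
	return x;
fault:
	return -14;	/* stands in for -EFAULT */
}

int main(void)
{
	int v = 42;

	printf("%ld\n", load_word(&v));
	return 0;
}
]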
#ifdef CONFIG_64BIT
-#define __get_user_8(x, ptr, err) \
- __get_user_asm("ld", x, ptr, err)
+#define __get_user_8(x, ptr, label) \
+ __get_user_asm("ld", x, ptr, label)
#else /* !CONFIG_64BIT */
-#define __get_user_8(x, ptr, err) \
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_user_8(x, ptr, label) \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u32 __lo, __hi; \
+ asm_goto_output( \
+ "1:\n" \
+ " lw %0, %2\n" \
+ "2:\n" \
+ " lw %1, %3\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, %l4, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, %l4, %0) \
+ : "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]) \
+ : : label); \
+ (x) = (__typeof__(x))((__typeof__((x) - (x)))( \
+ (((u64)__hi << 32) | __lo))); \
+
+#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+#define __get_user_8(x, ptr, label) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
+ long __gu8_err = 0; \
__asm__ __volatile__ ( \
"1:\n" \
" lw %1, %3\n" \
@@ -112,35 +156,62 @@ do { \
"3:\n" \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
- : "+r" (err), "=&r" (__lo), "=r" (__hi) \
+ : "+r" (__gu8_err), "=&r" (__lo), "=r" (__hi) \
: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
- if (err) \
+ if (__gu8_err) { \
__hi = 0; \
- (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+ goto label; \
+ } \
+ (x) = (__typeof__(x))((__typeof__((x) - (x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
#endif /* CONFIG_64BIT */
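[On rv32 the 64-bit case is two lw instructions into __lo/__hi, recombined afterwards; the (__typeof__((x) - (x))) cast routes the composed u64 through x's promoted arithmetic type so truncation behaves like a plain assignment. The composition itself, in portable form with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* little-endian: __ptr[__LSW] is the low word, __ptr[__MSW] the high */
	uint32_t lo = 0x89abcdefu, hi = 0x01234567u;
	uint64_t x = ((uint64_t)hi << 32) | lo;

	printf("%016llx\n", (unsigned long long)x);	/* 0123456789abcdef */
	return 0;
}
]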
-#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
+unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
+ const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
+ const void __user *from, unsigned long n);
+
+#define __get_user_nocheck(x, __gu_ptr, label) \
do { \
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+ !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+ if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
+ goto label; \
+ break; \
+ } \
switch (sizeof(*__gu_ptr)) { \
case 1: \
- __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+ __get_user_asm("lb", (x), __gu_ptr, label); \
break; \
case 2: \
- __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+ __get_user_asm("lh", (x), __gu_ptr, label); \
break; \
case 4: \
- __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+ __get_user_asm("lw", (x), __gu_ptr, label); \
break; \
case 8: \
- __get_user_8((x), __gu_ptr, __gu_err); \
+ __get_user_8((x), __gu_ptr, label); \
break; \
default: \
BUILD_BUG(); \
} \
} while (0)
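[The new leading branch in __get_user_nocheck() diverts misaligned pointers to __asm_copy_from_user_sum_enabled() whenever the platform lacks efficient unaligned access, instead of issuing a scalar load that may trap. IS_ALIGNED() is just a mask test; a standalone sketch of the gate, with a hypothetical helper name:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#define IS_ALIGNED(x, a)	(((x) & ((__typeof__(x))(a) - 1)) == 0)

static bool must_copy_bytewise(const void *ptr, size_t size)
{
	/* e.g. true for an 8-byte access at an address with low bits set */
	return !IS_ALIGNED((uintptr_t)ptr, size);
}

int main(void)
{
	char buf[16];

	return must_copy_bytewise(buf + 1, 8) ? 0 : 1;
}
]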
+#define __get_user_error(x, ptr, err) \
+do { \
+ __label__ __gu_failed; \
+ \
+ __get_user_nocheck(x, ptr, __gu_failed); \
+ err = 0; \
+ break; \
+__gu_failed: \
+ x = 0; \
+ err = -EFAULT; \
+} while (0)
+
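[__get_user_error() is the adapter between the two calling conventions: a block-local __label__ turns the goto-based primitive back into the old error-code contract, including zeroing x on fault. A hypothetical userspace reduction of the same pattern:

#include <stdio.h>

#define fetch_or_jump(x, src, label)	\
do {					\
	if ((src) == NULL)		\
		goto label;		\
	(x) = *(src);			\
} while (0)

static int fetch(int *dst, const int *src)
{
	int err;

	do {
		__label__ gu_failed;

		fetch_or_jump(*dst, src, gu_failed);
		err = 0;
		break;
gu_failed:
		*dst = 0;
		err = -14;	/* -EFAULT */
	} while (0);

	return err;
}

int main(void)
{
	int v, good = 7;

	printf("%d %d\n", fetch(&v, &good), fetch(&v, NULL));
	return 0;
}
]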
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -165,13 +236,16 @@ do { \
({ \
const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
long __gu_err = 0; \
+ __typeof__(x) __gu_val; \
\
__chk_user_ptr(__gu_ptr); \
\
__enable_user_access(); \
- __get_user_nocheck(x, __gu_ptr, __gu_err); \
+ __get_user_error(__gu_val, __gu_ptr, __gu_err); \
__disable_user_access(); \
\
+ (x) = __gu_val; \
+ \
__gu_err; \
})
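[__get_user() itself now reads into a local __gu_val inside the access window and assigns x only after __disable_user_access(); the surrounding GNU statement expression still makes the macro yield __gu_err as its value. A minimal analogue of that shape (names invented):

#include <stdio.h>

#define get_answer(x)			\
({					\
	long __gu_err = 0;		\
	__typeof__(x) __gu_val = 42;	\
					\
	(x) = __gu_val;			\
	__gu_err;			\
})

int main(void)
{
	int v;
	long err = get_answer(v);

	printf("%ld %d\n", err, v);
	return 0;
}
]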
@@ -201,61 +275,73 @@ do { \
((x) = (__force __typeof__(x))0, -EFAULT); \
})
-#define __put_user_asm(insn, x, ptr, err) \
+#define __put_user_asm(insn, x, ptr, label) \
do { \
__typeof__(*(ptr)) __x = x; \
- __asm__ __volatile__ ( \
+ asm goto( \
"1:\n" \
- " " insn " %z2, %1\n" \
- "2:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
- : "+r" (err), "=m" (*(ptr)) \
- : "rJ" (__x)); \
+ " " insn " %z0, %1\n" \
+ _ASM_EXTABLE(1b, %l2) \
+ : : "rJ" (__x), "m"(*(ptr)) : : label); \
} while (0)
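[__put_user_asm() drops the error output entirely: a plain asm goto (no outputs, so no asm-goto-with-outputs support needed) plus an exception-table entry pointing straight at the caller's label. The "rJ" constraint accepts a register or the constant zero, which the %z modifier then prints as the zero register. A riscv64 userspace sketch of the template, minus the extable, with a "memory" clobber added so the compiler sees that *p changed:

static void store_word(int *p, int v)
{
	asm goto(
		"1:\n"
		"	sw %z0, %1\n"
		/* the kernel emits _ASM_EXTABLE(1b, %l2) here */
		: : "rJ" (v), "m" (*p)
		: "memory" : fault);
	return;
fault:
	;	/* unreachable without an extable entry */
}

int main(void)
{
	int x = 1;

	store_word(&x, 0);	/* a literal 0 may be emitted as "sw zero, ..." */
	return x;
}
]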
#ifdef CONFIG_64BIT
-#define __put_user_8(x, ptr, err) \
- __put_user_asm("sd", x, ptr, err)
+#define __put_user_8(x, ptr, label) \
+ __put_user_asm("sd", x, ptr, label)
#else /* !CONFIG_64BIT */
-#define __put_user_8(x, ptr, err) \
+#define __put_user_8(x, ptr, label) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((x)-(x)))(x); \
- __asm__ __volatile__ ( \
+ asm goto( \
"1:\n" \
- " sw %z3, %1\n" \
+ " sw %z0, %2\n" \
"2:\n" \
- " sw %z4, %2\n" \
- "3:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
- _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
- : "+r" (err), \
- "=m" (__ptr[__LSW]), \
- "=m" (__ptr[__MSW]) \
- : "rJ" (__x), "rJ" (__x >> 32)); \
+ " sw %z1, %3\n" \
+ _ASM_EXTABLE(1b, %l4) \
+ _ASM_EXTABLE(2b, %l4) \
+ : : "rJ" (__x), "rJ" (__x >> 32), \
+ "m" (__ptr[__LSW]), \
+ "m" (__ptr[__MSW]) : : label); \
} while (0)
#endif /* CONFIG_64BIT */
-#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
+#define __put_user_nocheck(x, __gu_ptr, label) \
do { \
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+ !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+ __inttype(x) val = (__inttype(x))x; \
+ if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+ goto label; \
+ break; \
+ } \
switch (sizeof(*__gu_ptr)) { \
case 1: \
- __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sb", (x), __gu_ptr, label); \
break; \
case 2: \
- __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sh", (x), __gu_ptr, label); \
break; \
case 4: \
- __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sw", (x), __gu_ptr, label); \
break; \
case 8: \
- __put_user_8((x), __gu_ptr, __pu_err); \
+ __put_user_8((x), __gu_ptr, label); \
break; \
default: \
BUILD_BUG(); \
} \
} while (0)
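[The misaligned-store path above first materializes the value in an __inttype(x) temporary, so __asm_copy_to_user_sum_enabled() copies from a real, correctly sized object rather than from whatever lvalue the caller passed. The same move in portable form, with memcpy standing in for the uaccess copy and the helper macros repeated from the first hunk:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
#define __inttype(x) __typeof__(__typefits(x, char, __typefits(x, short, \
				__typefits(x, int, __typefits(x, long, 0ULL)))))

#define put_unaligned_sketch(ptr, x)			\
do {							\
	__inttype(x) __val = (__inttype(x))(x);		\
							\
	memcpy((ptr), &__val, sizeof(*(ptr)));		\
} while (0)

int main(void)
{
	uint32_t dst;

	put_unaligned_sketch(&dst, 0xdeadbeefu);
	printf("%08x\n", (unsigned)dst);	/* deadbeef */
	return 0;
}
]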
+#define __put_user_error(x, ptr, err) \
+do { \
+ __label__ err_label; \
+ __put_user_nocheck(x, ptr, err_label); \
+ break; \
+err_label: \
+ (err) = -EFAULT; \
+} while (0)
+
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
@@ -286,7 +372,7 @@ do { \
__chk_user_ptr(__gu_ptr); \
\
__enable_user_access(); \
- __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __put_user_error(__val, __gu_ptr, __pu_err); \
__disable_user_access(); \
\
__pu_err; \
@@ -351,23 +437,45 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
}
#define __get_kernel_nofault(dst, src, type, err_label) \
-do { \
- long __kr_err = 0; \
- \
- __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
- if (unlikely(__kr_err)) \
- goto err_label; \
-} while (0)
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), err_label)
#define __put_kernel_nofault(dst, src, type, err_label) \
-do { \
- long __kr_err = 0; \
- \
- __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
- if (unlikely(__kr_err)) \
- goto err_label; \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), err_label)
+
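[Both nofault helpers now just forward the caller's error label, since the underlying *_nocheck macros take a label themselves; the local __kr_err plumbing disappears. A hypothetical kernel-side caller keeps the same contract:

static long read_kernel_long(long *dst, const long *src)
{
	__get_kernel_nofault(dst, src, long, efault);
	return 0;
efault:
	return -EFAULT;
}
]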
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return 0;
+ __enable_user_access();
+ return 1;
+}
+#define user_access_begin user_access_begin
+#define user_access_end __disable_user_access
+
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long enabled) { }
+
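[user_access_begin() bundles the access_ok() check with a single SUM enable, so a run of unsafe_* accesses (defined just below) pays the sstatus CSR toggle once instead of per access; user_access_save()/restore() are no-op stubs here. A hypothetical caller of the new window:

static int put_one_u32(u32 __user *uptr, u32 val)
{
	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(val, uptr, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}
]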
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_put_user(x, ptr, label) \
+ __put_user_nocheck(x, (ptr), label)
+
+#define unsafe_get_user(x, ptr, label) do { \
+ __inttype(*(ptr)) __gu_val; \
+ __get_user_nocheck(__gu_val, (ptr), label); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
+#define unsafe_copy_to_user(_dst, _src, _len, label) \
+ if (__asm_copy_to_user_sum_enabled(_dst, _src, _len)) \
+ goto label;
+
+#define unsafe_copy_from_user(_dst, _src, _len, label) \
+ if (__asm_copy_from_user_sum_enabled(_dst, _src, _len)) \
+ goto label;
+
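[The bulk variants ride the same window via the sum-enabled copy routines; note they expand to a bare if statement, so call sites should not attach an else to them directly. A hypothetical caller:

static int copy_out(void __user *udst, const void *src, size_t len)
{
	if (!user_access_begin(udst, len))
		return -EFAULT;
	unsafe_copy_to_user(udst, src, len, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}
]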
#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */