author	Koakuma <koachan@protonmail.com>	2024-08-08 09:05:00 +0700
committer	Andreas Larsson <andreas@gaisler.com>	2024-11-18 09:59:20 +0100
commit	b6370b338e71cf24c61e33880b8f1a0dd5ad0a44 (patch)
tree	0670959dbf6f7f4c287e3d64f3d4c7f616d6ac69 /arch/sparc
parent	8467d8b282b54d87121f70ce78061af004471d0c (diff)
sparc/vdso: Add helper function for 64-bit right shift on 32-bit target
Add helper function for 64-bit right shift on 32-bit target so that
clang does not emit a runtime library call.

Signed-off-by: Koakuma <koachan@protonmail.com>
Reviewed-by: Andreas Larsson <andreas@gaisler.com>
Link: https://lore.kernel.org/r/20240808-sparc-shr64-v2-1-fd18f1b2cea9@protonmail.com
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
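For context (not part of the patch): on a 32-bit target, a right shift of a
64-bit value by a non-constant amount is typically lowered by the compiler to
a call into its runtime library (e.g. __lshrdi3), and the vDSO has no such
library to link against. A minimal stand-alone sketch of the construct being
avoided, with an illustrative function name and a generic 32-bit build
assumed:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Plain C shift: on a 32-bit target, clang may lower this to a
	 * runtime-library call such as __lshrdi3, which is unavailable
	 * inside the vDSO.  The patch routes the vDSO's 64-bit shifts
	 * through __shr64() instead, which stays inline.
	 */
	static uint64_t shift_plain(uint64_t val, int amt)
	{
		return val >> amt;
	}

	int main(void)
	{
		uint64_t ns = 0x123456789abcdef0ULL;

		printf("%llu\n", (unsigned long long)shift_plain(ns, 10));
		return 0;
	}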
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/vdso/vclock_gettime.c	28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index e794edde6755..79607804ea1b 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv,
}
#ifdef CONFIG_SPARC64
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+ return val >> amt;
+}
+
notrace static __always_inline u64 vread_tick(void)
{
u64 ret;
@@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void)
return ret;
}
#else
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+ u64 ret;
+
+ __asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
+ "srl %L1, 0, %L1\n\t"
+ "or %%g1, %L1, %%g1\n\t"
+ "srlx %%g1, %2, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret)
+ : "r" (val), "r" (amt)
+ : "g1");
+ return ret;
+}
+
notrace static __always_inline u64 vread_tick(void)
{
register unsigned long long ret asm("o4");
@@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
ts->tv_sec = vvar->wall_time_sec;
ns = vvar->wall_time_snsec;
ns += vgetsns(vvar);
- ns >>= vvar->clock.shift;
+ ns = __shr64(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
ts->tv_sec = vvar->wall_time_sec;
ns = vvar->wall_time_snsec;
ns += vgetsns_stick(vvar);
- ns >>= vvar->clock.shift;
+ ns = __shr64(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
ts->tv_sec = vvar->monotonic_time_sec;
ns = vvar->monotonic_time_snsec;
ns += vgetsns(vvar);
- ns >>= vvar->clock.shift;
+ ns = __shr64(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
ts->tv_sec = vvar->monotonic_time_sec;
ns = vvar->monotonic_time_snsec;
ns += vgetsns_stick(vvar);
- ns >>= vvar->clock.shift;
+ ns = __shr64(ns, vvar->clock.shift);
} while (unlikely(vvar_read_retry(vvar, seq)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
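As a reading aid (my sketch, not part of the patch): the 32-bit __shr64()
asm above recombines the %H/%L register halves of val into one 64-bit
quantity, shifts it with srlx, and splits the result back into a register
pair. In portable C terms it computes roughly the following, except that the
asm does the job with v9 instructions and therefore without the
runtime-library call a plain C shift could incur:

	#include <stdint.h>

	/*
	 * C model of the 32-bit __shr64() asm (illustrative only): hi/lo
	 * stand for the %H/%L halves of the 64-bit argument.
	 */
	static uint64_t shr64_model(uint32_t hi, uint32_t lo, int amt)
	{
		uint64_t v = ((uint64_t)hi << 32) | lo;	/* sllx %H1, 32; srl %L1, 0; or */

		return v >> amt;			/* srlx %%g1, %2; split back into %H0/%L0 */
	}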