Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
-rw-r--r--	arch/powerpc/include/asm/atomic.h	53
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 486ab7889121..47228b177478 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -27,14 +27,22 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
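
The pattern in the two hunks above is worth spelling out: with CONFIG_PPC_KERNEL_PREFIXED (-mprefixed), GCC may choose an addressing mode whose displacement only a prefixed instruction can encode, and the "m<>" constraint plus the %U/%X operand modifiers cannot express that in a plain lwz/stw template. The "b" constraint instead pins the address in a base register so the template can hard-code a zero offset. A minimal standalone sketch of the two constraint styles (hypothetical function names, PowerPC-only, e.g. built with powerpc64le-linux-gnu-gcc -O2):

static inline int load32_fixed(const int *p)
{
	int t;

	/* "b": the address sits in a base register; displacement is literally 0 */
	__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(p));
	return t;
}

static inline int load32_flexible(const int *p)
{
	int t;

	/*
	 * "m<>": GCC picks the addressing mode; %U1/%X1 append the
	 * update/indexed suffixes to match whatever it chose.
	 */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(*p));
	return t;
}

Both compile to a single 32-bit load; the difference is only how much freedom the compiler gets when forming the address, which is exactly what the fallback restricts.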
#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
@@ -130,35 +138,6 @@ ATOMIC_OPS(xor, xor, "", K)
#define arch_atomic_xchg_relaxed(v, new) \
arch_xchg_relaxed(&((v)->counter), (new))
-/*
- * Don't want to override the generic atomic_try_cmpxchg_acquire, because
- * we add a lock hint to the lwarx, which may not be wanted for the
- * _acquire case (and is not used by the other _acquire variants so it
- * would be a surprise).
- */
-static __always_inline bool
-arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- unsigned int eh = IS_ENABLED(CONFIG_PPC64);
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%2,%[eh] # atomic_try_cmpxchg_acquire \n"
-" cmpw 0,%0,%3 \n"
-" bne- 2f \n"
-" stwcx. %4,0,%2 \n"
-" bne- 1b \n"
-"\t" PPC_ACQUIRE_BARRIER " \n"
-"2: \n"
- : "=&r" (r), "+m" (v->counter)
- : "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
- : "cr0", "memory");
-
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-
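
The helper removed here existed only to pass the EH lock hint to lwarx (enabled on 64-bit via IS_ENABLED(CONFIG_PPC64)); its comment explains why that hint was kept out of the generic _acquire variant. With it gone, callers get the kernel's generic try_cmpxchg fallback, which is built on cmpxchg. Roughly, that fallback has this shape (a sketch of the generic pattern, not code from this patch):

static __always_inline bool
try_cmpxchg_acquire_sketch(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	/* cmpxchg returns the value it found; success iff it matched 'old' */
	r = arch_atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;	/* hand back the value actually observed */
	return likely(r == o);
}

The semantics are the same as the removed asm: on failure, *old is updated to the current counter value so a retry loop can recompute 'new' without an extra read.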
/**
* atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
@@ -226,14 +205,22 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
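
The 64-bit read/set get the same treatment as the 32-bit pair, with ld/std in place of lwz/stw. Semantically each is a single, non-torn volatile access; in plain kernel C the same intent would be written with READ_ONCE()/WRITE_ONCE(), roughly as below (a sketch for comparison, not this file's implementation):

static inline s64 atomic64_read_sketch(const atomic64_t *v)
{
	/* one 64-bit load, never torn on 64-bit PowerPC */
	return READ_ONCE(v->counter);
}

static inline void atomic64_set_sketch(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}

The inline asm form pins down the exact instruction emitted, which is also why the -mprefixed offset problem shows up here at all.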
#define ATOMIC64_OP(op, asm_op) \