 arch/s390/include/asm/fpu-insn.h | 18
 arch/s390/include/asm/fpu.h      | 40
 arch/s390/kernel/fpu.c           | 81
 3 files changed, 67 insertions(+), 72 deletions(-)
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index 64ba3d9bcd14..1ce8e2f9786c 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -36,6 +36,15 @@ asm(".include \"asm/fpu-insn-asm.h\"\n");
* barrier.
*/
+static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
+{
+ instrument_read(reg, sizeof(*reg));
+ asm volatile("ld %[fpr],%[reg]\n"
+ :
+ : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
+ : "memory");
+}
+
/**
* fpu_lfpc_safe - Load floating point control register safely.
* @fpc: new value for floating point control register
@@ -64,5 +73,14 @@ static inline void fpu_lfpc_safe(unsigned int *fpc)
: "memory");
}
+static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
+{
+ instrument_write(reg, sizeof(*reg));
+ asm volatile("std %[fpr],%[reg]\n"
+ : [reg] "=Q" (reg->ui)
+ : [fpr] "I" (fpr)
+ : "memory");
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_FPU_INSN_H */
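For reference, a minimal sketch (not part of the patch) of how the new single-register helpers are meant to be called: each call stores or loads exactly one floating-point register, and the register number must be a compile-time constant because it is fed to the "I" (immediate) asm constraint. The variable name tmp is illustrative.

	/* Hypothetical caller: spill and reload FPR 5 around code that
	 * may clobber it. freg_t comes from the existing fpu headers. */
	freg_t tmp;

	fpu_std(5, &tmp);	/* std 5,tmp - store FPR 5 to memory    */
	/* ... code that clobbers floating-point register 5 ... */
	fpu_ld(5, &tmp);	/* ld  5,tmp - reload FPR 5 from memory */
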
diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h
index eed430c868df..626695de6085 100644
--- a/arch/s390/include/asm/fpu.h
+++ b/arch/s390/include/asm/fpu.h
@@ -84,6 +84,46 @@ void __load_fpu_regs(void);
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
+static __always_inline void save_fp_regs(freg_t *fprs)
+{
+ fpu_std(0, &fprs[0]);
+ fpu_std(1, &fprs[1]);
+ fpu_std(2, &fprs[2]);
+ fpu_std(3, &fprs[3]);
+ fpu_std(4, &fprs[4]);
+ fpu_std(5, &fprs[5]);
+ fpu_std(6, &fprs[6]);
+ fpu_std(7, &fprs[7]);
+ fpu_std(8, &fprs[8]);
+ fpu_std(9, &fprs[9]);
+ fpu_std(10, &fprs[10]);
+ fpu_std(11, &fprs[11]);
+ fpu_std(12, &fprs[12]);
+ fpu_std(13, &fprs[13]);
+ fpu_std(14, &fprs[14]);
+ fpu_std(15, &fprs[15]);
+}
+
+static __always_inline void load_fp_regs(freg_t *fprs)
+{
+ fpu_ld(0, &fprs[0]);
+ fpu_ld(1, &fprs[1]);
+ fpu_ld(2, &fprs[2]);
+ fpu_ld(3, &fprs[3]);
+ fpu_ld(4, &fprs[4]);
+ fpu_ld(5, &fprs[5]);
+ fpu_ld(6, &fprs[6]);
+ fpu_ld(7, &fprs[7]);
+ fpu_ld(8, &fprs[8]);
+ fpu_ld(9, &fprs[9]);
+ fpu_ld(10, &fprs[10]);
+ fpu_ld(11, &fprs[11]);
+ fpu_ld(12, &fprs[12]);
+ fpu_ld(13, &fprs[13]);
+ fpu_ld(14, &fprs[14]);
+ fpu_ld(15, &fprs[15]);
+}
+
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
preempt_disable();
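The sixteen explicit calls in save_fp_regs() and load_fp_regs() above are presumably deliberate rather than a loop: fpu_std() and fpu_ld() pass the register number through the "I" constraint, which requires a compile-time constant, so a loop would only build if the compiler happened to fully unroll it. An illustrative sketch of the pattern being avoided (assumption, not from the patch):

	/* Fragile: relies on the compiler fully unrolling the loop so
	 * that 'i' becomes a constant for the "I" asm constraint. */
	for (int i = 0; i < 16; i++)
		fpu_std(i, &fprs[i]);
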
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 98dc9f593a14..f25c54caf32b 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -22,25 +22,8 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
asm volatile("stfpc %0" : "=Q" (state->fpc));
}
if (!cpu_has_vx()) {
- if (flags & KERNEL_VXR_LOW) {
- /* Save floating-point registers */
- asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
- asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
- asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
- asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
- asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
- asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
- asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
- asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
- asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
- asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
- asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
- asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
- asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
- asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
- asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
- asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
- }
+ if (flags & KERNEL_VXR_LOW)
+ save_fp_regs(state->fprs);
return;
}
/* Test and save vector registers */
@@ -102,25 +85,8 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
asm volatile("lfpc %0" : : "Q" (state->fpc));
}
if (!cpu_has_vx()) {
- if (flags & KERNEL_VXR_LOW) {
- /* Restore floating-point registers */
- asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
- asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
- asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
- asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
- asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
- asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
- asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
- asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
- asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
- asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
- asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
- asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
- asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
- asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
- asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
- asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
- }
+ if (flags & KERNEL_VXR_LOW)
+ load_fp_regs(state->fprs);
return;
}
/* Test and restore (load) vector registers */
@@ -171,8 +137,8 @@ EXPORT_SYMBOL(__kernel_fpu_end);
void __load_fpu_regs(void)
{
- unsigned long *regs = current->thread.fpu.regs;
struct fpu *state = &current->thread.fpu;
+ void *regs = current->thread.fpu.regs;
fpu_lfpc_safe(&state->fpc);
if (likely(cpu_has_vx())) {
@@ -183,22 +149,7 @@ void __load_fpu_regs(void)
: "d" (regs)
: "1", "cc", "memory");
} else {
- asm volatile("ld 0,%0" : : "Q" (regs[0]));
- asm volatile("ld 1,%0" : : "Q" (regs[1]));
- asm volatile("ld 2,%0" : : "Q" (regs[2]));
- asm volatile("ld 3,%0" : : "Q" (regs[3]));
- asm volatile("ld 4,%0" : : "Q" (regs[4]));
- asm volatile("ld 5,%0" : : "Q" (regs[5]));
- asm volatile("ld 6,%0" : : "Q" (regs[6]));
- asm volatile("ld 7,%0" : : "Q" (regs[7]));
- asm volatile("ld 8,%0" : : "Q" (regs[8]));
- asm volatile("ld 9,%0" : : "Q" (regs[9]));
- asm volatile("ld 10,%0" : : "Q" (regs[10]));
- asm volatile("ld 11,%0" : : "Q" (regs[11]));
- asm volatile("ld 12,%0" : : "Q" (regs[12]));
- asm volatile("ld 13,%0" : : "Q" (regs[13]));
- asm volatile("ld 14,%0" : : "Q" (regs[14]));
- asm volatile("ld 15,%0" : : "Q" (regs[15]));
+ load_fp_regs(regs);
}
clear_cpu_flag(CIF_FPU);
}
@@ -213,8 +164,9 @@ EXPORT_SYMBOL(load_fpu_regs);
void save_fpu_regs(void)
{
- unsigned long flags, *regs;
+ unsigned long flags;
struct fpu *state;
+ void *regs;
local_irq_save(flags);
@@ -233,22 +185,7 @@ void save_fpu_regs(void)
: "d" (regs)
: "1", "cc", "memory");
} else {
- asm volatile("std 0,%0" : "=Q" (regs[0]));
- asm volatile("std 1,%0" : "=Q" (regs[1]));
- asm volatile("std 2,%0" : "=Q" (regs[2]));
- asm volatile("std 3,%0" : "=Q" (regs[3]));
- asm volatile("std 4,%0" : "=Q" (regs[4]));
- asm volatile("std 5,%0" : "=Q" (regs[5]));
- asm volatile("std 6,%0" : "=Q" (regs[6]));
- asm volatile("std 7,%0" : "=Q" (regs[7]));
- asm volatile("std 8,%0" : "=Q" (regs[8]));
- asm volatile("std 9,%0" : "=Q" (regs[9]));
- asm volatile("std 10,%0" : "=Q" (regs[10]));
- asm volatile("std 11,%0" : "=Q" (regs[11]));
- asm volatile("std 12,%0" : "=Q" (regs[12]));
- asm volatile("std 13,%0" : "=Q" (regs[13]));
- asm volatile("std 14,%0" : "=Q" (regs[14]));
- asm volatile("std 15,%0" : "=Q" (regs[15]));
+ save_fp_regs(regs);
}
set_cpu_flag(CIF_FPU);
out:
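
For context, a hedged sketch of how a caller uses the kernel FPU begin/end API that this patch touches; the flag combination and function body are illustrative, not taken from the patch:

	/* Illustrative: run kernel code that uses FPRs 0-15 and the
	 * floating point control register. */
	struct kernel_fpu state;

	kernel_fpu_begin(&state, KERNEL_FPC | KERNEL_VXR_LOW);
	/* ... floating point computation ... */
	kernel_fpu_end(&state, KERNEL_FPC | KERNEL_VXR_LOW);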