Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/copypage_64.S     | 10 ++++++++++
-rw-r--r--  arch/powerpc/lib/copypage_power7.S |  4 ++--
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S |  8 ++++----
-rw-r--r--  arch/powerpc/lib/hweight_64.S      |  8 ++++----
-rw-r--r--  arch/powerpc/lib/memcmp_64.S       |  4 ++--
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S   |  6 +++---
-rw-r--r--  arch/powerpc/lib/pmem.c            |  7 -------
7 files changed, 25 insertions(+), 22 deletions(-)
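
Note: most hunks below wrap direct branches to C helpers (enter_vmx_ops, exit_vmx_usercopy, __sw_hweight8, ...) in CFUNC(). A minimal sketch of how such a macro could be defined, assuming it lives in a header like arch/powerpc/include/asm/ppc_asm.h and that pcrel kernels want the @notoc call variant (the macro body is an assumption, not part of this diff):

    #ifdef CONFIG_PPC_KERNEL_PCREL
    /* pcrel kernels do not maintain the TOC pointer in r2, so calls
     * into C use the @notoc form and skip the TOC save/restore. */
    #define CFUNC(name)	name@notoc
    #else
    /* TOC-based kernels branch to the symbol directly, as before. */
    #define CFUNC(name)	name
    #endif

Under that assumption, "bl CFUNC(enter_vmx_ops)" assembles to "bl enter_vmx_ops@notoc" on pcrel builds and to the old "bl enter_vmx_ops" otherwise, which matches the mechanical nature of the substitutions below.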
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 6812cb19d04a..5d09a029b556 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -18,8 +18,18 @@ FTR_SECTION_ELSE
 #endif
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	ori	r5,r5,PAGE_SIZE@l
+#ifdef CONFIG_PPC_KERNEL_PCREL
+	/*
+	 * Hack for toolchain - prefixed instructions cause label difference to
+	 * be non-constant even if 8 byte alignment is known, so they can not
+	 * be put in FTR sections.
+	 */
+	LOAD_REG_ADDR(r10, ppc64_caches)
+BEGIN_FTR_SECTION
+#else
 BEGIN_FTR_SECTION
 	LOAD_REG_ADDR(r10, ppc64_caches)
+#endif
 	lwz	r11,DCACHEL1LOGBLOCKSIZE(r10)	/* log2 of cache block size */
 	lwz	r12,DCACHEL1BLOCKSIZE(r10)	/* get cache block size */
 	li	r9,0
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a9844c6353cf..a783973f1215 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -45,7 +45,7 @@ _GLOBAL(copypage_power7)
 	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_ops
+	bl	CFUNC(enter_vmx_ops)
 	cmpwi	r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -88,7 +88,7 @@ _GLOBAL(copypage_power7)
 	addi	r3,r3,128
 	bdnz	1b
 
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)	/* tail call optimise */
 
 #else
 	li	r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 28f0be523c06..ac41053c3a5a 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -47,7 +47,7 @@
 	ld	r15,STK_REG(R15)(r1)
 	ld	r14,STK_REG(R14)(r1)
 .Ldo_err3:
-	bl	exit_vmx_usercopy
+	bl	CFUNC(exit_vmx_usercopy)
 	ld	r0,STACKFRAMESIZE+16(r1)
 	mtlr	r0
 	b	.Lexit
@@ -272,7 +272,7 @@ err1;	stb	r0,0(r3)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_usercopy
+	bl	CFUNC(enter_vmx_usercopy)
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -488,7 +488,7 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	exit_vmx_usercopy	/* tail call optimise */
+	b	CFUNC(exit_vmx_usercopy)	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -691,5 +691,5 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	exit_vmx_usercopy	/* tail call optimise */
+	b	CFUNC(exit_vmx_usercopy)	/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 6effad901ef7..09af29561314 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -14,7 +14,7 @@
 
 _GLOBAL(__arch_hweight8)
 BEGIN_FTR_SECTION
-	b __sw_hweight8
+	b CFUNC(__sw_hweight8)
 	nop
 	nop
 FTR_SECTION_ELSE
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(__arch_hweight8)
 
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
-	b __sw_hweight16
+	b CFUNC(__sw_hweight16)
 	nop
 	nop
 	nop
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(__arch_hweight16)
 
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
-	b __sw_hweight32
+	b CFUNC(__sw_hweight32)
 	nop
 	nop
 	nop
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(__arch_hweight32)
 
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
-	b __sw_hweight64
+	b CFUNC(__sw_hweight64)
 	nop
 	nop
 	nop
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 384218df71ba..0b9b1685a33d 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -44,7 +44,7 @@
 	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
 	std	r0,16(r1); \
 	stdu	r1,-STACKFRAMESIZE(r1); \
-	bl	enter_vmx_ops; \
+	bl	CFUNC(enter_vmx_ops); \
 	cmpwi	cr1,r3,0; \
 	ld	r0,STACKFRAMESIZE+16(r1); \
 	ld	r3,STK_REG(R31)(r1); \
@@ -60,7 +60,7 @@
 	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
 	std	r0,16(r1); \
 	stdu	r1,-STACKFRAMESIZE(r1); \
-	bl	exit_vmx_ops; \
+	bl	CFUNC(exit_vmx_ops); \
 	ld	r0,STACKFRAMESIZE+16(r1); \
 	ld	r3,STK_REG(R31)(r1); \
 	ld	r4,STK_REG(R30)(r1); \
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 54f226333c94..9398b2b746c4 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -218,7 +218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	enter_vmx_ops
+	bl	CFUNC(enter_vmx_ops)
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STK_REG(R31)(r1)
@@ -433,7 +433,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -637,5 +637,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-	b	exit_vmx_ops		/* tail call optimise */
+	b	CFUNC(exit_vmx_ops)	/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index eb2919ddf9b9..4e724c4c01ad 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -85,10 +85,3 @@ void memcpy_flushcache(void *dest, const void *src, size_t size)
 	clean_pmem_range(start, start + size);
 }
 EXPORT_SYMBOL(memcpy_flushcache);
-
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
-		size_t len)
-{
-	memcpy_flushcache(to, page_to_virt(page) + offset, len);
-}
-EXPORT_SYMBOL(memcpy_page_flushcache);
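
Note: the copypage_64.S hunk above hoists LOAD_REG_ADDR(r10, ppc64_caches) out of the feature section on pcrel builds. A sketch of why, assuming LOAD_REG_ADDR expands to a single prefixed load-address instruction under CONFIG_PPC_KERNEL_PCREL (this macro body is an assumption for illustration, not part of this diff):

    #ifdef CONFIG_PPC_KERNEL_PCREL
    /* One 8-byte prefixed instruction; its size is what breaks label
     * arithmetic inside BEGIN_FTR_SECTION/FTR_SECTION_ELSE blocks. */
    #define LOAD_REG_ADDR(reg, name)	pla	reg,name@pcrel
    #else
    /* Two 4-byte instructions forming the address from the TOC base. */
    #define LOAD_REG_ADDR(reg, name)	\
    	addis	reg,r2,name@toc@ha;	\
    	addi	reg,reg,name@toc@l
    #endif

Feature-section fixup tables are emitted as label differences that the assembler must fold to constants; per the comment in the hunk, a prefixed instruction keeps such differences non-constant even at known 8-byte alignment, so the symbol load is performed unconditionally before the section begins.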