From 200026557c848b29b41da527c879892a2db4cead Mon Sep 17 00:00:00 2001
From: Soby Mathew <soby.mathew@arm.com>
Date: Wed, 13 Jul 2016 15:45:15 +0100
Subject: Fix the translation table library for wraparound cases

This patch fixes the translation table library for wraparound cases.
These cases are not expected to occur on AArch64 platforms because
only 48 bits of the 64-bit address space are used. However, they can
occur on AArch32 platforms (see the illustrative C sketch further
below).

Change-Id: Ie7735f7ba2977019381e1c124800381471381499
---
 lib/xlat_tables/xlat_tables_common.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 33784c2d..bc7fed78 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -289,17 +289,17 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
 		if (!mm->size)
 			return attr; /* Reached end of list */

-		if (mm->base_va >= base_va + size)
+		if (mm->base_va > base_va + size - 1)
 			return attr; /* Next region is after area so end */

-		if (mm->base_va + mm->size <= base_va)
+		if (mm->base_va + mm->size - 1 < base_va)
 			continue; /* Next region has already been overtaken */

 		if (mm->attr == attr)
 			continue; /* Region doesn't override attribs so skip */

 		if (mm->base_va > base_va ||
-			mm->base_va + mm->size < base_va + size)
+			mm->base_va + mm->size - 1 < base_va + size - 1)
 			return -1; /* Region doesn't fully cover our area */

 		attr = mm->attr;
@@ -328,7 +328,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		if (!mm->size) {
 			/* Done mapping regions; finish zeroing the table */
 			desc = INVALID_DESC;
-		} else if (mm->base_va + mm->size <= base_va) {
+		} else if (mm->base_va + mm->size - 1 < base_va) {
 			/* This area is after the region so get next region */
 			++mm;
 			continue;
@@ -337,7 +337,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		debug_print("%s VA:%p size:0x%x ", get_level_spacer(level),
 				(void *)base_va, level_size);

-		if (mm->base_va >= base_va + level_size) {
+		if (mm->base_va > base_va + level_size - 1) {
 			/* Next region is after this area. Nothing to map yet */
 			desc = INVALID_DESC;
 		} else {
@@ -369,7 +369,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		*table++ = desc;
 		base_va += level_size;
-	} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
+	} while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));

 	return mm;
 }
-- cgit

From 12ab697e8f91a67a439e6172621b905753d61f46 Mon Sep 17 00:00:00 2001
From: Soby Mathew <soby.mathew@arm.com>
Date: Mon, 8 Aug 2016 12:42:53 +0100
Subject: Move spinlock library code to AArch64 folder

This patch moves the assembly exclusive lock library code
`spinlock.S` into the architecture-specific folder `aarch64`. A stub
file which includes the file from the new location is retained at the
original location for compatibility. The BL makefiles are also
modified to include the file from the new location.
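As an aside on the wraparound fix in the first patch above: a minimal
C sketch (not part of either patch; the region values are
hypothetical) of why the old one-past-the-end comparisons break at the
top of a 32-bit address space, while the new last-byte comparisons do
not:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical region ending exactly at the 4 GB boundary */
	uint32_t base_va = 0xFFF00000u;
	uint32_t size    = 0x00100000u;

	/*
	 * Old style: base_va + size wraps around to 0, so a check such as
	 * "mm->base_va >= base_va + size" compares against 0 and gives
	 * the wrong answer for every region.
	 */
	printf("base_va + size     = 0x%08x (wrapped)\n", base_va + size);

	/*
	 * New style: the address of the last byte never wraps for a
	 * non-zero size, so "mm->base_va > base_va + size - 1" stays
	 * correct right up to the top of the address space.
	 */
	printf("base_va + size - 1 = 0x%08x\n", base_va + size - 1);
	return 0;
}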
Change-Id: Ide0b601b79c439e390c3a017d93220a66be73543 --- lib/locks/exclusive/aarch64/spinlock.S | 52 ++++++++++++++++++++++++++++++++++ lib/locks/exclusive/spinlock.S | 27 +++--------------- lib/psci/psci_lib.mk | 2 +- 3 files changed, 57 insertions(+), 24 deletions(-) create mode 100644 lib/locks/exclusive/aarch64/spinlock.S (limited to 'lib') diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S new file mode 100644 index 00000000..1ca59123 --- /dev/null +++ b/lib/locks/exclusive/aarch64/spinlock.S @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + + .globl spin_lock + .globl spin_unlock + + +func spin_lock + mov w2, #1 + sevl +l1: wfe +l2: ldaxr w1, [x0] + cbnz w1, l1 + stxr w1, w2, [x0] + cbnz w1, l2 + ret +endfunc spin_lock + + +func spin_unlock + stlr wzr, [x0] + ret +endfunc spin_unlock diff --git a/lib/locks/exclusive/spinlock.S b/lib/locks/exclusive/spinlock.S index 772f14e9..9c945f9b 100644 --- a/lib/locks/exclusive/spinlock.S +++ b/lib/locks/exclusive/spinlock.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,25 +28,6 @@ * POSSIBILITY OF SUCH DAMAGE. 
 */
-#include <asm_macros.S>
-
-	.globl	spin_lock
-	.globl	spin_unlock
-
-
-func spin_lock
-	mov	w2, #1
-	sevl
-l1:	wfe
-l2:	ldaxr	w1, [x0]
-	cbnz	w1, l1
-	stxr	w1, w2, [x0]
-	cbnz	w1, l2
-	ret
-endfunc spin_lock
-
-
-func spin_unlock
-	stlr	wzr, [x0]
-	ret
-endfunc spin_unlock
+#if !ERROR_DEPRECATED
+#include "./aarch64/spinlock.S"
+#endif
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 662e14a2..3a5833ba 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -33,7 +33,7 @@ PSCI_LIB_SOURCES	:=	lib/el3_runtime/cpu_data_array.c	\
 			lib/el3_runtime/aarch64/cpu_data.S	\
 			lib/el3_runtime/aarch64/context_mgmt.c	\
 			lib/cpus/aarch64/cpu_helpers.S		\
-			lib/locks/exclusive/spinlock.S		\
+			lib/locks/exclusive/aarch64/spinlock.S	\
 			lib/psci/psci_off.c			\
 			lib/psci/psci_on.c			\
 			lib/psci/psci_suspend.c			\
-- cgit

From f24307dec43332f2846bf18197ec7d113386c220 Mon Sep 17 00:00:00 2001
From: Soby Mathew <soby.mathew@arm.com>
Date: Thu, 5 May 2016 12:31:57 +0100
Subject: AArch32: Add assembly helpers

This patch adds various assembly helpers for AArch32, such as:

* Cache management: functions to flush, invalidate and clean the
  cache by MVA, plus helpers to perform cache operations by set/way.

* Stack management: macros to declare a stack and to get the stack
  corresponding to the current CPU.

* Misc: macros to access coprocessor registers in AArch32, macros to
  define functions in assembly, assert macros, a generic `do_panic()`
  implementation and a function to zero a block of memory.

Change-Id: I7b78ca3f922c0eda39beb9786b7150e9193425be
---
 lib/aarch32/cache_helpers.S | 237 ++++++++++++++++++++++++++++++++++++++++++++
 lib/aarch32/misc_helpers.S  |  60 +++++++++++
 2 files changed, 297 insertions(+)
 create mode 100644 lib/aarch32/cache_helpers.S
 create mode 100644 lib/aarch32/misc_helpers.S

(limited to 'lib')

diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 00000000..d0e5cd06
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include +#include + + .globl flush_dcache_range + .globl clean_dcache_range + .globl inv_dcache_range + .globl dcsw_op_louis + .globl dcsw_op_all + .globl dcsw_op_level1 + .globl dcsw_op_level2 + .globl dcsw_op_level3 + +/* + * This macro can be used for implementing various data cache operations `op` + */ +.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2 + dcache_line_size r2, r3 + add r1, r0, r1 + sub r3, r2, #1 + bic r0, r0, r3 +loop_\op: + stcopr r0, \coproc, \opc1, \CRn, \CRm, \opc2 + add r0, r0, r2 + cmp r0, r1 + blo loop_\op + dsb sy + bx lr +.endm + + /* ------------------------------------------ + * Clean+Invalidate from base address till + * size. 'r0' = addr, 'r1' = size + * ------------------------------------------ + */ +func flush_dcache_range + do_dcache_maintenance_by_mva cimvac, DCCIMVAC +endfunc flush_dcache_range + + /* ------------------------------------------ + * Clean from base address till size. + * 'r0' = addr, 'r1' = size + * ------------------------------------------ + */ +func clean_dcache_range + do_dcache_maintenance_by_mva cmvac, DCCMVAC +endfunc clean_dcache_range + + /* ------------------------------------------ + * Invalidate from base address till + * size. 'r0' = addr, 'r1' = size + * ------------------------------------------ + */ +func inv_dcache_range + do_dcache_maintenance_by_mva imvac, DCIMVAC +endfunc inv_dcache_range + + /* ---------------------------------------------------------------- + * Data cache operations by set/way to the level specified + * + * The main function, do_dcsw_op requires: + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * r1: The cache level to begin operation from + * r2: clidr_el1 + * r3: The last cache level to operate on + * and will carry out the operation on each data cache from level 0 + * to the level in r3 in sequence + * + * The dcsw_op macro sets up the r2 and r3 parameters based on + * clidr_el1 cache information before invoking the main function + * ---------------------------------------------------------------- + */ + + .macro dcsw_op shift, fw, ls + ldcopr r2, CLIDR + ubfx r3, r2, \shift, \fw + lsl r3, r3, \ls + mov r1, #0 + b do_dcsw_op + .endm + +func do_dcsw_op + push {r4-r12,lr} + adr r11, dcsw_loop_table // compute cache op based on the operation type + add r6, r11, r0, lsl #3 // cache op is 2x32-bit instructions +loop1: + add r10, r1, r1, LSR #1 // Work out 3x current cache level + mov r12, r2, LSR r10 // extract cache type bits from clidr + and r12, r12, #7 // mask the bits for current cache only + cmp r12, #2 // see what cache we have at this level + blt level_done // no cache or only instruction cache at this level + + stcopr r1, CSSELR // select current cache level in csselr + isb // isb to sych the new cssr&csidr + ldcopr r12, CCSIDR // read the new ccsidr + and r10, r12, #7 // extract the length of the cache lines + add r10, r10, #4 // add 4 (r10 = line length offset) + ubfx r4, r12, #3, #10 // r4 = maximum way number (right aligned) + clz r5, r4 // r5 = the bit position of the way size increment + mov r9, r4 // r9 working copy of the aligned max way number + +loop2: + ubfx r7, r12, #13, #15 // r7 = max set number (right aligned) + +loop3: + orr r0, r1, r9, LSL r5 // factor in the way number and cache level into r0 + orr r0, r0, r7, LSL r10 // factor in the set number + + blx r6 + subs r7, r7, #1 // decrement the set number + bge loop3 + subs r9, r9, #1 // decrement the way number + bge loop2 +level_done: + add r1, r1, #2 // 
increment the cache number + cmp r3, r1 + dsb sy // ensure completion of previous cache maintenance instruction + bgt loop1 + + mov r6, #0 + stcopr r6, CSSELR //select cache level 0 in csselr + dsb sy + isb + pop {r4-r12,pc} + +dcsw_loop_table: + stcopr r0, DCISW + bx lr + stcopr r0, DCCISW + bx lr + stcopr r0, DCCSW + bx lr + +endfunc do_dcsw_op + + /* --------------------------------------------------------------- + * Data cache operations by set/way till PoU. + * + * The function requires : + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * --------------------------------------------------------------- + */ +func dcsw_op_louis + dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT +endfunc dcsw_op_louis + + /* --------------------------------------------------------------- + * Data cache operations by set/way till PoC. + * + * The function requires : + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * --------------------------------------------------------------- + */ +func dcsw_op_all + dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT +endfunc dcsw_op_all + + + /* --------------------------------------------------------------- + * Helper macro for data cache operations by set/way for the + * level specified + * --------------------------------------------------------------- + */ + .macro dcsw_op_level level + ldcopr r2, CLIDR + mov r3, \level + sub r1, r3, #2 + b do_dcsw_op + .endm + + /* --------------------------------------------------------------- + * Data cache operations by set/way for level 1 cache + * + * The main function, do_dcsw_op requires: + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * --------------------------------------------------------------- + */ +func dcsw_op_level1 + dcsw_op_level #(1 << LEVEL_SHIFT) +endfunc dcsw_op_level1 + + /* --------------------------------------------------------------- + * Data cache operations by set/way for level 2 cache + * + * The main function, do_dcsw_op requires: + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * --------------------------------------------------------------- + */ +func dcsw_op_level2 + dcsw_op_level #(2 << LEVEL_SHIFT) +endfunc dcsw_op_level2 + + /* --------------------------------------------------------------- + * Data cache operations by set/way for level 3 cache + * + * The main function, do_dcsw_op requires: + * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW), + * as defined in arch.h + * --------------------------------------------------------------- + */ +func dcsw_op_level3 + dcsw_op_level #(3 << LEVEL_SHIFT) +endfunc dcsw_op_level3 diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S new file mode 100644 index 00000000..63ac1a7e --- /dev/null +++ b/lib/aarch32/misc_helpers.S @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	zeromem
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address and length must be 4-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+#if ASM_ASSERTION
+	tst	r0, #0x3
+	ASM_ASSERT(eq)
+	tst	r1, #0x3
+	ASM_ASSERT(eq)
+#endif
+	add	r2, r0, r1
+	mov	r1, #0
+z_loop:
+	cmp	r2, r0
+	beq	z_end
+	str	r1, [r0], #4
+	b	z_loop
+z_end:
+	bx	lr
+endfunc zeromem
-- cgit

From b2bca61da51b22cfba303cf389199b9d9d06be4c Mon Sep 17 00:00:00 2001
From: Soby Mathew <soby.mathew@arm.com>
Date: Thu, 30 Jun 2016 15:11:07 +0100
Subject: AArch32: Add translation table library support

This patch adds translation table library support for AArch32
platforms. The library only supports the long descriptor format for
AArch32. `enable_mmu_secure()` enables the MMU for the secure world,
with `TTBR0` pointing to the populated translation tables.

Change-Id: I061345b1779391d098e35e7fe0c76e3ebf850e08
---
 lib/xlat_tables/aarch32/xlat_tables.c | 123 ++++++++++++++++++++++++++++
 1 file changed, 123 insertions(+)
 create mode 100644 lib/xlat_tables/aarch32/xlat_tables.c

(limited to 'lib')

diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
new file mode 100644
index 00000000..a97cf312
--- /dev/null
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "../xlat_tables_private.h" + +/* + * The virtual address space size must be a power of two. As we start the initial + * lookup at level 1, it must also be between 2 GB and 4 GB. See section + * G4.6.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more + * information. + */ +CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 32) && + IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size); + +#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT) + +static uint64_t l1_xlation_table[NUM_L1_ENTRIES] + __aligned(NUM_L1_ENTRIES * sizeof(uint64_t)); + +void init_xlat_tables(void) +{ + unsigned long long max_pa; + uintptr_t max_va; + print_mmap(); + init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa); + assert(max_va < ADDR_SPACE_SIZE); +} + +/******************************************************************************* + * Function for enabling the MMU in Secure PL1, assuming that the + * page-tables have already been created. + ******************************************************************************/ +void enable_mmu_secure(unsigned int flags) +{ + unsigned int mair0, ttbcr, sctlr; + uint64_t ttbr0; + + assert(IS_IN_SECURE()); + assert((read_sctlr() & SCTLR_M_BIT) == 0); + + /* Set attributes in the right indices of the MAIR */ + mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); + mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, + ATTR_IWBWA_OWBWA_NTR_INDEX); + mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE, + ATTR_NON_CACHEABLE_INDEX); + write_mair0(mair0); + + /* Invalidate TLBs at the current exception level */ + tlbiall(); + + /* + * Set TTBCR bits as well. Set TTBR0 table properties as Inner + * & outer WBWA & shareable. Disable TTBR1. + */ + ttbcr = TTBCR_EAE_BIT | + TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | + TTBCR_RGN0_INNER_WBA | + (32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE)); + ttbcr |= TTBCR_EPD1_BIT; + write_ttbcr(ttbcr); + + /* Set TTBR0 bits as well */ + ttbr0 = (uintptr_t) l1_xlation_table; + write64_ttbr0(ttbr0); + write64_ttbr1(0); + + /* + * Ensure all translation table writes have drained + * into memory, the TLB invalidation is complete, + * and translation register writes are committed + * before enabling the MMU + */ + dsb(); + isb(); + + sctlr = read_sctlr(); + sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; + + if (flags & DISABLE_DCACHE) + sctlr &= ~SCTLR_C_BIT; + else + sctlr |= SCTLR_C_BIT; + + write_sctlr(sctlr); + + /* Ensure the MMU enable takes effect immediately */ + isb(); +} -- cgit From e33b78a658bd54a815c780e17c2d0073db6f59db Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 14:10:46 +0100 Subject: AArch32: Add support in TF libraries This patch adds AArch32 support to cpu ops, context management, per-cpu data and spinlock libraries. The `entrypoint_info` structure is modified to add support for AArch32 register arguments. 
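(Usage sketch for the AArch32 translation table library added in the
previous patch: the region bases, sizes and the plat_configure_mmu()
wrapper are hypothetical, while mmap_add_region(), the MT_* attributes,
init_xlat_tables() and enable_mmu_secure() are the existing xlat_tables
API shown above.)

#include <xlat_tables.h>

/* Hypothetical platform memory layout, for illustration only */
#define PLAT_SRAM_BASE		0x04000000
#define PLAT_SRAM_SIZE		0x00040000
#define PLAT_DEVICE_BASE	0x1c000000
#define PLAT_DEVICE_SIZE	0x00100000

void plat_configure_mmu(void)
{
	/* Identity-map SRAM as secure, cacheable read-write memory */
	mmap_add_region(PLAT_SRAM_BASE, PLAT_SRAM_BASE, PLAT_SRAM_SIZE,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Identity-map the device region as secure device memory */
	mmap_add_region(PLAT_DEVICE_BASE, PLAT_DEVICE_BASE, PLAT_DEVICE_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE);

	/*
	 * Build the long descriptor tables, then enable the MMU with
	 * the data cache left on (no DISABLE_DCACHE flag).
	 */
	init_xlat_tables();
	enable_mmu_secure(0);
}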
The CPU operations for AEM generic cpu in AArch32 mode is also added. Change-Id: I1e52e79f498661d8f31f1e7b3a29e222bc7a4483 --- lib/cpus/aarch32/aem_generic.S | 68 ++++++++++ lib/cpus/aarch32/cpu_helpers.S | 177 +++++++++++++++++++++++++ lib/el3_runtime/aarch32/context_mgmt.c | 235 +++++++++++++++++++++++++++++++++ lib/el3_runtime/aarch32/cpu_data.S | 63 +++++++++ lib/locks/exclusive/aarch32/spinlock.S | 55 ++++++++ 5 files changed, 598 insertions(+) create mode 100644 lib/cpus/aarch32/aem_generic.S create mode 100644 lib/cpus/aarch32/cpu_helpers.S create mode 100644 lib/el3_runtime/aarch32/context_mgmt.c create mode 100644 lib/el3_runtime/aarch32/cpu_data.S create mode 100644 lib/locks/exclusive/aarch32/spinlock.S (limited to 'lib') diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S new file mode 100644 index 00000000..10ea4e47 --- /dev/null +++ b/lib/cpus/aarch32/aem_generic.S @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include + +func aem_generic_core_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 cache to PoU. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_louis +endfunc aem_generic_core_pwr_dwn + + +func aem_generic_cluster_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 and L2 caches to PoC. 
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_all +endfunc aem_generic_cluster_pwr_dwn + +/* cpu_ops for Base AEM FVP */ +declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S new file mode 100644 index 00000000..927a6f50 --- /dev/null +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + + /* + * The reset handler common to all platforms. After a matching + * cpu_ops structure entry is found, the correponding reset_handler + * in the cpu_ops is invoked. The reset handler is invoked very early + * in the boot sequence and it is assumed that we can clobber r0 - r10 + * without the need to follow AAPCS. + * Clobbers: r0 - r10 + */ + .globl reset_handler +func reset_handler + mov r10, lr + + /* The plat_reset_handler can clobber r0 - r9 */ + bl plat_reset_handler + + /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */ + bl get_cpu_ops_ptr + +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops reset handler */ + ldr r1, [r0, #CPU_RESET_FUNC] + cmp r1, #0 + mov lr, r10 + bxne r1 + bx lr +endfunc reset_handler + + /* + * The prepare core power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS. + */ + .globl prepare_core_pwr_dwn +func prepare_core_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops core_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CORE] + bx r0 +endfunc prepare_core_pwr_dwn + + /* + * The prepare cluster power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS. 
+ */ + .globl prepare_cluster_pwr_dwn +func prepare_cluster_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops cluster_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CLUSTER] + bx r0 +endfunc prepare_cluster_pwr_dwn + + /* + * Initializes the cpu_ops_ptr if not already initialized + * in cpu_data. This must only be called after the data cache + * is enabled. AAPCS is followed. + */ + .globl init_cpu_ops +func init_cpu_ops + push {r4 - r6, lr} + bl _cpu_data + mov r6, r0 + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] + cmp r1, #0 + bne 1f + bl get_cpu_ops_ptr +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + str r0, [r6, #CPU_DATA_CPU_OPS_PTR]! +1: + pop {r4 - r6, pc} +endfunc init_cpu_ops + + /* + * The below function returns the cpu_ops structure matching the + * midr of the core. It reads the MIDR and finds the matching + * entry in cpu_ops entries. Only the implementation and part number + * are used to match the entries. + * Return : + * r0 - The matching cpu_ops pointer on Success + * r0 - 0 on failure. + * Clobbers: r0 - r5 + */ + .globl get_cpu_ops_ptr +func get_cpu_ops_ptr + /* Get the cpu_ops start and end locations */ + ldr r4, =(__CPU_OPS_START__ + CPU_MIDR) + ldr r5, =(__CPU_OPS_END__ + CPU_MIDR) + + /* Initialize the return parameter */ + mov r0, #0 + + /* Read the MIDR_EL1 */ + ldcopr r2, MIDR + ldr r3, =CPU_IMPL_PN_MASK + + /* Retain only the implementation and part number using mask */ + and r2, r2, r3 +1: + /* Check if we have reached end of list */ + cmp r4, r5 + bge error_exit + + /* load the midr from the cpu_ops */ + ldr r1, [r4], #CPU_OPS_SIZE + and r1, r1, r3 + + /* Check if midr matches to midr of this core */ + cmp r1, r2 + bne 1b + + /* Subtract the increment and offset to get the cpu-ops pointer */ + sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR) +error_exit: + bx lr +endfunc get_cpu_ops_ptr diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c new file mode 100644 index 00000000..6915ded7 --- /dev/null +++ b/lib/el3_runtime/aarch32/context_mgmt.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * Context management library initialisation routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for the secure + * and non-secure states. Management of the structures and their associated + * memory is not done by the context management library e.g. the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload manages the context(s) corresponding to the secure state. + * It also uses this library to get access to the non-secure + * state cpu context pointers. + ******************************************************************************/ +void cm_init(void) +{ + /* + * The context management library has only global data to initialize, but + * that will be done when the BSS is zeroed out + */ +} + +/******************************************************************************* + * The following function initializes the cpu_context 'ctx' for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_e1_sysreg_context_restore(). + ******************************************************************************/ +static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + unsigned int security_state; + uint32_t scr, sctlr; + regs_t *reg_ctx; + + assert(ctx); + + security_state = GET_SECURITY_STATE(ep->h.attr); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements + */ + scr = read_scr(); + scr &= ~(SCR_NS_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr |= SCR_NS_BIT; + + /* + * Set up SCTLR for the Non Secure context. + * EE bit is taken from the entrypoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to hyp mode, HVC is enabled + * via SCR.HCE. 
+ * + * Always compute the SCTLR_EL1 value and save in the cpu_context + * - the HYP registers are set up by cm_preapre_ns_entry() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianness and pc + */ + if (security_state != SECURE) { + sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; + sctlr |= SCTLR_RES1; + write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr); + } + + if (GET_M32(ep->spsr) == MODE32_hyp) + scr |= SCR_HCE_BIT; + + reg_ctx = get_regs_ctx(ctx); + + write_ctx_reg(reg_ctx, CTX_SCR, scr); + write_ctx_reg(reg_ctx, CTX_LR, ep->pc); + write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr); + + /* + * Store the r0-r3 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t)); +} + +/******************************************************************************* + * The following function initializes the cpu_context for a CPU specified by + * its `cpu_idx` for first use, and sets the initial entrypoint state as + * specified by the entry_point_info structure. + ******************************************************************************/ +void cm_init_context_by_index(unsigned int cpu_idx, + const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + ******************************************************************************/ +void cm_init_my_context(const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to hyp mode, HSCTLR is initialized + * If execution is requested to non-secure PL1, and the CPU supports + * HYP mode then HYP mode is disabled by configuring all necessary HYP mode + * registers. 
+ ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t sctlr, scr, hcptr; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR); + if (scr & SCR_HCE_BIT) { + /* Use SCTLR value to initialize HSCTLR */ + sctlr = read_ctx_reg(get_regs_ctx(ctx), + CTX_NS_SCTLR); + sctlr |= HSCTLR_RES1; + /* Temporarily set the NS bit to access HSCTLR */ + write_scr(read_scr() | SCR_NS_BIT); + /* + * Make sure the write to SCR is complete so that + * we can access HSCTLR + */ + isb(); + write_hsctlr(sctlr); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } else if (read_id_pfr1() & + (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) { + /* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */ + write_scr(read_scr() | SCR_NS_BIT); + isb(); + + /* PL2 present but unused, need to disable safely */ + write_hcr(0); + + /* HSCTLR : can be ignored when bypassing */ + + /* HCPTR : disable all traps TCPAC, TTA, TCP */ + hcptr = read_hcptr(); + hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT); + write_hcptr(hcptr); + + /* Enable EL1 access to timer */ + write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT); + + /* Reset CNTVOFF_EL2 */ + write64_cntvoff(0); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr(read_midr()); + write_vmpidr(read_mpidr()); + + /* + * Reset VTTBR. + * Needed because cache maintenance operations depend on + * the VMID even when non-secure EL1&0 stage 2 address + * translation are disabled. + */ + write64_vttbr(0); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } + } +} diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S new file mode 100644 index 00000000..b97911fb --- /dev/null +++ b/lib/el3_runtime/aarch32/cpu_data.S @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + + .globl _cpu_data + .globl _cpu_data_by_index + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data(void) + * + * Return the cpu_data structure for the current CPU. + * ----------------------------------------------------------------- + */ +func _cpu_data + push {lr} + bl plat_my_core_pos + pop {lr} + b _cpu_data_by_index +endfunc _cpu_data + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: r0, r1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + ldr r1, =percpu_data + add r0, r1, r0, LSL #CPU_DATA_LOG2SIZE + bx lr +endfunc _cpu_data_by_index diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S new file mode 100644 index 00000000..f3a2bc36 --- /dev/null +++ b/lib/locks/exclusive/aarch32/spinlock.S @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + + .globl spin_lock + .globl spin_unlock + + +func spin_lock + mov r2, #1 +1: + ldrex r1, [r0] + cmp r1, #0 + wfene + strexeq r1, r2, [r0] + cmpeq r1, #0 + bne 1b + dmb + bx lr +endfunc spin_lock + + +func spin_unlock + mov r1, #0 + stl r1, [r0] + bx lr +endfunc spin_unlock -- cgit From 727e5238fa3e9220d6a2718fab3b1df22af1dc61 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 14:11:23 +0100 Subject: AArch32: Add support to PSCI lib This patch adds AArch32 support to PSCI library, as follows : * The `psci_helpers.S` is implemented for AArch32. * AArch32 version of internal helper function `psci_get_ns_ep_info()` is defined. * The PSCI Library is responsible for the Non Secure context initialization. 
Hence a library interface `psci_prepare_next_non_secure_ctx()` is introduced to enable EL3 runtime firmware to initialize the non secure context without invoking context management library APIs. Change-Id: I25595b0cc2dbfdf39dbf7c589b875cba33317b9d --- lib/psci/aarch32/psci_helpers.S | 180 ++++++++++++++++++++++++++++++++++++++++ lib/psci/psci_common.c | 48 +++++++++++ lib/psci/psci_lib.mk | 15 ++-- lib/psci/psci_setup.c | 12 +++ 4 files changed, 249 insertions(+), 6 deletions(-) create mode 100644 lib/psci/aarch32/psci_helpers.S (limited to 'lib') diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S new file mode 100644 index 00000000..36d5d7d9 --- /dev/null +++ b/lib/psci/aarch32/psci_helpers.S @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + + .globl psci_do_pwrdown_cache_maintenance + .globl psci_do_pwrup_cache_maintenance + .globl psci_power_down_wfi + +/* ----------------------------------------------------------------------- + * void psci_do_pwrdown_cache_maintenance(unsigned int power level); + * + * This function performs cache maintenance for the specified power + * level. The levels of cache affected are determined by the power + * level which is passed as the argument i.e. level 0 results + * in a flush of the L1 cache. Both the L1 and L2 caches are flushed + * for a higher power level. + * + * Additionally, this function also ensures that stack memory is correctly + * flushed out to avoid coherency issues due to a change in its memory + * attributes after the data cache is disabled. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrdown_cache_maintenance + push {r4, lr} + + /* ---------------------------------------------- + * Turn OFF cache and do stack maintenance + * prior to cpu operations . 
This sequence is + * different from AArch64 because in AArch32 the + * assembler routines for cpu operations utilize + * the stack whereas in AArch64 it doesn't. + * ---------------------------------------------- + */ + mov r4, r0 + bl do_stack_maintenance + + /* --------------------------------------------- + * Determine how many levels of cache will be + * subject to cache maintenance. Power level + * 0 implies that only the cpu is being powered + * down. Only the L1 data cache needs to be + * flushed to the PoU in this case. For a higher + * power level we are assuming that a flush + * of L1 data and L2 unified cache is enough. + * This information should be provided by the + * platform. + * --------------------------------------------- + */ + cmp r4, #PSCI_CPU_PWR_LVL + pop {r4,lr} + + beq prepare_core_pwr_dwn + b prepare_cluster_pwr_dwn +endfunc psci_do_pwrdown_cache_maintenance + + +/* ----------------------------------------------------------------------- + * void psci_do_pwrup_cache_maintenance(void); + * + * This function performs cache maintenance after this cpu is powered up. + * Currently, this involves managing the used stack memory before turning + * on the data cache. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrup_cache_maintenance + push {lr} + + /* --------------------------------------------- + * Ensure any inflight stack writes have made it + * to main memory. + * --------------------------------------------- + */ + dmb st + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in r1. Calculate and store the + * stack base address in r0. + * --------------------------------------------- + */ + bl plat_get_my_stack + mov r1, sp + sub r1, r0, r1 + mov r0, sp + bl inv_dcache_range + + /* --------------------------------------------- + * Enable the data cache. + * --------------------------------------------- + */ + ldcopr r0, SCTLR + orr r0, r0, #SCTLR_C_BIT + stcopr r0, SCTLR + isb + + pop {pc} +endfunc psci_do_pwrup_cache_maintenance + + /* --------------------------------------------- + * void do_stack_maintenance(void) + * Do stack maintenance by flushing the used + * stack to the main memory and invalidating the + * remainder. + * --------------------------------------------- + */ +func do_stack_maintenance + push {r4, lr} + bl plat_get_my_stack + + /* Turn off the D-cache */ + ldcopr r1, SCTLR + bic r1, #SCTLR_C_BIT + stcopr r1, SCTLR + isb + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in r1. + * --------------------------------------------- + */ + mov r4, r0 + mov r1, sp + sub r1, r0, r1 + mov r0, sp + bl flush_dcache_range + + /* --------------------------------------------- + * Calculate and store the size of the unused + * stack memory in r1. Calculate and store the + * stack base address in r0. + * --------------------------------------------- + */ + sub r0, r4, #PLATFORM_STACK_SIZE + sub r1, sp, r0 + bl inv_dcache_range + + pop {r4, pc} +endfunc do_stack_maintenance + +/* ----------------------------------------------------------------------- + * This function is called to indicate to the power controller that it + * is safe to power down this cpu. It should not exit the wfi and will + * be released from reset upon power up. 
+ * ----------------------------------------------------------------------- + */ +func psci_power_down_wfi + dsb sy // ensure write buffer empty + wfi + bl plat_panic_handler +endfunc psci_power_down_wfi diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c index e87e8c05..68cdd6eb 100644 --- a/lib/psci/psci_common.c +++ b/lib/psci/psci_common.c @@ -592,6 +592,53 @@ int psci_validate_mpidr(u_register_t mpidr) * This function determines the full entrypoint information for the requested * PSCI entrypoint on power on/resume and returns it. ******************************************************************************/ +#ifdef AARCH32 +static int psci_get_ns_ep_info(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + u_register_t ep_attr; + unsigned int aif, ee, mode; + u_register_t scr = read_scr(); + u_register_t ns_sctlr, sctlr; + + /* Switch to non secure state */ + write_scr(scr | SCR_NS_BIT); + isb(); + ns_sctlr = read_sctlr(); + + sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr; + + /* Return to original state */ + write_scr(scr); + isb(); + ee = 0; + + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); + + ep->pc = entrypoint; + memset(&ep->args, 0, sizeof(ep->args)); + ep->args.arg0 = context_id; + + mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; + + /* + * TODO: Choose async. exception bits if HYP mode is not + * implemented according to the values of SCR.{AW, FW} bits + */ + aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT; + + ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif); + + return PSCI_E_SUCCESS; +} + +#else static int psci_get_ns_ep_info(entry_point_info_t *ep, uintptr_t entrypoint, u_register_t context_id) @@ -646,6 +693,7 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep, return PSCI_E_SUCCESS; } +#endif /******************************************************************************* * This function validates the entrypoint with the platform layer if the diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk index 3a5833ba..8daa8318 100644 --- a/lib/psci/psci_lib.mk +++ b/lib/psci/psci_lib.mk @@ -29,11 +29,10 @@ # PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \ - lib/el3_runtime/aarch64/context.S \ - lib/el3_runtime/aarch64/cpu_data.S \ - lib/el3_runtime/aarch64/context_mgmt.c \ - lib/cpus/aarch64/cpu_helpers.S \ - lib/locks/exclusive/aarch64/spinlock.S \ + lib/el3_runtime/${ARCH}/cpu_data.S \ + lib/el3_runtime/${ARCH}/context_mgmt.c \ + lib/cpus/${ARCH}/cpu_helpers.S \ + lib/locks/exclusive/${ARCH}/spinlock.S \ lib/psci/psci_off.c \ lib/psci/psci_on.c \ lib/psci/psci_suspend.c \ @@ -41,7 +40,11 @@ PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \ lib/psci/psci_main.c \ lib/psci/psci_setup.c \ lib/psci/psci_system_off.c \ - lib/psci/aarch64/psci_helpers.S + lib/psci/${ARCH}/psci_helpers.S + +ifeq (${ARCH}, aarch64) +PSCI_LIB_SOURCES += lib/el3_runtime/aarch64/context.S +endif ifeq (${USE_COHERENT_MEM}, 1) PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c index d35e0001..20d06352 100644 --- a/lib/psci/psci_setup.c +++ b/lib/psci/psci_setup.c @@ -278,3 +278,15 @@ void psci_arch_setup(void) /* Initialize the cpu_ops pointer. 
*/ init_cpu_ops(); } + +/****************************************************************************** + * PSCI Library interface to initialize the cpu context for the next non + * secure image during cold boot. The relevant registers in the cpu context + * need to be retrieved and programmed on return from this interface. + *****************************************************************************/ +void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info) +{ + assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE); + cm_init_my_context(next_image_info); + cm_prepare_el3_exit(NON_SECURE); +} -- cgit
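(Usage sketch for the library interface added in the last patch: a
hedged illustration of how an AArch32 EL3 runtime image such as SP_MIN
might use it on the cold boot path. bl33_image_ep_info, its population
and sp_min_exit_to_bl33() are hypothetical names, and the declaration
is assumed to come from psci.h.)

#include <assert.h>
#include <psci.h>

/* Hypothetical: BL33 entry point info, populated earlier from boot data */
static entry_point_info_t bl33_image_ep_info;

/* Hypothetical hand-over routine, run once before the first NS exit */
void sp_min_exit_to_bl33(void)
{
	entry_point_info_t *next_image_info = &bl33_image_ep_info;

	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);

	/*
	 * The PSCI library initialises the non-secure cpu_context (SCR,
	 * SPSR, LR and r0-r3) and programs the PL1/HYP registers ready
	 * for the first exit to the non-secure world.
	 */
	psci_prepare_next_non_secure_ctx(next_image_info);
}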