| author | danh-arm <dan.handley@arm.com> | 2016-08-18 11:38:19 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2016-08-18 11:38:19 +0100 |
| commit | 937108a04a998c9e6d6ce5734bf62c7eb8c9d42c | |
| tree | fd4951631aad873037981988603922a3662b8a38 /lib | |
| parent | 974603b554a71872d8e0a4aca02ba9cf73b1c3fe | |
| parent | 9d29c227b23d8620dec70938716fbb6c47d591ca | |
Merge pull request #678 from soby-mathew/sm/PSCI_AArch32
Introduce AArch32 support for PSCI library
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/aarch32/cache_helpers.S | 237 |
| -rw-r--r-- | lib/aarch32/misc_helpers.S | 60 |
| -rw-r--r-- | lib/cpus/aarch32/aem_generic.S | 68 |
| -rw-r--r-- | lib/cpus/aarch32/cpu_helpers.S | 177 |
| -rw-r--r-- | lib/el3_runtime/aarch32/context_mgmt.c | 235 |
| -rw-r--r-- | lib/el3_runtime/aarch32/cpu_data.S | 63 |
| -rw-r--r-- | lib/locks/exclusive/aarch32/spinlock.S | 55 |
| -rw-r--r-- | lib/locks/exclusive/aarch64/spinlock.S | 52 |
| -rw-r--r-- | lib/locks/exclusive/spinlock.S | 27 |
| -rw-r--r-- | lib/psci/aarch32/psci_helpers.S | 180 |
| -rw-r--r-- | lib/psci/psci_common.c | 48 |
| -rw-r--r-- | lib/psci/psci_lib.mk | 15 |
| -rw-r--r-- | lib/psci/psci_setup.c | 12 |
| -rw-r--r-- | lib/xlat_tables/aarch32/xlat_tables.c | 123 |
| -rw-r--r-- | lib/xlat_tables/xlat_tables_common.c | 12 |
15 files changed, 1329 insertions(+), 35 deletions(-)
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 00000000..d0e5cd06
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	flush_dcache_range
+	.globl	clean_dcache_range
+	.globl	inv_dcache_range
+	.globl	dcsw_op_louis
+	.globl	dcsw_op_all
+	.globl	dcsw_op_level1
+	.globl	dcsw_op_level2
+	.globl	dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
+	dcache_line_size r2, r3
+	add	r1, r0, r1
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+loop_\op:
+	stcopr	r0, \coproc, \opc1, \CRn, \CRm, \opc2
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	loop_\op
+	dsb	sy
+	bx	lr
+.endm
+
+	/* ------------------------------------------
+	 * Clean+Invalidate from the base address for
+	 * the given size. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_range
+	do_dcache_maintenance_by_mva cimvac, DCCIMVAC
+endfunc flush_dcache_range
+
+	/* ------------------------------------------
+	 * Clean from the base address for the given
+	 * size. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func clean_dcache_range
+	do_dcache_maintenance_by_mva cmvac, DCCMVAC
+endfunc clean_dcache_range
+
+	/* ------------------------------------------
+	 * Invalidate from the base address for the
+	 * given size. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func inv_dcache_range
+	do_dcache_maintenance_by_mva imvac, DCIMVAC
+endfunc inv_dcache_range
+
+	/* ----------------------------------------------------------------
+	 * Data cache operations by set/way to the level specified
+	 *
+	 * The main function, do_dcsw_op, requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * r1: The cache level to begin the operation from
+	 * r2: CLIDR
+	 * r3: The last cache level to operate on
+	 * and will carry out the operation on each data cache from the
+	 * level in r1 to the level in r3 in sequence
+	 *
+	 * The dcsw_op macro sets up the r2 and r3 parameters based on
+	 * CLIDR cache information before invoking the main function
+	 * ----------------------------------------------------------------
+	 */
+	.macro	dcsw_op shift, fw, ls
+	ldcopr	r2, CLIDR
+	ubfx	r3, r2, \shift, \fw
+	lsl	r3, r3, \ls
+	mov	r1, #0
+	b	do_dcsw_op
+	.endm
+
+func do_dcsw_op
+	push	{r4 - r12, lr}
+	adr	r11, dcsw_loop_table	// compute cache op based on the operation type
+	add	r6, r11, r0, lsl #3	// cache op is 2x32-bit instructions
+loop1:
+	add	r10, r1, r1, lsr #1	// Work out 3x current cache level
+	mov	r12, r2, lsr r10	// extract cache type bits from clidr
+	and	r12, r12, #7		// mask the bits for current cache only
+	cmp	r12, #2			// see what cache we have at this level
+	blt	level_done		// no cache or only instruction cache at this level
+
+	stcopr	r1, CSSELR		// select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+	ldcopr	r12, CCSIDR		// read the new ccsidr
+	and	r10, r12, #7		// extract the length of the cache lines
+	add	r10, r10, #4		// add 4 (r10 = line length offset)
+	ubfx	r4, r12, #3, #10	// r4 = maximum way number (right aligned)
+	clz	r5, r4			// r5 = the bit position of the way size increment
+	mov	r9, r4			// r9 = working copy of the max way number
+
+loop2:
+	ubfx	r7, r12, #13, #15	// r7 = max set number (right aligned)
+
+loop3:
+	orr	r0, r1, r9, lsl r5	// factor in the way number and cache level into r0
+	orr	r0, r0, r7, lsl r10	// factor in the set number
+
+	blx	r6
+	subs	r7, r7, #1		// decrement the set number
+	bge	loop3
+	subs	r9, r9, #1		// decrement the way number
+	bge	loop2
+level_done:
+	add	r1, r1, #2		// increment the cache number
+	cmp	r3, r1
+	dsb	sy			// ensure completion of previous cache maintenance instruction
+	bgt	loop1
+
+	mov	r6, #0
+	stcopr	r6, CSSELR		// select cache level 0 in csselr
+	dsb	sy
+	isb
+	pop	{r4 - r12, pc}
+
+dcsw_loop_table:
+	stcopr	r0, DCISW
+	bx	lr
+	stcopr	r0, DCCISW
+	bx	lr
+	stcopr	r0, DCCSW
+	bx	lr
+
+endfunc do_dcsw_op
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoU.
+	 *
+	 * The function requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_louis
+	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoC.
+	 *
+	 * The function requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_all
+	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+	/* ---------------------------------------------------------------
+	 * Helper macro for data cache operations by set/way for the
+	 * level specified
+	 * ---------------------------------------------------------------
+	 */
+	.macro	dcsw_op_level level
+	ldcopr	r2, CLIDR
+	mov	r3, \level
+	sub	r1, r3, #2
+	b	do_dcsw_op
+	.endm
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 1 cache
+	 *
+	 * The main function, do_dcsw_op, requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level1
+	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 2 cache
+	 *
+	 * The main function, do_dcsw_op, requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level2
+	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 3 cache
+	 *
+	 * The main function, do_dcsw_op, requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 *     as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level3
+	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
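For readers following the do_dcsw_op loop above: it packs the cache level, way and set into the operand of the DC ISW/CISW/CSW instruction, deriving the shift amounts from CCSIDR. A minimal C sketch of that encoding (`dcsw_operand` is an illustrative name, not a TF-A function; it assumes a set-associative cache, i.e. a non-zero max way number):

```c
#include <stdint.h>

/* Build the set/way operand the loop passes in r0: the way goes at the
 * top (shift = CLZ of the max way number), the set is shifted by log2 of
 * the line size in bytes (CCSIDR.LineSize + 4), the level sits in bits
 * [3:1]. Register comments refer to the assembly above. */
static uint32_t dcsw_operand(uint32_t level, uint32_t way, uint32_t set,
                             uint32_t ccsidr)
{
	uint32_t line_shift = (ccsidr & 0x7u) + 4u;		/* r10 */
	uint32_t max_way = (ccsidr >> 3) & 0x3ffu;		/* r4, != 0 */
	uint32_t way_shift = (uint32_t)__builtin_clz(max_way);	/* r5 */

	return (way << way_shift) | (set << line_shift) | (level << 1);
}
```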
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
new file mode 100644
index 00000000..63ac1a7e
--- /dev/null
+++ b/lib/aarch32/misc_helpers.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	zeromem
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address and length must be 4-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+#if ASM_ASSERTION
+	tst	r0, #0x3
+	ASM_ASSERT(eq)
+	tst	r1, #0x3
+	ASM_ASSERT(eq)
+#endif
+	add	r2, r0, r1
+	mov	r1, #0
+z_loop:
+	cmp	r2, r0
+	beq	z_end
+	str	r1, [r0], #4
+	b	z_loop
+z_end:
+	bx	lr
+endfunc zeromem
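A C model of the zeromem contract above, useful for seeing exactly what the ASM_ASSERT checks enforce in debug builds (`zeromem_model` is an illustrative name):

```c
#include <assert.h>
#include <stdint.h>

/* Both the base address and the length must be multiples of 4; the body
 * mirrors the word-at-a-time store loop (str r1, [r0], #4). */
void zeromem_model(void *mem, unsigned int length)
{
	uint32_t *p = mem;

	assert(((uintptr_t)mem & 3u) == 0u);
	assert((length & 3u) == 0u);

	for (unsigned int i = 0; i < length / 4u; i++)
		p[i] = 0u;
}
```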
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 00000000..10ea4e47
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 cache to PoU.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 and L2 caches to PoC.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
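The declare_cpu_ops invocation above emits a record into a dedicated linker section. A hypothetical C view of that record (field names are illustrative; the real layout is defined by the cpu_macros.S offsets such as CPU_MIDR and CPU_RESET_FUNC):

```c
/* Sketch of one cpu_ops section entry for the Base AEM FVP. */
struct cpu_ops_sketch {
	unsigned long midr;		/* matched against this core's MIDR */
	void (*reset_func)(void);	/* optional, NULL if absent */
	void (*core_pwr_dwn)(void);	/* aem_generic_core_pwr_dwn above */
	void (*cluster_pwr_dwn)(void);	/* aem_generic_cluster_pwr_dwn above */
};
```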
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 00000000..927a6f50
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_data.h>
+#include <cpu_macros.S>
+
+	/*
+	 * The reset handler common to all platforms. After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+	 * in the cpu_ops is invoked. The reset handler is invoked very early
+	 * in the boot sequence and it is assumed that we can clobber r0 - r10
+	 * without the need to follow AAPCS.
+	 * Clobbers: r0 - r10
+	 */
+	.globl	reset_handler
+func reset_handler
+	mov	r10, lr
+
+	/* The plat_reset_handler can clobber r0 - r9 */
+	bl	plat_reset_handler
+
+	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+	bl	get_cpu_ops_ptr
+
+#if ASM_ASSERTION
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops reset handler */
+	ldr	r1, [r0, #CPU_RESET_FUNC]
+	cmp	r1, #0
+	mov	lr, r10
+	bxne	r1
+	bx	lr
+endfunc reset_handler
+
+	/*
+	 * The prepare core power down function for all platforms. After
+	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+	 */
+	.globl	prepare_core_pwr_dwn
+func prepare_core_pwr_dwn
+	push	{lr}
+	bl	_cpu_data
+	pop	{lr}
+
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+	cmp	r1, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops core_pwr_dwn handler */
+	ldr	r0, [r1, #CPU_PWR_DWN_CORE]
+	bx	r0
+endfunc prepare_core_pwr_dwn
+
+	/*
+	 * The prepare cluster power down function for all platforms. After
+	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+	 */
+	.globl	prepare_cluster_pwr_dwn
+func prepare_cluster_pwr_dwn
+	push	{lr}
+	bl	_cpu_data
+	pop	{lr}
+
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+	cmp	r1, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops cluster_pwr_dwn handler */
+	ldr	r0, [r1, #CPU_PWR_DWN_CLUSTER]
+	bx	r0
+endfunc prepare_cluster_pwr_dwn
+
+	/*
+	 * Initializes the cpu_ops_ptr if not already initialized
+	 * in cpu_data. This must only be called after the data cache
+	 * is enabled. AAPCS is followed.
+	 */
+	.globl	init_cpu_ops
+func init_cpu_ops
+	push	{r4 - r6, lr}
+	bl	_cpu_data
+	mov	r6, r0
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+	cmp	r1, #0
+	bne	1f
+	bl	get_cpu_ops_ptr
+#if ASM_ASSERTION
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1:
+	pop	{r4 - r6, pc}
+endfunc init_cpu_ops
+
+	/*
+	 * The below function returns the cpu_ops structure matching the
+	 * MIDR of the core. It reads the MIDR and finds the matching
+	 * entry in the cpu_ops entries. Only the implementer and part number
+	 * are used to match the entries.
+	 * Return :
+	 *     r0 - The matching cpu_ops pointer on success
+	 *     r0 - 0 on failure
+	 * Clobbers: r0 - r5
+	 */
+	.globl	get_cpu_ops_ptr
+func get_cpu_ops_ptr
+	/* Get the cpu_ops start and end locations */
+	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
+	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+	/* Initialize the return parameter */
+	mov	r0, #0
+
+	/* Read the MIDR */
+	ldcopr	r2, MIDR
+	ldr	r3, =CPU_IMPL_PN_MASK
+
+	/* Retain only the implementation and part number using mask */
+	and	r2, r2, r3
+1:
+	/* Check if we have reached end of list */
+	cmp	r4, r5
+	bge	error_exit
+
+	/* load the midr from the cpu_ops */
+	ldr	r1, [r4], #CPU_OPS_SIZE
+	and	r1, r1, r3
+
+	/* Check if the midr matches this core's midr */
+	cmp	r1, r2
+	bne	1b
+
+	/* Subtract the increment and offset to get the cpu-ops pointer */
+	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+error_exit:
+	bx	lr
+endfunc get_cpu_ops_ptr
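A C model of the get_cpu_ops_ptr lookup above, reusing the cpu_ops_sketch structure from the earlier note (`get_cpu_ops_model` is an illustrative name; the mask value mirrors CPU_IMPL_PN_MASK from cpu_macros.S, which keeps the MIDR implementer bits [31:24] and part number bits [15:4]):

```c
#include <stddef.h>

#define CPU_IMPL_PN_MASK 0xff00fff0ul	/* implementer + part number */

/* Walk the records between the __CPU_OPS_START__/__CPU_OPS_END__ linker
 * symbols and match only on implementer and part number. */
const struct cpu_ops_sketch *
get_cpu_ops_model(const struct cpu_ops_sketch *start,
		  const struct cpu_ops_sketch *end,
		  unsigned long midr)
{
	unsigned long key = midr & CPU_IMPL_PN_MASK;

	for (const struct cpu_ops_sketch *ops = start; ops < end; ops++)
		if ((ops->midr & CPU_IMPL_PN_MASK) == key)
			return ops;

	return NULL;	/* r0 = 0 on failure */
}
```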
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
new file mode 100644
index 00000000..6915ded7
--- /dev/null
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library, e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The secure payload manages the context(s) corresponding to the secure state.
+ * It also uses this library to get access to the non-secure state cpu context
+ * pointers.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize, but
+	 * that will be done when the BSS is zeroed out
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The caller is responsible for allocating the
+ * memory backing 'ctx'; this library does not manage it.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_e1_sysreg_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+	uint32_t scr, sctlr;
+	regs_t *reg_ctx;
+
+	assert(ctx);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Clear any residual register values from the context */
+	memset(ctx, 0, sizeof(*ctx));
+
+	reg_ctx = get_regs_ctx(ctx);
+
+	/*
+	 * Base the context SCR on the current value, adjust for entry point
+	 * specific requirements
+	 */
+	scr = read_scr();
+	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);
+
+	if (security_state != SECURE)
+		scr |= SCR_NS_BIT;
+
+	/*
+	 * Set up SCTLR for the Non Secure context.
+	 * The EE bit is taken from the entrypoint attributes.
+	 * The M, C and I bits must be zero (as required by the PSCI
+	 * specification).
+	 *
+	 * The target exception level is based on the spsr mode requested.
+	 * If execution is requested to hyp mode, HVC is enabled
+	 * via SCR.HCE.
+	 *
+	 * Always compute the SCTLR value and save it in the cpu_context
+	 * - the HYP registers are set up by cm_prepare_el3_exit() as they
+	 * are not part of the stored cpu_context.
+	 *
+	 * TODO: In debug builds the spsr should be validated and checked
+	 * against the CPU support, security state, endianness and pc
+	 */
+	if (security_state != SECURE) {
+		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+		sctlr |= SCTLR_RES1;
+		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
+	}
+
+	if (GET_M32(ep->spsr) == MODE32_hyp)
+		scr |= SCR_HCE_BIT;
+
+	write_ctx_reg(reg_ctx, CTX_SCR, scr);
+	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
+	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);
+
+	/*
+	 * Store the r0-r3 values from the entrypoint into the context.
+	 * Use memcpy as we are in control of the layout of the structures.
+	 */
+	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into the secure or normal
+ * world.
+ *
+ * If execution is requested to hyp mode, HSCTLR is initialized.
+ * If execution is requested to non-secure PL1, and the CPU supports
+ * HYP mode, then HYP mode is disabled by configuring all necessary HYP mode
+ * registers.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t sctlr, scr, hcptr;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
+		if (scr & SCR_HCE_BIT) {
+			/* Use SCTLR value to initialize HSCTLR */
+			sctlr = read_ctx_reg(get_regs_ctx(ctx),
+					     CTX_NS_SCTLR);
+			sctlr |= HSCTLR_RES1;
+			/* Temporarily set the NS bit to access HSCTLR */
+			write_scr(read_scr() | SCR_NS_BIT);
+			/*
+			 * Make sure the write to SCR is complete so that
+			 * we can access HSCTLR
+			 */
+			isb();
+			write_hsctlr(sctlr);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		} else if (read_id_pfr1() &
+			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+			/* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */
+			write_scr(read_scr() | SCR_NS_BIT);
+			isb();
+
+			/* PL2 present but unused, need to disable safely */
+			write_hcr(0);
+
+			/* HSCTLR: can be ignored when bypassing */
+
+			/* HCPTR: disable all traps TCPAC, TTA, TCP */
+			hcptr = read_hcptr();
+			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
+			write_hcptr(hcptr);
+
+			/* Enable EL1 access to timer */
+			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);
+
+			/* Reset CNTVOFF */
+			write64_cntvoff(0);
+
+			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+			write_vpidr(read_midr());
+			write_vmpidr(read_mpidr());
+
+			/*
+			 * Reset VTTBR.
+			 * Needed because cache maintenance operations depend on
+			 * the VMID even when non-secure PL1&0 stage 2 address
+			 * translation is disabled.
+			 */
+			write64_vttbr(0);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		}
+	}
+}
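A hedged usage sketch of the two initialisation entry points above (`init_target_context` is an illustrative name; TF-A headers are assumed for the declarations): the by-index variant is for initialising the context of a *different* core, e.g. on a CPU_ON-style path, while the my-context variant serves the calling core itself.

```c
#include <context_mgmt.h>	/* cm_init_* (added by this patch) */
#include <platform.h>		/* plat_my_core_pos */

static void init_target_context(unsigned int target_idx,
				const entry_point_info_t *ep)
{
	if (target_idx == plat_my_core_pos())
		cm_init_my_context(ep);			/* calling core */
	else
		cm_init_context_by_index(target_idx, ep); /* another core */
}
```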
diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S
new file mode 100644
index 00000000..b97911fb
--- /dev/null
+++ b/lib/el3_runtime/aarch32/cpu_data.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+	.globl	_cpu_data
+	.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data(void)
+ *
+ * Return the cpu_data structure for the current CPU.
+ * -----------------------------------------------------------------
+ */
+func _cpu_data
+	push	{lr}
+	bl	plat_my_core_pos
+	pop	{lr}
+	b	_cpu_data_by_index
+endfunc _cpu_data
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with the given linear
+ * index.
+ *
+ * This can be called without a valid stack.
+ * clobbers: r0, r1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	ldr	r1, =percpu_data
+	add	r0, r1, r0, lsl #CPU_DATA_LOG2SIZE
+	bx	lr
+endfunc _cpu_data_by_index
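The lookup above is just array indexing: the linear core index is scaled by the power-of-two size of cpu_data_t. A minimal C equivalent (the CPU_DATA_LOG2SIZE and PLATFORM_CORE_COUNT values below are placeholders; the real ones come from cpu_data.h and platform_def.h):

```c
#include <stdint.h>

#define CPU_DATA_LOG2SIZE	6	/* placeholder value */
#define PLATFORM_CORE_COUNT	4	/* placeholder value */

typedef struct cpu_data {
	uint8_t raw[1u << CPU_DATA_LOG2SIZE];
} cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

/* add r0, r1, r0, lsl #CPU_DATA_LOG2SIZE -- in C */
cpu_data_t *cpu_data_by_index_model(uint32_t cpu_index)
{
	return &percpu_data[cpu_index];
}
```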
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
new file mode 100644
index 00000000..f3a2bc36
--- /dev/null
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+
+func spin_lock
+	mov	r2, #1
+1:
+	ldrex	r1, [r0]
+	cmp	r1, #0
+	wfene
+	strexeq	r1, r2, [r0]
+	cmpeq	r1, #0
+	bne	1b
+	dmb
+	bx	lr
endfunc spin_lock
+
+
+func spin_unlock
+	mov	r1, #0
+	stl	r1, [r0]
+	bx	lr
+endfunc spin_unlock
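A minimal usage sketch for the lock above (`locked_log` and the emit callback are illustrative; TF-A's spinlock_t is a structure wrapping the lock word, modelled here as a bare 32-bit word that must start as 0, i.e. unlocked):

```c
#include <stdint.h>

typedef uint32_t spinlock_t;	/* 0 = unlocked */

void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);

static spinlock_t console_lock;	/* illustrative lock */

void locked_log(void (*emit)(char), char c)
{
	spin_lock(&console_lock);	/* waits in WFE until the word is free */
	emit(c);			/* critical section */
	spin_unlock(&console_lock);	/* store-release (STL) publishes it */
}
```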
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
new file mode 100644
index 00000000..1ca59123
--- /dev/null
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+
+func spin_lock
+	mov	w2, #1
+	sevl
+l1:	wfe
+l2:	ldaxr	w1, [x0]
+	cbnz	w1, l1
+	stxr	w1, w2, [x0]
+	cbnz	w1, l2
+	ret
+endfunc spin_lock
+
+
+func spin_unlock
+	stlr	wzr, [x0]
+	ret
+endfunc spin_unlock
diff --git a/lib/locks/exclusive/spinlock.S b/lib/locks/exclusive/spinlock.S
index 772f14e9..9c945f9b 100644
--- a/lib/locks/exclusive/spinlock.S
+++ b/lib/locks/exclusive/spinlock.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -28,25 +28,6 @@
 * POSSIBILITY OF SUCH DAMAGE.
 */
 
-#include <asm_macros.S>
-
-	.globl	spin_lock
-	.globl	spin_unlock
-
-
-func spin_lock
-	mov	w2, #1
-	sevl
-l1:	wfe
-l2:	ldaxr	w1, [x0]
-	cbnz	w1, l1
-	stxr	w1, w2, [x0]
-	cbnz	w1, l2
-	ret
-endfunc spin_lock
-
-
-func spin_unlock
-	stlr	wzr, [x0]
-	ret
-endfunc spin_unlock
+#if !ERROR_DEPRECATED
+#include "./aarch64/spinlock.S"
+#endif
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
new file mode 100644
index 00000000..36d5d7d9
--- /dev/null
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+	.globl	psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level which is passed as the argument, i.e. level 0 results
+ * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
+ * for a higher power level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	push	{r4, lr}
+
+	/* ----------------------------------------------
+	 * Turn OFF the cache and do stack maintenance
+	 * prior to cpu operations. This sequence is
+	 * different from AArch64 because in AArch32 the
+	 * assembler routines for cpu operations utilize
+	 * the stack whereas in AArch64 they don't.
+	 * ----------------------------------------------
+	 */
+	mov	r4, r0
+	bl	do_stack_maintenance
+
+	/* ---------------------------------------------
+	 * Determine how many levels of cache will be
+	 * subject to cache maintenance. Power level
+	 * 0 implies that only the cpu is being powered
+	 * down. Only the L1 data cache needs to be
+	 * flushed to the PoU in this case. For a higher
+	 * power level we are assuming that a flush
+	 * of L1 data and L2 unified cache is enough.
+	 * This information should be provided by the
+	 * platform.
+	 * ---------------------------------------------
+	 */
+	cmp	r4, #PSCI_CPU_PWR_LVL
+	pop	{r4, lr}
+
+	beq	prepare_core_pwr_dwn
+	b	prepare_cluster_pwr_dwn
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+	push	{lr}
+
+	/* ---------------------------------------------
+	 * Ensure any inflight stack writes have made it
+	 * to main memory.
+	 * ---------------------------------------------
+	 */
+	dmb	st
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	bl	plat_get_my_stack
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Enable the data cache.
+	 * ---------------------------------------------
+	 */
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_C_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	pop	{pc}
+endfunc psci_do_pwrup_cache_maintenance
+
+	/* ---------------------------------------------
+	 * void do_stack_maintenance(void)
+	 * Do stack maintenance by flushing the used
+	 * stack to main memory and invalidating the
+	 * remainder.
+	 * ---------------------------------------------
+	 */
+func do_stack_maintenance
+	push	{r4, lr}
+	bl	plat_get_my_stack
+
+	/* Turn off the D-cache */
+	ldcopr	r1, SCTLR
+	bic	r1, #SCTLR_C_BIT
+	stcopr	r1, SCTLR
+	isb
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1.
+	 * ---------------------------------------------
+	 */
+	mov	r4, r0
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	flush_dcache_range
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the unused
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	sub	r0, r4, #PLATFORM_STACK_SIZE
+	sub	r1, sp, r0
+	bl	inv_dcache_range
+
+	pop	{r4, pc}
+endfunc do_stack_maintenance
+
+/* -----------------------------------------------------------------------
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. The core should not exit the wfi;
+ * it will be released from reset on the next power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+	dsb	sy		// ensure write buffer empty
+	wfi
+	bl	plat_panic_handler
+endfunc psci_power_down_wfi
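The arithmetic in do_stack_maintenance is easy to misread, so here is a C model of it (`stack_maintenance_model` is an illustrative name, and the PLATFORM_STACK_SIZE value is a placeholder): plat_get_my_stack() returns the stack *top* (highest address), the used region is [sp, top) and the unused remainder is [top - PLATFORM_STACK_SIZE, sp).

```c
#include <stddef.h>
#include <stdint.h>

/* Provided by cache_helpers.S (see earlier in this patch). */
void flush_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);

#define PLATFORM_STACK_SIZE 0x1000	/* placeholder; from platform_def.h */

static void stack_maintenance_model(uintptr_t stack_top, uintptr_t sp)
{
	uintptr_t stack_base = stack_top - PLATFORM_STACK_SIZE;

	flush_dcache_range(sp, stack_top - sp);		/* used: clean+inv */
	inv_dcache_range(stack_base, sp - stack_base);	/* unused: inv */
}
```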
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index e87e8c05..68cdd6eb 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -592,6 +592,53 @@ int psci_validate_mpidr(u_register_t mpidr)
  * This function determines the full entrypoint information for the requested
  * PSCI entrypoint on power on/resume and returns it.
  ******************************************************************************/
+#ifdef AARCH32
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+			       uintptr_t entrypoint,
+			       u_register_t context_id)
+{
+	u_register_t ep_attr;
+	unsigned int aif, ee, mode;
+	u_register_t scr = read_scr();
+	u_register_t ns_sctlr, sctlr;
+
+	/* Switch to non secure state */
+	write_scr(scr | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_sctlr();
+
+	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+	/* Return to original state */
+	write_scr(scr);
+	isb();
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep->pc = entrypoint;
+	memset(&ep->args, 0, sizeof(ep->args));
+	ep->args.arg0 = context_id;
+
+	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Choose async. exception bits if HYP mode is not
+	 * implemented according to the values of SCR.{AW, FW} bits
+	 */
+	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+	return PSCI_E_SUCCESS;
+}
+
+#else
 static int psci_get_ns_ep_info(entry_point_info_t *ep,
 			       uintptr_t entrypoint,
 			       u_register_t context_id)
@@ -646,6 +693,7 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
 	return PSCI_E_SUCCESS;
 }
+#endif
 
 /*******************************************************************************
  * This function validates the entrypoint with the platform layer if the
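A sketch of the SPSR the AArch32 psci_get_ns_ep_info above builds: entry in SVC or HYP mode depending on SCR.HCE, Thumb bit taken from entrypoint bit 0, endianness from SCTLR.EE, and the A/I/F exceptions masked. The macro names are the real ones from arch.h; `build_ns_spsr` is an illustrative helper, not a TF-A function.

```c
#include <stdint.h>
#include <arch.h>	/* MODE32_*, SPSR_*_BIT, SPSR_MODE32 */

static uint32_t build_ns_spsr(uintptr_t entrypoint, int hyp_present,
			      int big_endian)
{
	unsigned int mode = hyp_present ? MODE32_hyp : MODE32_svc;
	unsigned int aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	/* bit 0 of the entrypoint selects the Thumb instruction set */
	return SPSR_MODE32(mode, entrypoint & 0x1u, big_endian, aif);
}
```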
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 662e14a2..8daa8318 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -29,11 +29,10 @@
 #
 PSCI_LIB_SOURCES  :=	lib/el3_runtime/cpu_data_array.c	\
-			lib/el3_runtime/aarch64/context.S	\
-			lib/el3_runtime/aarch64/cpu_data.S	\
-			lib/el3_runtime/aarch64/context_mgmt.c	\
-			lib/cpus/aarch64/cpu_helpers.S		\
-			lib/locks/exclusive/spinlock.S		\
+			lib/el3_runtime/${ARCH}/cpu_data.S	\
+			lib/el3_runtime/${ARCH}/context_mgmt.c	\
+			lib/cpus/${ARCH}/cpu_helpers.S		\
+			lib/locks/exclusive/${ARCH}/spinlock.S	\
 			lib/psci/psci_off.c			\
 			lib/psci/psci_on.c			\
 			lib/psci/psci_suspend.c			\
@@ -41,7 +40,11 @@ PSCI_LIB_SOURCES  :=	lib/el3_runtime/cpu_data_array.c	\
 			lib/psci/psci_main.c			\
 			lib/psci/psci_setup.c			\
 			lib/psci/psci_system_off.c		\
-			lib/psci/aarch64/psci_helpers.S
+			lib/psci/${ARCH}/psci_helpers.S
+
+ifeq (${ARCH}, aarch64)
+PSCI_LIB_SOURCES  +=	lib/el3_runtime/aarch64/context.S
+endif
 
 ifeq (${USE_COHERENT_MEM}, 1)
 PSCI_LIB_SOURCES  +=	lib/locks/bakery/bakery_lock_coherent.c
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index d35e0001..20d06352 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -278,3 +278,15 @@ void psci_arch_setup(void)
 	/* Initialize the cpu_ops pointer. */
 	init_cpu_ops();
 }
+
+/******************************************************************************
+ * PSCI Library interface to initialize the cpu context for the next non
+ * secure image during cold boot. The relevant registers in the cpu context
+ * need to be retrieved and programmed on return from this interface.
+ *****************************************************************************/
+void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
+{
+	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
+	cm_init_my_context(next_image_info);
+	cm_prepare_el3_exit(NON_SECURE);
+}
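A hypothetical caller sketch for the new interface above (`prepare_ns_boot` is an illustrative name): on the AArch32 cold boot path, the secure PL1 runtime (e.g. SP_MIN) would hand the next image's entry point to the PSCI library before exiting to the non-secure world.

```c
#include <psci.h>	/* psci_prepare_next_non_secure_ctx (this patch) */

static void prepare_ns_boot(entry_point_info_t *ns_image_ep)
{
	psci_prepare_next_non_secure_ctx(ns_image_ep);
	/* The cpu context now holds SCR, SPSR and the entry pc; the
	 * caller's exit path restores them when dropping to the
	 * non-secure world. */
}
```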
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
new file mode 100644
index 00000000..a97cf312
--- /dev/null
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <xlat_tables.h>
+#include "../xlat_tables_private.h"
+
+/*
+ * The virtual address space size must be a power of two. As we start the
+ * initial lookup at level 1, it must also be between 2 GB and 4 GB. See
+ * section G4.6.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j)
+ * for more information.
+ */
+CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 32) &&
+	IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
+
+#define NUM_L1_ENTRIES	(ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
+	__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
+
+void init_xlat_tables(void)
+{
+	unsigned long long max_pa;
+	uintptr_t max_va;
+	print_mmap();
+	init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
+	assert(max_va < ADDR_SPACE_SIZE);
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in Secure PL1, assuming that the
+ * page tables have already been created.
+ ******************************************************************************/
+void enable_mmu_secure(unsigned int flags)
+{
+	unsigned int mair0, ttbcr, sctlr;
+	uint64_t ttbr0;
+
+	assert(IS_IN_SECURE());
+	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+
+	/* Set attributes in the right indices of the MAIR */
+	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+	write_mair0(mair0);
+
+	/* Invalidate TLBs at the current exception level */
+	tlbiall();
+
+	/*
+	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
+	 * & outer WBWA & shareable. Disable TTBR1.
+	 */
+	ttbcr = TTBCR_EAE_BIT |
+		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+		TTBCR_RGN0_INNER_WBA |
+		(32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
+	ttbcr |= TTBCR_EPD1_BIT;
+	write_ttbcr(ttbcr);
+
+	/* Set TTBR0 bits as well */
+	ttbr0 = (uintptr_t) l1_xlation_table;
+	write64_ttbr0(ttbr0);
+	write64_ttbr1(0);
+
+	/*
+	 * Ensure all translation table writes have drained
+	 * into memory, the TLB invalidation is complete,
+	 * and translation register writes are committed
+	 * before enabling the MMU
+	 */
+	dsb();
+	isb();
+
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
+
+	if (flags & DISABLE_DCACHE)
+		sctlr &= ~SCTLR_C_BIT;
+	else
+		sctlr |= SCTLR_C_BIT;
+
+	write_sctlr(sctlr);
+
+	/* Ensure the MMU enable takes effect immediately */
+	isb();
+}
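A worked example of the T0SZ term OR-ed into TTBCR above (`ttbcr_t0sz` is an illustrative name): T0SZ = 32 - log2(address space size), so a full 4 GB space gives 0 and the 2 GB minimum gives 1.

```c
#include <assert.h>
#include <stdint.h>

static unsigned int ttbcr_t0sz(uint64_t addr_space_size)
{
	/* power of two in [2 GB, 4 GB], per the CASSERT in the file */
	return 32u - (unsigned int)__builtin_ctzll(addr_space_size);
}

int main(void)
{
	assert(ttbcr_t0sz(1ull << 32) == 0);	/* full 32-bit VA space */
	assert(ttbcr_t0sz(1ull << 31) == 1);	/* 2 GB VA space */
	return 0;
}
```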
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 33784c2d..bc7fed78 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -289,17 +289,17 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
 		if (!mm->size)
 			return attr; /* Reached end of list */
 
-		if (mm->base_va >= base_va + size)
+		if (mm->base_va > base_va + size - 1)
 			return attr; /* Next region is after area so end */
 
-		if (mm->base_va + mm->size <= base_va)
+		if (mm->base_va + mm->size - 1 < base_va)
 			continue; /* Next region has already been overtaken */
 
 		if (mm->attr == attr)
 			continue; /* Region doesn't override attribs so skip */
 
 		if (mm->base_va > base_va ||
-			mm->base_va + mm->size < base_va + size)
+			mm->base_va + mm->size - 1 < base_va + size - 1)
 			return -1; /* Region doesn't fully cover our area */
 
 		attr = mm->attr;
@@ -328,7 +328,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		if (!mm->size) {
 			/* Done mapping regions; finish zeroing the table */
 			desc = INVALID_DESC;
-		} else if (mm->base_va + mm->size <= base_va) {
+		} else if (mm->base_va + mm->size - 1 < base_va) {
 			/* This area is after the region so get next region */
 			++mm;
 			continue;
@@ -337,7 +337,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		debug_print("%s VA:%p size:0x%x ", get_level_spacer(level),
 				(void *)base_va, level_size);
 
-		if (mm->base_va >= base_va + level_size) {
+		if (mm->base_va > base_va + level_size - 1) {
 			/* Next region is after this area. Nothing to map yet */
 			desc = INVALID_DESC;
 		} else {
@@ -369,7 +369,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		*table++ = desc;
 		base_va += level_size;
-	} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
+	} while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));
 
 	return mm;
 }
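Why the `- 1` rewrites in this last diff matter: with a 4 GB address space on a 32-bit uintptr_t, `base_va + size` can wrap around to 0, making the old `mm->base_va >= base_va + size` test spuriously false (or true) for regions touching the top of the address space. Comparing against the last byte of the range instead never overflows for non-empty regions. A small self-contained demonstration (`region_is_after` is an illustrative name):

```c
#include <assert.h>
#include <stdint.h>

/* Overflow-safe form of "does the region start after the area?":
 * the old form was region_base >= area_base + area_size, which wraps
 * when the area ends exactly at 4 GB. */
static int region_is_after(uint32_t region_base, uint32_t area_base,
			   uint32_t area_size)
{
	return region_base > (uint32_t)(area_base + area_size - 1u);
}

int main(void)
{
	/* Area covering the top of the 32-bit space: [0xFFFFF000, 4 GB).
	 * The old test would compare against the wrapped value 0 and
	 * wrongly report the region at 0x1000 as being "after" it. */
	assert(region_is_after(0x1000u, 0xFFFFF000u, 0x1000u) == 0);
	return 0;
}
```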