Diffstat (limited to 'include/common/aarch64')
-rw-r--r--   include/common/aarch64/asm_macros.S        |  76 +-
-rw-r--r--   include/common/aarch64/el3_common_macros.S | 280 +++
2 files changed, 281 insertions(+), 75 deletions(-)
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index e7669899..cc8f4243 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -31,6 +31,7 @@
 #define __ASM_MACROS_S__
 
 #include <arch.h>
+#include <asm_macros_common.S>
 
 
 	.macro	func_prologue
@@ -104,81 +105,6 @@
 	.endif
 	.endm
 
-	/*
-	 * This macro is used to create a function label and place the
-	 * code into a separate text section based on the function name
-	 * to enable elimination of unused code during linking
-	 */
-	.macro func _name
-	.section .text.\_name, "ax"
-	.type \_name, %function
-	.func \_name
-	\_name:
-	.endm
-
-	/*
-	 * This macro is used to mark the end of a function.
-	 */
-	.macro endfunc _name
-	.endfunc
-	.size \_name, . - \_name
-	.endm
-
-	/*
-	 * These macros are used to create function labels for deprecated
-	 * APIs. If ERROR_DEPRECATED is non-zero, the callers of these APIs
-	 * will fail to link and cause a build failure.
-	 */
-#if ERROR_DEPRECATED
-	.macro func_deprecated _name
-	func deprecated\_name
-	.endm
-
-	.macro endfunc_deprecated _name
-	endfunc deprecated\_name
-	.endm
-#else
-	.macro func_deprecated _name
-	func \_name
-	.endm
-
-	.macro endfunc_deprecated _name
-	endfunc \_name
-	.endm
-#endif
-
-	/*
-	 * Helper assembler macro to count trailing zeros. The output is
-	 * populated in the `TZ_COUNT` symbol.
-	 */
-	.macro count_tz _value, _tz_count
-	.if \_value
-	  count_tz "(\_value >> 1)", "(\_tz_count + 1)"
-	.else
-	  .equ TZ_COUNT, (\_tz_count - 1)
-	.endif
-	.endm
-
-	/*
-	 * This macro declares an array of 1 or more stacks, properly
-	 * aligned and in the requested section
-	 */
-#define DEFAULT_STACK_ALIGN	(1 << 6) /* In case the caller doesn't provide alignment */
-
-	.macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
-	count_tz \_align, 0
-	.if (\_align - (1 << TZ_COUNT))
-	  .error "Incorrect stack alignment specified (must be a power of 2)."
-	.endif
-	.if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
-	  .error "Stack size not correctly aligned"
-	.endif
-	.section	\_section, "aw", %nobits
-	.align TZ_COUNT
-	\_name:
-	.space ((\_count) * (\_size)), 0
-	.endm
-
 #if ENABLE_PLAT_COMPAT
 	/*
 	 * This macro calculates the base address of an MP stack using the
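Note that the macros removed above are not simply deleted: the new #include <asm_macros_common.S> at the top of the file suggests they move to a common header, which this diff (limited to 'include/common/aarch64') does not show. As a minimal usage sketch of these macros — the symbol names, stack size and section name below are illustrative assumptions, not part of this patch:

	/* Declare 4 stacks of 0x1000 bytes each in a NOBITS section,
	 * using the default 64-byte alignment. declare_stack verifies at
	 * assembly time that the alignment is a power of 2 and that the
	 * stack size is a multiple of it. */
	declare_stack platform_normal_stacks, tzfw_normal_stacks, 0x1000, 4

	/* func/endfunc place the function in its own .text.<name>
	 * section and record the symbol's type and size, so unused
	 * functions can be garbage-collected at link time. */
	func plat_my_core_pos
	mrs	x0, mpidr_el1
	and	x0, x0, #0xff	/* extract affinity level 0 (illustrative) */
	ret
	endfunc plat_my_core_pos

The per-function sections only pay off when the final link is performed with section garbage collection enabled.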
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
new file mode 100644
index 00000000..9b22a734
--- /dev/null
+++ b/include/common/aarch64/el3_common_macros.S
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EL3_COMMON_MACROS_S__
+#define __EL3_COMMON_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	/*
+	 * Helper macro to initialise EL3 registers we care about.
+	 */
+	.macro el3_arch_init_common _exception_vectors
+	/* ---------------------------------------------------------------------
+	 * Enable the instruction cache, stack pointer and data access alignment
+	 * checks.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el3
+	orr	x0, x0, x1
+	msr	sctlr_el3, x0
+	isb
+
+#if IMAGE_BL31
+	/* ---------------------------------------------------------------------
+	 * Initialise the per-CPU cache pointer to the CPU.
+	 * This is done early so that crash reporting has access to the crash
+	 * stack. Since crash reporting depends on cpu_data to report the
+	 * unhandled exception, not doing so can lead to recursive exceptions
+	 * due to a NULL TPIDR_EL3.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	init_cpu_data_ptr
+#endif /* IMAGE_BL31 */
+
+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el3, x0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Early set RES1 bits in SCR_EL3. Set the EA bit to catch both
+	 * External Aborts and SError Interrupts in EL3, and also the SIF bit
+	 * to disable instruction fetches from Non-secure memory.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
+	msr	scr_el3, x0
+	/* ---------------------------------------------------------------------
+	 * Enable External Aborts and SError Interrupts now that the exception
+	 * vectors have been set up.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/* ---------------------------------------------------------------------
+	 * The initial state of the Architectural feature trap register
+	 * (CPTR_EL3) is unknown and it must be set to a known state. All
+	 * feature traps are disabled. Some bits in this register are marked as
+	 * reserved and should not be modified.
+	 *
+	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
+	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+	 *
+	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
+	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
+	 *  access to trace functionality is not supported, this bit is RES0.
+	 *
+	 * CPTR_EL3.TFP: This causes instructions that access the registers
+	 *  associated with Floating Point and Advanced SIMD execution to trap
+	 *  to EL3 when executed from any exception level, unless trapped to EL1
+	 *  or EL2.
+	 * ---------------------------------------------------------------------
+	 */
+	mrs	x0, cptr_el3
+	bic	w0, w0, #TCPAC_BIT
+	bic	w0, w0, #TTA_BIT
+	bic	w0, w0, #TFP_BIT
+	msr	cptr_el3, x0
+	.endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows an action to be
+ * enabled or disabled.
+ *
+ * _set_endian:
+ *	Whether the macro needs to configure the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ *	Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ *	Whether the macro needs to identify the CPU that is calling it: primary
+ *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *	the platform initialisations, while the secondaries will be put in a
+ *	platform-specific state in the meantime.
+ *
+ *	If the caller knows this macro will only be called by the primary CPU
+ *	then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *	Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *	Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR_EL3 register.
+ * -----------------------------------------------------------------------------
+ */
+	.macro el3_entrypoint_common					\
+		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_memory, _init_c_runtime, _exception_vectors
+
+	.if \_set_endian
+	/* -------------------------------------------------------------
+	 * Set the CPU endianness before doing anything that might
+	 * involve memory reads or writes.
+	 * -------------------------------------------------------------
+	 */
+	mrs	x0, sctlr_el3
+	bic	x0, x0, #SCTLR_EE_BIT
+	msr	sctlr_el3, x0
+	isb
+	.endif /* _set_endian */
+
+	.if \_warm_boot_mailbox
+	/* -------------------------------------------------------------
+	 * This code will be executed for both warm and cold resets.
+	 * Now is the time to distinguish between the two.
+	 * Query the platform entrypoint address; if it is not zero
+	 * then it is a warm boot, so jump to that address.
+	 * -------------------------------------------------------------
+	 */
+	bl	plat_get_my_entrypoint
+	cbz	x0, do_cold_boot
+	br	x0
+
+do_cold_boot:
+	.endif /* _warm_boot_mailbox */
+
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor-specific actions upon reset, e.g. cache and
+	 * TLB invalidations.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	reset_handler
+
+	el3_arch_init_common \_exception_vectors
+
+	.if \_secondary_cold_boot
+	/* -------------------------------------------------------------
+	 * Check if this is a primary or secondary CPU cold boot.
+	 * The primary CPU will set up the platform while the
+	 * secondaries are placed in a platform-specific state until the
+	 * primary CPU performs the necessary actions to bring them out
+	 * of that state and allows entry into the OS.
+	 * -------------------------------------------------------------
+	 */
+	bl	plat_is_my_cpu_primary
+	cbnz	w0, do_primary_cold_boot
+
+	/* This is a cold boot on a secondary CPU */
+	bl	plat_secondary_cold_boot_setup
+	/* plat_secondary_cold_boot_setup() is not supposed to return */
+	bl	el3_panic
+
+do_primary_cold_boot:
+	.endif /* _secondary_cold_boot */
+
+	/* ---------------------------------------------------------------------
+	 * Initialise memory now. Secondary CPU initialisation won't get to this
+	 * point.
+	 * ---------------------------------------------------------------------
+	 */
+
+	.if \_init_memory
+	bl	platform_mem_init
+	.endif /* _init_memory */
+
+	/* ---------------------------------------------------------------------
+	 * Initialise the C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
+	 *       - the .bss section;
+	 *       - the coherent memory section (if any).
+	 *   - Relocate the data section from ROM to RAM, if required.
+	 * ---------------------------------------------------------------------
+	 */
+	.if \_init_c_runtime
+#if IMAGE_BL31
+	/* -------------------------------------------------------------
+	 * Invalidate the RW memory used by the BL31 image. This
+	 * includes the data and NOBITS sections. This is done to
+	 * safeguard against possible corruption of this memory by
+	 * dirty cache lines in a system cache as a result of use by
+	 * an earlier boot loader stage.
+	 * -------------------------------------------------------------
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+#endif /* IMAGE_BL31 */
+
+	ldr	x0, =__BSS_START__
+	ldr	x1, =__BSS_SIZE__
+	bl	zeromem16
+
+#if USE_COHERENT_MEM
+	ldr	x0, =__COHERENT_RAM_START__
+	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem16
+#endif
+
+#if IMAGE_BL1
+	ldr	x0, =__DATA_RAM_START__
+	ldr	x1, =__DATA_ROM_START__
+	ldr	x2, =__DATA_SIZE__
+	bl	memcpy16
+#endif
+	.endif /* _init_c_runtime */
+
+	/* ---------------------------------------------------------------------
+	 * Use SP_EL0 for the C runtime stack.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	spsel, #0
+
+	/* ---------------------------------------------------------------------
+	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+	 * the MMU is enabled. There is no risk of reading stale stack memory
+	 * after enabling the MMU as only the primary CPU is running at the
+	 * moment.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	plat_set_my_stack
+	.endm
+
+#endif /* __EL3_COMMON_MACROS_S__ */
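For context, an EL3 image entry point would be expected to invoke this macro roughly as sketched below. This is illustrative only: the bl1_entrypoint, bl1_exceptions and bl1_main symbols and the chosen parameter values are assumptions, not part of this patch.

	func bl1_entrypoint
	/* Perform the full cold/warm boot sequence: set endianness,
	 * check the warm-boot mailbox, park secondary CPUs, initialise
	 * memory and the C runtime, and program the vector table. */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=1				\
		_secondary_cold_boot=1				\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=bl1_exceptions

	/* EL3 is now architecturally initialised and SP_EL0 points to
	 * this CPU's stack, so it is safe to enter C code. */
	bl	bl1_main
	endfunc bl1_entrypoint

Because each parameter is evaluated with .if at assembly time, any step that is disabled generates no code in the resulting image.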