From c45f627de4a17d4be6727cbf357e7fb54479a563 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Wed, 20 Jul 2016 14:38:36 +0100 Subject: Move SIZE_FROM_LOG2_WORDS macro to utils.h This patch moves the macro SIZE_FROM_LOG2_WORDS() defined in `arch.h` to `utils.h` as it is utility macro. Change-Id: Ia8171a226978f053a1ee4037f80142c0a4d21430 --- include/lib/aarch64/arch.h | 4 +--- include/lib/utils.h | 2 ++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 07bbd899..fa5cb12b 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -334,8 +334,6 @@ #define CTR_IMINLINE_MASK 0xf #define MAX_CACHE_LINE_SIZE 0x800 /* 2KB */ -#define SIZE_FROM_LOG2_WORDS(n) (4 << (n)) - /* Physical timer control register bit fields shifts and masks */ #define CNTP_CTL_ENABLE_SHIFT 0 diff --git a/include/lib/utils.h b/include/lib/utils.h index 0936cbb3..a234e3c9 100644 --- a/include/lib/utils.h +++ b/include/lib/utils.h @@ -38,6 +38,8 @@ #define IS_POWER_OF_TWO(x) \ (((x) & ((x) - 1)) == 0) +#define SIZE_FROM_LOG2_WORDS(n) (4 << (n)) + /* * The round_up() macro rounds up a value to the given boundary in a * type-agnostic yet type-safe manner. The boundary must be a power of two. -- cgit From 031dbb122472175ad6e888f3c6e0a70d1b2e9ac7 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Mon, 9 May 2016 17:49:55 +0100 Subject: AArch32: Add essential Arch helpers This patch adds the essential AArch32 architecture helpers arch.h and arch_helpers.h and modifies `_types.h` to add AArch32 support. A new build option `ARCH` is defined in the top level makefile to enable the component makefiles to choose the right files based on the Architecture it is being build for. Depending on this flag, either `AARCH32` or `AARCH64` flag is defined by the Makefile. The default value of `ARCH` flag is `aarch64`. The AArch32 build support will be added in a later patch. Change-Id: I405e5fac02db828a55cd25989b572b64cb005241 --- include/lib/aarch32/arch.h | 408 ++++++++++++++++++++++++++++++++++++ include/lib/aarch32/arch_helpers.h | 292 ++++++++++++++++++++++++++ include/lib/stdlib/machine/_types.h | 58 +++-- 3 files changed, 744 insertions(+), 14 deletions(-) create mode 100644 include/lib/aarch32/arch.h create mode 100644 include/lib/aarch32/arch_helpers.h (limited to 'include') diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h new file mode 100644 index 00000000..e571ddcb --- /dev/null +++ b/include/lib/aarch32/arch.h @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
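As an illustrative aside (not part of the patch): SIZE_FROM_LOG2_WORDS() converts a log2(word count) field, such as CTR.DminLine, into a size in bytes.

	size_t line_len = SIZE_FROM_LOG2_WORDS(4);	/* 4 << 4: 16 words, i.e. a 64-byte cache line */
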
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ARCH_H__ +#define __ARCH_H__ + +/******************************************************************************* + * MIDR bit definitions + ******************************************************************************/ +#define MIDR_IMPL_MASK 0xff +#define MIDR_IMPL_SHIFT 24 +#define MIDR_VAR_SHIFT 20 +#define MIDR_VAR_BITS 4 +#define MIDR_REV_SHIFT 0 +#define MIDR_REV_BITS 4 +#define MIDR_PN_MASK 0xfff +#define MIDR_PN_SHIFT 4 + +/******************************************************************************* + * MPIDR macros + ******************************************************************************/ +#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK +#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS) +#define MPIDR_AFFINITY_BITS 8 +#define MPIDR_AFFLVL_MASK 0xff +#define MPIDR_AFFLVL_SHIFT 3 +#define MPIDR_AFF0_SHIFT 0 +#define MPIDR_AFF1_SHIFT 8 +#define MPIDR_AFF2_SHIFT 16 +#define MPIDR_AFFINITY_MASK 0x00ffffff +#define MPIDR_AFFLVL0 0 +#define MPIDR_AFFLVL1 1 +#define MPIDR_AFFLVL2 2 + +#define MPIDR_AFFLVL0_VAL(mpidr) \ + (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK) +#define MPIDR_AFFLVL1_VAL(mpidr) \ + (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK) +#define MPIDR_AFFLVL2_VAL(mpidr) \ + (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK) + +/* + * The MPIDR_MAX_AFFLVL count starts from 0. Take care to + * add one while using this macro to define array sizes. 
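As a hedged sketch (not part of the patch; the helper name and the four-cores-per-cluster assumption are illustrative), the affinity extraction macros and MPIDR_MAX_AFFLVL are typically used as follows:

	/* Derive a linear core index from an MPIDR value (assumes 4 cores per cluster) */
	static inline unsigned int core_pos_from_mpidr(u_register_t mpidr)
	{
		unsigned int core = MPIDR_AFFLVL0_VAL(mpidr);		/* Aff0: core within cluster */
		unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr);	/* Aff1: cluster */

		return (cluster * 4) + core;
	}

	/* The level count starts at 0, so per-level arrays need MPIDR_MAX_AFFLVL + 1 entries */
	static unsigned char level_state[MPIDR_MAX_AFFLVL + 1];
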
+ */ +#define MPIDR_MAX_AFFLVL 2 + +/* Data Cache set/way op type defines */ +#define DC_OP_ISW 0x0 +#define DC_OP_CISW 0x1 +#define DC_OP_CSW 0x2 + +/******************************************************************************* + * Generic timer memory mapped registers & offsets + ******************************************************************************/ +#define CNTCR_OFF 0x000 +#define CNTFID_OFF 0x020 + +#define CNTCR_EN (1 << 0) +#define CNTCR_HDBG (1 << 1) +#define CNTCR_FCREQ(x) ((x) << 8) + +/******************************************************************************* + * System register bit definitions + ******************************************************************************/ +/* CLIDR definitions */ +#define LOUIS_SHIFT 21 +#define LOC_SHIFT 24 +#define CLIDR_FIELD_WIDTH 3 + +/* CSSELR definitions */ +#define LEVEL_SHIFT 1 + +/* ID_PFR1 definitions */ +#define ID_PFR1_VIRTEXT_SHIFT 12 +#define ID_PFR1_VIRTEXT_MASK 0xf +#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \ + & ID_PFR1_VIRTEXT_MASK) +#define ID_PFR1_GIC_SHIFT 28 +#define ID_PFR1_GIC_MASK 0xf + +/* SCTLR definitions */ +#define SCTLR_RES1 ((1 << 23) | (1 << 22) | (1 << 11) | (1 << 4) | \ + (1 << 3) | SCTLR_CP15BEN_BIT | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT) +#define SCTLR_M_BIT (1 << 0) +#define SCTLR_A_BIT (1 << 1) +#define SCTLR_C_BIT (1 << 2) +#define SCTLR_CP15BEN_BIT (1 << 5) +#define SCTLR_ITD_BIT (1 << 7) +#define SCTLR_I_BIT (1 << 12) +#define SCTLR_V_BIT (1 << 13) +#define SCTLR_NTWI_BIT (1 << 16) +#define SCTLR_NTWE_BIT (1 << 18) +#define SCTLR_WXN_BIT (1 << 19) +#define SCTLR_UWXN_BIT (1 << 20) +#define SCTLR_EE_BIT (1 << 25) +#define SCTLR_TRE_BIT (1 << 28) +#define SCTLR_AFE_BIT (1 << 29) +#define SCTLR_TE_BIT (1 << 30) + +/* HSCTLR definitions */ +#define HSCTLR_RES1 ((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22) \ + | (1 << 18) | (1 << 16) | (1 << 11) | (1 << 4) \ + | (1 << 3) | HSCTLR_CP15BEN_BIT) +#define HSCTLR_M_BIT (1 << 0) +#define HSCTLR_A_BIT (1 << 1) +#define HSCTLR_C_BIT (1 << 2) +#define HSCTLR_CP15BEN_BIT (1 << 5) +#define HSCTLR_ITD_BIT (1 << 7) +#define HSCTLR_SED_BIT (1 << 8) +#define HSCTLR_I_BIT (1 << 12) +#define HSCTLR_WXN_BIT (1 << 19) +#define HSCTLR_EE_BIT (1 << 25) +#define HSCTLR_TE_BIT (1 << 30) + +/* CPACR definitions */ +#define CPACR_FPEN(x) ((x) << 20) +#define CPACR_FP_TRAP_PL0 0x1 +#define CPACR_FP_TRAP_ALL 0x2 +#define CPACR_FP_TRAP_NONE 0x3 + +/* SCR definitions */ +#define SCR_TWE_BIT (1 << 13) +#define SCR_TWI_BIT (1 << 12) +#define SCR_SIF_BIT (1 << 9) +#define SCR_HCE_BIT (1 << 8) +#define SCR_SCD_BIT (1 << 7) +#define SCR_NET_BIT (1 << 6) +#define SCR_AW_BIT (1 << 5) +#define SCR_FW_BIT (1 << 4) +#define SCR_EA_BIT (1 << 3) +#define SCR_FIQ_BIT (1 << 2) +#define SCR_IRQ_BIT (1 << 1) +#define SCR_NS_BIT (1 << 0) +#define SCR_VALID_BIT_MASK 0x33ff + +#define GET_NS_BIT(scr) ((scr) & SCR_NS_BIT) + +/* HCR definitions */ +#define HCR_AMO_BIT (1 << 5) +#define HCR_IMO_BIT (1 << 4) +#define HCR_FMO_BIT (1 << 3) + +/* CNTHCTL definitions */ +#define EVNTEN_BIT (1 << 2) +#define PL1PCEN_BIT (1 << 1) +#define PL1PCTEN_BIT (1 << 0) + +/* CNTKCTL definitions */ +#define PL0PTEN_BIT (1 << 9) +#define PL0VTEN_BIT (1 << 8) +#define PL0PCTEN_BIT (1 << 0) +#define PL0VCTEN_BIT (1 << 1) +#define EVNTEN_BIT (1 << 2) +#define EVNTDIR_BIT (1 << 3) +#define EVNTI_SHIFT 4 +#define EVNTI_MASK 0xf + +/* HCPTR definitions */ +#define TCPAC_BIT (1 << 31) +#define TTA_BIT (1 << 20) +#define TCP11_BIT (1 << 10) +#define TCP10_BIT (1 << 10) + +/* NASCR definitions */ 
+#define NSASEDIS_BIT (1 << 15) +#define NASCR_CP11_BIT (1 << 11) +#define NASCR_CP10_BIT (1 << 10) + +/* CPACR definitions */ +#define ASEDIS_BIT (1 << 31) +#define TRCDIS_BIT (1 << 28) +#define CPACR_CP11_SHIFT 22 +#define CPACR_CP10_SHIFT 20 +#define CPACR_ENABLE_FP_ACCESS (0x3 << CPACR_CP11_SHIFT |\ + 0x3 << CPACR_CP10_SHIFT) + +/* FPEXC definitions */ +#define FPEXC_EN_BIT (1 << 30) + +/* SPSR/CPSR definitions */ +#define SPSR_FIQ_BIT (1 << 0) +#define SPSR_IRQ_BIT (1 << 1) +#define SPSR_ABT_BIT (1 << 2) +#define SPSR_AIF_SHIFT 6 +#define SPSR_AIF_MASK 0x7 + +#define SPSR_E_SHIFT 9 +#define SPSR_E_MASK 0x1 +#define SPSR_E_LITTLE 0 +#define SPSR_E_BIG 1 + +#define SPSR_T_SHIFT 5 +#define SPSR_T_MASK 0x1 +#define SPSR_T_ARM 0 +#define SPSR_T_THUMB 1 + +#define SPSR_MODE_SHIFT 0 +#define SPSR_MODE_MASK 0x7 + + +#define DISABLE_ALL_EXCEPTIONS \ + (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT) + +/* + * TTBCR definitions + */ +/* The ARM Trusted Firmware uses the long descriptor format */ +#define TTBCR_EAE_BIT (1 << 31) + +#define TTBCR_SH1_NON_SHAREABLE (0x0 << 28) +#define TTBCR_SH1_OUTER_SHAREABLE (0x2 << 28) +#define TTBCR_SH1_INNER_SHAREABLE (0x3 << 28) + +#define TTBCR_RGN1_OUTER_NC (0x0 << 26) +#define TTBCR_RGN1_OUTER_WBA (0x1 << 26) +#define TTBCR_RGN1_OUTER_WT (0x2 << 26) +#define TTBCR_RGN1_OUTER_WBNA (0x3 << 26) + +#define TTBCR_RGN1_INNER_NC (0x0 << 24) +#define TTBCR_RGN1_INNER_WBA (0x1 << 24) +#define TTBCR_RGN1_INNER_WT (0x2 << 24) +#define TTBCR_RGN1_INNER_WBNA (0x3 << 24) + +#define TTBCR_EPD1_BIT (1 << 23) +#define TTBCR_A1_BIT (1 << 22) + +#define TTBCR_T1SZ_SHIFT 16 +#define TTBCR_T1SZ_MASK (0x7) + +#define TTBCR_SH0_NON_SHAREABLE (0x0 << 12) +#define TTBCR_SH0_OUTER_SHAREABLE (0x2 << 12) +#define TTBCR_SH0_INNER_SHAREABLE (0x3 << 12) + +#define TTBCR_RGN0_OUTER_NC (0x0 << 10) +#define TTBCR_RGN0_OUTER_WBA (0x1 << 10) +#define TTBCR_RGN0_OUTER_WT (0x2 << 10) +#define TTBCR_RGN0_OUTER_WBNA (0x3 << 10) + +#define TTBCR_RGN0_INNER_NC (0x0 << 8) +#define TTBCR_RGN0_INNER_WBA (0x1 << 8) +#define TTBCR_RGN0_INNER_WT (0x2 << 8) +#define TTBCR_RGN0_INNER_WBNA (0x3 << 8) + +#define TTBCR_EPD0_BIT (1 << 7) +#define TTBCR_T0SZ_SHIFT 0 +#define TTBCR_T0SZ_MASK (0x7) + +#define MODE_RW_SHIFT 0x4 +#define MODE_RW_MASK 0x1 +#define MODE_RW_32 0x1 + +#define MODE32_SHIFT 0 +#define MODE32_MASK 0x1f +#define MODE32_usr 0x10 +#define MODE32_fiq 0x11 +#define MODE32_irq 0x12 +#define MODE32_svc 0x13 +#define MODE32_mon 0x16 +#define MODE32_abt 0x17 +#define MODE32_hyp 0x1a +#define MODE32_und 0x1b +#define MODE32_sys 0x1f + +#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK) + +#define SPSR_MODE32(mode, isa, endian, aif) \ + (MODE_RW_32 << MODE_RW_SHIFT | \ + ((mode) & MODE32_MASK) << MODE32_SHIFT | \ + ((isa) & SPSR_T_MASK) << SPSR_T_SHIFT | \ + ((endian) & SPSR_E_MASK) << SPSR_E_SHIFT | \ + ((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT) + +/* + * CTR definitions + */ +#define CTR_CWG_SHIFT 24 +#define CTR_CWG_MASK 0xf +#define CTR_ERG_SHIFT 20 +#define CTR_ERG_MASK 0xf +#define CTR_DMINLINE_SHIFT 16 +#define CTR_DMINLINE_WIDTH 4 +#define CTR_DMINLINE_MASK ((1 << 4) - 1) +#define CTR_L1IP_SHIFT 14 +#define CTR_L1IP_MASK 0x3 +#define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf + +#define MAX_CACHE_LINE_SIZE 0x800 /* 2KB */ + +/******************************************************************************* + * Definitions of register offsets and fields in the CNTCTLBase Frame of the + * system level implementation of the Generic Timer. 
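As an aside (not part of the patch), the SPSR_MODE32() macro above is how callers compose a target SPSR; for example, SVC mode, ARM state, little-endian, with aborts, IRQs and FIQs masked:

	uint32_t spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
				    DISABLE_ALL_EXCEPTIONS);
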
+ ******************************************************************************/ +#define CNTNSAR 0x4 +#define CNTNSAR_NS_SHIFT(x) (x) + +#define CNTACR_BASE(x) (0x40 + ((x) << 2)) +#define CNTACR_RPCT_SHIFT 0x0 +#define CNTACR_RVCT_SHIFT 0x1 +#define CNTACR_RFRQ_SHIFT 0x2 +#define CNTACR_RVOFF_SHIFT 0x3 +#define CNTACR_RWVT_SHIFT 0x4 +#define CNTACR_RWPT_SHIFT 0x5 + +/* MAIR macros */ +#define MAIR0_ATTR_SET(attr, index) ((attr) << ((index) << 3)) +#define MAIR1_ATTR_SET(attr, index) ((attr) << (((index) - 3) << 3)) + +/* System register defines The format is: coproc, opt1, CRn, CRm, opt2 */ +#define SCR p15, 0, c1, c1, 0 +#define SCTLR p15, 0, c1, c0, 0 +#define MPIDR p15, 0, c0, c0, 5 +#define MIDR p15, 0, c0, c0, 0 +#define VBAR p15, 0, c12, c0, 0 +#define MVBAR p15, 0, c12, c0, 1 +#define NSACR p15, 0, c1, c1, 2 +#define CPACR p15, 0, c1, c0, 2 +#define DCCIMVAC p15, 0, c7, c14, 1 +#define DCCMVAC p15, 0, c7, c10, 1 +#define DCIMVAC p15, 0, c7, c6, 1 +#define DCCISW p15, 0, c7, c14, 2 +#define DCCSW p15, 0, c7, c10, 2 +#define DCISW p15, 0, c7, c6, 2 +#define CTR p15, 0, c0, c0, 1 +#define CNTFRQ p15, 0, c14, c0, 0 +#define ID_PFR1 p15, 0, c0, c1, 1 +#define MAIR0 p15, 0, c10, c2, 0 +#define MAIR1 p15, 0, c10, c2, 1 +#define TTBCR p15, 0, c2, c0, 2 +#define TTBR0 p15, 0, c2, c0, 0 +#define TTBR1 p15, 0, c2, c0, 1 +#define TLBIALL p15, 0, c8, c7, 0 +#define TLBIALLIS p15, 0, c8, c3, 0 +#define TLBIMVA p15, 0, c8, c7, 1 +#define TLBIMVAA p15, 0, c8, c7, 3 +#define HSCTLR p15, 4, c1, c0, 0 +#define HCR p15, 4, c1, c1, 0 +#define HCPTR p15, 4, c1, c1, 2 +#define CNTHCTL p15, 4, c14, c1, 0 +#define VPIDR p15, 4, c0, c0, 0 +#define VMPIDR p15, 4, c0, c0, 5 +#define ISR p15, 0, c12, c1, 0 +#define CLIDR p15, 1, c0, c0, 1 +#define CSSELR p15, 2, c0, c0, 0 +#define CCSIDR p15, 1, c0, c0, 0 + +/* GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRn, CRm, opt2 */ +#define ICC_IAR1 p15, 0, c12, c12, 0 +#define ICC_IAR0 p15, 0, c12, c8, 0 +#define ICC_EOIR1 p15, 0, c12, c12, 1 +#define ICC_EOIR0 p15, 0, c12, c8, 1 +#define ICC_HPPIR1 p15, 0, c12, c12, 2 +#define ICC_HPPIR0 p15, 0, c12, c8, 2 +#define ICC_BPR1 p15, 0, c12, c12, 3 +#define ICC_BPR0 p15, 0, c12, c8, 3 +#define ICC_DIR p15, 0, c12, c11, 1 +#define ICC_PMR p15, 0, c4, c6, 0 +#define ICC_RPR p15, 0, c12, c11, 3 +#define ICC_CTLR p15, 0, c12, c12, 4 +#define ICC_MCTLR p15, 6, c12, c12, 4 +#define ICC_SRE p15, 0, c12, c12, 5 +#define ICC_HSRE p15, 4, c12, c9, 5 +#define ICC_MSRE p15, 6, c12, c12, 5 +#define ICC_IGRPEN0 p15, 0, c12, c12, 6 +#define ICC_IGRPEN1 p15, 0, c12, c12, 7 +#define ICC_MGRPEN1 p15, 6, c12, c12, 7 + +/* 64 bit system register defines The format is: coproc, opt1, CRm */ +#define TTBR0_64 p15, 0, c2 +#define TTBR1_64 p15, 1, c2 +#define CNTVOFF_64 p15, 4, c14 +#define VTTBR_64 p15, 6, c2 +#define CNTPCT_64 p15, 0, c14 + +/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */ +#define ICC_SGI1R_EL1_64 p15, 0, c12 +#define ICC_ASGI1R_EL1_64 p15, 1, c12 +#define ICC_SGI0R_EL1_64 p15, 2, c12 + +#endif /* __ARCH_H__ */ diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h new file mode 100644 index 00000000..ddf660b1 --- /dev/null +++ b/include/lib/aarch32/arch_helpers.h @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ARCH_HELPERS_H__ +#define __ARCH_HELPERS_H__ + +#include /* for additional register definitions */ +#include +#include + +/********************************************************************** + * Macros which create inline functions to read or write CPU system + * registers + *********************************************************************/ + +#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \ +static inline void write_## _name(u_register_t v) \ +{ \ + __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\ +} + +#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \ +static inline u_register_t read_ ## _name(void) \ +{ \ + u_register_t v; \ + __asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\ + return v; \ +} + +/* + * The undocumented %Q and %R extended asm are used to implemented the below + * 64 bit `mrrc` and `mcrr` instructions. It works only on Little Endian + * systems for GCC versions < 4.6. Above GCC 4.6, both Little Endian and + * Big Endian systems generate the right instruction encoding. 
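For illustration only (not part of the patch): with SCR defined in arch.h as p15, 0, c1, c1, 0, the DEFINE_COPROCR_RW_FUNCS(scr, SCR) invocation further down generates accessors roughly equivalent to:

	static inline u_register_t read_scr(void)
	{
		u_register_t v;
		__asm__ volatile ("mrc p15,0,%0,c1,c1,0" : "=r" (v));
		return v;
	}

	static inline void write_scr(u_register_t v)
	{
		__asm__ volatile ("mcr p15,0,%0,c1,c1,0" : : "r" (v));
	}
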
+ */ +#if !(__GNUC__ > (4) || __GNUC__ == (4) && __GNUC_MINOR__ >= (6)) +#error "GCC 4.6 or above is required to build AArch32 Trusted Firmware" +#endif + +#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm) \ +static inline void write64_## _name(uint64_t v) \ +{ \ + __asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\ +} + +#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm) \ +static inline uint64_t read64_## _name(void) \ +{ uint64_t v; \ + __asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\ + return v; \ +} + +#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \ +static inline u_register_t read_ ## _name(void) \ +{ \ + u_register_t v; \ + __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \ + return v; \ +} + +#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \ +static inline void write_ ## _name(u_register_t v) \ +{ \ + __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \ +} + +#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name) \ +static inline void write_ ## _name(const u_register_t v) \ +{ \ + __asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v)); \ +} + +/* Define read function for coproc register */ +#define DEFINE_COPROCR_READ_FUNC(_name, ...) \ + _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__) + +/* Define read & write function for coproc register */ +#define DEFINE_COPROCR_RW_FUNCS(_name, ...) \ + _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__) \ + _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__) + +/* Define 64 bit read function for coproc register */ +#define DEFINE_COPROCR_READ_FUNC_64(_name, ...) \ + _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__) + +/* Define 64 bit read & write function for coproc register */ +#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...) \ + _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__) \ + _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__) + +/* Define read & write function for system register */ +#define DEFINE_SYSREG_RW_FUNCS(_name) \ + _DEFINE_SYSREG_READ_FUNC(_name, _name) \ + _DEFINE_SYSREG_WRITE_FUNC(_name, _name) + +/********************************************************************** + * Macros to create inline functions for tlbi operations + *********************************************************************/ + +#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \ +static inline void tlbi##_op(void) \ +{ \ + u_register_t v = 0; \ + __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\ +} + +#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \ +static inline void tlbi##_op(u_register_t v) \ +{ \ + __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\ +} + +/* Define function for simple TLBI operation */ +#define DEFINE_TLBIOP_FUNC(_op, ...) \ + _DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__) + +/* Define function for TLBI operation with register parameter */ +#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...) \ + _DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__) + +/********************************************************************** + * Macros to create inline functions for DC operations + *********************************************************************/ +#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \ +static inline void dc##_op(u_register_t v) \ +{ \ + __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\ +} + +/* Define function for DC operation with register parameter */ +#define DEFINE_DCOP_PARAM_FUNC(_op, ...) 
\ + _DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__) + +/********************************************************************** + * Macros to create inline functions for system instructions + *********************************************************************/ + /* Define function for simple system instruction */ +#define DEFINE_SYSOP_FUNC(_op) \ +static inline void _op(void) \ +{ \ + __asm__ (#_op); \ +} + + +/* Define function for system instruction with type specifier */ +#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \ +static inline void _op ## _type(void) \ +{ \ + __asm__ (#_op " " #_type); \ +} + +/* Define function for system instruction with register parameter */ +#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \ +static inline void _op ## _type(u_register_t v) \ +{ \ + __asm__ (#_op " " #_type ", %0" : : "r" (v)); \ +} + +void flush_dcache_range(uintptr_t addr, size_t size); +void clean_dcache_range(uintptr_t addr, size_t size); +void inv_dcache_range(uintptr_t addr, size_t size); + +DEFINE_SYSOP_FUNC(wfi) +DEFINE_SYSOP_FUNC(wfe) +DEFINE_SYSOP_FUNC(sev) +DEFINE_SYSOP_TYPE_FUNC(dsb, sy) +DEFINE_SYSOP_TYPE_FUNC(dmb, sy) +DEFINE_SYSOP_TYPE_FUNC(dsb, ish) +DEFINE_SYSOP_TYPE_FUNC(dmb, ish) +DEFINE_SYSOP_FUNC(isb) + +DEFINE_SYSREG_RW_FUNCS(spsr) +DEFINE_SYSREG_RW_FUNCS(cpsr) + +/******************************************************************************* + * System register accessor prototypes + ******************************************************************************/ +DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR) +DEFINE_COPROCR_READ_FUNC(midr, MIDR) +DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1) +DEFINE_COPROCR_READ_FUNC(isr, ISR) +DEFINE_COPROCR_READ_FUNC(clidr, CLIDR) +DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64) + +DEFINE_COPROCR_RW_FUNCS(scr, SCR) +DEFINE_COPROCR_RW_FUNCS(ctr, CTR) +DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR) +DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR) +DEFINE_COPROCR_RW_FUNCS(hcr, HCR) +DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR) +DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ) +DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL) +DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0) +DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1) +DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR) +DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0) +DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64) +DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1) +DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR) +DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR) +DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64) +DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64) +DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64) +DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR) + +DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE) +DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE) +DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE) +DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR) +DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1) +DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0) +DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0) +DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1) +DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0) +DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1) +DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0) +DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1) + +/* + * TLBI operation prototypes + */ +DEFINE_TLBIOP_FUNC(all, TLBIALL) +DEFINE_TLBIOP_FUNC(allis, TLBIALLIS) +DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA) +DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA) + +/* + * DC operation prototypes + */ +DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC) +DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC) +DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC) + +/* Previously defined accessor 
functions with incomplete register names */ +#define dsb() dsbsy() + +#define IS_IN_SECURE() \ + (GET_NS_BIT(read_scr()) == 0) + + /* + * If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3 + */ +#define IS_IN_EL3() \ + ((GET_M32(read_cpsr()) == MODE32_mon) || \ + (IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr))) + +/* Macros for compatibility with AArch64 system registers */ +#define read_mpidr_el1() read_mpidr() + +#define read_scr_el3() read_scr() +#define write_scr_el3(_v) write_scr(_v) + +#define read_hcr_el2() read_hcr() +#define write_hcr_el2(_v) write_hcr(_v) + +#define read_cpacr_el1() read_cpacr() +#define write_cpacr_el1(_v) write_cpacr(_v) + +#define read_cntfrq_el0() read_cntfrq() +#define write_cntfrq_el0(_v) write_cntfrq(_v) +#define read_isr_el1() read_isr() + +#define read_cntpct_el0() read64_cntpct() + +#endif /* __ARCH_HELPERS_H__ */ diff --git a/include/lib/stdlib/machine/_types.h b/include/lib/stdlib/machine/_types.h index 7e993c4c..fb1083b7 100644 --- a/include/lib/stdlib/machine/_types.h +++ b/include/lib/stdlib/machine/_types.h @@ -31,6 +31,10 @@ * From: @(#)types.h 8.3 (Berkeley) 1/5/94 * $FreeBSD$ */ +/* + * Portions copyright (c) 2016, ARM Limited and Contributors. + * All rights reserved. + */ #ifndef _MACHINE__TYPES_H_ #define _MACHINE__TYPES_H_ @@ -48,19 +52,56 @@ typedef short __int16_t; typedef unsigned short __uint16_t; typedef int __int32_t; typedef unsigned int __uint32_t; + + +/* + * Standard type definitions which are different in AArch64 and AArch32 + */ +#ifdef AARCH32 +typedef long long __int64_t; +typedef unsigned long long __uint64_t; +typedef __int32_t __critical_t; +typedef __int32_t __intfptr_t; +typedef __int32_t __intptr_t; +typedef __int32_t __ptrdiff_t; /* ptr1 - ptr2 */ +typedef __int32_t __register_t; +typedef __int32_t __segsz_t; /* segment size (in pages) */ +typedef __uint32_t __size_t; /* sizeof() */ +typedef __int32_t __ssize_t; /* byte count or error */ +typedef __uint32_t __uintfptr_t; +typedef __uint32_t __uintptr_t; +typedef __uint32_t __u_register_t; +typedef __uint32_t __vm_offset_t; +typedef __uint32_t __vm_paddr_t; +typedef __uint32_t __vm_size_t; +#elif defined AARCH64 typedef long __int64_t; typedef unsigned long __uint64_t; +typedef __int64_t __critical_t; +typedef __int64_t __intfptr_t; +typedef __int64_t __intptr_t; +typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */ +typedef __int64_t __register_t; +typedef __int64_t __segsz_t; /* segment size (in pages) */ +typedef __uint64_t __size_t; /* sizeof() */ +typedef __int64_t __ssize_t; /* byte count or error */ +typedef __uint64_t __uintfptr_t; +typedef __uint64_t __uintptr_t; +typedef __uint64_t __u_register_t; +typedef __uint64_t __vm_offset_t; +typedef __uint64_t __vm_paddr_t; +typedef __uint64_t __vm_size_t; +#else +#error "Only AArch32 or AArch64 supported" +#endif /* AARCH32 */ /* * Standard type definitions. */ typedef __int32_t __clock_t; /* clock()... 
*/ -typedef __int64_t __critical_t; typedef double __double_t; typedef float __float_t; -typedef __int64_t __intfptr_t; typedef __int64_t __intmax_t; -typedef __int64_t __intptr_t; typedef __int32_t __int_fast8_t; typedef __int32_t __int_fast16_t; typedef __int32_t __int_fast32_t; @@ -69,15 +110,8 @@ typedef __int8_t __int_least8_t; typedef __int16_t __int_least16_t; typedef __int32_t __int_least32_t; typedef __int64_t __int_least64_t; -typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */ -typedef __int64_t __register_t; -typedef __int64_t __segsz_t; /* segment size (in pages) */ -typedef __uint64_t __size_t; /* sizeof() */ -typedef __int64_t __ssize_t; /* byte count or error */ typedef __int64_t __time_t; /* time()... */ -typedef __uint64_t __uintfptr_t; typedef __uint64_t __uintmax_t; -typedef __uint64_t __uintptr_t; typedef __uint32_t __uint_fast8_t; typedef __uint32_t __uint_fast16_t; typedef __uint32_t __uint_fast32_t; @@ -86,12 +120,8 @@ typedef __uint8_t __uint_least8_t; typedef __uint16_t __uint_least16_t; typedef __uint32_t __uint_least32_t; typedef __uint64_t __uint_least64_t; -typedef __uint64_t __u_register_t; -typedef __uint64_t __vm_offset_t; typedef __int64_t __vm_ooffset_t; -typedef __uint64_t __vm_paddr_t; typedef __uint64_t __vm_pindex_t; -typedef __uint64_t __vm_size_t; /* * Unusual type definitions. -- cgit From f24307dec43332f2846bf18197ec7d113386c220 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 12:31:57 +0100 Subject: AArch32: Add assembly helpers This patch adds various assembly helpers for AArch32 like : * cache management : Functions to flush, invalidate and clean cache by MVA. Also helpers to do cache operations by set-way are also added. * stack management: Macros to declare stack and get the current stack corresponding to current CPU. * Misc: Macros to access co processor registers in AArch32, macros to define functions in assembly, assert macros, generic `do_panic()` implementation and function to zero block of memory. Change-Id: I7b78ca3f922c0eda39beb9786b7150e9193425be --- include/common/aarch32/asm_macros.S | 95 ++++++++++++++++++++++++++++++++++ include/common/aarch32/assert_macros.S | 50 ++++++++++++++++++ include/common/asm_macros_common.S | 2 - 3 files changed, 145 insertions(+), 2 deletions(-) create mode 100644 include/common/aarch32/asm_macros.S create mode 100644 include/common/aarch32/assert_macros.S (limited to 'include') diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S new file mode 100644 index 00000000..11e45bbf --- /dev/null +++ b/include/common/aarch32/asm_macros.S @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
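As a sketch (not part of the patch; the buffer and function are made up), the MVA-based cache maintenance functions declared in arch_helpers.h are called from C like this:

	static unsigned char shared_buf[256];

	void exchange_with_device(void)
	{
		/* Write the CPU's copy of the buffer out to memory for the device */
		flush_dcache_range((uintptr_t)shared_buf, sizeof(shared_buf));

		/* ... device processes shared_buf ... */

		/* Discard stale cached lines before reading the device's response */
		inv_dcache_range((uintptr_t)shared_buf, sizeof(shared_buf));
	}
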
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __ASM_MACROS_S__ +#define __ASM_MACROS_S__ + +#include +#include + +#define WORD_SIZE 4 + + /* + * Co processor register accessors + */ + .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2 + mrc \coproc, \opc1, \reg, \CRn, \CRm, \opc2 + .endm + + .macro ldcopr16 reg1, reg2, coproc, opc1, CRm + mrrc \coproc, \opc1, \reg1, \reg2, \CRm + .endm + + .macro stcopr reg, coproc, opc1, CRn, CRm, opc2 + mcr \coproc, \opc1, \reg, \CRn, \CRm, \opc2 + .endm + + .macro stcopr16 reg1, reg2, coproc, opc1, CRm + mcrr \coproc, \opc1, \reg1, \reg2, \CRm + .endm + + /* Cache line size helpers */ + .macro dcache_line_size reg, tmp + ldcopr \tmp, CTR + ubfx \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH + mov \reg, #WORD_SIZE + lsl \reg, \reg, \tmp + .endm + + .macro icache_line_size reg, tmp + ldcopr \tmp, CTR + and \tmp, \tmp, #CTR_IMINLINE_MASK + mov \reg, #WORD_SIZE + lsl \reg, \reg, \tmp + .endm + + /* + * This macro calculates the base address of the current CPU's multi + * processor(MP) stack using the plat_my_core_pos() index, the name of + * the stack storage and the size of each stack. + * Out: r0 = physical address of stack base + * Clobber: r14, r1, r2 + */ + .macro get_my_mp_stack _name, _size + bl plat_my_core_pos + ldr r2, =(\_name + \_size) + mov r1, #\_size + mla r0, r0, r1, r2 + .endm + + /* + * This macro calculates the base address of a uniprocessor(UP) stack + * using the name of the stack storage and the size of the stack + * Out: r0 = physical address of stack base + */ + .macro get_up_stack _name, _size + ldr r0, =(\_name + \_size) + .endm + +#endif /* __ASM_MACROS_S__ */ diff --git a/include/common/aarch32/assert_macros.S b/include/common/aarch32/assert_macros.S new file mode 100644 index 00000000..f35fc6af --- /dev/null +++ b/include/common/aarch32/assert_macros.S @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __ASSERT_MACROS_S__ +#define __ASSERT_MACROS_S__ + + /* + * Assembler macro to enable asm_assert. We assume that the stack is + * initialized prior to invoking this macro. + */ +#define ASM_ASSERT(_cc) \ +.ifndef .L_assert_filename ;\ + .pushsection .rodata.str1.1, "aS" ;\ + .L_assert_filename: ;\ + .string __FILE__ ;\ + .popsection ;\ +.endif ;\ + b##_cc 300f ;\ + ldr r0, =.L_assert_filename ;\ + mov r1, #__LINE__ ;\ + b . ;\ +300: + +#endif /* __ASSERT_MACROS_S__ */ diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S index ee59a939..023124b3 100644 --- a/include/common/asm_macros_common.S +++ b/include/common/asm_macros_common.S @@ -30,8 +30,6 @@ #ifndef __ASM_MACROS_COMMON_S__ #define __ASM_MACROS_COMMON_S__ -#include - /* * This macro is used to create a function label and place the * code into a separate text section based on the function name -- cgit From b2bca61da51b22cfba303cf389199b9d9d06be4c Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 30 Jun 2016 15:11:07 +0100 Subject: AArch32: Add translation table library support This patch adds translation library supports for AArch32 platforms. The library only supports long descriptor formats for AArch32. The `enable_mmu_secure()` enables the MMU for secure world with `TTBR0` pointing to the populated translation tables. Change-Id: I061345b1779391d098e35e7fe0c76e3ebf850e08 --- include/lib/xlat_tables.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/lib/xlat_tables.h b/include/lib/xlat_tables.h index b51a1de5..3f35e454 100644 --- a/include/lib/xlat_tables.h +++ b/include/lib/xlat_tables.h @@ -188,9 +188,14 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size, unsigned int attr); void mmap_add(const mmap_region_t *mm); +#ifdef AARCH32 +/* AArch32 specific translation table API */ +void enable_mmu_secure(uint32_t flags); +#else /* AArch64 specific translation table APIs */ void enable_mmu_el1(unsigned int flags); void enable_mmu_el3(unsigned int flags); +#endif /* AARCH32 */ #endif /*__ASSEMBLY__*/ #endif /* __XLAT_TABLES_H__ */ -- cgit From 1ae0a49a37b0c0e9f54a488f41b2d24eadae16ea Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 12:49:09 +0100 Subject: AArch32: Add API to invoke runtime service handler This patch adds an API in runtime service framework to invoke the registered handler corresponding to the SMC function identifier. This is helpful for AArch32 because the number of arguments required by the handler is more than registers available as per AArch32 program calling conventions and requires the use of stack. Hence this new API will do the necessary argument setup and invoke the appropriate handler. 
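For illustration (not part of the patch; the wrapper name and the way the smc_ctx pointer and flags are obtained are assumptions), an AArch32 BL image could hand an SMC over to the framework like this:

	uintptr_t monitor_smc_dispatch(uint32_t smc_fid, void *smc_ctx, unsigned int flags)
	{
		/* No cookie is needed here; the saved SMC context acts as the handle */
		return handle_runtime_svc(smc_fid, NULL, smc_ctx, flags);
	}
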
Although this API is primarily intended for AArch32, it can be used for AArch64 as well. Change-Id: Iefa15947fe5a1df55b0859886e677446a0fd7241 --- include/common/runtime_svc.h | 21 ++++++++++++++++++++- include/lib/aarch64/smcc_helpers.h | 12 ++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h index adafcee4..514f334a 100644 --- a/include/common/runtime_svc.h +++ b/include/common/runtime_svc.h @@ -43,10 +43,17 @@ * Constants to allow the assembler access a runtime service * descriptor */ +#ifdef AARCH32 +#define RT_SVC_SIZE_LOG2 4 +#define RT_SVC_DESC_INIT 8 +#define RT_SVC_DESC_HANDLE 12 +#else #define RT_SVC_SIZE_LOG2 5 -#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2) #define RT_SVC_DESC_INIT 16 #define RT_SVC_DESC_HANDLE 24 +#endif /* AARCH32 */ +#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2) + /* * The function identifier has 6 bits for the owning entity number and @@ -123,10 +130,22 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \ ((call_type & FUNCID_TYPE_MASK) \ << FUNCID_OEN_WIDTH)) +/* + * This macro generates the unique owning entity number from the SMC Function + * ID. This unique oen is used to access an entry in the + * 'rt_svc_descs_indices' array to invoke the corresponding runtime service + * handler during SMC handling. + */ +#define get_unique_oen_from_smc_fid(fid) \ + get_unique_oen(((fid) >> FUNCID_OEN_SHIFT), \ + ((fid) >> FUNCID_TYPE_SHIFT)) + /******************************************************************************* * Function & variable prototypes ******************************************************************************/ void runtime_svc_init(void); +uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie, void *handle, + unsigned int flags); extern uintptr_t __RT_SVC_DESCS_START__; extern uintptr_t __RT_SVC_DESCS_END__; void init_crash_reporting(void); diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h index 617a5bce..6e633839 100644 --- a/include/lib/aarch64/smcc_helpers.h +++ b/include/lib/aarch64/smcc_helpers.h @@ -82,5 +82,17 @@ ((const uint32_t *) &(_uuid))[2], \ ((const uint32_t *) &(_uuid))[3]) +/* + * Helper macro to retrieve the SMC parameters from cpu_context_t. + */ +#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4) \ + do { \ + const gp_regs_t *regs = get_gpregs_ctx(_hdl); \ + _x1 = read_ctx_reg(regs, CTX_GPREG_X1); \ + _x2 = read_ctx_reg(regs, CTX_GPREG_X2); \ + _x3 = read_ctx_reg(regs, CTX_GPREG_X3); \ + _x4 = read_ctx_reg(regs, CTX_GPREG_X4); \ + } while (0) + #endif /*__ASSEMBLY__*/ #endif /* __SMCC_HELPERS_H__ */ -- cgit From 3e3616ab216df37d610bbd5d2d7aee662bdc717b Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 12:53:53 +0100 Subject: AArch32: Add SMCC context This patch defines a SMCC context to save and restore registers during a SMC call. It also adds appropriate helpers to save and restore from this context for use by AArch32 secure payload and BL stages. 
Change-Id: I64c8d6fe1d6cac22e1f1f39ea1b54ee1b1b72248 --- include/lib/aarch32/smcc_helpers.h | 171 +++++++++++++++++++++++++++++++++++++ include/lib/aarch32/smcc_macros.S | 118 +++++++++++++++++++++++++ 2 files changed, 289 insertions(+) create mode 100644 include/lib/aarch32/smcc_helpers.h create mode 100644 include/lib/aarch32/smcc_macros.S (limited to 'include') diff --git a/include/lib/aarch32/smcc_helpers.h b/include/lib/aarch32/smcc_helpers.h new file mode 100644 index 00000000..5aeca223 --- /dev/null +++ b/include/lib/aarch32/smcc_helpers.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SMCC_HELPERS_H__ +#define __SMCC_HELPERS_H__ + +#include + +/* These are offsets to registers in smc_ctx_t */ +#define SMC_CTX_GPREG_R0 0x0 +#define SMC_CTX_GPREG_R1 0x4 +#define SMC_CTX_GPREG_R2 0x8 +#define SMC_CTX_GPREG_R3 0xC +#define SMC_CTX_GPREG_R4 0x10 +#define SMC_CTX_SP_USR 0x34 +#define SMC_CTX_SPSR_MON 0x78 +#define SMC_CTX_LR_MON 0x7C +#define SMC_CTX_SIZE 0x80 + +#ifndef __ASSEMBLY__ +#include +#include + +/* + * The generic structure to save arguments and callee saved registers during + * an SMC. Also this structure is used to store the result return values after + * the completion of SMC service. 
+ */ +typedef struct smc_ctx { + u_register_t r0; + u_register_t r1; + u_register_t r2; + u_register_t r3; + u_register_t r4; + u_register_t r5; + u_register_t r6; + u_register_t r7; + u_register_t r8; + u_register_t r9; + u_register_t r10; + u_register_t r11; + u_register_t r12; + /* spsr_usr doesn't exist */ + u_register_t sp_usr; + u_register_t lr_usr; + u_register_t spsr_irq; + u_register_t sp_irq; + u_register_t lr_irq; + u_register_t spsr_fiq; + u_register_t sp_fiq; + u_register_t lr_fiq; + u_register_t spsr_svc; + u_register_t sp_svc; + u_register_t lr_svc; + u_register_t spsr_abt; + u_register_t sp_abt; + u_register_t lr_abt; + u_register_t spsr_und; + u_register_t sp_und; + u_register_t lr_und; + u_register_t spsr_mon; + /* No need to save 'sp_mon' because we are already in monitor mode */ + u_register_t lr_mon; +} smc_ctx_t; + +/* + * Compile time assertions related to the 'smc_context' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same. + */ +CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \ + assert_smc_ctx_greg_r0_offset_mismatch); +CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \ + assert_smc_ctx_greg_r1_offset_mismatch); +CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \ + assert_smc_ctx_greg_r2_offset_mismatch); +CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \ + assert_smc_ctx_greg_r3_offset_mismatch); +CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \ + assert_smc_ctx_greg_r4_offset_mismatch); +CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \ + assert_smc_ctx_sp_usr_offset_mismatch); +CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \ + assert_smc_ctx_lr_mon_offset_mismatch); +CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \ + assert_smc_ctx_spsr_mon_offset_mismatch); + +CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch); + +/* Convenience macros to return from SMC handler */ +#define SMC_RET0(_h) { \ + return (uintptr_t)(_h); \ +} +#define SMC_RET1(_h, _r0) { \ + ((smc_ctx_t *)(_h))->r0 = (_r0); \ + SMC_RET0(_h); \ +} +#define SMC_RET2(_h, _r0, _r1) { \ + ((smc_ctx_t *)(_h))->r1 = (_r1); \ + SMC_RET1(_h, (_r0)); \ +} +#define SMC_RET3(_h, _r0, _r1, _r2) { \ + ((smc_ctx_t *)(_h))->r2 = (_r2); \ + SMC_RET2(_h, (_r0), (_r1)); \ +} +#define SMC_RET4(_h, _r0, _r1, _r2, _r3) { \ + ((smc_ctx_t *)(_h))->r3 = (_r3); \ + SMC_RET3(_h, (_r0), (_r1), (_r2)); \ +} + +/* Return a UUID in the SMC return registers */ +#define SMC_UUID_RET(_h, _uuid) \ + SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \ + ((const uint32_t *) &(_uuid))[1], \ + ((const uint32_t *) &(_uuid))[2], \ + ((const uint32_t *) &(_uuid))[3]) + +/* + * Helper macro to retrieve the SMC parameters from smc_ctx_t. + */ +#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) { \ + _r1 = ((smc_ctx_t *)_hdl)->r1; \ + _r2 = ((smc_ctx_t *)_hdl)->r2; \ + _r3 = ((smc_ctx_t *)_hdl)->r3; \ + _r4 = ((smc_ctx_t *)_hdl)->r4; \ + } + +/* ------------------------------------------------------------------------ + * Helper APIs for setting and retrieving appropriate `smc_ctx_t`. + * These functions need to implemented by the BL including this library. + * ------------------------------------------------------------------------ + */ + +/* Get the pointer to `smc_ctx_t` corresponding to the security state. 
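As a hedged sketch (not part of the patch; the handler name, its simplified prototype and the zero return value are illustrative), an AArch32 service handler typically pulls its arguments out of the smc_ctx_t and returns through the SMC_RETx macros:

	static uintptr_t example_smc_handler(uint32_t smc_fid, void *handle)
	{
		u_register_t r1, r2, r3, r4;

		get_smc_params_from_ctx(handle, r1, r2, r3, r4);

		/* ... service specific handling of smc_fid and r1..r4 ... */

		SMC_RET1(handle, 0);	/* writes r0 in the saved context and returns */
	}
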
*/ +void *smc_get_ctx(int security_state); + +/* Set the next `smc_ctx_t` corresponding to the security state. */ +void smc_set_next_ctx(int security_state); + +/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */ +void *smc_get_next_ctx(void); + +#endif /*__ASSEMBLY__*/ +#endif /* __SMCC_HELPERS_H__ */ diff --git a/include/lib/aarch32/smcc_macros.S b/include/lib/aarch32/smcc_macros.S new file mode 100644 index 00000000..c80c3e47 --- /dev/null +++ b/include/lib/aarch32/smcc_macros.S @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __SMCC_MACROS_S__ +#define __SMCC_MACROS_S__ + +#include + +/* + * Macro to save the General purpose registers including the banked + * registers to the SMC context on entry due a SMC call. On return, r0 + * contains the pointer to the `smc_context_t`. + */ + .macro smcc_save_gp_mode_regs + push {r0-r3, lr} + + ldcopr r0, SCR + and r0, r0, #SCR_NS_BIT + bl smc_get_ctx + + /* Save r4 - r12 in the SMC context */ + add r1, r0, #SMC_CTX_GPREG_R4 + stm r1!, {r4-r12} + + /* + * Pop r0 - r3, lr to r4 - r7, lr from stack and then save + * it to SMC context. + */ + pop {r4-r7, lr} + stm r0, {r4-r7} + + /* Save the banked registers including the current SPSR and LR */ + mrs r4, sp_usr + mrs r5, lr_usr + mrs r6, spsr_irq + mrs r7, sp_irq + mrs r8, lr_irq + mrs r9, spsr_fiq + mrs r10, sp_fiq + mrs r11, lr_fiq + mrs r12, spsr_svc + stm r1!, {r4-r12} + + mrs r4, sp_svc + mrs r5, lr_svc + mrs r6, spsr_abt + mrs r7, sp_abt + mrs r8, lr_abt + mrs r9, spsr_und + mrs r10, sp_und + mrs r11, lr_und + mrs r12, spsr + stm r1!, {r4-r12, lr} + + .endm + +/* + * Macro to restore the General purpose registers including the banked + * registers from the SMC context prior to exit from the SMC call. + * r0 must point to the `smc_context_t` to restore from. 
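As a sketch (not part of the patch; the storage layout and the 0/1 security state encoding are assumptions), the smc_get_ctx()/smc_set_next_ctx()/smc_get_next_ctx() hooks that smcc_helpers.h requires the including BL image to implement could be backed by a simple per-state store:

	static smc_ctx_t ctx_store[2];		/* [0]: secure, [1]: non-secure (assumed) */
	static unsigned int next_state;

	void *smc_get_ctx(int security_state)
	{
		return &ctx_store[security_state];
	}

	void smc_set_next_ctx(int security_state)
	{
		next_state = security_state;
	}

	void *smc_get_next_ctx(void)
	{
		return &ctx_store[next_state];
	}
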
+ */ + .macro smcc_restore_gp_mode_regs + + /* Restore the banked registers including the current SPSR and LR */ + add r1, r0, #SMC_CTX_SP_USR + ldm r1!, {r4-r12} + msr sp_usr, r4 + msr lr_usr, r5 + msr spsr_irq, r6 + msr sp_irq, r7 + msr lr_irq, r8 + msr spsr_fiq, r9 + msr sp_fiq, r10 + msr lr_fiq, r11 + msr spsr_svc, r12 + + ldm r1!, {r4-r12, lr} + msr sp_svc, r4 + msr lr_svc, r5 + msr spsr_abt, r6 + msr sp_abt, r7 + msr lr_abt, r8 + msr spsr_und, r9 + msr sp_und, r10 + msr lr_und, r11 + msr spsr, r12 + + /* Restore the rest of the general purpose registers */ + ldm r0, {r0-r12} + .endm + +#endif /* __SMCC_MACROS_S__ */ -- cgit From e33b78a658bd54a815c780e17c2d0073db6f59db Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 14:10:46 +0100 Subject: AArch32: Add support in TF libraries This patch adds AArch32 support to cpu ops, context management, per-cpu data and spinlock libraries. The `entrypoint_info` structure is modified to add support for AArch32 register arguments. The CPU operations for AEM generic cpu in AArch32 mode is also added. Change-Id: I1e52e79f498661d8f31f1e7b3a29e222bc7a4483 --- include/common/bl_common.h | 15 +++++ include/lib/cpus/aarch32/aem_generic.h | 37 +++++++++++++ include/lib/cpus/aarch32/cpu_macros.S | 72 ++++++++++++++++++++++++ include/lib/el3_runtime/aarch32/context.h | 91 +++++++++++++++++++++++++++++++ include/lib/el3_runtime/context_mgmt.h | 20 ++++--- include/lib/el3_runtime/cpu_data.h | 25 +++++++-- 6 files changed, 249 insertions(+), 11 deletions(-) create mode 100644 include/lib/cpus/aarch32/aem_generic.h create mode 100644 include/lib/cpus/aarch32/cpu_macros.S create mode 100644 include/lib/el3_runtime/aarch32/context.h (limited to 'include') diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 3aa08360..942843cf 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -50,7 +50,11 @@ * 'entry_point_info' structure at their correct offsets. ******************************************************************************/ #define ENTRY_POINT_INFO_PC_OFFSET 0x08 +#ifdef AARCH32 +#define ENTRY_POINT_INFO_ARGS_OFFSET 0x10 +#else #define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 +#endif /* The following are used to set/get image attributes. */ #define PARAM_EP_SECURITY_MASK (0x1) @@ -192,6 +196,13 @@ typedef struct aapcs64_params { u_register_t arg7; } aapcs64_params_t; +typedef struct aapcs32_params { + u_register_t arg0; + u_register_t arg1; + u_register_t arg2; + u_register_t arg3; +} aapcs32_params_t; + /*************************************************************************** * This structure provides version information and the size of the * structure, attributes for the structure it represents @@ -216,7 +227,11 @@ typedef struct entry_point_info { param_header_t h; uintptr_t pc; uint32_t spsr; +#ifdef AARCH32 + aapcs32_params_t args; +#else aapcs64_params_t args; +#endif } entry_point_info_t; /***************************************************************************** diff --git a/include/lib/cpus/aarch32/aem_generic.h b/include/lib/cpus/aarch32/aem_generic.h new file mode 100644 index 00000000..9b313677 --- /dev/null +++ b/include/lib/cpus/aarch32/aem_generic.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
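As an illustrative sketch (not part of the patch; the entry address is made up and the param_header_t setup is omitted), an AArch32 platform now populates the entry point arguments like this:

	static entry_point_info_t next_ep;	/* param_header_t initialisation omitted */

	void prepare_next_image(void)
	{
		next_ep.pc = 0x80000000;	/* hypothetical entry address of the next image */
		next_ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
					   DISABLE_ALL_EXCEPTIONS);
		next_ep.args.arg0 = 0;	/* under AARCH32, args is an aapcs32_params_t (r0-r3 only) */
	}
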
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __AEM_GENERIC_H__ +#define __AEM_GENERIC_H__ + +/* BASE AEM midr for revision 0 */ +#define BASE_AEM_MIDR 0x410FD0F0 + +#endif /* __AEM_GENERIC_H__ */ diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S new file mode 100644 index 00000000..f58f3e94 --- /dev/null +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+#ifndef __CPU_MACROS_S__
+#define __CPU_MACROS_S__
+
+#include <arch.h>
+
+#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+ (MIDR_PN_MASK << MIDR_PN_SHIFT)
+
+ /*
+ * Define the offsets to the fields in cpu_ops structure.
+ */
+ .struct 0
+CPU_MIDR: /* cpu_ops midr */
+ .space 4
+/* Reset fn is needed during reset */
+CPU_RESET_FUNC: /* cpu_ops reset_func */
+ .space 4
+CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
+ .space 4
+CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
+ .space 4
+CPU_OPS_SIZE = .
+
+ /*
+ * Convenience macro to declare cpu_ops structure.
+ * Make sure the structure fields are as per the offsets
+ * defined above.
+ */
+ .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
+ .section cpu_ops, "a"
+ .align 2
+ .type cpu_ops_\_name, %object
+ .word \_midr
+ .if \_noresetfunc
+ .word 0
+ .else
+ .word \_name\()_reset_func
+ .endif
+ .word \_name\()_core_pwr_dwn
+ .word \_name\()_cluster_pwr_dwn
+ .endm
+
+#endif /* __CPU_MACROS_S__ */
diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h
new file mode 100644
index 00000000..51081415
--- /dev/null
+++ b/include/lib/el3_runtime/aarch32/context.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_REGS_OFFSET 0x0
+#define CTX_GPREG_R0 0x0
+#define CTX_GPREG_R1 0x4
+#define CTX_GPREG_R2 0x8
+#define CTX_GPREG_R3 0xC
+#define CTX_LR 0x10
+#define CTX_SCR 0x14
+#define CTX_SPSR 0x18
+#define CTX_NS_SCTLR 0x1C
+#define CTX_REGS_END 0x20
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */ +#define WORD_SHIFT 2 +#define DEFINE_REG_STRUCT(name, num_regs) \ + typedef struct name { \ + uint32_t _regs[num_regs]; \ + } __aligned(8) name##_t + +/* Constants to determine the size of individual context structures */ +#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT) + +DEFINE_REG_STRUCT(regs, CTX_REG_ALL); + +#undef CTX_REG_ALL + +#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> WORD_SHIFT]) +#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> WORD_SHIFT]) \ + = val) +typedef struct cpu_context { + regs_t regs_ctx; +} cpu_context_t; + +/* Macros to access members of the 'cpu_context_t' structure */ +#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx) + +/* + * Compile time assertions related to the 'cpu_context' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same. + */ +CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \ + assert_core_context_regs_offset_mismatch); + +#endif /* __ASSEMBLY__ */ + +#endif /* __CONTEXT_H__ */ diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index 672ea11e..b264fc32 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -42,11 +42,6 @@ struct entry_point_info; * Function & variable prototypes ******************************************************************************/ void cm_init(void); -void *cm_get_context_by_mpidr(uint64_t mpidr, - uint32_t security_state) __deprecated; -void cm_set_context_by_mpidr(uint64_t mpidr, - void *context, - uint32_t security_state) __deprecated; void *cm_get_context_by_index(unsigned int cpu_idx, unsigned int security_state); void cm_set_context_by_index(unsigned int cpu_idx, @@ -54,12 +49,12 @@ void cm_set_context_by_index(unsigned int cpu_idx, unsigned int security_state); void *cm_get_context(uint32_t security_state); void cm_set_context(void *context, uint32_t security_state); -void cm_init_context(uint64_t mpidr, - const struct entry_point_info *ep) __deprecated; void cm_init_my_context(const struct entry_point_info *ep); void cm_init_context_by_index(unsigned int cpu_idx, const struct entry_point_info *ep); void cm_prepare_el3_exit(uint32_t security_state); + +#ifndef AARCH32 void cm_el1_sysregs_context_save(uint32_t security_state); void cm_el1_sysregs_context_restore(uint32_t security_state); void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint); @@ -71,6 +66,16 @@ void cm_write_scr_el3_bit(uint32_t security_state, void cm_set_next_eret_context(uint32_t security_state); uint32_t cm_get_scr_el3(uint32_t security_state); + +void cm_init_context(uint64_t mpidr, + const struct entry_point_info *ep) __deprecated; + +void *cm_get_context_by_mpidr(uint64_t mpidr, + uint32_t security_state) __deprecated; +void cm_set_context_by_mpidr(uint64_t mpidr, + void *context, + uint32_t security_state) __deprecated; + /* Inline definitions */ /******************************************************************************* @@ -98,4 +103,5 @@ static inline void cm_set_next_context(void *context) "msr spsel, #0\n" : : "r" (context)); } +#endif /* AARCH32 */ #endif /* __CM_H__ */ diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h index 4fc801bf..910b1534 100644 --- a/include/lib/el3_runtime/cpu_data.h +++ b/include/lib/el3_runtime/cpu_data.h @@ -31,16 +31,28 @@ #ifndef __CPU_DATA_H__ #define __CPU_DATA_H__ +#ifdef AARCH32 + +#if CRASH_REPORTING +#error "Crash reporting is not 
supported in AArch32" +#endif +#define CPU_DATA_CPU_OPS_PTR 0x0 + +#else /* AARCH32 */ + /* Offsets for the cpu_data structure */ #define CPU_DATA_CRASH_BUF_OFFSET 0x18 +/* need enough space in crash buffer to save 8 registers */ +#define CPU_DATA_CRASH_BUF_SIZE 64 +#define CPU_DATA_CPU_OPS_PTR 0x10 + +#endif /* AARCH32 */ + #if CRASH_REPORTING #define CPU_DATA_LOG2SIZE 7 #else #define CPU_DATA_LOG2SIZE 6 #endif -/* need enough space in crash buffer to save 8 registers */ -#define CPU_DATA_CRASH_BUF_SIZE 64 -#define CPU_DATA_CPU_OPS_PTR 0x10 #ifndef __ASSEMBLY__ @@ -77,7 +89,9 @@ * used for this. ******************************************************************************/ typedef struct cpu_data { +#ifndef AARCH32 void *cpu_context[2]; +#endif uintptr_t cpu_ops_ptr; #if CRASH_REPORTING u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3]; @@ -104,12 +118,15 @@ CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof struct cpu_data *_cpu_data_by_index(uint32_t cpu_index); +#ifndef AARCH32 /* Return the cpu_data structure for the current CPU. */ static inline struct cpu_data *_cpu_data(void) { return (cpu_data_t *)read_tpidr_el3(); } - +#else +struct cpu_data *_cpu_data(void); +#endif /************************************************************************** * APIs for initialising and accessing per-cpu data -- cgit From 727e5238fa3e9220d6a2718fab3b1df22af1dc61 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 14:11:23 +0100 Subject: AArch32: Add support to PSCI lib This patch adds AArch32 support to PSCI library, as follows : * The `psci_helpers.S` is implemented for AArch32. * AArch32 version of internal helper function `psci_get_ns_ep_info()` is defined. * The PSCI Library is responsible for the Non Secure context initialization. Hence a library interface `psci_prepare_next_non_secure_ctx()` is introduced to enable EL3 runtime firmware to initialize the non secure context without invoking context management library APIs. Change-Id: I25595b0cc2dbfdf39dbf7c589b875cba33317b9d --- include/lib/psci/psci.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h index c3e9ef7e..a583fef7 100644 --- a/include/lib/psci/psci.h +++ b/include/lib/psci/psci.h @@ -359,6 +359,8 @@ u_register_t psci_smc_handler(uint32_t smc_fid, int psci_setup(uintptr_t mailbox_ep); void psci_warmboot_entrypoint(void); void psci_register_spd_pm_hook(const spd_pm_ops_t *pm); +void psci_prepare_next_non_secure_ctx( + struct entry_point_info *next_image_info); #endif /*__ASSEMBLY__*/ -- cgit From c11ba852b970f2a125442da26d907c0842f09a25 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Thu, 5 May 2016 14:32:05 +0100 Subject: AArch32: add a minimal secure payload (SP_MIN) This patch adds a minimal AArch32 secure payload SP_MIN. It relies on PSCI library to initialize the normal world context. It runs in Monitor mode and uses the runtime service framework to handle SMCs. It is added as a BL32 component in the Trusted Firmware source tree. 
Change-Id: Icc04fa6b242025a769c1f6c7022fde19459c43e9
---
 include/bl32/sp_min/platform_sp_min.h | 42 +++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 include/bl32/sp_min/platform_sp_min.h

(limited to 'include')

diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h
new file mode 100644
index 00000000..ae9dd58a
--- /dev/null
+++ b/include/bl32/sp_min/platform_sp_min.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_SP_MIN_H__
+#define __PLATFORM_SP_MIN_H__
+
+/*******************************************************************************
+ * Mandatory SP_MIN functions
+ ******************************************************************************/
+void sp_min_early_platform_setup(void);
+void sp_min_plat_arch_setup(void);
+void sp_min_platform_setup(void);
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
+
+#endif /* __PLATFORM_SP_MIN_H__ */
-- cgit

From 877cf3ff12fc6b71ea44e2a4bad2b9303298433c Mon Sep 17 00:00:00 2001
From: Soby Mathew
Date: Mon, 11 Jul 2016 14:13:56 +0100
Subject: AArch32: Add essential ARM platform and FVP support

This patch adds AArch32 support for FVP and implements common platform
APIs like `plat_get_my_stack`, `plat_set_my_stack` and `plat_my_core_pos`
for AArch32. Only Multi Processor (MP) implementations of these functions
are considered in this patch.

The ARM Standard platform layer helpers are implemented for AArch32 and
the common makefiles are modified to cater for both AArch64 and AArch32
builds. Compatibility with the deprecated platform API is not supported
for AArch32.

Change-Id: Iad228400613eec91abf731b49e21a15bcf2833ea --- include/plat/arm/common/arm_def.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h index 0b3e66b1..4a4dfd40 100644 --- a/include/plat/arm/common/arm_def.h +++ b/include/plat/arm/common/arm_def.h @@ -321,9 +321,12 @@ # error "Unsupported ARM_TSP_RAM_LOCATION_ID value" #endif +/* BL32 is mandatory in AArch32 */ +#ifndef AARCH32 #ifdef SPD_none #undef BL32_BASE #endif /* SPD_none */ +#endif /******************************************************************************* * FWU Images: NS_BL1U, BL2U & NS_BL2U defines. -- cgit From 181bbd41fbcd65d4c75cf572ab4007b0c16ffddb Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Mon, 11 Jul 2016 14:15:27 +0100 Subject: AArch32: Add FVP support for SP_MIN This patch implements the support for SP_MIN in FVP. The SP_MIN platform APIs are implemented and the required makefile support is added for FVP. Change-Id: Id50bd6093eccbd5e38894e3fd2b20d5baeac5452 --- include/plat/arm/common/plat_arm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index 0ffdb5c0..25aab249 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -167,6 +167,9 @@ void arm_bl31_plat_arch_setup(void); /* TSP utility functions */ void arm_tsp_early_platform_setup(void); +/* SP_MIN utility functions */ +void arm_sp_min_early_platform_setup(void); + /* FIP TOC validity check */ int arm_io_is_toc_valid(void); -- cgit
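
The patches above introduce the AArch32 register context in `context.h`
together with the `read_ctx_reg`/`write_ctx_reg` accessors. As a rough
illustration of how these pieces fit together, the sketch below populates
the saved r0/r1 of the non-secure context before an exit to the normal
world. This is not code from the patch set: the helper name
`set_ns_entry_args` is hypothetical, and `NON_SECURE` is assumed to come
from the common headers.

    /* Hypothetical helper, for illustration only. */
    #include <context.h>
    #include <context_mgmt.h>
    #include <stdint.h>

    static void set_ns_entry_args(uint32_t arg0, uint32_t arg1)
    {
            /* Per-CPU non-secure context maintained by the context library */
            cpu_context_t *ctx = cm_get_context(NON_SECURE);
            regs_t *regs = get_regs_ctx(ctx);

            /*
             * CTX_GPREG_Rn are byte offsets into regs_t; write_ctx_reg()
             * shifts them by WORD_SHIFT to index the 32-bit _regs[] array.
             */
            write_ctx_reg(regs, CTX_GPREG_R0, arg0);
            write_ctx_reg(regs, CTX_GPREG_R1, arg1);
    }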
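
Similarly, the SP_MIN platform interface and the new PSCI library hook are
intended to be used together when SP_MIN hands control to BL33. The
fragment below is only a sketch of that flow, not code from these patches;
the wrapper name `sp_min_prepare_bl33_entry` is invented for the example.

    /* Hypothetical wrapper, for illustration only. */
    #include <bl_common.h>
    #include <platform_sp_min.h>
    #include <psci.h>

    static void sp_min_prepare_bl33_entry(void)
    {
            /* BL33 (non-secure) entry point supplied by the platform port */
            entry_point_info_t *ns_ep = sp_min_plat_get_bl33_ep_info();

            /*
             * Let the PSCI library initialise the non-secure context from
             * the entry point info, instead of the EL3 runtime firmware
             * calling the context-management APIs directly.
             */
            psci_prepare_next_non_secure_ctx(ns_ep);
    }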