From 7260022636e3b0d3ef641cbda135d98f9a7df177 Mon Sep 17 00:00:00 2001
From: Yatharth Kochar
Date: Mon, 12 Sep 2016 16:08:41 +0100
Subject: Add new version of image loading.

This patch adds the capability to load BL images based on image descriptors instead of the hard-coded way of loading BL images. This framework is designed such that it can be readily adapted by any BL stage that needs to load images.

In order to provide the above capability, the following new platform functions are introduced:

bl_load_info_t *plat_get_bl_image_load_info(void);
This function returns a pointer to the list of images that the platform has populated to load.

bl_params_t *plat_get_next_bl_params(void);
This function returns a pointer to the shared memory that the platform has kept aside to pass trusted firmware related information that the next BL image needs.

void plat_flush_next_bl_params(void);
This function flushes to main memory all the params that are passed to the next image.

int bl2_plat_handle_post_image_load(unsigned int image_id)
This function can be used by the platforms to update/use image information for a given `image_id`.

`desc_image_load.c` contains utility functions which can be used by the platforms to generate the load and executable image lists based on the registered image descriptors.

This patch also adds a new version of the `load_image`/`load_auth_image` functions in order to achieve the above capability. Following are the changes for the new version as compared to the old one:

- Refactor the signature and only keep the image_id and image_info_t arguments. Removed the image_base argument as it is already passed through image_info_t. Given that the BL image base addresses and limit/size are already provided by the platforms, the meminfo_t and entry_point_info arguments are not needed to provide/reserve the extent of free memory for the given BL image.

- Added a check of the image size against the defined maximum size. This is needed because the image size could come from an unauthenticated source (e.g. the FIP header). To make this check, a new member is added to the image_info_t struct for identifying the image maximum size.

A new flag, `LOAD_IMAGE_V2`, is added in the Makefile. Its default value is 0.

NOTE: `TRUSTED_BOARD_BOOT` is currently not supported when `LOAD_IMAGE_V2` is enabled.

Change-Id: Ia7b643f4817a170d5a2fbf479b9bc12e63112e79
---
 Makefile | 12 ++-
 common/bl_common.c | 166 +++++++++++++++++++++++++++--
 common/desc_image_load.c | 220 +++++++++++++++++++++++++++++++++++++++
 docs/porting-guide.md | 44 +++++++-
 docs/user-guide.md | 6 ++
 include/common/bl_common.h | 70 ++++++++++++-
 include/common/desc_image_load.h | 63 +++++++++++
 include/plat/common/platform.h | 43 ++++++++
 8 files changed, 609 insertions(+), 15 deletions(-)
 create mode 100644 common/desc_image_load.c
 create mode 100644 include/common/desc_image_load.h

diff --git a/Makefile b/Makefile
index 49d7cc4b..376db8d2 100644
--- a/Makefile
+++ b/Makefile
@@ -115,7 +115,8 @@ ENABLE_PSCI_STAT := 0 # Whether code and read-only data should be put on separate memory pages. # The platform Makefile is free to override this value. SEPARATE_CODE_AND_RODATA := 0 - +# Flag to enable new version of image loading +LOAD_IMAGE_V2 := 0 ################################################################################ # Checkpatch script options
@@ -355,6 +356,13 @@ ifeq (${NEED_BL33},yes) endif endif
+# TRUSTED_BOARD_BOOT is currently not supported when LOAD_IMAGE_V2 is enabled.
+ifeq (${LOAD_IMAGE_V2},1) + ifeq (${TRUSTED_BOARD_BOOT},1) + $(error "TRUSTED_BOARD_BOOT is currently not supported \ + for LOAD_IMAGE_V2=1") + endif +endif ################################################################################ # Process platform overrideable behaviour @@ -445,6 +453,7 @@ $(eval $(call assert_boolean,PL011_GENERIC_UART)) $(eval $(call assert_boolean,ENABLE_PMF)) $(eval $(call assert_boolean,ENABLE_PSCI_STAT)) $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA)) +$(eval $(call assert_boolean,LOAD_IMAGE_V2)) ################################################################################ @@ -475,6 +484,7 @@ $(eval $(call add_define,PL011_GENERIC_UART)) $(eval $(call add_define,ENABLE_PMF)) $(eval $(call add_define,ENABLE_PSCI_STAT)) $(eval $(call add_define,SEPARATE_CODE_AND_RODATA)) +$(eval $(call add_define,LOAD_IMAGE_V2)) # Define the EL3_PAYLOAD_BASE flag only if it is provided. ifdef EL3_PAYLOAD_BASE $(eval $(call add_define,EL3_PAYLOAD_BASE)) diff --git a/common/bl_common.c b/common/bl_common.c index bae02d4b..193e972c 100644 --- a/common/bl_common.c +++ b/common/bl_common.c @@ -53,10 +53,7 @@ uintptr_t page_align(uintptr_t value, unsigned dir) return value; } -static inline unsigned int is_page_aligned (uintptr_t addr) { - return (addr & (PAGE_SIZE - 1)) == 0; -} - +#if !LOAD_IMAGE_V2 /****************************************************************************** * Determine whether the memory region delimited by 'addr' and 'size' is free, * given the extents of free memory. @@ -179,6 +176,7 @@ static void dump_load_info(uintptr_t image_load_addr, INFO(" free region = [base = %p, size = 0x%zx]\n", (void *) mem_layout->free_base, mem_layout->free_size); } +#endif /* LOAD_IMAGE_V2 */ /* Generic function to return the size of an image */ size_t image_size(unsigned int image_id) @@ -223,6 +221,156 @@ size_t image_size(unsigned int image_id) return image_size; } +#if LOAD_IMAGE_V2 + +/******************************************************************************* + * Generic function to load an image at a specific address given + * an image ID and extents of free memory. + * + * If the load is successful then the image information is updated. + * + * Returns 0 on success, a negative error code otherwise. 
+ ******************************************************************************/ +int load_image(unsigned int image_id, image_info_t *image_data) +{ + uintptr_t dev_handle; + uintptr_t image_handle; + uintptr_t image_spec; + uintptr_t image_base; + size_t image_size; + size_t bytes_read; + int io_result; + + assert(image_data != NULL); + assert(image_data->h.version >= VERSION_2); + + image_base = image_data->image_base; + + /* Obtain a reference to the image by querying the platform layer */ + io_result = plat_get_image_source(image_id, &dev_handle, &image_spec); + if (io_result != 0) { + WARN("Failed to obtain reference to image id=%u (%i)\n", + image_id, io_result); + return io_result; + } + + /* Attempt to access the image */ + io_result = io_open(dev_handle, image_spec, &image_handle); + if (io_result != 0) { + WARN("Failed to access image id=%u (%i)\n", + image_id, io_result); + return io_result; + } + + INFO("Loading image id=%u at address %p\n", image_id, + (void *) image_base); + + /* Find the size of the image */ + io_result = io_size(image_handle, &image_size); + if ((io_result != 0) || (image_size == 0)) { + WARN("Failed to determine the size of the image id=%u (%i)\n", + image_id, io_result); + goto exit; + } + + /* Check that the image size to load is within limit */ + if (image_size > image_data->image_max_size) { + WARN("Image id=%u size out of bounds\n", image_id); + io_result = -EFBIG; + goto exit; + } + + image_data->image_size = image_size; + + /* We have enough space so load the image now */ + /* TODO: Consider whether to try to recover/retry a partially successful read */ + io_result = io_read(image_handle, image_base, image_size, &bytes_read); + if ((io_result != 0) || (bytes_read < image_size)) { + WARN("Failed to load image id=%u (%i)\n", image_id, io_result); + goto exit; + } + +#if !TRUSTED_BOARD_BOOT + /* + * File has been successfully loaded. + * Flush the image to main memory so that it can be executed later by + * any CPU, regardless of cache and MMU state. + * When TBB is enabled the image is flushed later, after image + * authentication. + */ + flush_dcache_range(image_base, image_size); +#endif /* TRUSTED_BOARD_BOOT */ + + INFO("Image id=%u loaded: %p - %p\n", image_id, (void *) image_base, + (void *) (image_base + image_size)); + +exit: + io_close(image_handle); + /* Ignore improbable/unrecoverable error in 'close' */ + + /* TODO: Consider maintaining open device connection from this bootloader stage */ + io_dev_close(dev_handle); + /* Ignore improbable/unrecoverable error in 'dev_close' */ + + return io_result; +} + +/******************************************************************************* + * Generic function to load and authenticate an image. The image is actually + * loaded by calling the 'load_image()' function. Therefore, it returns the + * same error codes if the loading operation failed, or -EAUTH if the + * authentication failed. In addition, this function uses recursion to + * authenticate the parent images up to the root of trust. 
+ ******************************************************************************/ +int load_auth_image(unsigned int image_id, image_info_t *image_data) +{ + int rc; + +#if TRUSTED_BOARD_BOOT + unsigned int parent_id; + + /* Use recursion to authenticate parent images */ + rc = auth_mod_get_parent_id(image_id, &parent_id); + if (rc == 0) { + rc = load_auth_image(parent_id, image_data); + if (rc != 0) { + return rc; + } + } +#endif /* TRUSTED_BOARD_BOOT */ + + /* Load the image */ + rc = load_image(image_id, image_data); + if (rc != 0) { + return rc; + } + +#if TRUSTED_BOARD_BOOT + /* Authenticate it */ + rc = auth_mod_verify_img(image_id, + (void *)image_data->image_base, + image_data->image_size); + if (rc != 0) { + memset((void *)image_data->image_base, 0x00, + image_data->image_size); + flush_dcache_range(image_data->image_base, + image_data->image_size); + return -EAUTH; + } + + /* + * File has been successfully loaded and authenticated. + * Flush the image to main memory so that it can be executed later by + * any CPU, regardless of cache and MMU state. + */ + flush_dcache_range(image_data->image_base, image_data->image_size); +#endif /* TRUSTED_BOARD_BOOT */ + + return 0; +} + +#else /* LOAD_IMAGE_V2 */ + /******************************************************************************* * Generic function to load an image at a specific address given an image ID and * extents of free memory. @@ -255,7 +403,7 @@ int load_image(meminfo_t *mem_layout, assert(mem_layout != NULL); assert(image_data != NULL); - assert(image_data->h.version >= VERSION_1); + assert(image_data->h.version == VERSION_1); /* Obtain a reference to the image by querying the platform layer */ io_result = plat_get_image_source(image_id, &dev_handle, &image_spec); @@ -348,8 +496,10 @@ exit: /******************************************************************************* * Generic function to load and authenticate an image. The image is actually - * loaded by calling the 'load_image()' function. In addition, this function - * uses recursion to authenticate the parent images up to the root of trust. + * loaded by calling the 'load_image()' function. Therefore, it returns the + * same error codes if the loading operation failed, or -EAUTH if the + * authentication failed. In addition, this function uses recursion to + * authenticate the parent images up to the root of trust. ******************************************************************************/ int load_auth_image(meminfo_t *mem_layout, unsigned int image_id, @@ -403,6 +553,8 @@ int load_auth_image(meminfo_t *mem_layout, return 0; } +#endif /* LOAD_IMAGE_V2 */ + /******************************************************************************* * Print the content of an entry_point_info_t structure. ******************************************************************************/ diff --git a/common/desc_image_load.c b/common/desc_image_load.c new file mode 100644 index 00000000..a9762b71 --- /dev/null +++ b/common/desc_image_load.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + +extern bl_mem_params_node_t *bl_mem_params_desc_ptr; +extern unsigned int bl_mem_params_desc_num; + +static bl_load_info_t bl_load_info; +static bl_params_t next_bl_params; + + +/******************************************************************************* + * This function flushes the data structures so that they are visible + * in memory for the next BL image. + ******************************************************************************/ +void flush_bl_params_desc(void) +{ + flush_dcache_range((unsigned long)bl_mem_params_desc_ptr, + sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num); +} + +/******************************************************************************* + * This function returns the index for given image_id, within the + * image descriptor array provided by bl_image_info_descs_ptr, if the + * image is found else it returns -1. + ******************************************************************************/ +int get_bl_params_node_index(unsigned int image_id) +{ + int index; + assert(image_id != INVALID_IMAGE_ID); + + for (index = 0; index < bl_mem_params_desc_num; index++) { + if (bl_mem_params_desc_ptr[index].image_id == image_id) + return index; + } + + return -1; +} + +/******************************************************************************* + * This function returns the pointer to `bl_mem_params_node_t` object for + * given image_id, within the image descriptor array provided by + * bl_mem_params_desc_ptr, if the image is found else it returns NULL. + ******************************************************************************/ +bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id) +{ + int index; + assert(image_id != INVALID_IMAGE_ID); + + index = get_bl_params_node_index(image_id); + if (index >= 0) + return &bl_mem_params_desc_ptr[index]; + else + return NULL; +} + +/******************************************************************************* + * This function creates the list of loadable images, by populating and + * linking each `bl_load_info_node_t` type node, using the internal array + * of image descriptor provided by bl_mem_params_desc_ptr. It also populates + * and returns `bl_load_info_t` type structure that contains head of the list + * of loadable images. 
+ ******************************************************************************/ +bl_load_info_t *get_bl_load_info_from_mem_params_desc(void) +{ + int index = 0; + + /* If there is no image to start with, return NULL */ + if (!bl_mem_params_desc_num) + return NULL; + + /* Assign initial data structures */ + bl_load_info_node_t *bl_node_info = + &bl_mem_params_desc_ptr[index].load_node_mem; + bl_load_info.head = bl_node_info; + SET_PARAM_HEAD(&bl_load_info, PARAM_BL_LOAD_INFO, VERSION_2, 0); + + /* Go through the image descriptor array and create the list */ + for (; index < bl_mem_params_desc_num; index++) { + + /* Populate the image information */ + bl_node_info->image_id = bl_mem_params_desc_ptr[index].image_id; + bl_node_info->image_info = &bl_mem_params_desc_ptr[index].image_info; + + /* Link next image if present */ + if ((index + 1) < bl_mem_params_desc_num) { + /* Get the memory and link the next node */ + bl_node_info->next_load_info = + &bl_mem_params_desc_ptr[index + 1].load_node_mem; + bl_node_info = bl_node_info->next_load_info; + } + } + + return &bl_load_info; +} + +/******************************************************************************* + * This function creates the list of executable images, by populating and + * linking each `bl_params_node_t` type node, using the internal array of + * image descriptor provided by bl_mem_params_desc_ptr. It also populates + * and returns `bl_params_t` type structure that contains head of the list + * of executable images. + ******************************************************************************/ +bl_params_t *get_next_bl_params_from_mem_params_desc(void) +{ + int count; + unsigned int img_id = 0; + int link_index = 0; + bl_params_node_t *bl_current_exec_node = NULL; + bl_params_node_t *bl_last_exec_node = NULL; + bl_mem_params_node_t *desc_ptr; + + /* If there is no image to start with, return NULL */ + if (!bl_mem_params_desc_num) + return NULL; + + /* Get the list HEAD */ + for (count = 0; count < bl_mem_params_desc_num; count++) { + + desc_ptr = &bl_mem_params_desc_ptr[count]; + + if ((EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE) && + (EP_GET_FIRST_EXE(desc_ptr->ep_info.h.attr) == EP_FIRST_EXE)) { + next_bl_params.head = &desc_ptr->params_node_mem; + link_index = count; + break; + } + } + + /* Make sure we have a HEAD node */ + assert(next_bl_params.head != NULL); + + /* Populate the HEAD information */ + SET_PARAM_HEAD(&next_bl_params, PARAM_BL_PARAMS, VERSION_2, 0); + + /* + * Go through the image descriptor array and create the list. + * This bounded loop is to make sure that we are not looping forever. 
+ */ + for (count = 0 ; count < bl_mem_params_desc_num; count++) { + + desc_ptr = &bl_mem_params_desc_ptr[link_index]; + + /* Make sure the image is executable */ + assert(EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE); + + /* Get the memory for current node */ + bl_current_exec_node = &desc_ptr->params_node_mem; + + /* Populate the image information */ + bl_current_exec_node->image_id = desc_ptr->image_id; + bl_current_exec_node->image_info = &desc_ptr->image_info; + bl_current_exec_node->ep_info = &desc_ptr->ep_info; + + if (bl_last_exec_node) { + /* Assert if loop detected */ + assert(bl_last_exec_node->next_params_info == NULL); + + /* Link the previous node to the current one */ + bl_last_exec_node->next_params_info = bl_current_exec_node; + } + + /* Update the last node */ + bl_last_exec_node = bl_current_exec_node; + + /* If no next hand-off image then break out */ + img_id = desc_ptr->next_handoff_image_id; + if (img_id == INVALID_IMAGE_ID) + break; + + /* Get the index for the next hand-off image */ + link_index = get_bl_params_node_index(img_id); + assert((link_index > 0) && + (link_index < bl_mem_params_desc_num)); + } + + /* Invalid image is expected to terminate the loop */ + assert(img_id == INVALID_IMAGE_ID); + + /* Populate arg0 for the next BL image */ + next_bl_params.head->ep_info->args.arg0 = (unsigned long)&next_bl_params; + + /* Flush the parameters to be passed to the next BL image */ + flush_dcache_range((unsigned long)&next_bl_params, + sizeof(next_bl_params)); + + return &next_bl_params; +} diff --git a/docs/porting-guide.md b/docs/porting-guide.md index 195c9374..f42ff649 100644 --- a/docs/porting-guide.md +++ b/docs/porting-guide.md @@ -721,7 +721,6 @@ Firmware represents the power domain topology and how this relates to the linear CPU index, please refer [Power Domain Topology Design]. - 2.4 Common optional modifications --------------------------------- @@ -841,10 +840,37 @@ and must be implemented in assembly because it may be called before the C environment is initialized. Note: The address from where it was called is stored in x30 (Link Register). - The default implementation simply spins. +### Function : plat_get_bl_image_load_info() + + Argument : void + Return : bl_load_info_t * + +This function returns pointer to the list of images that the platform has +populated to load. This function is currently invoked in BL2 to load the +BL3xx images, when LOAD_IMAGE_V2 is enabled. + +### Function : plat_get_next_bl_params() + + Argument : void + Return : bl_params_t * + +This function returns a pointer to the shared memory that the platform has +kept aside to pass trusted firmware related information that next BL image +needs. This function is currently invoked in BL2 to pass this information to +the next BL image, when LOAD_IMAGE_V2 is enabled. + +### Function : plat_flush_next_bl_params() + + Argument : void + Return : void + +This function flushes to main memory all the image params that are passed to +next image. This function is currently invoked in BL2 to flush this information +to the next BL image, when LOAD_IMAGE_V2 is enabled. + 3. Modifications specific to a Boot Loader stage ------------------------------------------------- @@ -1175,6 +1201,20 @@ populated with the extents of secure RAM available for BL2 to use. See `bl2_early_platform_setup()` above. +Following function is required only when LOAD_IMAGE_V2 is enabled. 
+ +### Function : bl2_plat_handle_post_image_load() [mandatory] + + Argument : unsigned int + Return : int + +This function can be used by the platforms to update/use image information +for given `image_id`. This function is currently invoked in BL2 to handle +BL image specific information based on the `image_id` passed, when +LOAD_IMAGE_V2 is enabled. + +Following functions are required only when LOAD_IMAGE_V2 is disabled. + ### Function : bl2_plat_get_scp_bl2_meminfo() [mandatory] Argument : meminfo * diff --git a/docs/user-guide.md b/docs/user-guide.md index d545262c..d7d743ac 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -430,6 +430,12 @@ performed. pages" section in [Firmware Design]. This flag is disabled by default and affects all BL images. +* `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of + image loading, which provides more flexibility and scalability around what + images are loaded and executed during boot. Default is 0. + Note: `TRUSTED_BOARD_BOOT` is currently not supported when `LOAD_IMAGE_V2` + is enabled. + #### ARM development platform specific build options * `ARM_TSP_RAM_LOCATION`: location of the TSP binary. Options: diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 942843cf..9fa2a810 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -93,11 +93,22 @@ #define EP_GET_EXE(x) (x & EP_EXE_MASK) #define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee)) +#define EP_FIRST_EXE_MASK 0x10 +#define EP_FIRST_EXE 0x10 +#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK) +#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee)) + #define PARAM_EP 0x01 #define PARAM_IMAGE_BINARY 0x02 #define PARAM_BL31 0x03 +#define PARAM_BL_LOAD_INFO 0x04 +#define PARAM_BL_PARAMS 0x05 + +#define IMAGE_ATTRIB_SKIP_LOADING 0x02 +#define IMAGE_ATTRIB_PLAT_SETUP 0x04 #define VERSION_1 0x01 +#define VERSION_2 0x02 #define INVALID_IMAGE_ID (0xFFFFFFFF) @@ -181,8 +192,10 @@ extern uintptr_t __COHERENT_RAM_END__; typedef struct meminfo { uintptr_t total_base; size_t total_size; +#if !LOAD_IMAGE_V2 uintptr_t free_base; size_t free_size; +#endif } meminfo_t; typedef struct aapcs64_params { @@ -245,6 +258,9 @@ typedef struct image_info { param_header_t h; uintptr_t image_base; /* physical address of base of image */ uint32_t image_size; /* bytes read from image file */ +#if LOAD_IMAGE_V2 + uint32_t image_max_size; +#endif } image_info_t; /***************************************************************************** @@ -263,6 +279,39 @@ typedef struct image_desc { entry_point_info_t ep_info; } image_desc_t; +#if LOAD_IMAGE_V2 +/* BL image node in the BL image loading sequence */ +typedef struct bl_load_info_node { + unsigned int image_id; + image_info_t *image_info; + struct bl_load_info_node *next_load_info; +} bl_load_info_node_t; + +/* BL image head node in the BL image loading sequence */ +typedef struct bl_load_info { + param_header_t h; + bl_load_info_node_t *head; +} bl_load_info_t; + +/* BL image node in the BL image execution sequence */ +typedef struct bl_params_node { + unsigned int image_id; + image_info_t *image_info; + entry_point_info_t *ep_info; + struct bl_params_node *next_params_info; +} bl_params_node_t; + +/* + * BL image head node in the BL image execution sequence + * It is also used to pass information to next BL image. 
+ */ +typedef struct bl_params { + param_header_t h; + bl_params_node_t *head; +} bl_params_t; + +#else /* LOAD_IMAGE_V2 */ + /******************************************************************************* * This structure represents the superset of information that can be passed to * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be @@ -286,6 +335,7 @@ typedef struct bl31_params { image_info_t *bl33_image_info; } bl31_params_t; +#endif /* LOAD_IMAGE_V2 */ /* * Compile time assertions related to the 'entry_point_info' structure to @@ -308,24 +358,34 @@ CASSERT(sizeof(uintptr_t) == /******************************************************************************* * Function & variable prototypes ******************************************************************************/ -uintptr_t page_align(uintptr_t, unsigned); size_t image_size(unsigned int image_id); + +#if LOAD_IMAGE_V2 + +int load_image(unsigned int image_id, image_info_t *image_data); +int load_auth_image(unsigned int image_id, image_info_t *image_data); + +#else + +uintptr_t page_align(uintptr_t, unsigned); int load_image(meminfo_t *mem_layout, unsigned int image_id, uintptr_t image_base, image_info_t *image_data, entry_point_info_t *entry_point_info); int load_auth_image(meminfo_t *mem_layout, - unsigned int image_name, + unsigned int image_id, uintptr_t image_base, image_info_t *image_data, entry_point_info_t *entry_point_info); -extern const char build_message[]; -extern const char version_string[]; - void reserve_mem(uintptr_t *free_base, size_t *free_size, uintptr_t addr, size_t size); +#endif /* LOAD_IMAGE_V2 */ + +extern const char build_message[]; +extern const char version_string[]; + void print_entry_point_info(const entry_point_info_t *ep_info); #endif /*__ASSEMBLY__*/ diff --git a/include/common/desc_image_load.h b/include/common/desc_image_load.h new file mode 100644 index 00000000..78342627 --- /dev/null +++ b/include/common/desc_image_load.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __DESC_IMAGE_LOAD_H__ +#define __DESC_IMAGE_LOAD_H__ + +#include + +#if LOAD_IMAGE_V2 +/* Following structure is used to store BL ep/image info. */ +typedef struct bl_mem_params_node { + unsigned int image_id; + image_info_t image_info; + entry_point_info_t ep_info; + unsigned int next_handoff_image_id; + bl_load_info_node_t load_node_mem; + bl_params_node_t params_node_mem; +} bl_mem_params_node_t; + +/* + * Macro to register list of BL image descriptors, + * defined as an array of bl_mem_params_node_t. + */ +#define REGISTER_BL_IMAGE_DESCS(_img_desc) \ + bl_mem_params_node_t *bl_mem_params_desc_ptr = &_img_desc[0]; \ + unsigned int bl_mem_params_desc_num = ARRAY_SIZE(_img_desc); + +/* BL image loading utility functions */ +void flush_bl_params_desc(void); +int get_bl_params_node_index(unsigned int image_id); +bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id); +bl_load_info_t *get_bl_load_info_from_mem_params_desc(void); +bl_params_t *get_next_bl_params_from_mem_params_desc(void); + + +#endif /* LOAD_IMAGE_V2 */ +#endif /* __DESC_IMAGE_LOAD_H__ */ diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h index 1d2a3739..852ccbcd 100644 --- a/include/plat/common/platform.h +++ b/include/plat/common/platform.h @@ -44,6 +44,8 @@ struct image_info; struct entry_point_info; struct bl31_params; struct image_desc; +struct bl_load_info; +struct bl_params; /******************************************************************************* * plat_get_rotpk_info() flags @@ -138,6 +140,15 @@ void bl2_plat_arch_setup(void); void bl2_platform_setup(void); struct meminfo *bl2_plat_sec_mem_layout(void); +#if LOAD_IMAGE_V2 +/* + * This function can be used by the platforms to update/use image + * information for given `image_id`. 
+ */ +int bl2_plat_handle_post_image_load(unsigned int image_id); + +#else /* LOAD_IMAGE_V2 */ + /* * This function returns a pointer to the shared memory that the platform has * kept aside to pass trusted firmware related information that BL31
@@ -194,6 +205,8 @@ void bl2_plat_set_bl32_ep_info(struct image_info *image, /* Gets the memory layout for BL32 */ void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info); +#endif /* LOAD_IMAGE_V2 */ + /******************************************************************************* * Optional BL2 functions (may be overridden) ******************************************************************************/
@@ -218,8 +231,13 @@ int bl2u_plat_handle_scp_bl2u(void); /******************************************************************************* * Mandatory BL31 functions ******************************************************************************/ +#if LOAD_IMAGE_V2 +void bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2); +#else void bl31_early_platform_setup(struct bl31_params *from_bl2, void *plat_params_from_bl2); +#endif void bl31_plat_arch_setup(void); void bl31_platform_setup(void); void bl31_plat_runtime_setup(void);
@@ -257,6 +275,31 @@ int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr); int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr); +#if LOAD_IMAGE_V2 +/******************************************************************************* + * Mandatory BL image load functions (may be overridden). + ******************************************************************************/ +/* + * This function returns a pointer to the list of images that the + * platform has populated to load. + */ +struct bl_load_info *plat_get_bl_image_load_info(void); + +/* + * This function returns a pointer to the shared memory that the + * platform has kept aside to pass trusted firmware related + * information that the next BL image could need. + */ +struct bl_params *plat_get_next_bl_params(void); + +/* + * This function flushes to main memory all the params that are + * passed to the next image. + */ +void plat_flush_next_bl_params(void); + +#endif /* LOAD_IMAGE_V2 */ + #if ENABLE_PLAT_COMPAT /* * The below declarations are to enable compatibility for the platform ports
-- cgit
From 42019bf4e93a111984af9dc44608d8d5203a3b1d Mon Sep 17 00:00:00 2001
From: Yatharth Kochar
Date: Mon, 12 Sep 2016 16:10:33 +0100
Subject: Changes for new version of image loading in BL1/BL2

This patch adds changes in BL1 & BL2 to use the new version of image loading to load the BL images.

Following are the changes in BL1:
-Use the new version of load_auth_image() to load BL2.
-Modified `bl1_init_bl2_mem_layout()` to remove the use of `reserve_mem()` and to calculate `bl2_mem_layout`. The `bl2_mem_layout` calculation now assumes that BL1 RW data is at the top of bl1_mem_layout, which is more restrictive than the previous BL1 behaviour.

Following are the changes in BL2:
-`bl2_main.c` is refactored and all the functions for loading BLxx images are now moved to `bl2_image_load.c`. `bl2_main.c` now calls a top-level `bl2_load_images()` to load all the images that are applicable in BL2.
-Added a new file, `bl2_image_load_v2.c`, that uses the new version of image loading to load the BL images in BL2.

All the above changes are conditionally compiled using the `LOAD_IMAGE_V2` flag.
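To make the new, more restrictive `bl2_mem_layout` derivation described above concrete, here is a minimal sketch with invented example addresses (the values are hypothetical and not taken from any particular platform port):

    /* Hypothetical example values, for illustration only:
     *   bl1_mem_layout->total_base == 0x04001000  (base of Trusted SRAM seen by BL1)
     *   BL1_RW_BASE                == 0x0403C000  (BL1 RW data, assumed to sit at the top)
     */
    bl2_mem_layout->total_base = bl1_mem_layout->total_base;               /* 0x04001000 */
    bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base; /* 0x0003B000 */

With these values BL2 sees [0x04001000, 0x0403C000) and cannot touch the BL1 RW data above it, whereas the old scheme exposed the full layout to BL2 and only carved the BL1 region out of the free area with reserve_mem().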
Change-Id: Ic6dcde5a484495bdc05526d9121c59fa50c1bf23 --- bl1/bl1_main.c | 25 +++- bl2/bl2.mk | 6 + bl2/bl2_image_load.c | 285 +++++++++++++++++++++++++++++++++++++++ bl2/bl2_image_load_v2.c | 117 ++++++++++++++++ bl2/bl2_main.c | 258 ++--------------------------------- bl2/bl2_private.h | 8 +- include/plat/common/common_def.h | 13 ++ 7 files changed, 460 insertions(+), 252 deletions(-) create mode 100644 bl2/bl2_image_load.c create mode 100644 bl2/bl2_image_load_v2.c diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c index cb1bc186..68a17a3c 100644 --- a/bl1/bl1_main.c +++ b/bl1/bl1_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -64,11 +64,19 @@ static void bl1_load_bl2(void); void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout, meminfo_t *bl2_mem_layout) { - const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE; assert(bl1_mem_layout != NULL); assert(bl2_mem_layout != NULL); +#if LOAD_IMAGE_V2 + /* + * Remove BL1 RW data from the scope of memory visible to BL2. + * This is assuming BL1 RW data is at the top of bl1_mem_layout. + */ + assert(BL1_RW_BASE > bl1_mem_layout->total_base); + bl2_mem_layout->total_base = bl1_mem_layout->total_base; + bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base; +#else /* Check that BL1's memory is lying outside of the free memory */ assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) || (BL1_RAM_BASE >= bl1_mem_layout->free_base + @@ -79,7 +87,8 @@ void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout, reserve_mem(&bl2_mem_layout->total_base, &bl2_mem_layout->total_size, BL1_RAM_BASE, - bl1_size); + BL1_RAM_LIMIT - BL1_RAM_BASE); +#endif /* LOAD_IMAGE_V2 */ flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t)); } @@ -182,6 +191,9 @@ void bl1_load_bl2(void) INFO("BL1: Loading BL2\n"); +#if LOAD_IMAGE_V2 + err = load_auth_image(BL2_IMAGE_ID, image_info); +#else /* Load the BL2 image */ err = load_auth_image(bl1_tzram_layout, BL2_IMAGE_ID, @@ -189,6 +201,8 @@ void bl1_load_bl2(void) image_info, ep_info); +#endif /* LOAD_IMAGE_V2 */ + if (err) { ERROR("Failed to load BL2 firmware.\n"); plat_error_handler(err); @@ -201,7 +215,12 @@ void bl1_load_bl2(void) * to BL2. BL2 will read the memory layout before using its * memory for other purposes. */ +#if LOAD_IMAGE_V2 + bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->total_base; +#else bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base; +#endif /* LOAD_IMAGE_V2 */ + bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout); ep_info->args.arg1 = (unsigned long)bl2_tzram_layout; diff --git a/bl2/bl2.mk b/bl2/bl2.mk index d7907389..6cb478d1 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -34,4 +34,10 @@ BL2_SOURCES += bl2/bl2_main.c \ common/aarch64/early_exceptions.S \ lib/locks/exclusive/aarch64/spinlock.S +ifeq (${LOAD_IMAGE_V2},1) +BL2_SOURCES += bl2/bl2_image_load_v2.c +else +BL2_SOURCES += bl2/bl2_image_load.c +endif + BL2_LINKERFILE := bl2/bl2.ld.S diff --git a/bl2/bl2_image_load.c b/bl2/bl2_image_load.c new file mode 100644 index 00000000..ee0eb963 --- /dev/null +++ b/bl2/bl2_image_load.c @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Check for platforms that use obsolete image terminology + */ +#ifdef BL30_BASE +# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE" +#endif + +/******************************************************************************* + * Load the SCP_BL2 image if there's one. + * If a platform does not want to attempt to load SCP_BL2 image it must leave + * SCP_BL2_BASE undefined. + * Return 0 on success or if there's no SCP_BL2 image to load, a negative error + * code otherwise. + ******************************************************************************/ +static int load_scp_bl2(void) +{ + int e = 0; +#ifdef SCP_BL2_BASE + meminfo_t scp_bl2_mem_info; + image_info_t scp_bl2_image_info; + + /* + * It is up to the platform to specify where SCP_BL2 should be loaded if + * it exists. It could create space in the secure sram or point to a + * completely different memory. + * + * The entry point information is not relevant in this case as the AP + * won't execute the SCP_BL2 image. + */ + INFO("BL2: Loading SCP_BL2\n"); + bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info); + scp_bl2_image_info.h.version = VERSION_1; + e = load_auth_image(&scp_bl2_mem_info, + SCP_BL2_IMAGE_ID, + SCP_BL2_BASE, + &scp_bl2_image_info, + NULL); + + if (e == 0) { + /* The subsequent handling of SCP_BL2 is platform specific */ + e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info); + if (e) { + ERROR("Failure in platform-specific handling of SCP_BL2 image.\n"); + } + } +#endif /* SCP_BL2_BASE */ + + return e; +} + +#ifndef EL3_PAYLOAD_BASE +/******************************************************************************* + * Load the BL31 image. + * The bl2_to_bl31_params and bl31_ep_info params will be updated with the + * relevant BL31 information. + * Return 0 on success, a negative error code otherwise. 
+ ******************************************************************************/ +static int load_bl31(bl31_params_t *bl2_to_bl31_params, + entry_point_info_t *bl31_ep_info) +{ + meminfo_t *bl2_tzram_layout; + int e; + + INFO("BL2: Loading BL31\n"); + assert(bl2_to_bl31_params != NULL); + assert(bl31_ep_info != NULL); + + /* Find out how much free trusted ram remains after BL2 load */ + bl2_tzram_layout = bl2_plat_sec_mem_layout(); + + /* Set the X0 parameter to BL31 */ + bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params; + + /* Load the BL31 image */ + e = load_auth_image(bl2_tzram_layout, + BL31_IMAGE_ID, + BL31_BASE, + bl2_to_bl31_params->bl31_image_info, + bl31_ep_info); + + if (e == 0) { + bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info, + bl31_ep_info); + } + + return e; +} + +/******************************************************************************* + * Load the BL32 image if there's one. + * The bl2_to_bl31_params param will be updated with the relevant BL32 + * information. + * If a platform does not want to attempt to load BL32 image it must leave + * BL32_BASE undefined. + * Return 0 on success or if there's no BL32 image to load, a negative error + * code otherwise. + ******************************************************************************/ +static int load_bl32(bl31_params_t *bl2_to_bl31_params) +{ + int e = 0; +#ifdef BL32_BASE + meminfo_t bl32_mem_info; + + INFO("BL2: Loading BL32\n"); + assert(bl2_to_bl31_params != NULL); + + /* + * It is up to the platform to specify where BL32 should be loaded if + * it exists. It could create space in the secure sram or point to a + * completely different memory. + */ + bl2_plat_get_bl32_meminfo(&bl32_mem_info); + e = load_auth_image(&bl32_mem_info, + BL32_IMAGE_ID, + BL32_BASE, + bl2_to_bl31_params->bl32_image_info, + bl2_to_bl31_params->bl32_ep_info); + + if (e == 0) { + bl2_plat_set_bl32_ep_info( + bl2_to_bl31_params->bl32_image_info, + bl2_to_bl31_params->bl32_ep_info); + } +#endif /* BL32_BASE */ + + return e; +} + +#ifndef PRELOADED_BL33_BASE +/******************************************************************************* + * Load the BL33 image. + * The bl2_to_bl31_params param will be updated with the relevant BL33 + * information. + * Return 0 on success, a negative error code otherwise. + ******************************************************************************/ +static int load_bl33(bl31_params_t *bl2_to_bl31_params) +{ + meminfo_t bl33_mem_info; + int e; + + INFO("BL2: Loading BL33\n"); + assert(bl2_to_bl31_params != NULL); + + bl2_plat_get_bl33_meminfo(&bl33_mem_info); + + /* Load the BL33 image in non-secure memory provided by the platform */ + e = load_auth_image(&bl33_mem_info, + BL33_IMAGE_ID, + plat_get_ns_image_entrypoint(), + bl2_to_bl31_params->bl33_image_info, + bl2_to_bl31_params->bl33_ep_info); + + if (e == 0) { + bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info, + bl2_to_bl31_params->bl33_ep_info); + } + + return e; +} +#endif /* PRELOADED_BL33_BASE */ + +#endif /* EL3_PAYLOAD_BASE */ + +/******************************************************************************* + * This function loads SCP_BL2/BL3x images and returns the ep_info for + * the next executable image. 
+ ******************************************************************************/ +entry_point_info_t *bl2_load_images(void) +{ + bl31_params_t *bl2_to_bl31_params; + entry_point_info_t *bl31_ep_info; + int e; + + e = load_scp_bl2(); + if (e) { + ERROR("Failed to load SCP_BL2 (%i)\n", e); + plat_error_handler(e); + } + + /* Perform platform setup in BL2 after loading SCP_BL2 */ + bl2_platform_setup(); + + /* + * Get a pointer to the memory the platform has set aside to pass + * information to BL31. + */ + bl2_to_bl31_params = bl2_plat_get_bl31_params(); + bl31_ep_info = bl2_plat_get_bl31_ep_info(); + +#ifdef EL3_PAYLOAD_BASE + /* + * In the case of an EL3 payload, we don't need to load any further + * images. Just update the BL31 entrypoint info structure to make BL1 + * jump to the EL3 payload. + * The pointer to the memory the platform has set aside to pass + * information to BL31 in the normal boot flow is reused here, even + * though only a fraction of the information contained in the + * bl31_params_t structure makes sense in the context of EL3 payloads. + * This will be refined in the future. + */ + INFO("BL2: Populating the entrypoint info for the EL3 payload\n"); + bl31_ep_info->pc = EL3_PAYLOAD_BASE; + bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params; + bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info); +#else + e = load_bl31(bl2_to_bl31_params, bl31_ep_info); + if (e) { + ERROR("Failed to load BL31 (%i)\n", e); + plat_error_handler(e); + } + + e = load_bl32(bl2_to_bl31_params); + if (e) { + if (e == -EAUTH) { + ERROR("Failed to authenticate BL32\n"); + plat_error_handler(e); + } else { + WARN("Failed to load BL32 (%i)\n", e); + } + } + +#ifdef PRELOADED_BL33_BASE + /* + * In this case, don't load the BL33 image as it's already loaded in + * memory. Update BL33 entrypoint information. + */ + INFO("BL2: Populating the entrypoint info for the preloaded BL33\n"); + bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE; + bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info); +#else + e = load_bl33(bl2_to_bl31_params); + if (e) { + ERROR("Failed to load BL33 (%i)\n", e); + plat_error_handler(e); + } +#endif /* PRELOADED_BL33_BASE */ + +#endif /* EL3_PAYLOAD_BASE */ + + /* Flush the params to be passed to memory */ + bl2_plat_flush_bl31_params(); + + return bl31_ep_info; +} diff --git a/bl2/bl2_image_load_v2.c b/bl2/bl2_image_load_v2.c new file mode 100644 index 00000000..4fab6556 --- /dev/null +++ b/bl2/bl2_image_load_v2.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/******************************************************************************* + * This function loads SCP_BL2/BL3x images and returns the ep_info for + * the next executable image. + ******************************************************************************/ +entry_point_info_t *bl2_load_images(void) +{ + bl_params_t *bl2_to_next_bl_params; + bl_load_info_t *bl2_load_info; + const bl_load_info_node_t *bl2_node_info; + int plat_setup_done = 0; + int err; + + /* + * Get information about the images to load. + */ + bl2_load_info = plat_get_bl_image_load_info(); + assert(bl2_load_info); + assert(bl2_load_info->head); + assert(bl2_load_info->h.type == PARAM_BL_LOAD_INFO); + assert(bl2_load_info->h.version >= VERSION_2); + bl2_node_info = bl2_load_info->head; + + while (bl2_node_info) { + /* + * Perform platform setup before loading the image, + * if indicated in the image attributes AND if NOT + * already done before. + */ + if (bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_PLAT_SETUP) { + if (plat_setup_done) { + WARN("BL2: Platform setup already done!!\n"); + } else { + INFO("BL2: Doing platform setup\n"); + bl2_platform_setup(); + plat_setup_done = 1; + } + } + + if (!(bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_SKIP_LOADING)) { + INFO("BL2: Loading image id %d\n", bl2_node_info->image_id); + err = load_auth_image(bl2_node_info->image_id, + bl2_node_info->image_info); + if (err) { + ERROR("BL2: Failed to load image (%i)\n", err); + plat_error_handler(err); + } + } else { + INFO("BL2: Skip loading image id %d\n", bl2_node_info->image_id); + } + + /* Allow platform to handle image information. */ + err = bl2_plat_handle_post_image_load(bl2_node_info->image_id); + if (err) { + ERROR("BL2: Failure in post image load handling (%i)\n", err); + plat_error_handler(err); + } + + /* Go to next image */ + bl2_node_info = bl2_node_info->next_load_info; + } + + /* + * Get information to pass to the next image. + */ + bl2_to_next_bl_params = plat_get_next_bl_params(); + assert(bl2_to_next_bl_params); + assert(bl2_to_next_bl_params->head); + assert(bl2_to_next_bl_params->h.type == PARAM_BL_PARAMS); + assert(bl2_to_next_bl_params->h.version >= VERSION_2); + + /* Flush the parameters to be passed to next image */ + plat_flush_next_bl_params(); + + return bl2_to_next_bl_params->head->ep_info; +} diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c index c8fd683c..fc93e1b1 100644 --- a/bl2/bl2_main.c +++ b/bl2/bl2_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,192 +28,23 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include #include -#include #include #include #include #include -#include #include -#include -#include #include "bl2_private.h" -/* - * Check for platforms that use obsolete image terminology - */ -#ifdef BL30_BASE -# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE" -#endif - -/******************************************************************************* - * Load the SCP_BL2 image if there's one. - * If a platform does not want to attempt to load SCP_BL2 image it must leave - * SCP_BL2_BASE undefined. - * Return 0 on success or if there's no SCP_BL2 image to load, a negative error - * code otherwise. - ******************************************************************************/ -static int load_scp_bl2(void) -{ - int e = 0; -#ifdef SCP_BL2_BASE - meminfo_t scp_bl2_mem_info; - image_info_t scp_bl2_image_info; - - /* - * It is up to the platform to specify where SCP_BL2 should be loaded if - * it exists. It could create space in the secure sram or point to a - * completely different memory. - * - * The entry point information is not relevant in this case as the AP - * won't execute the SCP_BL2 image. - */ - INFO("BL2: Loading SCP_BL2\n"); - bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info); - scp_bl2_image_info.h.version = VERSION_1; - e = load_auth_image(&scp_bl2_mem_info, - SCP_BL2_IMAGE_ID, - SCP_BL2_BASE, - &scp_bl2_image_info, - NULL); - - if (e == 0) { - /* The subsequent handling of SCP_BL2 is platform specific */ - e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info); - if (e) { - ERROR("Failure in platform-specific handling of SCP_BL2 image.\n"); - } - } -#endif /* SCP_BL2_BASE */ - - return e; -} - -#ifndef EL3_PAYLOAD_BASE -/******************************************************************************* - * Load the BL31 image. - * The bl2_to_bl31_params and bl31_ep_info params will be updated with the - * relevant BL31 information. - * Return 0 on success, a negative error code otherwise. - ******************************************************************************/ -static int load_bl31(bl31_params_t *bl2_to_bl31_params, - entry_point_info_t *bl31_ep_info) -{ - meminfo_t *bl2_tzram_layout; - int e; - - INFO("BL2: Loading BL31\n"); - assert(bl2_to_bl31_params != NULL); - assert(bl31_ep_info != NULL); - - /* Find out how much free trusted ram remains after BL2 load */ - bl2_tzram_layout = bl2_plat_sec_mem_layout(); - - /* Set the X0 parameter to BL31 */ - bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params; - - /* Load the BL31 image */ - e = load_auth_image(bl2_tzram_layout, - BL31_IMAGE_ID, - BL31_BASE, - bl2_to_bl31_params->bl31_image_info, - bl31_ep_info); - - if (e == 0) { - bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info, - bl31_ep_info); - } - - return e; -} - -/******************************************************************************* - * Load the BL32 image if there's one. - * The bl2_to_bl31_params param will be updated with the relevant BL32 - * information. - * If a platform does not want to attempt to load BL32 image it must leave - * BL32_BASE undefined. - * Return 0 on success or if there's no BL32 image to load, a negative error - * code otherwise. - ******************************************************************************/ -static int load_bl32(bl31_params_t *bl2_to_bl31_params) -{ - int e = 0; -#ifdef BL32_BASE - meminfo_t bl32_mem_info; - - INFO("BL2: Loading BL32\n"); - assert(bl2_to_bl31_params != NULL); - - /* - * It is up to the platform to specify where BL32 should be loaded if - * it exists. 
It could create space in the secure sram or point to a - * completely different memory. - */ - bl2_plat_get_bl32_meminfo(&bl32_mem_info); - e = load_auth_image(&bl32_mem_info, - BL32_IMAGE_ID, - BL32_BASE, - bl2_to_bl31_params->bl32_image_info, - bl2_to_bl31_params->bl32_ep_info); - - if (e == 0) { - bl2_plat_set_bl32_ep_info( - bl2_to_bl31_params->bl32_image_info, - bl2_to_bl31_params->bl32_ep_info); - } -#endif /* BL32_BASE */ - - return e; -} - -#ifndef PRELOADED_BL33_BASE -/******************************************************************************* - * Load the BL33 image. - * The bl2_to_bl31_params param will be updated with the relevant BL33 - * information. - * Return 0 on success, a negative error code otherwise. - ******************************************************************************/ -static int load_bl33(bl31_params_t *bl2_to_bl31_params) -{ - meminfo_t bl33_mem_info; - int e; - - INFO("BL2: Loading BL33\n"); - assert(bl2_to_bl31_params != NULL); - - bl2_plat_get_bl33_meminfo(&bl33_mem_info); - - /* Load the BL33 image in non-secure memory provided by the platform */ - e = load_auth_image(&bl33_mem_info, - BL33_IMAGE_ID, - plat_get_ns_image_entrypoint(), - bl2_to_bl31_params->bl33_image_info, - bl2_to_bl31_params->bl33_ep_info); - - if (e == 0) { - bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info, - bl2_to_bl31_params->bl33_ep_info); - } - - return e; -} -#endif /* PRELOADED_BL33_BASE */ - -#endif /* EL3_PAYLOAD_BASE */ /******************************************************************************* * The only thing to do in BL2 is to load further images and pass control to - * BL31. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2 runs - * entirely in S-EL1. + * next BL. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2 + * runs entirely in S-EL1. ******************************************************************************/ void bl2_main(void) { - bl31_params_t *bl2_to_bl31_params; - entry_point_info_t *bl31_ep_info; - int e; + entry_point_info_t *next_bl_ep_info; NOTICE("BL2: %s\n", version_string); NOTICE("BL2: %s\n", build_message); @@ -226,82 +57,13 @@ void bl2_main(void) auth_mod_init(); #endif /* TRUSTED_BOARD_BOOT */ - /* - * Load the subsequent bootloader images - */ - e = load_scp_bl2(); - if (e) { - ERROR("Failed to load SCP_BL2 (%i)\n", e); - plat_error_handler(e); - } - - /* Perform platform setup in BL2 after loading SCP_BL2 */ - bl2_platform_setup(); - - /* - * Get a pointer to the memory the platform has set aside to pass - * information to BL31. - */ - bl2_to_bl31_params = bl2_plat_get_bl31_params(); - bl31_ep_info = bl2_plat_get_bl31_ep_info(); - -#ifdef EL3_PAYLOAD_BASE - /* - * In the case of an EL3 payload, we don't need to load any further - * images. Just update the BL31 entrypoint info structure to make BL1 - * jump to the EL3 payload. - * The pointer to the memory the platform has set aside to pass - * information to BL31 in the normal boot flow is reused here, even - * though only a fraction of the information contained in the - * bl31_params_t structure makes sense in the context of EL3 payloads. - * This will be refined in the future. 
- */ - INFO("BL2: Populating the entrypoint info for the EL3 payload\n"); - bl31_ep_info->pc = EL3_PAYLOAD_BASE; - bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params; - bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info); -#else - e = load_bl31(bl2_to_bl31_params, bl31_ep_info); - if (e) { - ERROR("Failed to load BL31 (%i)\n", e); - plat_error_handler(e); - } - - e = load_bl32(bl2_to_bl31_params); - if (e) { - if (e == -EAUTH) { - ERROR("Failed to authenticate BL32\n"); - plat_error_handler(e); - } else { - WARN("Failed to load BL32 (%i)\n", e); - } - } - -#ifdef PRELOADED_BL33_BASE - /* - * In this case, don't load the BL33 image as it's already loaded in - * memory. Update BL33 entrypoint information. - */ - INFO("BL2: Populating the entrypoint info for the preloaded BL33\n"); - bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE; - bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info); -#else - e = load_bl33(bl2_to_bl31_params); - if (e) { - ERROR("Failed to load BL33 (%i)\n", e); - plat_error_handler(e); - } -#endif /* PRELOADED_BL33_BASE */ - -#endif /* EL3_PAYLOAD_BASE */ - - /* Flush the params to be passed to memory */ - bl2_plat_flush_bl31_params(); + /* Load the subsequent bootloader images. */ + next_bl_ep_info = bl2_load_images(); /* - * Run BL31 via an SMC to BL1. Information on how to pass control to - * the BL32 (if present) and BL33 software images will be passed to - * BL31 as an argument. + * Run next BL image via an SMC to BL1. Information on how to pass + * control to the BL32 (if present) and BL33 software images will + * be passed to next BL image as an argument. */ - smc(BL1_SMC_RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0); + smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0); } diff --git a/bl2/bl2_private.h b/bl2/bl2_private.h index 022d1e93..b3397778 100644 --- a/bl2/bl2_private.h +++ b/bl2/bl2_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,9 +31,15 @@ #ifndef __BL2_PRIVATE_H__ #define __BL2_PRIVATE_H__ +/****************************************** + * Forward declarations + *****************************************/ +struct entry_point_info; + /****************************************** * Function prototypes *****************************************/ void bl2_arch_setup(void); +struct entry_point_info *bl2_load_images(void); #endif /* __BL2_PRIVATE_H__ */ diff --git a/include/plat/common/common_def.h b/include/plat/common/common_def.h index d6b77727..7fef3392 100644 --- a/include/plat/common/common_def.h +++ b/include/plat/common/common_def.h @@ -70,6 +70,18 @@ #define MAKE_ULL(x) x #endif +#if LOAD_IMAGE_V2 +#define BL2_IMAGE_DESC { \ + .image_id = BL2_IMAGE_ID, \ + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \ + VERSION_2, image_info_t, 0), \ + .image_info.image_base = BL2_BASE, \ + .image_info.image_max_size = BL2_LIMIT - BL2_BASE,\ + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, \ + VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\ + .ep_info.pc = BL2_BASE, \ +} +#else /* LOAD_IMAGE_V2 */ #define BL2_IMAGE_DESC { \ .image_id = BL2_IMAGE_ID, \ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \ @@ -79,6 +91,7 @@ VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),\ .ep_info.pc = BL2_BASE, \ } +#endif /* LOAD_IMAGE_V2 */ /* * The following constants identify the extents of the code & read-only data -- cgit From a8aa7fec1d4a6df8617c0d0463f1e10f1827a609 Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Tue, 13 Sep 2016 17:07:57 +0100 Subject: ARM platform changes for new version of image loading This patch adds changes in ARM platform code to use new version of image loading. Following are the major changes: -Refactor the signatures for bl31_early_platform_setup() and arm_bl31_early_platform_setup() function to use `void *` instead of `bl31_params_t *`. -Introduce `plat_arm_bl2_handle_scp_bl2()` to handle loading of SCP_BL2 image from BL2. -Remove usage of reserve_mem() function from `arm_bl1_early_platform_setup()` -Extract BL32 & BL33 entrypoint info, from the link list passed by BL2, in `arm_bl31_early_platform_setup()` -Provides weak definitions for following platform functions: plat_get_bl_image_load_info plat_get_next_bl_params plat_flush_next_bl_params bl2_plat_handle_post_image_load -Instantiates a descriptor array for ARM platforms describing image and entrypoint information for `SCP_BL2`, `BL31`, `BL32` and `BL33` images. All the above changes are conditionally compiled using the `LOAD_IMAGE_V2` flag. 
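As an illustration only (not part of this patch), a platform port built on this
framework registers a descriptor array and can then rely on the weak
plat_get_bl_image_load_info()/plat_get_next_bl_params() implementations to
generate the load and hand-off lists. The sketch below assumes hypothetical
MY_BL31_BASE/MY_BL31_LIMIT defines, shows only a single BL31 entry, and the
header names are assumptions based on the files referenced in this series.

    /* Minimal, hypothetical descriptor list for a platform's BL2. */
    #include <arch.h>
    #include <bl_common.h>
    #include <desc_image_load.h>
    #include <platform_def.h>

    static bl_mem_params_node_t my_bl2_mem_params_descs[] = {
        {
            .image_id = BL31_IMAGE_ID,

            SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
                VERSION_2, entry_point_info_t,
                SECURE | EXECUTABLE | EP_FIRST_EXE),
            .ep_info.pc = MY_BL31_BASE,            /* hypothetical define */
            .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
                DISABLE_ALL_EXCEPTIONS),

            SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
                VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
            .image_info.image_base = MY_BL31_BASE,
            .image_info.image_max_size = MY_BL31_LIMIT - MY_BL31_BASE,

            /* No further hand-off in this minimal example. */
            .next_handoff_image_id = INVALID_IMAGE_ID,
        },
    };

    /* Exposes the array to the generic helpers in common/desc_image_load.c. */
    REGISTER_BL_IMAGE_DESCS(my_bl2_mem_params_descs)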
Change-Id: I5e88b9785a3df1a2b2bbbb37d85b8e353ca61049 --- include/plat/arm/common/plat_arm.h | 14 ++ plat/arm/board/fvp/fvp_bl31_setup.c | 6 +- plat/arm/board/juno/include/platform_def.h | 6 + plat/arm/common/aarch64/arm_bl2_mem_params_desc.c | 152 ++++++++++++++++++++++ plat/arm/common/arm_bl1_setup.c | 5 +- plat/arm/common/arm_bl2_setup.c | 63 ++++++++- plat/arm/common/arm_bl31_setup.c | 61 +++++++-- plat/arm/common/arm_common.mk | 5 + plat/arm/common/arm_image_load.c | 65 +++++++++ plat/arm/css/common/css_bl2_setup.c | 8 ++ 10 files changed, 368 insertions(+), 17 deletions(-) create mode 100644 plat/arm/common/aarch64/arm_bl2_mem_params_desc.c create mode 100644 plat/arm/common/arm_image_load.c diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index d2e8729b..581573b2 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -42,6 +42,7 @@ ******************************************************************************/ struct bl31_params; struct meminfo; +struct image_info; #define ARM_CASSERT_MMAP \ CASSERT((ARRAY_SIZE(plat_arm_mmap) + ARM_BL_REGIONS) \ @@ -164,8 +165,13 @@ void arm_bl2u_platform_setup(void); void arm_bl2u_plat_arch_setup(void); /* BL31 utility functions */ +#if LOAD_IMAGE_V2 +void arm_bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2); +#else void arm_bl31_early_platform_setup(struct bl31_params *from_bl2, void *plat_params_from_bl2); +#endif /* LOAD_IMAGE_V2 */ void arm_bl31_platform_setup(void); void arm_bl31_plat_runtime_setup(void); void arm_bl31_plat_arch_setup(void); @@ -194,6 +200,14 @@ void plat_arm_interconnect_init(void); void plat_arm_interconnect_enter_coherency(void); void plat_arm_interconnect_exit_coherency(void); +#if LOAD_IMAGE_V2 +/* + * This function is called after loading SCP_BL2 image and it is used to perform + * any platform-specific actions required to handle the SCP firmware. + */ +int plat_arm_bl2_handle_scp_bl2(struct image_info *scp_bl2_image_info); +#endif + /* * Optional functions required in ARM standard platforms */ diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c index 2ee3ba56..f16d6f0d 100644 --- a/plat/arm/board/fvp/fvp_bl31_setup.c +++ b/plat/arm/board/fvp/fvp_bl31_setup.c @@ -31,9 +31,13 @@ #include #include "fvp_private.h" - +#if LOAD_IMAGE_V2 +void bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) +#else void bl31_early_platform_setup(bl31_params_t *from_bl2, void *plat_params_from_bl2) +#endif { arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2); diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h index c53e938f..691e2f77 100644 --- a/plat/arm/board/juno/include/platform_def.h +++ b/plat/arm/board/juno/include/platform_def.h @@ -184,6 +184,12 @@ #define PLAT_CSS_PRIMARY_CPU_SHIFT 8 #define PLAT_CSS_PRIMARY_CPU_BIT_WIDTH 4 +/* + * PLAT_CSS_MAX_SCP_BL2_SIZE is calculated using the current + * SCP_BL2 size plus a little space for growth. + */ +#define PLAT_CSS_MAX_SCP_BL2_SIZE 0x1D000 + /* * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3 * terminology. 
On a GICv2 system or mode, the lists will be merged and treated diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c new file mode 100644 index 00000000..64315f7a --- /dev/null +++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. 
+ ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { +#ifdef SCP_BL2_BASE + /* Fill SCP_BL2 related information if it exists */ + { + .image_id = SCP_BL2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), + .image_info.image_base = SCP_BL2_BASE, + .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +#endif /* SCP_BL2_BASE */ + +#ifdef EL3_PAYLOAD_BASE + /* Fill EL3 payload related information (BL31 is EL3 payload)*/ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = EL3_PAYLOAD_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING), + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + +#else /* EL3_PAYLOAD_BASE */ + + /* Fill BL31 related information */ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), +#if DEBUG + .ep_info.args.arg1 = ARM_BL31_PLAT_PARAM_VAL, +#endif + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = BL31_LIMIT - BL31_BASE, + +# ifdef BL32_BASE + .next_handoff_image_id = BL32_IMAGE_ID, +# else + .next_handoff_image_id = BL33_IMAGE_ID, +# endif + }, + +# ifdef BL32_BASE + /* Fill BL32 related information */ + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, SECURE | EXECUTABLE), + .ep_info.pc = BL32_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_LIMIT - BL32_BASE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, +# endif /* BL32_BASE */ + + /* Fill BL33 related information */ + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), +# ifdef PRELOADED_BL33_BASE + .ep_info.pc = PRELOADED_BL33_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), +# else + .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET, + .image_info.image_max_size = ARM_DRAM1_SIZE, +# endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +#endif /* EL3_PAYLOAD_BASE */ +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c index c94f0cd7..6782f889 100644 --- a/plat/arm/common/arm_bl1_setup.c +++ b/plat/arm/common/arm_bl1_setup.c @@ -73,7 +73,6 @@ meminfo_t *bl1_plat_sec_mem_layout(void) ******************************************************************************/ void arm_bl1_early_platform_setup(void) { - const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE; #if !ARM_DISABLE_TRUSTED_WDOG /* Enable 
watchdog */ @@ -88,13 +87,15 @@ void arm_bl1_early_platform_setup(void) bl1_tzram_layout.total_base = ARM_BL_RAM_BASE; bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE; +#if !LOAD_IMAGE_V2 /* Calculate how much RAM BL1 is using and how much remains free */ bl1_tzram_layout.free_base = ARM_BL_RAM_BASE; bl1_tzram_layout.free_size = ARM_BL_RAM_SIZE; reserve_mem(&bl1_tzram_layout.free_base, &bl1_tzram_layout.free_size, BL1_RAM_BASE, - bl1_size); + BL1_RAM_LIMIT - BL1_RAM_BASE); +#endif /* LOAD_IMAGE_V2 */ } void bl1_early_platform_setup(void) diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index b6afaa7f..a2ae11a2 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -30,10 +30,13 @@ #include #include +#include #include #include -#include +#include +#include #include +#include #include #if USE_COHERENT_MEM @@ -51,6 +54,17 @@ /* Data structure which holds the extents of the trusted SRAM for BL2 */ static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE); +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak bl2_early_platform_setup +#pragma weak bl2_platform_setup +#pragma weak bl2_plat_arch_setup +#pragma weak bl2_plat_sec_mem_layout + +#if LOAD_IMAGE_V2 + +#pragma weak bl2_plat_handle_post_image_load + +#else /* LOAD_IMAGE_V2 */ /******************************************************************************* * This structure represents the superset of information that is passed to @@ -72,10 +86,6 @@ static bl2_to_bl31_params_mem_t bl31_params_mem; /* Weak definitions may be overridden in specific ARM standard platform */ -#pragma weak bl2_early_platform_setup -#pragma weak bl2_platform_setup -#pragma weak bl2_plat_arch_setup -#pragma weak bl2_plat_sec_mem_layout #pragma weak bl2_plat_get_bl31_params #pragma weak bl2_plat_get_bl31_ep_info #pragma weak bl2_plat_flush_bl31_params @@ -106,7 +116,7 @@ meminfo_t *bl2_plat_sec_mem_layout(void) { return &bl2_tzram_layout; } -#endif +#endif /* ARM_BL31_IN_DRAM */ /******************************************************************************* * This function assigns a pointer to the memory that the platform has kept @@ -180,6 +190,7 @@ struct entry_point_info *bl2_plat_get_bl31_ep_info(void) return &bl31_params_mem.bl31_ep_info; } +#endif /* LOAD_IMAGE_V2 */ /******************************************************************************* * BL1 has passed the extents of the trusted SRAM that should be visible to BL2 @@ -243,6 +254,44 @@ void bl2_plat_arch_setup(void) arm_bl2_plat_arch_setup(); } +#if LOAD_IMAGE_V2 +/******************************************************************************* + * This function can be used by the platforms to update/use image + * information for given `image_id`. 
+ ******************************************************************************/ +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + int err = 0; + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + assert(bl_mem_params); + + switch (image_id) { + case BL32_IMAGE_ID: + bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry(); + break; + + case BL33_IMAGE_ID: + /* BL33 expects to receive the primary CPU MPID (through r0) */ + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry(); + break; + +#ifdef SCP_BL2_BASE + case SCP_BL2_IMAGE_ID: + /* The subsequent handling of SCP_BL2 is platform specific */ + err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info); + if (err) { + WARN("Failure in platform-specific handling of SCP_BL2 image.\n"); + } + break; +#endif + } + + return err; +} + +#else /* LOAD_IMAGE_V2 */ + /******************************************************************************* * Populate the extents of memory available for loading SCP_BL2 (if used), * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2. @@ -321,3 +370,5 @@ void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo) bl33_meminfo->free_base = ARM_NS_DRAM1_BASE; bl33_meminfo->free_size = ARM_NS_DRAM1_SIZE; } + +#endif /* LOAD_IMAGE_V2 */ diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index 4ed24772..bc1ec11e 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -98,8 +99,13 @@ entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) * while creating page tables. BL2 has flushed this information to memory, so * we are guaranteed to pick up good data. ******************************************************************************/ +#if LOAD_IMAGE_V2 +void arm_bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) +#else void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, void *plat_params_from_bl2) +#endif { /* Initialize the console to provide early debug support */ console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ, @@ -135,13 +141,8 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); -#else - /* - * Check params passed from BL2 should not be NULL, - */ - assert(from_bl2 != NULL); - assert(from_bl2->h.type == PARAM_BL31); - assert(from_bl2->h.version >= VERSION_1); +#else /* RESET_TO_BL31 */ + /* * In debug builds, we pass a special value in 'plat_params_from_bl2' * to verify platform parameters from BL2 to BL31. @@ -150,6 +151,43 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, assert(((unsigned long long)plat_params_from_bl2) == ARM_BL31_PLAT_PARAM_VAL); +# if LOAD_IMAGE_V2 + /* + * Check params passed from BL2 should not be NULL, + */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + assert(params_from_bl2->h.type == PARAM_BL_PARAMS); + assert(params_from_bl2->h.version >= VERSION_2); + + bl_params_node_t *bl_params = params_from_bl2->head; + + /* + * Copy BL33 and BL32 (if present), entry point information. + * They are stored in Secure RAM, in BL2's address space. 
+ */ + while (bl_params) { + if (bl_params->image_id == BL32_IMAGE_ID) + bl32_image_ep_info = *bl_params->ep_info; + + if (bl_params->image_id == BL33_IMAGE_ID) + bl33_image_ep_info = *bl_params->ep_info; + + bl_params = bl_params->next_params_info; + } + + if (bl33_image_ep_info.pc == 0) + panic(); + +# else /* LOAD_IMAGE_V2 */ + + /* + * Check params passed from BL2 should not be NULL, + */ + assert(from_bl2 != NULL); + assert(from_bl2->h.type == PARAM_BL31); + assert(from_bl2->h.version >= VERSION_1); + /* * Copy BL32 (if populated by BL2) and BL33 entry point information. * They are stored in Secure RAM, in BL2's address space. @@ -157,11 +195,18 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2, if (from_bl2->bl32_ep_info) bl32_image_ep_info = *from_bl2->bl32_ep_info; bl33_image_ep_info = *from_bl2->bl33_ep_info; -#endif + +# endif /* LOAD_IMAGE_V2 */ +#endif /* RESET_TO_BL31 */ } +#if LOAD_IMAGE_V2 +void bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) +#else void bl31_early_platform_setup(bl31_params_t *from_bl2, void *plat_params_from_bl2) +#endif { arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2); diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index 0b961ea7..51bd60c9 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -129,6 +129,11 @@ BL2_SOURCES += drivers/io/io_fip.c \ plat/arm/common/arm_bl2_setup.c \ plat/arm/common/arm_io_storage.c \ plat/common/aarch64/platform_up_stack.S +ifeq (${LOAD_IMAGE_V2},1) +BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\ + plat/arm/common/arm_image_load.c \ + common/desc_image_load.c +endif BL2U_SOURCES += plat/arm/common/arm_bl2u_setup.c \ plat/common/aarch64/platform_up_stack.S diff --git a/plat/arm/common/arm_image_load.c b/plat/arm/common/arm_image_load.c new file mode 100644 index 00000000..cb6db77b --- /dev/null +++ b/plat/arm/common/arm_image_load.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + + +#pragma weak plat_flush_next_bl_params +#pragma weak plat_get_bl_image_load_info +#pragma weak plat_get_next_bl_params + + +/******************************************************************************* + * This function flushes the data structures so that they are visible + * in memory for the next BL image. + ******************************************************************************/ +void plat_flush_next_bl_params(void) +{ + flush_bl_params_desc(); +} + +/******************************************************************************* + * This function returns the list of loadable images. + ******************************************************************************/ +bl_load_info_t *plat_get_bl_image_load_info(void) +{ + return get_bl_load_info_from_mem_params_desc(); +} + +/******************************************************************************* + * This function returns the list of executable images. + ******************************************************************************/ +bl_params_t *plat_get_next_bl_params(void) +{ + return get_next_bl_params_from_mem_params_desc(); +} diff --git a/plat/arm/css/common/css_bl2_setup.c b/plat/arm/css/common/css_bl2_setup.c index 15db8d1c..11ca3423 100644 --- a/plat/arm/css/common/css_bl2_setup.c +++ b/plat/arm/css/common/css_bl2_setup.c @@ -37,13 +37,21 @@ #include "css_scp_bootloader.h" /* Weak definition may be overridden in specific CSS based platform */ +#if LOAD_IMAGE_V2 +#pragma weak plat_arm_bl2_handle_scp_bl2 +#else #pragma weak bl2_plat_handle_scp_bl2 +#endif /******************************************************************************* * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol. * Return 0 on success, -1 otherwise. ******************************************************************************/ +#if LOAD_IMAGE_V2 +int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info) +#else int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info) +#endif { int ret; -- cgit From 1a0a3f0622e4b569513304109d9a0d093b71228a Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Tue, 28 Jun 2016 16:58:26 +0100 Subject: AArch32: Common changes needed for BL1/BL2 This patch adds common changes to support AArch32 state in BL1 and BL2. Following are the changes: * Added functions for disabling MMU from Secure state. * Added AArch32 specific SMC function. * Added semihosting support. * Added reporting of unhandled exceptions. * Added uniprocessor stack support. * Added `el3_entrypoint_common` macro that can be shared by BL1 and BL32 (SP_MIN) BL stages. The `el3_entrypoint_common` is similar to the AArch64 counterpart with the main difference in the assembly instructions and the registers that are relevant to AArch32 execution state. * Enabled `LOAD_IMAGE_V2` flag in Makefile for `ARCH=aarch32` and added check to make sure that platform has not overridden to disable it. 
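For illustration only (not part of this patch): the AArch32 smc() helper added
to arch_helpers.h takes all eight GP-register arguments; r0-r3 arrive in
registers and r4-r6 are reloaded from the stack by the helper before the SMC
is issued, so a BL2 hand-off to BL1 keeps the same shape as on AArch64. The
function name below is hypothetical, and BL1_SMC_RUN_IMAGE plus the header
names are assumed from the existing tree.

    /* Hypothetical sketch of issuing the run-image SMC from AArch32 C code. */
    #include <arch_helpers.h>
    #include <bl1.h>
    #include <bl_common.h>

    static void run_next_bl_image(entry_point_info_t *next_bl_ep_info)
    {
        /* r0 = SMC function ID, r1 = entry point info for the next image. */
        smc(BL1_SMC_RUN_IMAGE, (uintptr_t)next_bl_ep_info,
            0, 0, 0, 0, 0, 0);
        /* smc() is declared __dead2 on AArch32, so control never returns. */
    }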
Change-Id: I33c6d8dfefb2e5d142fdfd06a0f4a7332962e1a3 --- Makefile | 12 + common/aarch32/debug.S | 12 + docs/porting-guide.md | 13 +- include/common/aarch32/asm_macros.S | 10 + include/common/aarch32/el3_common_macros.S | 278 ++++++++++++++++++++++ include/lib/aarch32/arch.h | 1 + include/lib/aarch32/arch_helpers.h | 8 + include/lib/cpus/aarch32/cpu_macros.S | 8 + include/plat/common/common_def.h | 6 +- include/plat/common/platform.h | 2 +- lib/aarch32/misc_helpers.S | 36 +++ lib/cpus/aarch32/cpu_helpers.S | 6 + lib/semihosting/aarch32/semihosting_call.S | 38 +++ plat/arm/board/common/aarch32/board_arm_helpers.S | 55 +++++ plat/arm/board/common/board_common.mk | 6 +- plat/arm/board/fvp/aarch32/fvp_helpers.S | 13 + plat/common/aarch32/platform_helpers.S | 10 + plat/common/aarch32/platform_up_stack.S | 71 ++++++ 18 files changed, 575 insertions(+), 10 deletions(-) create mode 100644 include/common/aarch32/el3_common_macros.S create mode 100644 lib/semihosting/aarch32/semihosting_call.S create mode 100644 plat/arm/board/common/aarch32/board_arm_helpers.S create mode 100644 plat/common/aarch32/platform_up_stack.S diff --git a/Makefile b/Makefile index 376db8d2..15b1ac87 100644 --- a/Makefile +++ b/Makefile @@ -188,6 +188,10 @@ ifneq (${GENERATE_COT},0) FWU_FIP_DEPS += fwu_certificates endif +# For AArch32, enable new version of image loading. +ifeq (${ARCH},aarch32) + LOAD_IMAGE_V2 := 1 +endif ################################################################################ # Toolchain @@ -364,6 +368,14 @@ ifeq (${LOAD_IMAGE_V2},1) endif endif +# For AArch32, LOAD_IMAGE_V2 must be enabled. +ifeq (${ARCH},aarch32) + ifeq (${LOAD_IMAGE_V2}, 0) + $(error "For AArch32, LOAD_IMAGE_V2 must be enabled.") + endif +endif + + ################################################################################ # Process platform overrideable behaviour ################################################################################ diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S index 01ec1e38..6be69512 100644 --- a/common/aarch32/debug.S +++ b/common/aarch32/debug.S @@ -32,6 +32,7 @@ #include .globl do_panic + .globl report_exception /*********************************************************** * The common implementation of do_panic for all BL stages @@ -40,3 +41,14 @@ func do_panic b plat_panic_handler endfunc do_panic + /*********************************************************** + * This function is called from the vector table for + * unhandled exceptions. It reads the current mode and + * passes it to platform. + ***********************************************************/ +func report_exception + mrs r0, cpsr + and r0, #MODE32_MASK + bl plat_report_exception + bl plat_panic_handler +endfunc report_exception diff --git a/docs/porting-guide.md b/docs/porting-guide.md index f42ff649..93c0169a 100644 --- a/docs/porting-guide.md +++ b/docs/porting-guide.md @@ -776,11 +776,15 @@ called in the following circumstances: The default implementation doesn't do anything, to avoid making assumptions about the way the platform displays its status information. -This function receives the exception type as its argument. Possible values for -exceptions types are listed in the [include/common/bl_common.h] header file. -Note that these constants are not related to any architectural exception code; -they are just an ARM Trusted Firmware convention. +For AArch64, this function receives the exception type as its argument. 
+Possible values for exceptions types are listed in the +[include/common/bl_common.h] header file. Note that these constants are not +related to any architectural exception code; they are just an ARM Trusted +Firmware convention. +For AArch32, this function receives the exception mode as its argument. +Possible values for exception modes are listed in the +[include/lib/aarch32/arch.h] header file. ### Function : plat_reset_handler() @@ -2234,6 +2238,7 @@ _Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved._ [plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S [plat/arm/board/fvp/fvp_pm.c]: ../plat/arm/board/fvp/fvp_pm.c [include/common/bl_common.h]: ../include/common/bl_common.h +[include/lib/aarch32/arch.h]: ../include/lib/aarch32/arch.h [include/plat/arm/common/arm_def.h]: ../include/plat/arm/common/arm_def.h [include/plat/common/common_def.h]: ../include/plat/common/common_def.h [include/plat/common/platform.h]: ../include/plat/common/platform.h diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S index 11e45bbf..5f044991 100644 --- a/include/common/aarch32/asm_macros.S +++ b/include/common/aarch32/asm_macros.S @@ -69,6 +69,16 @@ lsl \reg, \reg, \tmp .endm + /* + * Declare the exception vector table, enforcing it is aligned on a + * 32 byte boundary. + */ + .macro vector_base label + .section .vectors, "ax" + .align 5 + \label: + .endm + /* * This macro calculates the base address of the current CPU's multi * processor(MP) stack using the plat_my_core_pos() index, the name of diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S new file mode 100644 index 00000000..a572ef99 --- /dev/null +++ b/include/common/aarch32/el3_common_macros.S @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __EL3_COMMON_MACROS_S__ +#define __EL3_COMMON_MACROS_S__ + +#include +#include +#include + + /* + * Helper macro to initialise EL3 registers we care about. + */ + .macro el3_arch_init_common _exception_vectors + /* --------------------------------------------------------------------- + * Enable the instruction cache and alignment checks + * --------------------------------------------------------------------- + */ + ldr r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT) + ldcopr r0, SCTLR + orr r0, r0, r1 + stcopr r0, SCTLR + isb + + /* --------------------------------------------------------------------- + * Set the exception vectors (VBAR/MVBAR). + * --------------------------------------------------------------------- + */ + ldr r0, =\_exception_vectors + stcopr r0, VBAR + stcopr r0, MVBAR + isb + + /* ----------------------------------------------------- + * Enable the SIF bit to disable instruction fetches + * from Non-secure memory. + * ----------------------------------------------------- + */ + ldcopr r0, SCR + orr r0, r0, #SCR_SIF_BIT + stcopr r0, SCR + + /* ----------------------------------------------------- + * Enable the Asynchronous data abort now that the + * exception vectors have been setup. + * ----------------------------------------------------- + */ + cpsie a + isb + + /* Enable access to Advanced SIMD registers */ + ldcopr r0, NSACR + bic r0, r0, #NSASEDIS_BIT + bic r0, r0, #NSTRCDIS_BIT + orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT) + stcopr r0, NSACR + isb + + /* + * Enable access to Advanced SIMD, Floating point and to the Trace + * functionality as well. + */ + ldcopr r0, CPACR + bic r0, r0, #ASEDIS_BIT + bic r0, r0, #TRCDIS_BIT + orr r0, r0, #CPACR_ENABLE_FP_ACCESS + stcopr r0, CPACR + isb + + vmrs r0, FPEXC + orr r0, r0, #FPEXC_EN_BIT + vmsr FPEXC, r0 + isb + .endm + +/* ----------------------------------------------------------------------------- + * This is the super set of actions that need to be performed during a cold boot + * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN). + * + * This macro will always perform reset handling, architectural initialisations + * and stack setup. The rest of the actions are optional because they might not + * be needed, depending on the context in which this macro is called. This is + * why this macro is parameterised ; each parameter allows to enable/disable + * some actions. + * + * _set_endian: + * Whether the macro needs to configure the endianness of data accesses. + * + * _warm_boot_mailbox: + * Whether the macro needs to detect the type of boot (cold/warm). The + * detection is based on the platform entrypoint address : if it is zero + * then it is a cold boot, otherwise it is a warm boot. In the latter case, + * this macro jumps on the platform entrypoint address. + * + * _secondary_cold_boot: + * Whether the macro needs to identify the CPU that is calling it: primary + * CPU or secondary CPU. The primary CPU will be allowed to carry on with + * the platform initialisations, while the secondaries will be put in a + * platform-specific state in the meantime. + * + * If the caller knows this macro will only be called by the primary CPU + * then this parameter can be defined to 0 to skip this step. + * + * _init_memory: + * Whether the macro needs to initialise the memory. + * + * _init_c_runtime: + * Whether the macro needs to initialise the C runtime environment. + * + * _exception_vectors: + * Address of the exception vectors to program in the VBAR_EL3 register. 
+ * ----------------------------------------------------------------------------- + */ + .macro el3_entrypoint_common \ + _set_endian, _warm_boot_mailbox, _secondary_cold_boot, \ + _init_memory, _init_c_runtime, _exception_vectors + + /* Make sure we are in Secure Mode */ +#if ASM_ASSERTION + ldcopr r0, SCR + tst r0, #SCR_NS_BIT + ASM_ASSERT(eq) +#endif + + .if \_set_endian + /* ------------------------------------------------------------- + * Set the CPU endianness before doing anything that might + * involve memory reads or writes. + * ------------------------------------------------------------- + */ + ldcopr r0, SCTLR + bic r0, r0, #SCTLR_EE_BIT + stcopr r0, SCTLR + isb + .endif /* _set_endian */ + + /* Switch to monitor mode */ + cps #MODE32_mon + isb + + .if \_warm_boot_mailbox + /* ------------------------------------------------------------- + * This code will be executed for both warm and cold resets. + * Now is the time to distinguish between the two. + * Query the platform entrypoint address and if it is not zero + * then it means it is a warm boot so jump to this address. + * ------------------------------------------------------------- + */ + bl plat_get_my_entrypoint + cmp r0, #0 + bxne r0 + .endif /* _warm_boot_mailbox */ + + /* --------------------------------------------------------------------- + * It is a cold boot. + * Perform any processor specific actions upon reset e.g. cache, TLB + * invalidations etc. + * --------------------------------------------------------------------- + */ + bl reset_handler + + el3_arch_init_common \_exception_vectors + + .if \_secondary_cold_boot + /* ------------------------------------------------------------- + * Check if this is a primary or secondary CPU cold boot. + * The primary CPU will set up the platform while the + * secondaries are placed in a platform-specific state until the + * primary CPU performs the necessary actions to bring them out + * of that state and allows entry into the OS. + * ------------------------------------------------------------- + */ + bl plat_is_my_cpu_primary + cmp r0, #0 + bne do_primary_cold_boot + + /* This is a cold boot on a secondary CPU */ + bl plat_secondary_cold_boot_setup + /* plat_secondary_cold_boot_setup() is not supposed to return */ + bl plat_panic_handler + + do_primary_cold_boot: + .endif /* _secondary_cold_boot */ + + /* --------------------------------------------------------------------- + * Initialize memory now. Secondary CPU initialization won't get to this + * point. + * --------------------------------------------------------------------- + */ + + .if \_init_memory + bl platform_mem_init + .endif /* _init_memory */ + + /* --------------------------------------------------------------------- + * Init C runtime environment: + * - Zero-initialise the NOBITS sections. There are 2 of them: + * - the .bss section; + * - the coherent memory section (if any). + * - Relocate the data section from ROM to RAM, if required. + * --------------------------------------------------------------------- + */ + .if \_init_c_runtime +#if IMAGE_BL32 + /* ----------------------------------------------------------------- + * Invalidate the RW memory used by the BL32 (SP_MIN) image. This + * includes the data and NOBITS sections. This is done to + * safeguard against possible corruption of this memory by + * dirty cache lines in a system cache as a result of use by + * an earlier boot loader stage. 
+ * ----------------------------------------------------------------- + */ + ldr r0, =__RW_START__ + ldr r1, =__RW_END__ + sub r1, r1, r0 + bl inv_dcache_range +#endif /* IMAGE_BL32 */ + + ldr r0, =__BSS_START__ + ldr r1, =__BSS_SIZE__ + bl zeromem + +#if USE_COHERENT_MEM + ldr r0, =__COHERENT_RAM_START__ + ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__ + bl zeromem +#endif + +#if IMAGE_BL1 + /* ----------------------------------------------------- + * Copy data from ROM to RAM. + * ----------------------------------------------------- + */ + ldr r0, =__DATA_RAM_START__ + ldr r1, =__DATA_ROM_START__ + ldr r2, =__DATA_SIZE__ + bl memcpy +#endif + .endif /* _init_c_runtime */ + + /* --------------------------------------------------------------------- + * Allocate a stack whose memory will be marked as Normal-IS-WBWA when + * the MMU is enabled. There is no risk of reading stale stack memory + * after enabling the MMU as only the primary CPU is running at the + * moment. + * --------------------------------------------------------------------- + */ + bl plat_set_my_stack + .endm + +#endif /* __EL3_COMMON_MACROS_S__ */ diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h index 6653cd14..aba15df1 100644 --- a/include/lib/aarch32/arch.h +++ b/include/lib/aarch32/arch.h @@ -191,6 +191,7 @@ /* NASCR definitions */ #define NSASEDIS_BIT (1 << 15) +#define NSTRCDIS_BIT (1 << 20) #define NASCR_CP11_BIT (1 << 11) #define NASCR_CP10_BIT (1 << 10) diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h index ddf660b1..3b4349c3 100644 --- a/include/lib/aarch32/arch_helpers.h +++ b/include/lib/aarch32/arch_helpers.h @@ -187,6 +187,9 @@ void flush_dcache_range(uintptr_t addr, size_t size); void clean_dcache_range(uintptr_t addr, size_t size); void inv_dcache_range(uintptr_t addr, size_t size); +void disable_mmu_secure(void); +void disable_mmu_icache_secure(void); + DEFINE_SYSOP_FUNC(wfi) DEFINE_SYSOP_FUNC(wfe) DEFINE_SYSOP_FUNC(sev) @@ -196,6 +199,9 @@ DEFINE_SYSOP_TYPE_FUNC(dsb, ish) DEFINE_SYSOP_TYPE_FUNC(dmb, ish) DEFINE_SYSOP_FUNC(isb) +void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3, + uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7); + DEFINE_SYSREG_RW_FUNCS(spsr) DEFINE_SYSREG_RW_FUNCS(cpsr) @@ -289,4 +295,6 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC) #define read_cntpct_el0() read64_cntpct() +#define read_ctr_el0() read_ctr() + #endif /* __ARCH_HELPERS_H__ */ diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S index f58f3e94..2b9947e3 100644 --- a/include/lib/cpus/aarch32/cpu_macros.S +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -42,12 +42,16 @@ CPU_MIDR: /* cpu_ops midr */ .space 4 /* Reset fn is needed during reset */ +#if IMAGE_BL1 || IMAGE_BL32 CPU_RESET_FUNC: /* cpu_ops reset_func */ .space 4 +#endif +#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */ CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */ .space 4 CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */ .space 4 +#endif CPU_OPS_SIZE = . /* @@ -60,13 +64,17 @@ CPU_OPS_SIZE = . 
.align 2 .type cpu_ops_\_name, %object .word \_midr +#if IMAGE_BL1 || IMAGE_BL32 .if \_noresetfunc .word 0 .else .word \_name\()_reset_func .endif +#endif +#if IMAGE_BL32 .word \_name\()_core_pwr_dwn .word \_name\()_cluster_pwr_dwn +#endif .endm #endif /* __CPU_MACROS_S__ */ diff --git a/include/plat/common/common_def.h b/include/plat/common/common_def.h index 7fef3392..e2c45138 100644 --- a/include/plat/common/common_def.h +++ b/include/plat/common/common_def.h @@ -41,9 +41,13 @@ /* * Platform binary types for linking */ +#ifdef AARCH32 +#define PLATFORM_LINKER_FORMAT "elf32-littlearm" +#define PLATFORM_LINKER_ARCH arm +#else #define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" #define PLATFORM_LINKER_ARCH aarch64 - +#endif /* AARCH32 */ /* * Generic platform constants diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h index 852ccbcd..5b4d11df 100644 --- a/include/plat/common/platform.h +++ b/include/plat/common/platform.h @@ -86,7 +86,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type, * Optional common functions (may be overridden) ******************************************************************************/ uintptr_t plat_get_my_stack(void); -void plat_report_exception(unsigned long); +void plat_report_exception(unsigned int exception_type); int plat_crash_console_init(void); int plat_crash_console_putc(int c); void plat_error_handler(int err) __dead2; diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S index 63ac1a7e..fd7c6dd1 100644 --- a/lib/aarch32/misc_helpers.S +++ b/lib/aarch32/misc_helpers.S @@ -32,7 +32,21 @@ #include #include + .globl smc .globl zeromem + .globl disable_mmu_icache_secure + .globl disable_mmu_secure + +func smc + /* + * For AArch32 only r0-r3 will be in the registers; + * rest r4-r6 will be pushed on to the stack. So here, we'll + * have to load them from the stack to registers r4-r6 explicitly. + * Clobbers: r4-r6 + */ + ldm sp, {r4, r5, r6} + smc #0 +endfunc smc /* ----------------------------------------------------------------------- * void zeromem(void *mem, unsigned int length); @@ -58,3 +72,25 @@ z_loop: z_end: bx lr endfunc zeromem + +/* --------------------------------------------------------------------------- + * Disable the MMU in Secure State + * --------------------------------------------------------------------------- + */ + +func disable_mmu_secure + mov r1, #(SCTLR_M_BIT | SCTLR_C_BIT) +do_disable_mmu: + ldcopr r0, SCTLR + bic r0, r0, r1 + stcopr r0, SCTLR + isb // ensure MMU is off + dsb sy + bx lr +endfunc disable_mmu_secure + + +func disable_mmu_icache_secure + ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT) + b do_disable_mmu +endfunc disable_mmu_icache_secure diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S index 927a6f50..042ffbdd 100644 --- a/lib/cpus/aarch32/cpu_helpers.S +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -34,6 +34,7 @@ #include #include +#if IMAGE_BL1 || IMAGE_BL32 /* * The reset handler common to all platforms. After a matching * cpu_ops structure entry is found, the correponding reset_handler @@ -65,6 +66,9 @@ func reset_handler bx lr endfunc reset_handler +#endif /* IMAGE_BL1 || IMAGE_BL32 */ + +#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */ /* * The prepare core power down function for all platforms. 
After * the cpu_ops pointer is retrieved from cpu_data, the corresponding @@ -132,6 +136,8 @@ func init_cpu_ops pop {r4 - r6, pc} endfunc init_cpu_ops +#endif /* IMAGE_BL32 */ + /* * The below function returns the cpu_ops structure matching the * midr of the core. It reads the MIDR and finds the matching diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S new file mode 100644 index 00000000..0cc707a0 --- /dev/null +++ b/lib/semihosting/aarch32/semihosting_call.S @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + + .globl semihosting_call + +func semihosting_call + svc #0x123456 + bx lr +endfunc semihosting_call diff --git a/plat/arm/board/common/aarch32/board_arm_helpers.S b/plat/arm/board/common/aarch32/board_arm_helpers.S new file mode 100644 index 00000000..893267ba --- /dev/null +++ b/plat/arm/board/common/aarch32/board_arm_helpers.S @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include + + .globl plat_report_exception + + + /* ------------------------------------------------------- + * void plat_report_exception(unsigned int type) + * Function to report an unhandled exception + * with platform-specific means. + * On FVP platform, it updates the LEDs + * to indicate where we are. + * SYS_LED[0] - 0x0 + * SYS_LED[2:1] - 0x0 + * SYS_LED[7:3] - Exception Mode. + * Clobbers: r0-r1 + * ------------------------------------------------------- + */ +func plat_report_exception + lsl r0, r0, #V2M_SYS_LED_EC_SHIFT + ldr r1, =V2M_SYSREGS_BASE + add r1, r1, #V2M_SYS_LED + str r0, [r1] + bx lr +endfunc plat_report_exception diff --git a/plat/arm/board/common/board_common.mk b/plat/arm/board/common/board_common.mk index 49136e68..a6d4ce77 100644 --- a/plat/arm/board/common/board_common.mk +++ b/plat/arm/board/common/board_common.mk @@ -31,10 +31,8 @@ PLAT_INCLUDES += -Iinclude/plat/arm/board/common/ \ -Iinclude/plat/arm/board/common/drivers -PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S -ifeq (${ARCH}, aarch64) -PLAT_BL_COMMON_SOURCES += plat/arm/board/common/aarch64/board_arm_helpers.S -endif +PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \ + plat/arm/board/common/${ARCH}/board_arm_helpers.S BL1_SOURCES += plat/arm/board/common/drivers/norflash/norflash.c diff --git a/plat/arm/board/fvp/aarch32/fvp_helpers.S b/plat/arm/board/fvp/aarch32/fvp_helpers.S index 373036c9..4c750cb0 100644 --- a/plat/arm/board/fvp/aarch32/fvp_helpers.S +++ b/plat/arm/board/fvp/aarch32/fvp_helpers.S @@ -34,9 +34,22 @@ #include "../drivers/pwrc/fvp_pwrc.h" #include "../fvp_def.h" + .globl plat_secondary_cold_boot_setup .globl plat_get_my_entrypoint .globl plat_is_my_cpu_primary + /* -------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup (void); + * + * For AArch32, cold-booting secondary CPUs is not yet + * implemented and they panic. + * -------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup +cb_panic: + b cb_panic +endfunc plat_secondary_cold_boot_setup + /* --------------------------------------------------------------------- * unsigned long plat_get_my_entrypoint (void); * diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S index 481dd68d..069d96d0 100644 --- a/plat/common/aarch32/platform_helpers.S +++ b/plat/common/aarch32/platform_helpers.S @@ -33,6 +33,7 @@ .weak plat_my_core_pos .weak plat_reset_handler + .weak plat_disable_acp .weak platform_mem_init .weak plat_panic_handler @@ -59,6 +60,15 @@ func plat_reset_handler bx lr endfunc plat_reset_handler + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. 
+ * ----------------------------------------------------- + */ +func plat_disable_acp + bx lr +endfunc plat_disable_acp + /* --------------------------------------------------------------------- * Placeholder function which should be redefined by * each platform. diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S new file mode 100644 index 00000000..8275aec8 --- /dev/null +++ b/plat/common/aarch32/platform_up_stack.S @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + + .globl plat_get_my_stack + .globl plat_set_my_stack + + /* ----------------------------------------------------- + * unsigned long plat_get_my_stack () + * + * For cold-boot BL images, only the primary CPU needs + * a stack. This function returns the stack pointer for + * a stack allocated in normal memory. + * ----------------------------------------------------- + */ +func plat_get_my_stack + get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE + bx lr +endfunc plat_get_my_stack + + /* ----------------------------------------------------- + * void plat_set_my_stack () + * + * For cold-boot BL images, only the primary CPU needs + * a stack. This function sets the stack pointer to a + * stack allocated in normal memory. + * ----------------------------------------------------- + */ +func plat_set_my_stack + get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE + mov sp, r0 + bx lr +endfunc plat_set_my_stack + + /* ----------------------------------------------------- + * Per-cpu stacks in normal memory. Each cpu gets a + * stack of PLATFORM_STACK_SIZE bytes. 
+ * ----------------------------------------------------- + */ +declare_stack platform_normal_stacks, tzfw_normal_stacks, \ + PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE -- cgit From f3b4914be3b41eb2231184f7af80240296f668c5 Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Tue, 28 Jun 2016 17:07:09 +0100 Subject: AArch32: Add generic changes in BL1 This patch adds generic changes in BL1 to support AArch32 state. New AArch32 specific assembly/C files are introduced and some files are moved to AArch32/64 specific folders. BL1 for AArch64 is refactored but functionally identical. BL1 executes in Secure Monitor mode in AArch32 state. NOTE: BL1 in AArch32 state ONLY handles BL1_RUN_IMAGE SMC. Change-Id: I6e2296374c7efbf3cf2aa1a0ce8de0732d8c98a5 --- bl1/aarch32/bl1_arch_setup.c | 38 +++++++ bl1/aarch32/bl1_context_mgmt.c | 177 +++++++++++++++++++++++++++++++++ bl1/aarch32/bl1_entrypoint.S | 124 +++++++++++++++++++++++ bl1/aarch32/bl1_exceptions.S | 96 ++++++++++++++++++ bl1/aarch64/bl1_context_mgmt.c | 124 +++++++++++++++++++++++ bl1/aarch64/bl1_exceptions.S | 6 +- bl1/bl1.mk | 18 ++-- bl1/bl1_context_mgmt.c | 124 ----------------------- bl1/bl1_main.c | 29 ++++-- bl1/bl1_private.h | 12 +-- include/lib/el3_runtime/context_mgmt.h | 4 + 11 files changed, 602 insertions(+), 150 deletions(-) create mode 100644 bl1/aarch32/bl1_arch_setup.c create mode 100644 bl1/aarch32/bl1_context_mgmt.c create mode 100644 bl1/aarch32/bl1_entrypoint.S create mode 100644 bl1/aarch32/bl1_exceptions.S create mode 100644 bl1/aarch64/bl1_context_mgmt.c delete mode 100644 bl1/bl1_context_mgmt.c diff --git a/bl1/aarch32/bl1_arch_setup.c b/bl1/aarch32/bl1_arch_setup.c new file mode 100644 index 00000000..6b906c7e --- /dev/null +++ b/bl1/aarch32/bl1_arch_setup.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/******************************************************************************* + * TODO: Function that does the first bit of architectural setup. 
+ ******************************************************************************/ +void bl1_arch_setup(void) +{ + +} diff --git a/bl1/aarch32/bl1_context_mgmt.c b/bl1/aarch32/bl1_context_mgmt.c new file mode 100644 index 00000000..c7d3c12e --- /dev/null +++ b/bl1/aarch32/bl1_context_mgmt.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Following arrays will be used for context management. + * There are 2 instances, for the Secure and Non-Secure contexts. + */ +static cpu_context_t bl1_cpu_context[2]; +static smc_ctx_t bl1_smc_context[2]; + +/* Following contains the next cpu context pointer. */ +static void *bl1_next_cpu_context_ptr; + +/* Following contains the next smc context pointer. */ +static void *bl1_next_smc_context_ptr; + +/* Following functions are used for SMC context handling */ +void *smc_get_ctx(int security_state) +{ + assert(sec_state_is_valid(security_state)); + return &bl1_smc_context[security_state]; +} + +void smc_set_next_ctx(int security_state) +{ + assert(sec_state_is_valid(security_state)); + bl1_next_smc_context_ptr = &bl1_smc_context[security_state]; +} + +void *smc_get_next_ctx(void) +{ + return bl1_next_smc_context_ptr; +} + +/* Following functions are used for CPU context handling */ +void *cm_get_context(uint32_t security_state) +{ + assert(sec_state_is_valid(security_state)); + return &bl1_cpu_context[security_state]; +} + +void cm_set_next_context(void *cpu_context) +{ + assert(cpu_context); + bl1_next_cpu_context_ptr = cpu_context; +} + +void *cm_get_next_context(void) +{ + return bl1_next_cpu_context_ptr; +} + +/******************************************************************************* + * Following function copies GP regs r0-r4, lr and spsr, + * from the CPU context to the SMC context structures. 
+ ******************************************************************************/ +static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx, + smc_ctx_t *next_smc_ctx) +{ + next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0); + next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1); + next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2); + next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3); + next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR); + next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR); +} + +/******************************************************************************* + * Following function flushes the SMC & CPU context pointer and its data. + ******************************************************************************/ +static void flush_smc_and_cpu_ctx(void) +{ + flush_dcache_range((uintptr_t)&bl1_next_smc_context_ptr, + sizeof(bl1_next_smc_context_ptr)); + flush_dcache_range((uintptr_t)bl1_next_smc_context_ptr, + sizeof(smc_ctx_t)); + + flush_dcache_range((uintptr_t)&bl1_next_cpu_context_ptr, + sizeof(bl1_next_cpu_context_ptr)); + flush_dcache_range((uintptr_t)bl1_next_cpu_context_ptr, + sizeof(cpu_context_t)); +} + +/******************************************************************************* + * This function prepares the context for Secure/Normal world images. + * Normal world images are transitioned to HYP(if supported) else SVC. + ******************************************************************************/ +void bl1_prepare_next_image(unsigned int image_id) +{ + unsigned int security_state; + image_desc_t *image_desc; + entry_point_info_t *next_bl_ep; + + /* Get the image descriptor. */ + image_desc = bl1_plat_get_image_desc(image_id); + assert(image_desc); + + /* Get the entry point info. */ + next_bl_ep = &image_desc->ep_info; + + /* Get the image security state. */ + security_state = GET_SECURITY_STATE(next_bl_ep->h.attr); + + /* Prepare the SPSR for the next BL image. */ + if (security_state == SECURE) { + next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS); + } else { + /* Use HYP mode if supported else use SVC. */ + if (GET_VIRT_EXT(read_id_pfr1()) == MODE32_hyp) { + next_bl_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS); + } else { + next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS); + } + } + + /* Allow platform to make change */ + bl1_plat_set_ep_info(image_id, next_bl_ep); + + /* Prepare the cpu context for the next BL image. */ + cm_init_my_context(next_bl_ep); + cm_prepare_el3_exit(security_state); + cm_set_next_context(cm_get_context(security_state)); + + /* Prepare the smc context for the next BL image. */ + smc_set_next_ctx(security_state); + copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()), + smc_get_next_ctx()); + + /* + * Flush the SMC & CPU context and the (next)pointers, + * to access them after caches are disabled. + */ + flush_smc_and_cpu_ctx(); + + /* Indicate that image is in execution state. */ + image_desc->state = IMAGE_STATE_EXECUTED; + + print_entry_point_info(next_bl_ep); +} diff --git a/bl1/aarch32/bl1_entrypoint.S b/bl1/aarch32/bl1_entrypoint.S new file mode 100644 index 00000000..b8817864 --- /dev/null +++ b/bl1/aarch32/bl1_entrypoint.S @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + + .globl bl1_vector_table + .globl bl1_entrypoint + + /* ----------------------------------------------------- + * Setup the vector table to support SVC & MON mode. + * ----------------------------------------------------- + */ +vector_base bl1_vector_table + b bl1_entrypoint + b report_exception /* Undef */ + b bl1_aarch32_smc_handler /* SMC call */ + b report_exception /* Prefetch abort */ + b report_exception /* Data abort */ + b report_exception /* Reserved */ + b report_exception /* IRQ */ + b report_exception /* FIQ */ + + /* ----------------------------------------------------- + * bl1_entrypoint() is the entry point into the trusted + * firmware code when a cpu is released from warm or + * cold reset. + * ----------------------------------------------------- + */ + +func bl1_entrypoint +/* --------------------------------------------------------------------- +* If the reset address is programmable then bl1_entrypoint() is +* executed only on the cold boot path. Therefore, we can skip the warm +* boot mailbox mechanism. +* --------------------------------------------------------------------- +*/ + el3_entrypoint_common \ + _set_endian=1 \ + _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \ + _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \ + _init_memory=1 \ + _init_c_runtime=1 \ + _exception_vectors=bl1_vector_table + + /* ----------------------------------------------------- + * Perform early platform setup & platform + * specific early arch. setup e.g. mmu setup + * ----------------------------------------------------- + */ + bl bl1_early_platform_setup + bl bl1_plat_arch_setup + + /* ----------------------------------------------------- + * Jump to main function. + * ----------------------------------------------------- + */ + bl bl1_main + + /* ----------------------------------------------------- + * Jump to next image. 
+ * ----------------------------------------------------- + */ + + /* + * MMU needs to be disabled because both BL1 and BL2 execute + * in PL1, and therefore share the same address space. + * BL2 will initialize the address space according to its + * own requirement. + */ + bl disable_mmu_icache_secure + stcopr r0, TLBIALL + dsb sy + isb + + /* Get the cpu_context for next BL image */ + bl cm_get_next_context + + /* Restore the SCR */ + ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR] + stcopr r2, SCR + isb + + /* + * Get the smc_context for next BL image, + * program the gp/system registers and exit + * secure monitor mode + */ + bl smc_get_next_ctx + smcc_restore_gp_mode_regs + eret +endfunc bl1_entrypoint diff --git a/bl1/aarch32/bl1_exceptions.S b/bl1/aarch32/bl1_exceptions.S new file mode 100644 index 00000000..e109e9f3 --- /dev/null +++ b/bl1/aarch32/bl1_exceptions.S @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + .globl bl1_aarch32_smc_handler + + +func bl1_aarch32_smc_handler + /* ------------------------------------------------ + * SMC in BL1 is handled assuming that the MMU is + * turned off by BL2. + * ------------------------------------------------ + */ + + /* ---------------------------------------------- + * Only RUN_IMAGE SMC is supported. + * ---------------------------------------------- + */ + mov r8, #BL1_SMC_RUN_IMAGE + cmp r8, r0 + blne report_exception + + /* ------------------------------------------------ + * Make sure only Secure world reaches here. + * ------------------------------------------------ + */ + ldcopr r8, SCR + tst r8, #SCR_NS_BIT + blne report_exception + + /* --------------------------------------------------------------------- + * Pass control to next secure image. + * Here it expects r1 to contain the address of a entry_point_info_t + * structure describing the BL entrypoint. 
+ * --------------------------------------------------------------------- + */ + mov r8, r1 + mov r0, r1 + bl bl1_print_next_bl_ep_info + +#if SPIN_ON_BL1_EXIT + bl print_debug_loop_message +debug_loop: + b debug_loop +#endif + + mov r0, r8 + bl bl1_plat_prepare_exit + + stcopr r0, TLBIALL + dsb sy + isb + + /* + * Extract PC and SPSR based on struct `entry_point_info_t` + * and load it in LR and SPSR registers respectively. + */ + ldr lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET] + ldr r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)] + msr spsr, r1 + + add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET + ldm r8, {r0, r1, r2, r3} + eret +endfunc bl1_aarch32_smc_handler diff --git a/bl1/aarch64/bl1_context_mgmt.c b/bl1/aarch64/bl1_context_mgmt.c new file mode 100644 index 00000000..972c7f68 --- /dev/null +++ b/bl1/aarch64/bl1_context_mgmt.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +/* + * Following array will be used for context management. + * There are 2 instances, for the Secure and Non-Secure contexts. + */ +static cpu_context_t bl1_cpu_context[2]; + +/* Following contains the cpu context pointers. */ +static void *bl1_cpu_context_ptr[2]; + + +void *cm_get_context(uint32_t security_state) +{ + assert(sec_state_is_valid(security_state)); + return bl1_cpu_context_ptr[security_state]; +} + +void cm_set_context(void *context, uint32_t security_state) +{ + assert(sec_state_is_valid(security_state)); + bl1_cpu_context_ptr[security_state] = context; +} + +/******************************************************************************* + * This function prepares the context for Secure/Normal world images. + * Normal world images are transitioned to EL2(if supported) else EL1. 
+ ******************************************************************************/ +void bl1_prepare_next_image(unsigned int image_id) +{ + unsigned int security_state; + image_desc_t *image_desc; + entry_point_info_t *next_bl_ep; + +#if CTX_INCLUDE_AARCH32_REGS + /* + * Ensure that the build flag to save AArch32 system registers in CPU + * context is not set for AArch64-only platforms. + */ + if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL1_SHIFT) + & ID_AA64PFR0_ELX_MASK) == 0x1) { + ERROR("EL1 supports AArch64-only. Please set build flag " + "CTX_INCLUDE_AARCH32_REGS = 0"); + panic(); + } +#endif + + /* Get the image descriptor. */ + image_desc = bl1_plat_get_image_desc(image_id); + assert(image_desc); + + /* Get the entry point info. */ + next_bl_ep = &image_desc->ep_info; + + /* Get the image security state. */ + security_state = GET_SECURITY_STATE(next_bl_ep->h.attr); + + /* Setup the Secure/Non-Secure context if not done already. */ + if (!cm_get_context(security_state)) + cm_set_context(&bl1_cpu_context[security_state], security_state); + + /* Prepare the SPSR for the next BL image. */ + if (security_state == SECURE) { + next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } else { + /* Use EL2 if supported else use EL1. */ + if (read_id_aa64pfr0_el1() & + (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { + next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } else { + next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } + } + + /* Allow platform to make change */ + bl1_plat_set_ep_info(image_id, next_bl_ep); + + /* Prepare the context for the next BL image. */ + cm_init_my_context(next_bl_ep); + cm_prepare_el3_exit(security_state); + + /* Indicate that image is in execution state. */ + image_desc->state = IMAGE_STATE_EXECUTED; + + print_entry_point_info(next_bl_ep); +} diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S index f080fe89..869261de 100644 --- a/bl1/aarch64/bl1_exceptions.S +++ b/bl1/aarch64/bl1_exceptions.S @@ -192,15 +192,15 @@ func smc_handler64 mov sp, x30 /* --------------------------------------------------------------------- - * Pass EL3 control to BL31. + * Pass EL3 control to next BL image. * Here it expects X1 with the address of a entry_point_info_t - * structure describing the BL31 entrypoint. + * structure describing the next BL image entrypoint. 
* --------------------------------------------------------------------- */ mov x20, x1 mov x0, x20 - bl bl1_print_bl31_ep_info + bl bl1_print_next_bl_ep_info ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET] msr elr_el3, x0 diff --git a/bl1/bl1.mk b/bl1/bl1.mk index 591e047b..9ef5b401 100644 --- a/bl1/bl1.mk +++ b/bl1/bl1.mk @@ -29,15 +29,19 @@ # BL1_SOURCES += bl1/bl1_main.c \ - bl1/aarch64/bl1_arch_setup.c \ - bl1/aarch64/bl1_entrypoint.S \ - bl1/aarch64/bl1_exceptions.S \ - bl1/bl1_context_mgmt.c \ - lib/cpus/aarch64/cpu_helpers.S \ - lib/el3_runtime/aarch64/context.S \ - lib/el3_runtime/aarch64/context_mgmt.c \ + bl1/${ARCH}/bl1_arch_setup.c \ + bl1/${ARCH}/bl1_context_mgmt.c \ + bl1/${ARCH}/bl1_entrypoint.S \ + bl1/${ARCH}/bl1_exceptions.S \ + lib/cpus/${ARCH}/cpu_helpers.S \ + lib/el3_runtime/${ARCH}/context_mgmt.c \ plat/common/plat_bl1_common.c + +ifeq (${ARCH},aarch64) +BL1_SOURCES += lib/el3_runtime/aarch64/context.S +endif + ifeq (${TRUSTED_BOARD_BOOT},1) BL1_SOURCES += bl1/bl1_fwu.c endif diff --git a/bl1/bl1_context_mgmt.c b/bl1/bl1_context_mgmt.c deleted file mode 100644 index 972c7f68..00000000 --- a/bl1/bl1_context_mgmt.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include - -/* - * Following array will be used for context management. - * There are 2 instances, for the Secure and Non-Secure contexts. - */ -static cpu_context_t bl1_cpu_context[2]; - -/* Following contains the cpu context pointers. 
*/ -static void *bl1_cpu_context_ptr[2]; - - -void *cm_get_context(uint32_t security_state) -{ - assert(sec_state_is_valid(security_state)); - return bl1_cpu_context_ptr[security_state]; -} - -void cm_set_context(void *context, uint32_t security_state) -{ - assert(sec_state_is_valid(security_state)); - bl1_cpu_context_ptr[security_state] = context; -} - -/******************************************************************************* - * This function prepares the context for Secure/Normal world images. - * Normal world images are transitioned to EL2(if supported) else EL1. - ******************************************************************************/ -void bl1_prepare_next_image(unsigned int image_id) -{ - unsigned int security_state; - image_desc_t *image_desc; - entry_point_info_t *next_bl_ep; - -#if CTX_INCLUDE_AARCH32_REGS - /* - * Ensure that the build flag to save AArch32 system registers in CPU - * context is not set for AArch64-only platforms. - */ - if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL1_SHIFT) - & ID_AA64PFR0_ELX_MASK) == 0x1) { - ERROR("EL1 supports AArch64-only. Please set build flag " - "CTX_INCLUDE_AARCH32_REGS = 0"); - panic(); - } -#endif - - /* Get the image descriptor. */ - image_desc = bl1_plat_get_image_desc(image_id); - assert(image_desc); - - /* Get the entry point info. */ - next_bl_ep = &image_desc->ep_info; - - /* Get the image security state. */ - security_state = GET_SECURITY_STATE(next_bl_ep->h.attr); - - /* Setup the Secure/Non-Secure context if not done already. */ - if (!cm_get_context(security_state)) - cm_set_context(&bl1_cpu_context[security_state], security_state); - - /* Prepare the SPSR for the next BL image. */ - if (security_state == SECURE) { - next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, - DISABLE_ALL_EXCEPTIONS); - } else { - /* Use EL2 if supported else use EL1. */ - if (read_id_aa64pfr0_el1() & - (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { - next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, - DISABLE_ALL_EXCEPTIONS); - } else { - next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, - DISABLE_ALL_EXCEPTIONS); - } - } - - /* Allow platform to make change */ - bl1_plat_set_ep_info(image_id, next_bl_ep); - - /* Prepare the context for the next BL image. */ - cm_init_my_context(next_bl_ep); - cm_prepare_el3_exit(security_state); - - /* Indicate that image is in execution state. 
*/ - image_desc->state = IMAGE_STATE_EXECUTED; - - print_entry_point_info(next_bl_ep); -} diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c index 68a17a3c..fbb75e02 100644 --- a/bl1/bl1_main.c +++ b/bl1/bl1_main.c @@ -107,15 +107,20 @@ void bl1_main(void) NOTICE("BL1: %s\n", version_string); NOTICE("BL1: %s\n", build_message); - INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT); + INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE, + (void *)BL1_RAM_LIMIT); #if DEBUG - unsigned long val; + u_register_t val; /* * Ensure that MMU/Caches and coherency are turned on */ +#ifdef AARCH32 + val = read_sctlr(); +#else val = read_sctlr_el3(); +#endif assert(val & SCTLR_M_BIT); assert(val & SCTLR_C_BIT); assert(val & SCTLR_I_BIT); @@ -223,21 +228,25 @@ void bl1_load_bl2(void) bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout); - ep_info->args.arg1 = (unsigned long)bl2_tzram_layout; + ep_info->args.arg1 = (uintptr_t)bl2_tzram_layout; NOTICE("BL1: Booting BL2\n"); - VERBOSE("BL1: BL2 memory layout address = 0x%llx\n", - (unsigned long long) bl2_tzram_layout); + VERBOSE("BL1: BL2 memory layout address = %p\n", + (void *) bl2_tzram_layout); } /******************************************************************************* - * Function called just before handing over to BL31 to inform the user about - * the boot progress. In debug mode, also print details about the BL31 image's - * execution context. + * Function called just before handing over to the next BL to inform the user + * about the boot progress. In debug mode, also print details about the BL + * image's execution context. ******************************************************************************/ -void bl1_print_bl31_ep_info(const entry_point_info_t *bl31_ep_info) +void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info) { +#ifdef AARCH32 + NOTICE("BL1: Booting BL32\n"); +#else NOTICE("BL1: Booting BL31\n"); - print_entry_point_info(bl31_ep_info); +#endif /* AARCH32 */ + print_entry_point_info(bl_ep_info); } #if SPIN_ON_BL1_EXIT diff --git a/bl1/bl1_private.h b/bl1/bl1_private.h index 79dde738..2ef8d0e2 100644 --- a/bl1/bl1_private.h +++ b/bl1/bl1_private.h @@ -37,13 +37,13 @@ * Declarations of linker defined symbols which will tell us where BL1 lives * in Trusted ROM and RAM ******************************************************************************/ -extern uint64_t __BL1_ROM_END__; -#define BL1_ROM_END (uint64_t)(&__BL1_ROM_END__) +extern uintptr_t __BL1_ROM_END__; +#define BL1_ROM_END (uintptr_t)(&__BL1_ROM_END__) -extern uint64_t __BL1_RAM_START__; -extern uint64_t __BL1_RAM_END__; -#define BL1_RAM_BASE (uint64_t)(&__BL1_RAM_START__) -#define BL1_RAM_LIMIT (uint64_t)(&__BL1_RAM_END__) +extern uintptr_t __BL1_RAM_START__; +extern uintptr_t __BL1_RAM_END__; +#define BL1_RAM_BASE (uintptr_t)(&__BL1_RAM_START__) +#define BL1_RAM_LIMIT (uintptr_t)(&__BL1_RAM_END__) /****************************************** * Function prototypes diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index b264fc32..676973ce 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -103,5 +103,9 @@ static inline void cm_set_next_context(void *context) "msr spsel, #0\n" : : "r" (context)); } + +#else +void *cm_get_next_context(void); #endif /* AARCH32 */ + #endif /* __CM_H__ */ -- cgit From 83fc4a930bad68ca0014a25e0c41dc71cffa28b3 Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Mon, 4 Jul 2016 11:03:49 +0100 Subject: AArch32: Add ARM platform 
changes in BL1 This patch adds ARM platform changes in BL1 for AArch32 state. It also enables building of BL1 for ARCH=aarch32. Change-Id: I079be81a93d027f37b0f7d8bb474b1252bb4cf48 --- Makefile | 4 ++-- plat/arm/board/fvp/platform.mk | 4 ++-- plat/arm/common/arm_bl1_setup.c | 4 ++++ plat/arm/common/arm_common.mk | 2 +- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 15b1ac87..aaf84db2 100644 --- a/Makefile +++ b/Makefile @@ -517,13 +517,13 @@ endif ################################################################################ # Include BL specific makefiles ################################################################################ -# BL31 is not needed and BL1, BL2 & BL2U are not currently supported in AArch32 -ifneq (${ARCH},aarch32) ifdef BL1_SOURCES NEED_BL1 := yes include bl1/bl1.mk endif +# For AArch32, BL31 is not applicable, and BL2 & BL2U are not supported at present. +ifneq (${ARCH},aarch32) ifdef BL2_SOURCES NEED_BL2 := yes include bl2/bl2.mk diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk index 2865569a..1dad9895 100644 --- a/plat/arm/board/fvp/platform.mk +++ b/plat/arm/board/fvp/platform.mk @@ -113,8 +113,8 @@ endif BL1_SOURCES += drivers/io/io_semihosting.c \ lib/semihosting/semihosting.c \ - lib/semihosting/aarch64/semihosting_call.S \ - plat/arm/board/fvp/aarch64/fvp_helpers.S \ + lib/semihosting/${ARCH}/semihosting_call.S \ + plat/arm/board/fvp/${ARCH}/fvp_helpers.S \ plat/arm/board/fvp/fvp_bl1_setup.c \ plat/arm/board/fvp/fvp_err.c \ plat/arm/board/fvp/fvp_io_storage.c \ diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c index 6782f889..50d102af 100644 --- a/plat/arm/common/arm_bl1_setup.c +++ b/plat/arm/common/arm_bl1_setup.c @@ -132,7 +132,11 @@ void arm_bl1_plat_arch_setup(void) BL1_COHERENT_RAM_LIMIT #endif ); +#ifdef AARCH32 + enable_mmu_secure(0); +#else enable_mmu_el3(0); +#endif /* AARCH32 */ } void bl1_plat_arch_setup(void) diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index 51bd60c9..875e9196 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -116,7 +116,7 @@ BL1_SOURCES += drivers/arm/sp805/sp805.c \ drivers/io/io_storage.c \ plat/arm/common/arm_bl1_setup.c \ plat/arm/common/arm_io_storage.c \ - plat/common/aarch64/platform_up_stack.S + plat/common/${ARCH}/platform_up_stack.S ifdef EL3_PAYLOAD_BASE # Need the arm_program_trusted_mailbox() function to release secondary CPUs from # their holding pen -- cgit From d48c12e928c6e86d95c3c17e8fb56f0292afc623 Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Thu, 30 Jun 2016 14:52:12 +0100 Subject: AArch32: Add generic changes in BL2 This patch adds generic changes in BL2 to support AArch32 state. New AArch32 specific assembly/C files are introduced and some files are moved to AArch32/64 specific folders. BL2 for AArch64 is refactored but functionally identical. BL2 executes in Secure SVC mode in AArch32 state. 
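For reference, BL2 is entered in Secure SVC mode because of the SPSR that the
AArch32 BL1 context management code (added earlier in this series) programs
for a secure image. A minimal sketch of that step, mirroring
`bl1_prepare_next_image()` and reusing its macros (assuming the relevant
architecture headers are in scope):

    /* Sketch: SPSR value BL1 programs so that the next secure AArch32
     * image (here BL2) starts in SVC mode with all exceptions masked. */
    next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);

BL2 itself therefore only installs its vector table and stack on entry and
does not perform a mode switch of its own.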
Change-Id: Ifaacbc2a91f8640876385b953adb24744d9dbde3 --- bl2/aarch32/bl2_arch_setup.c | 39 ++++++++++++ bl2/aarch32/bl2_entrypoint.S | 145 +++++++++++++++++++++++++++++++++++++++++++ bl2/bl2.mk | 13 ++-- bl2/bl2_main.c | 9 +++ 4 files changed, 201 insertions(+), 5 deletions(-) create mode 100644 bl2/aarch32/bl2_arch_setup.c create mode 100644 bl2/aarch32/bl2_entrypoint.S diff --git a/bl2/aarch32/bl2_arch_setup.c b/bl2/aarch32/bl2_arch_setup.c new file mode 100644 index 00000000..665c29c6 --- /dev/null +++ b/bl2/aarch32/bl2_arch_setup.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/******************************************************************************* + * Place holder function to perform any Secure SVC specific architectural + * setup. At the moment there is nothing to do. + ******************************************************************************/ +void bl2_arch_setup(void) +{ + +} diff --git a/bl2/aarch32/bl2_entrypoint.S b/bl2/aarch32/bl2_entrypoint.S new file mode 100644 index 00000000..6c620e22 --- /dev/null +++ b/bl2/aarch32/bl2_entrypoint.S @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + + + .globl bl2_vector_table + .globl bl2_entrypoint + + +vector_base bl2_vector_table + b bl2_entrypoint + b report_exception /* Undef */ + b report_exception /* SVC call */ + b report_exception /* Prefetch abort */ + b report_exception /* Data abort */ + b report_exception /* Reserved */ + b report_exception /* IRQ */ + b report_exception /* FIQ */ + + +func bl2_entrypoint + /*--------------------------------------------- + * Save from r1 the extents of the trusted ram + * available to BL2 for future use. + * r0 is not currently used. + * --------------------------------------------- + */ + mov r11, r1 + + /* --------------------------------------------- + * Set the exception vector to something sane. + * --------------------------------------------- + */ + ldr r0, =bl2_vector_table + stcopr r0, VBAR + isb + + /* ----------------------------------------------------- + * Enable the instruction cache + * ----------------------------------------------------- + */ + ldcopr r0, SCTLR + orr r0, r0, #SCTLR_I_BIT + stcopr r0, SCTLR + isb + + /* --------------------------------------------- + * Since BL2 executes after BL1, it is assumed + * here that BL1 has already has done the + * necessary register initializations. + * --------------------------------------------- + */ + + /* --------------------------------------------- + * Invalidate the RW memory used by the BL2 + * image. This includes the data and NOBITS + * sections. This is done to safeguard against + * possible corruption of this memory by dirty + * cache lines in a system cache as a result of + * use by an earlier boot loader stage. + * --------------------------------------------- + */ + ldr r0, =__RW_START__ + ldr r1, =__RW_END__ + sub r1, r1, r0 + bl inv_dcache_range + + /* --------------------------------------------- + * Zero out NOBITS sections. There are 2 of them: + * - the .bss section; + * - the coherent memory section. + * --------------------------------------------- + */ + ldr r0, =__BSS_START__ + ldr r1, =__BSS_SIZE__ + bl zeromem + +#if USE_COHERENT_MEM + ldr r0, =__COHERENT_RAM_START__ + ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__ + bl zeromem +#endif + + /* -------------------------------------------- + * Allocate a stack whose memory will be marked + * as Normal-IS-WBWA when the MMU is enabled. + * There is no risk of reading stale stack + * memory after enabling the MMU as only the + * primary cpu is running at the moment. + * -------------------------------------------- + */ + bl plat_set_my_stack + + /* --------------------------------------------- + * Perform early platform setup & platform + * specific early arch. setup e.g. 
mmu setup + * --------------------------------------------- + */ + mov r0, r11 + bl bl2_early_platform_setup + bl bl2_plat_arch_setup + + /* --------------------------------------------- + * Jump to main function. + * --------------------------------------------- + */ + bl bl2_main + + /* --------------------------------------------- + * Should never reach this point. + * --------------------------------------------- + */ + bl plat_panic_handler + +endfunc bl2_entrypoint diff --git a/bl2/bl2.mk b/bl2/bl2.mk index 6cb478d1..f823ef46 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -29,10 +29,13 @@ # BL2_SOURCES += bl2/bl2_main.c \ - bl2/aarch64/bl2_entrypoint.S \ - bl2/aarch64/bl2_arch_setup.c \ - common/aarch64/early_exceptions.S \ - lib/locks/exclusive/aarch64/spinlock.S + bl2/${ARCH}/bl2_entrypoint.S \ + bl2/${ARCH}/bl2_arch_setup.c \ + lib/locks/exclusive/${ARCH}/spinlock.S + +ifeq (${ARCH},aarch64) +BL2_SOURCES += common/aarch64/early_exceptions.S +endif ifeq (${LOAD_IMAGE_V2},1) BL2_SOURCES += bl2/bl2_image_load_v2.c diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c index fc93e1b1..514c0053 100644 --- a/bl2/bl2_main.c +++ b/bl2/bl2_main.c @@ -60,6 +60,15 @@ void bl2_main(void) /* Load the subsequent bootloader images. */ next_bl_ep_info = bl2_load_images(); +#ifdef AARCH32 + /* + * For AArch32 state BL1 and BL2 share the MMU setup. + * Given that BL2 does not map BL1 regions, MMU needs + * to be disabled in order to go back to BL1. + */ + disable_mmu_icache_secure(); +#endif /* AARCH32 */ + /* * Run next BL image via an SMC to BL1. Information on how to pass * control to the BL32 (if present) and BL33 software images will -- cgit From 6fe8aa2fa638a7f8c54e6fc084bf2ed6103c2854 Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Mon, 4 Jul 2016 11:26:14 +0100 Subject: AArch32: Add ARM platform changes in BL2 This patch adds ARM platform changes in BL2 for AArch32 state. It instantiates a descriptor array for ARM platforms describing image and entrypoint information for `SCP_BL2`, `BL32` and `BL33`. It also enables building of BL2 for ARCH=aarch32. Change-Id: I60dc7a284311eceba401fc789311c50ac746c51e --- Makefile | 4 +- plat/arm/board/fvp/platform.mk | 2 +- plat/arm/common/aarch32/arm_bl2_mem_params_desc.c | 106 ++++++++++++++++++++++ plat/arm/common/arm_bl2_setup.c | 7 ++ plat/arm/common/arm_common.mk | 2 +- 5 files changed, 117 insertions(+), 4 deletions(-) create mode 100644 plat/arm/common/aarch32/arm_bl2_mem_params_desc.c diff --git a/Makefile b/Makefile index aaf84db2..2b630b32 100644 --- a/Makefile +++ b/Makefile @@ -522,13 +522,13 @@ NEED_BL1 := yes include bl1/bl1.mk endif -# For AArch32, BL31 is not applicable, and BL2 & BL2U are not supported at present. -ifneq (${ARCH},aarch32) ifdef BL2_SOURCES NEED_BL2 := yes include bl2/bl2.mk endif +# For AArch32, BL31 is not applicable, and BL2U is not supported at present. 
+ifneq (${ARCH},aarch32) ifdef BL2U_SOURCES NEED_BL2U := yes include bl2u/bl2u.mk diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk index 1dad9895..ca348d1a 100644 --- a/plat/arm/board/fvp/platform.mk +++ b/plat/arm/board/fvp/platform.mk @@ -126,7 +126,7 @@ BL1_SOURCES += drivers/io/io_semihosting.c \ BL2_SOURCES += drivers/io/io_semihosting.c \ drivers/delay_timer/delay_timer.c \ lib/semihosting/semihosting.c \ - lib/semihosting/aarch64/semihosting_call.S \ + lib/semihosting/${ARCH}/semihosting_call.S \ plat/arm/board/fvp/fvp_bl2_setup.c \ plat/arm/board/fvp/fvp_err.c \ plat/arm/board/fvp/fvp_io_storage.c \ diff --git a/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c new file mode 100644 index 00000000..9c92e5ea --- /dev/null +++ b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. 
+ ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { +#ifdef SCP_BL2_BASE + /* Fill SCP_BL2 related information if it exists */ + { + .image_id = SCP_BL2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), + .image_info.image_base = SCP_BL2_BASE, + .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +#endif /* SCP_BL2_BASE */ + + /* Fill BL32 related information */ + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL32_BASE, + .ep_info.spsr = SPSR_MODE32(MODE32_mon, SPSR_T_ARM, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_LIMIT - BL32_BASE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, + + /* Fill BL33 related information */ + { + .image_id = BL33_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), +#ifdef PRELOADED_BL33_BASE + .ep_info.pc = PRELOADED_BL33_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), +#else + .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET, + .image_info.image_max_size = ARM_DRAM1_SIZE, +#endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index a2ae11a2..a4fac0da 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -246,7 +246,12 @@ void arm_bl2_plat_arch_setup(void) BL2_COHERENT_RAM_LIMIT #endif ); + +#ifdef AARCH32 + enable_mmu_secure(0); +#else enable_mmu_el1(0); +#endif } void bl2_plat_arch_setup(void) @@ -266,9 +271,11 @@ int bl2_plat_handle_post_image_load(unsigned int image_id) assert(bl_mem_params); switch (image_id) { +#ifdef AARCH64 case BL32_IMAGE_ID: bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry(); break; +#endif case BL33_IMAGE_ID: /* BL33 expects to receive the primary CPU MPID (through r0) */ diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index 875e9196..98d72192 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -128,7 +128,7 @@ BL2_SOURCES += drivers/io/io_fip.c \ drivers/io/io_storage.c \ plat/arm/common/arm_bl2_setup.c \ plat/arm/common/arm_io_storage.c \ - plat/common/aarch64/platform_up_stack.S + plat/common/${ARCH}/platform_up_stack.S ifeq (${LOAD_IMAGE_V2},1) BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\ plat/arm/common/arm_image_load.c \ -- cgit From 3bdf0e5df25cf730fbbde7df3dd857d7f2803d1a Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Thu, 30 Jun 2016 15:02:31 +0100 Subject: AArch32: Refactor SP_MIN to support RESET_TO_SP_MIN This patch uses the `el3_entrypoint_common` macro to initialize CPU registers, in SP_MIN entrypoint.s file, in both cold and warm boot path. 
It also adds conditional compilation, in cold and warm boot entry path, based on RESET_TO_SP_MIN. Change-Id: Id493ca840dc7b9e26948dc78ee928e9fdb76b9e4 --- bl32/sp_min/aarch32/entrypoint.S | 292 ++++++++++++++++----------------------- bl32/sp_min/sp_min.ld.S | 2 + 2 files changed, 123 insertions(+), 171 deletions(-) diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S index 33d35b9b..54f2cedb 100644 --- a/bl32/sp_min/aarch32/entrypoint.S +++ b/bl32/sp_min/aarch32/entrypoint.S @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -41,7 +42,8 @@ .globl sp_min_entrypoint .globl sp_min_warm_entrypoint -func sp_min_vector_table + +vector_base sp_min_vector_table b sp_min_entrypoint b plat_panic_handler /* Undef */ b handle_smc /* Syscall */ @@ -50,185 +52,70 @@ func sp_min_vector_table b plat_panic_handler /* Reserved */ b plat_panic_handler /* IRQ */ b plat_panic_handler /* FIQ */ -endfunc sp_min_vector_table - -func handle_smc - smcc_save_gp_mode_regs - - /* r0 points to smc_context */ - mov r2, r0 /* handle */ - ldcopr r0, SCR - - /* Save SCR in stack */ - push {r0} - and r3, r0, #SCR_NS_BIT /* flags */ - - /* Switch to Secure Mode*/ - bic r0, #SCR_NS_BIT - stcopr r0, SCR - isb - ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */ - /* Check whether an SMC64 is issued */ - tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT) - beq 1f /* SMC32 is detected */ - mov r0, #SMC_UNK - str r0, [r2, #SMC_CTX_GPREG_R0] - mov r0, r2 - b 2f /* Skip handling the SMC */ -1: - mov r1, #0 /* cookie */ - bl handle_runtime_svc -2: - /* r0 points to smc context */ - /* Restore SCR from stack */ - pop {r1} - stcopr r1, SCR - isb - - b sp_min_exit -endfunc handle_smc /* * The Cold boot/Reset entrypoint for SP_MIN */ func sp_min_entrypoint - - /* - * The caches and TLBs are disabled at reset. If any implementation - * allows the caches/TLB to be hit while they are disabled, ensure - * that they are invalidated here +#if !RESET_TO_SP_MIN + /* --------------------------------------------------------------- + * Preceding bootloader has populated r0 with a pointer to a + * 'bl_params_t' structure & r1 with a pointer to platform + * specific structure + * --------------------------------------------------------------- */ - - /* Make sure we are in Secure Mode*/ - ldcopr r0, SCR - bic r0, #SCR_NS_BIT - stcopr r0, SCR - isb - - /* Switch to monitor mode */ - cps #MODE32_mon - isb - - /* - * Set sane values for NS SCTLR as well. - * Switch to non secure mode for this. - */ - ldr r0, =(SCTLR_RES1) - ldcopr r1, SCR - orr r2, r1, #SCR_NS_BIT - stcopr r2, SCR - isb - - ldcopr r2, SCTLR - orr r0, r0, r2 - stcopr r0, SCTLR - isb - - stcopr r1, SCR - isb - - /* - * Set the CPU endianness before doing anything that might involve - * memory reads or writes. - */ - ldcopr r0, SCTLR - bic r0, r0, #SCTLR_EE_BIT - stcopr r0, SCTLR - isb - - /* Run the CPU Specific Reset handler */ - bl reset_handler - - /* - * Enable the instruction cache and data access - * alignment checks - */ - ldcopr r0, SCTLR - ldr r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT) - orr r0, r0, r1 - stcopr r0, SCTLR - isb - - /* Set the vector tables */ - ldr r0, =sp_min_vector_table - stcopr r0, VBAR - stcopr r0, MVBAR - isb - - /* - * Enable the SIF bit to disable instruction fetches - * from Non-secure memory. 
+ mov r11, r0 + mov r12, r1 + + /* --------------------------------------------------------------------- + * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches + * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot + * and primary/secondary CPU logic should not be executed in this case. + * + * Also, assume that the previous bootloader has already set up the CPU + * endianness and has initialised the memory. + * --------------------------------------------------------------------- */ - ldcopr r0, SCR - orr r0, r0, #SCR_SIF_BIT - stcopr r0, SCR - - /* - * Enable the SError interrupt now that the exception vectors have been - * setup. + el3_entrypoint_common \ + _set_endian=0 \ + _warm_boot_mailbox=0 \ + _secondary_cold_boot=0 \ + _init_memory=0 \ + _init_c_runtime=1 \ + _exception_vectors=sp_min_vector_table + + /* --------------------------------------------------------------------- + * Relay the previous bootloader's arguments to the platform layer + * --------------------------------------------------------------------- */ - cpsie a - isb - - /* Enable access to Advanced SIMD registers */ - ldcopr r0, NSACR - bic r0, r0, #NSASEDIS_BIT - orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT) - stcopr r0, NSACR - isb - - /* - * Enable access to Advanced SIMD, Floating point and to the Trace - * functionality as well. + mov r0, r11 + mov r1, r12 +#else + /* --------------------------------------------------------------------- + * For RESET_TO_SP_MIN systems which have a programmable reset address, + * sp_min_entrypoint() is executed only on the cold boot path so we can + * skip the warm boot mailbox mechanism. + * --------------------------------------------------------------------- */ - ldcopr r0, CPACR - bic r0, r0, #ASEDIS_BIT - bic r0, r0, #TRCDIS_BIT - orr r0, r0, #CPACR_ENABLE_FP_ACCESS - stcopr r0, CPACR - isb - - vmrs r0, FPEXC - orr r0, r0, #FPEXC_EN_BIT - vmsr FPEXC, r0 - - /* Detect whether Warm or Cold boot */ - bl plat_get_my_entrypoint - cmp r0, #0 - /* If warm boot detected, jump to warm boot entry */ - bxne r0 - - /* Setup C runtime stack */ - bl plat_set_my_stack - - /* Perform platform specific memory initialization */ - bl platform_mem_init - - /* Initialize the C Runtime Environment */ - - /* - * Invalidate the RW memory used by SP_MIN image. This includes - * the data and NOBITS sections. This is done to safeguard against - * possible corruption of this memory by dirty cache lines in a system - * cache as a result of use by an earlier boot loader stage. + el3_entrypoint_common \ + _set_endian=1 \ + _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \ + _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \ + _init_memory=1 \ + _init_c_runtime=1 \ + _exception_vectors=sp_min_vector_table + + /* --------------------------------------------------------------------- + * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader + * to run so there's no argument to relay from a previous bootloader. + * Zero the arguments passed to the platform layer to reflect that. + * --------------------------------------------------------------------- */ - ldr r0, =__RW_START__ - ldr r1, =__RW_END__ - sub r1, r1, r0 - bl inv_dcache_range - - ldr r0, =__BSS_START__ - ldr r1, =__BSS_SIZE__ - bl zeromem + mov r0, #0 + mov r1, #0 +#endif /* RESET_TO_SP_MIN */ -#if USE_COHERENT_MEM - ldr r0, =__COHERENT_RAM_START__ - ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__ - bl zeromem -#endif - - /* Perform platform specific early arch. 
setup */
 	bl	sp_min_early_platform_setup
 	bl	sp_min_plat_arch_setup
@@ -270,13 +157,76 @@ func sp_min_entrypoint
 	b	sp_min_exit
 endfunc sp_min_entrypoint
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func handle_smc
+	smcc_save_gp_mode_regs
+
+	/* r0 points to smc_context */
+	mov	r2, r0		/* handle */
+	ldcopr	r0, SCR
+
+	/* Save SCR in stack */
+	push	{r0}
+	and	r3, r0, #SCR_NS_BIT	/* flags */
+
+	/* Switch to Secure Mode*/
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 is issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f	/* SMC32 is detected */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	2f	/* Skip handling the SMC */
+1:
+	mov	r1, #0	/* cookie */
+	bl	handle_runtime_svc
+2:
+	/* r0 points to smc context */
+
+	/* Restore SCR from stack */
+	pop	{r1}
+	stcopr	r1, SCR
+	isb
+
+	b	sp_min_exit
+endfunc handle_smc
+
+
 /*
  * The Warm boot entrypoint for SP_MIN.
  */
 func sp_min_warm_entrypoint
-
-	/* Setup C runtime stack */
-	bl	plat_set_my_stack
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+	 *   programming the reset address do we need to set the CPU endianness.
+	 *   In other cases, we assume this has been taken care by the
+	 *   entrypoint code.
+	 *
+	 * - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 * - Do not try to distinguish between primary and secondary CPUs, this
+	 *   notion only exists for a cold boot.
+	 *
+	 * - No need to initialise the memory or the C runtime environment,
+	 *   it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=sp_min_vector_table

 	/* --------------------------------------------
 	 * Enable the MMU with the DCache disabled. It
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index b158db16..e0e23e8f 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -50,6 +50,7 @@ SECTIONS
         __TEXT_START__ = .;
         *entrypoint.o(.text*)
         *(.text*)
+        *(.vectors)
         . = NEXT(4096);
         __TEXT_END__ = .;
     } >RAM
@@ -98,6 +99,7 @@ SECTIONS
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;

+        *(.vectors)
         __RO_END_UNALIGNED__ = .;

         /*
-- cgit
From d9915518726613919a66b35df7f4cd2db42af3e4 Mon Sep 17 00:00:00 2001
From: Yatharth Kochar
Date: Thu, 30 Jun 2016 14:50:58 +0100
Subject: AArch32: Support in SP_MIN to receive arguments from BL2

This patch adds support in SP_MIN to receive generic and platform
specific arguments from BL2.

The new signature is as follows:

void sp_min_early_platform_setup(void *from_bl2,
		void *plat_params_from_bl2);

ARM platforms have been modified to use this support.

Note: Platforms may break if using the old signature.
Default value for RESET_TO_SP_MIN is changed to 0.
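
For illustration only (this sketch is not part of the patch), a non-ARM
platform port could adopt the new signature along the following lines.
It mirrors the ARM common implementation further down in this patch; the
static variable, the TODO comment and the surrounding includes (the usual
platform_sp_min.h plus assert.h) are assumptions of the sketch, not code
introduced by the patch:

    static entry_point_info_t bl33_image_ep_info;

    void sp_min_early_platform_setup(void *from_bl2,
                                     void *plat_params_from_bl2)
    {
    #if RESET_TO_SP_MIN
            /* SP_MIN is the reset vector: no arguments are expected. */
            assert(from_bl2 == NULL);
            assert(plat_params_from_bl2 == NULL);
            /* TODO: populate bl33_image_ep_info from platform defaults. */
    #else
            bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
            bl_params_node_t *node;

            assert(params_from_bl2 != NULL);
            assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
            assert(params_from_bl2->h.version >= VERSION_2);

            /* Pick up the BL33 entry point information populated by BL2. */
            for (node = params_from_bl2->head; node != NULL;
                 node = node->next_params_info) {
                    if (node->image_id == BL33_IMAGE_ID) {
                            bl33_image_ep_info = *node->ep_info;
                            break;
                    }
            }

            if (bl33_image_ep_info.pc == 0)
                    panic();
    #endif
    }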
Change-Id: I008d4b09fd3803c7b6231587ebf02a047bdba8d0 --- bl32/sp_min/sp_min.mk | 2 +- include/bl32/sp_min/platform_sp_min.h | 3 +- include/plat/arm/common/plat_arm.h | 3 +- plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c | 5 ++- plat/arm/common/sp_min/arm_sp_min_setup.c | 60 +++++++++++++++++++++------- 5 files changed, 53 insertions(+), 20 deletions(-) diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk index a8b572e0..ac7f03e9 100644 --- a/bl32/sp_min/sp_min.mk +++ b/bl32/sp_min/sp_min.mk @@ -58,6 +58,6 @@ else include ${SP_MIN_PLAT_MAKEFILE} endif -RESET_TO_SP_MIN := 1 +RESET_TO_SP_MIN := 0 $(eval $(call add_define,RESET_TO_SP_MIN)) $(eval $(call assert_boolean,RESET_TO_SP_MIN)) diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h index ae9dd58a..c8c3fc5e 100644 --- a/include/bl32/sp_min/platform_sp_min.h +++ b/include/bl32/sp_min/platform_sp_min.h @@ -34,7 +34,8 @@ /******************************************************************************* * Mandatory SP_MIN functions ******************************************************************************/ -void sp_min_early_platform_setup(void); +void sp_min_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2); void sp_min_plat_arch_setup(void); void sp_min_platform_setup(void); entry_point_info_t *sp_min_plat_get_bl33_ep_info(void); diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index 581573b2..29fcffec 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -180,7 +180,8 @@ void arm_bl31_plat_arch_setup(void); void arm_tsp_early_platform_setup(void); /* SP_MIN utility functions */ -void arm_sp_min_early_platform_setup(void); +void arm_sp_min_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2); /* FIP TOC validity check */ int arm_io_is_toc_valid(void); diff --git a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c index d3bef82f..735c4f06 100644 --- a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c +++ b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c @@ -31,9 +31,10 @@ #include #include "../fvp_private.h" -void sp_min_early_platform_setup(void) +void sp_min_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) { - arm_sp_min_early_platform_setup(); + arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2); /* Initialize the platform config for future decision making */ fvp_config_setup(); diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c index 927f30f5..d48556ee 100644 --- a/plat/arm/common/sp_min/arm_sp_min_setup.c +++ b/plat/arm/common/sp_min/arm_sp_min_setup.c @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -58,10 +59,6 @@ static entry_point_info_t bl33_image_ep_info; #pragma weak sp_min_platform_setup #pragma weak sp_min_plat_arch_setup -#ifndef RESET_TO_SP_MIN -#error (" RESET_TO_SP_MIN flag is expected to be set.") -#endif - /******************************************************************************* * Return a pointer to the 'entry_point_info' structure of the next image for the @@ -86,15 +83,20 @@ entry_point_info_t *sp_min_plat_get_bl33_ep_info(void) } /******************************************************************************* - * Perform early platform setup. We expect SP_MIN is the first boot loader - * image and RESET_TO_SP_MIN build option to be set. + * Perform early platform setup. 
******************************************************************************/ -void arm_sp_min_early_platform_setup(void) +void arm_sp_min_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) { /* Initialize the console to provide early debug support */ console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ, ARM_CONSOLE_BAUDRATE); +#if RESET_TO_SP_MIN + /* There are no parameters from BL2 if SP_MIN is a reset vector */ + assert(from_bl2 == NULL); + assert(plat_params_from_bl2 == NULL); + /* Populate entry point information for BL33 */ SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, @@ -104,18 +106,46 @@ void arm_sp_min_early_platform_setup(void) * Tell SP_MIN where the non-trusted software image * is located and the entry state information */ -#ifdef PRELOADED_BL33_BASE - bl33_image_ep_info.pc = PRELOADED_BL33_BASE; -#else bl33_image_ep_info.pc = plat_get_ns_image_entrypoint(); -#endif bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#else /* RESET_TO_SP_MIN */ + + /* + * Check params passed from BL2 should not be NULL, + */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + assert(params_from_bl2->h.type == PARAM_BL_PARAMS); + assert(params_from_bl2->h.version >= VERSION_2); + + bl_params_node_t *bl_params = params_from_bl2->head; + + /* + * Copy BL33 entry point information. + * They are stored in Secure RAM, in BL2's address space. + */ + while (bl_params) { + if (bl_params->image_id == BL33_IMAGE_ID) { + bl33_image_ep_info = *bl_params->ep_info; + break; + } + + bl_params = bl_params->next_params_info; + } + + if (bl33_image_ep_info.pc == 0) + panic(); + +#endif /* RESET_TO_SP_MIN */ + } -void sp_min_early_platform_setup(void) +void sp_min_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2) { - arm_sp_min_early_platform_setup(); + arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2); /* * Initialize Interconnect for this cluster during cold boot. @@ -146,10 +176,10 @@ void sp_min_platform_setup(void) /* * Do initial security configuration to allow DRAM/device access * (if earlier BL has not already done so). - * TODO: If RESET_TO_SP_MIN is not set, the security setup needs - * to be skipped. */ +#if RESET_TO_SP_MIN plat_arm_security_setup(); +#endif /* Enable and initialize the System level generic timer */ mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF, -- cgit From 03a3042b0430d962641694a49c41d3b166a64d0d Mon Sep 17 00:00:00 2001 From: Yatharth Kochar Date: Tue, 12 Jul 2016 15:47:03 +0100 Subject: AArch32: Add support for ARM Cortex-A32 MPCore Processor This patch adds ARM Cortex-A32 MPCore Processor support in the CPU specific operations framework. It also includes this support for the Base FVP port. Change-Id: If3697b88678df737c29f79cf3fa1ea2cb6fa565d --- include/lib/cpus/aarch32/cortex_a32.h | 44 +++++++++++ lib/cpus/aarch32/cortex_a32.S | 142 ++++++++++++++++++++++++++++++++++ plat/arm/board/fvp/platform.mk | 2 + 3 files changed, 188 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a32.h create mode 100644 lib/cpus/aarch32/cortex_a32.S diff --git a/include/lib/cpus/aarch32/cortex_a32.h b/include/lib/cpus/aarch32/cortex_a32.h new file mode 100644 index 00000000..458b41f6 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a32.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CORTEX_A32_H__ +#define __CORTEX_A32_H__ + +/* Cortex-A32 Main ID register for revision 0 */ +#define CORTEX_A32_MIDR 0x410FD010 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + * CPUECTLR_EL1 is an implementation-specific register. + ******************************************************************************/ +#define CORTEX_A32_CPUECTLR_EL1 p15, 1, c15 +#define CORTEX_A32_CPUECTLR_SMPEN_BIT (1 << 6) + +#endif /* __CORTEX_A32_H__ */ diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S new file mode 100644 index 00000000..b51f997a --- /dev/null +++ b/lib/cpus/aarch32/cortex_a32.S @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + + + /* --------------------------------------------- + * Disable intra-cluster coherency + * Clobbers: r0-r1 + * --------------------------------------------- + */ +func cortex_a32_disable_smp + ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1 + bic r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT + stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1 + isb + dsb sy + bx lr +endfunc cortex_a32_disable_smp + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A32. + * Clobbers: r0-r1 + * ------------------------------------------------- + */ +func cortex_a32_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1 + orr r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT + stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1 + isb + bx lr +endfunc cortex_a32_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A32. + * Clobbers: r0-r3 + * ---------------------------------------------------- + */ +func cortex_a32_core_pwr_dwn + push {lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {lr} + b cortex_a32_disable_smp +endfunc cortex_a32_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A32. + * Clobbers: r0-r3 + * ------------------------------------------------------- + */ +func cortex_a32_cluster_pwr_dwn + push {lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 cache. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* --------------------------------------------- + * Flush L2 cache. 
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {lr} + b cortex_a32_disable_smp +endfunc cortex_a32_cluster_pwr_dwn + +declare_cpu_ops cortex_a32, CORTEX_A32_MIDR diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk index ca348d1a..9b827a6b 100644 --- a/plat/arm/board/fvp/platform.mk +++ b/plat/arm/board/fvp/platform.mk @@ -109,6 +109,8 @@ FVP_CPU_LIBS += lib/cpus/aarch64/cortex_a35.S \ lib/cpus/aarch64/cortex_a57.S \ lib/cpus/aarch64/cortex_a72.S \ lib/cpus/aarch64/cortex_a73.S +else +FVP_CPU_LIBS += lib/cpus/aarch32/cortex_a32.S endif BL1_SOURCES += drivers/io/io_semihosting.c \ -- cgit
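
A closing usage note, not part of the patch series above: an AArch32 platform
port outside plat/arm would hook the new Cortex-A32 CPU library into its own
platform makefile in the same way as the FVP change in the last hunk. A
minimal sketch follows; the plat/<vendor>/<board>/platform.mk path, the
${ARCH} check and the use of BL32_SOURCES are assumptions based on the
build-system conventions, not something mandated by this patch:

    # Hypothetical plat/<vendor>/<board>/platform.mk fragment.
    ifeq (${ARCH},aarch32)
    # Cortex-A32 only implements AArch32, so its cpu_ops are pulled into
    # the BL32 (SP_MIN) build rather than into BL31.
    BL32_SOURCES    +=      lib/cpus/aarch32/cortex_a32.S
    endif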