author     danh-arm <dan.handley@arm.com>  2014-07-28 14:28:40 +0100
committer  danh-arm <dan.handley@arm.com>  2014-07-28 14:28:40 +0100
commit     6397bf6a99d785caa9b50016cd6c8eb76083c117 (patch)
tree       5e9ffd003cb8046b7eba285907bdedf4bd8c20ba /include/common/asm_macros.S
parent     9fd412770f1a7d9c68731a21f157a326db3c5725 (diff)
parent     8c106902368c40e14c558a0ab91cc57defdc7e81 (diff)
Merge pull request #172 from soby-mathew/sm/asm_assert
Introduce asm assert and optimize crash reporting
Diffstat (limited to 'include/common/asm_macros.S')
-rw-r--r--  include/common/asm_macros.S  33
1 file changed, 33 insertions, 0 deletions
diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S
index 2bccf581..238fa82a 100644
--- a/include/common/asm_macros.S
+++ b/include/common/asm_macros.S
@@ -162,3 +162,36 @@ wait_for_entrypoint:
.macro get_up_stack _name, _size
ldr x0, =(\_name + \_size)
.endm
+
+	/*
+	 * Helper macro to generate the best mov/movk combinations
+	 * according to the value to be moved. The 16 bits at offset
+	 * '_shift' are tested and, if not zero, moved into '_reg'
+	 * without affecting the other bits.
+	 */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+	/*
+	 * Helper macro to load arbitrary values into 32 or 64-bit
+	 * registers, generating the best mov/movk combinations. Many base
+	 * addresses are 64KB aligned; in that case the macro eliminates
+	 * the update of bits 15:0.
+	 */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
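
For illustration, here is a sketch of how these macros expand; the label and
constants are hypothetical and not part of the patch, and the expansions
assume GNU as for AArch64 with the macros above in scope. One subtlety worth
noting: in the test '(\_val & (1 << \_shift - 1))', the GNU as shift operator
binds tighter than subtraction, so the expression masks the bits below
'_shift'. If any of those bits are set, an earlier iteration has already
written the register with mov, so movk is used to merge the next 16-bit
chunk; otherwise a plain mov suffices.

	/* Hypothetical usage sketch, not part of the patch. */
	.text
	.globl	mov_imm_example
mov_imm_example:
	mov_imm	x0, 0x12345678	/* expands to two instructions:
				 *   mov  x0, 0x5678
				 *   movk x0, 0x1234, LSL 16
				 */
	mov_imm	x1, 0xffff0000	/* 64KB-aligned: bits 15:0 are zero, so
				 * the macro emits a single instruction:
				 *   mov  x1, 0xffff0000
				 */
	ret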