author     r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:42:10 +0200
committer  r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:49:50 +0200
commit     f9a43781767007462965b21f3f518c4cfc0744c7 (patch)
tree       201509439b1d9798256227794dae6774345adf43 /core/arch/arm/kernel
parent     1fed20f5471aa0dad5e4b4f79d1f2843ac88734f (diff)
Initial commit with upstream sources
Change-Id: Ie9460111f21fc955102fd8732a0173b2d0499a4a
Diffstat (limited to 'core/arch/arm/kernel')
-rw-r--r--  core/arch/arm/kernel/abort.c                 |  582
-rw-r--r--  core/arch/arm/kernel/asm-defines.c           |  107
-rw-r--r--  core/arch/arm/kernel/cache_helpers_a64.S     |  207
-rw-r--r--  core/arch/arm/kernel/elf32.h                 |  245
-rw-r--r--  core/arch/arm/kernel/elf64.h                 |  248
-rw-r--r--  core/arch/arm/kernel/elf_common.h            | 1006
-rw-r--r--  core/arch/arm/kernel/elf_load.c              |  646
-rw-r--r--  core/arch/arm/kernel/elf_load.h              |   44
-rw-r--r--  core/arch/arm/kernel/generic_boot.c          |  710
-rw-r--r--  core/arch/arm/kernel/generic_entry_a32.S     |  503
-rw-r--r--  core/arch/arm/kernel/generic_entry_a64.S     |  315
-rw-r--r--  core/arch/arm/kernel/kern.ld.S               |  340
-rw-r--r--  core/arch/arm/kernel/link.mk                 |  241
-rw-r--r--  core/arch/arm/kernel/misc_a32.S              |   90
-rw-r--r--  core/arch/arm/kernel/misc_a64.S              |   41
-rw-r--r--  core/arch/arm/kernel/mutex.c                 |  279
-rw-r--r--  core/arch/arm/kernel/pm_stubs.c              |   41
-rw-r--r--  core/arch/arm/kernel/proc_a32.S              |   96
-rw-r--r--  core/arch/arm/kernel/proc_a64.S              |   71
-rw-r--r--  core/arch/arm/kernel/pseudo_ta.c             |  256
-rw-r--r--  core/arch/arm/kernel/spin_lock_a32.S         |   85
-rw-r--r--  core/arch/arm/kernel/spin_lock_a64.S         |   89
-rw-r--r--  core/arch/arm/kernel/spin_lock_debug.c       |   63
-rw-r--r--  core/arch/arm/kernel/ssvce_a32.S             |  334
-rw-r--r--  core/arch/arm/kernel/ssvce_a64.S             |  115
-rw-r--r--  core/arch/arm/kernel/sub.mk                  |   45
-rw-r--r--  core/arch/arm/kernel/tee_l2cc_mutex.c        |  160
-rw-r--r--  core/arch/arm/kernel/tee_time.c              |   83
-rw-r--r--  core/arch/arm/kernel/tee_time_arm_cntpct.c   |  100
-rw-r--r--  core/arch/arm/kernel/tee_time_ree.c          |   62
-rw-r--r--  core/arch/arm/kernel/thread.c                | 1365
-rw-r--r--  core/arch/arm/kernel/thread_a32.S            |  645
-rw-r--r--  core/arch/arm/kernel/thread_a64.S            |  816
-rw-r--r--  core/arch/arm/kernel/thread_private.h        |  251
-rw-r--r--  core/arch/arm/kernel/trace_ext.c             |   50
-rw-r--r--  core/arch/arm/kernel/tz_ssvce_pl310_a32.S    |  258
-rw-r--r--  core/arch/arm/kernel/unwind_arm32.c          |  417
-rw-r--r--  core/arch/arm/kernel/unwind_arm64.c          |   84
-rw-r--r--  core/arch/arm/kernel/user_ta.c               |  826
-rw-r--r--  core/arch/arm/kernel/vfp.c                   |  149
-rw-r--r--  core/arch/arm/kernel/vfp_a32.S               |   81
-rw-r--r--  core/arch/arm/kernel/vfp_a64.S               |   72
-rw-r--r--  core/arch/arm/kernel/vfp_private.h           |   53
-rw-r--r--  core/arch/arm/kernel/wait_queue.c            |  225
44 files changed, 12496 insertions, 0 deletions
diff --git a/core/arch/arm/kernel/abort.c b/core/arch/arm/kernel/abort.c
new file mode 100644
index 0000000..3d29521
--- /dev/null
+++ b/core/arch/arm/kernel/abort.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/abort.h>
+#include <kernel/misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/panic.h>
+#include <kernel/user_ta.h>
+#include <kernel/unwind.h>
+#include <mm/core_mmu.h>
+#include <mm/tee_pager.h>
+#include <tee/tee_svc.h>
+#include <trace.h>
+#include <arm.h>
+
+enum fault_type {
+ FAULT_TYPE_USER_TA_PANIC,
+ FAULT_TYPE_USER_TA_VFP,
+ FAULT_TYPE_PAGEABLE,
+ FAULT_TYPE_IGNORE,
+};
+
+#ifdef CFG_CORE_UNWIND
+#ifdef ARM32
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.registers[0] = ai->regs->r0;
+ state.registers[1] = ai->regs->r1;
+ state.registers[2] = ai->regs->r2;
+ state.registers[3] = ai->regs->r3;
+ state.registers[4] = ai->regs->r4;
+ state.registers[5] = ai->regs->r5;
+ state.registers[6] = ai->regs->r6;
+ state.registers[7] = ai->regs->r7;
+ state.registers[8] = ai->regs->r8;
+ state.registers[9] = ai->regs->r9;
+ state.registers[10] = ai->regs->r10;
+ state.registers[11] = ai->regs->r11;
+ state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[15] = ai->pc;
+
+ do {
+ EMSG_RAW(" pc 0x%08x", state.registers[15]);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = ai->regs->elr;
+ state.fp = ai->regs->x29;
+
+ do {
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM64*/
+
+static void print_stack_unwind(struct abort_info *ai)
+{
+ EMSG_RAW("Call stack:");
+ __print_stack_unwind(ai);
+}
+#else /*CFG_CORE_UNWIND*/
+static void print_stack_unwind(struct abort_info *ai __unused)
+{
+}
+#endif /*CFG_CORE_UNWIND*/
+
+static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
+{
+ if (abort_type == ABORT_TYPE_DATA)
+ return "data";
+ if (abort_type == ABORT_TYPE_PREFETCH)
+ return "prefetch";
+ return "undef";
+}
+
+static __maybe_unused const char *fault_to_str(uint32_t abort_type,
+ uint32_t fault_descr)
+{
+ /* fault_descr is only valid for data or prefetch abort */
+ if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
+ return "";
+
+ switch (core_mmu_get_fault_type(fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ return " (alignment fault)";
+ case CORE_MMU_FAULT_TRANSLATION:
+ return " (translation fault)";
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return " (read permission fault)";
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ return " (write permission fault)";
+ default:
+ return "";
+ }
+}
+
+static __maybe_unused void print_detailed_abort(
+ struct abort_info *ai __maybe_unused,
+ const char *ctx __maybe_unused)
+{
+ EMSG_RAW("\n");
+ EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s\n",
+ ctx, abort_type_to_str(ai->abort_type), ai->va,
+ fault_to_str(ai->abort_type, ai->fault_descr));
+#ifdef ARM32
+ EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0(), read_ttbr1(),
+ read_contextidr());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), ai->regs->spsr);
+ EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
+ ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
+ EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
+ ai->regs->r1, ai->regs->r5, ai->regs->r9,
+ read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
+ ai->regs->r2, ai->regs->r6, ai->regs->r10,
+ read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
+ ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64 " cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
+ read_contextidr_el1());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), (uint32_t)ai->regs->spsr);
+ EMSG_RAW("x0 %016" PRIx64 " x1 %016" PRIx64,
+ ai->regs->x0, ai->regs->x1);
+ EMSG_RAW("x2 %016" PRIx64 " x3 %016" PRIx64,
+ ai->regs->x2, ai->regs->x3);
+ EMSG_RAW("x4 %016" PRIx64 " x5 %016" PRIx64,
+ ai->regs->x4, ai->regs->x5);
+ EMSG_RAW("x6 %016" PRIx64 " x7 %016" PRIx64,
+ ai->regs->x6, ai->regs->x7);
+ EMSG_RAW("x8 %016" PRIx64 " x9 %016" PRIx64,
+ ai->regs->x8, ai->regs->x9);
+ EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
+ ai->regs->x10, ai->regs->x11);
+ EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
+ ai->regs->x12, ai->regs->x13);
+ EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
+ ai->regs->x14, ai->regs->x15);
+ EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
+ ai->regs->x16, ai->regs->x17);
+ EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
+ ai->regs->x18, ai->regs->x19);
+ EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
+ ai->regs->x20, ai->regs->x21);
+ EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
+ ai->regs->x22, ai->regs->x23);
+ EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
+ ai->regs->x24, ai->regs->x25);
+ EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
+ ai->regs->x26, ai->regs->x27);
+ EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
+ ai->regs->x28, ai->regs->x29);
+ EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
+ ai->regs->x30, ai->regs->elr);
+ EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
+#endif /*ARM64*/
+}
+
+static void print_user_abort(struct abort_info *ai __maybe_unused)
+{
+#ifdef CFG_TEE_CORE_TA_TRACE
+ print_detailed_abort(ai, "user TA");
+ tee_ta_dump_current();
+#endif
+}
+
+void abort_print(struct abort_info *ai __maybe_unused)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+ print_detailed_abort(ai, "core");
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+}
+
+void abort_print_error(struct abort_info *ai)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+ /* full verbose log at DEBUG level */
+ print_detailed_abort(ai, "core");
+#else
+#ifdef ARM32
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
+ "CPUID 0x%x CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
+ read_mpidr(), read_spsr());
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X\n"
+ "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
+ read_contextidr_el1(),
+ read_mpidr_el1(), (uint32_t)ai->regs->spsr);
+#endif /*ARM64*/
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+ print_stack_unwind(ai);
+}
+
+#ifdef ARM32
+static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
+ struct abort_info *ai)
+{
+ switch (abort_type) {
+ case ABORT_TYPE_DATA:
+ ai->fault_descr = read_dfsr();
+ ai->va = read_dfar();
+ break;
+ case ABORT_TYPE_PREFETCH:
+ ai->fault_descr = read_ifsr();
+ ai->va = read_ifar();
+ break;
+ default:
+ ai->fault_descr = 0;
+ ai->va = regs->elr;
+ break;
+ }
+ ai->abort_type = abort_type;
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_abort_info(uint32_t abort_type __unused,
+ struct thread_abort_regs *regs, struct abort_info *ai)
+{
+ ai->fault_descr = read_esr_el1();
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_IABT_EL0:
+ case ESR_EC_IABT_EL1:
+ ai->abort_type = ABORT_TYPE_PREFETCH;
+ ai->va = read_far_el1();
+ break;
+ case ESR_EC_DABT_EL0:
+ case ESR_EC_DABT_EL1:
+ case ESR_EC_SP_ALIGN:
+ ai->abort_type = ABORT_TYPE_DATA;
+ ai->va = read_far_el1();
+ break;
+ default:
+ ai->abort_type = ABORT_TYPE_UNDEF;
+ ai->va = regs->elr;
+ }
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->r1 = true;
+ ai->regs->r2 = 0xdeadbeef;
+ ai->regs->elr = (uint32_t)thread_unwind_user_mode;
+ ai->regs->spsr = read_cpsr();
+ ai->regs->spsr &= ~CPSR_MODE_MASK;
+ ai->regs->spsr |= CPSR_MODE_SVC;
+ ai->regs->spsr &= ~CPSR_FIA;
+ ai->regs->spsr |= read_spsr() & CPSR_FIA;
+ /* Select Thumb or ARM mode */
+ if (ai->regs->elr & 1)
+ ai->regs->spsr |= CPSR_T;
+ else
+ ai->regs->spsr &= ~CPSR_T;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ uint32_t daif;
+
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->x1 = true;
+ ai->regs->x2 = 0xdeadbeef;
+ ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
+ ai->regs->sp_el0 = thread_get_saved_thread_sp();
+
+ daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
+ /* XXX what about DAIF_D? */
+ ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
+}
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+static void handle_user_ta_vfp(void)
+{
+ struct tee_ta_session *s;
+
+ if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
+ panic();
+
+ thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
+}
+#endif /*CFG_WITH_VFP*/
+
+#ifdef CFG_WITH_USER_TA
+#ifdef ARM32
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ uint32_t spsr = ai->regs->spsr;
+
+ if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+ return true;
+ if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+ SPSR_64_MODE_EL0)
+ return true;
+ return false;
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_USER_TA*/
+bool abort_is_user_exception(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_USER_TA*/
+
+#ifdef ARM32
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*ARM64*/
+
+
+#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
+#ifdef ARM32
+
+#define T32_INSTR(w1, w0) \
+ ((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))
+
+#define T32_VTRANS32_MASK T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
+#define T32_VTRANS32_VAL T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)
+
+#define T32_VTRANS64_MASK T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
+#define T32_VTRANS64_VAL T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)
+
+#define T32_VLDST_MASK T32_INSTR((0xff << 8) | (1 << 4), 0)
+#define T32_VLDST_VAL T32_INSTR( 0xf9 << 8 , 0)
+
+#define T32_VXLDST_MASK T32_INSTR(0xfc << 8, 7 << 9)
+#define T32_VXLDST_VAL T32_INSTR(0xec << 8, 5 << 9)
+
+#define T32_VPROC_MASK T32_INSTR(0xef << 8, 0)
+#define T32_VPROC_VAL T32_VPROC_MASK
+
+#define A32_INSTR(x) ((uint32_t)(x))
+
+#define A32_VTRANS32_MASK A32_INSTR(SHIFT_U32(0xf, 24) | \
+ SHIFT_U32(7, 9) | BIT32(4))
+#define A32_VTRANS32_VAL A32_INSTR(SHIFT_U32(0xe, 24) | \
+ SHIFT_U32(5, 9) | BIT32(4))
+
+#define A32_VTRANS64_MASK A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
+#define A32_VTRANS64_VAL A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))
+
+#define A32_VLDST_MASK A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
+#define A32_VLDST_VAL A32_INSTR(SHIFT_U32(0xf4, 24))
+#define A32_VXLDST_MASK A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
+#define A32_VXLDST_VAL A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))
+
+#define A32_VPROC_MASK A32_INSTR(SHIFT_U32(0x7f, 25))
+#define A32_VPROC_VAL A32_INSTR(SHIFT_U32(0x79, 25))
+
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ TEE_Result res;
+ uint32_t instr;
+
+ if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
+ return false;
+
+ res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
+ if (res != TEE_SUCCESS)
+ return false;
+
+ if (ai->regs->spsr & CPSR_T) {
+ /* Thumb mode */
+ return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
+ ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
+ ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
+ ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
+ ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
+ } else {
+ /* ARM mode */
+ return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
+ ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
+ ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
+ ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
+ ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
+ }
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_FP_ASIMD:
+ case ESR_EC_AARCH32_FP:
+ case ESR_EC_AARCH64_FP:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+static bool is_vfp_fault(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+
+static enum fault_type get_fault_type(struct abort_info *ai)
+{
+ if (abort_is_user_exception(ai)) {
+ if (is_vfp_fault(ai))
+ return FAULT_TYPE_USER_TA_VFP;
+#ifndef CFG_WITH_PAGER
+ return FAULT_TYPE_USER_TA_PANIC;
+#endif
+ }
+
+ if (is_abort_in_abort_handler(ai)) {
+ abort_print_error(ai);
+ panic("[abort] abort in abort handler (trap CPU)");
+ }
+
+ if (ai->abort_type == ABORT_TYPE_UNDEF) {
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] undefined abort (trap CPU)");
+ }
+
+ switch (core_mmu_get_fault_type(ai->fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] alignement fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_ACCESS_BIT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] access bit fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_DEBUG_EVENT:
+ abort_print(ai);
+ DMSG("[abort] Ignoring debug event!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return FAULT_TYPE_PAGEABLE;
+
+ case CORE_MMU_FAULT_ASYNC_EXTERNAL:
+ abort_print(ai);
+ DMSG("[abort] Ignoring async external abort!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_OTHER:
+ default:
+ abort_print(ai);
+ DMSG("[abort] Unhandled fault!");
+ return FAULT_TYPE_IGNORE;
+ }
+}
+
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
+{
+ struct abort_info ai;
+ bool handled;
+
+ set_abort_info(abort_type, regs, &ai);
+
+ switch (get_fault_type(&ai)) {
+ case FAULT_TYPE_IGNORE:
+ break;
+ case FAULT_TYPE_USER_TA_PANIC:
+ DMSG("[abort] abort in User mode (TA will panic)");
+ print_user_abort(&ai);
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ break;
+#ifdef CFG_WITH_VFP
+ case FAULT_TYPE_USER_TA_VFP:
+ handle_user_ta_vfp();
+ break;
+#endif
+ case FAULT_TYPE_PAGEABLE:
+ default:
+ thread_kernel_save_vfp();
+ handled = tee_pager_handle_fault(&ai);
+ thread_kernel_restore_vfp();
+ if (!handled) {
+ if (!abort_is_user_exception(&ai)) {
+ abort_print_error(&ai);
+ panic("unhandled pageable abort");
+ }
+ print_user_abort(&ai);
+ DMSG("[abort] abort in User mode (TA will panic)");
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ }
+ break;
+ }
+}
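[Editor's note] One easy-to-misread detail in abort.c above is T32_INSTR(): a 32-bit Thumb-2 instruction is fetched with a single little-endian 32-bit load, so the first halfword of the encoding lands in the low 16 bits of the loaded word and the second halfword in the high 16 bits. The macro takes the halfwords in instruction-stream order and builds that swapped layout. A small stand-alone illustration (not part of the commit):

	#include <stdint.h>

	#define T32_INSTR(w1, w0) \
		((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))

	/*
	 * First halfword 0xee00, second halfword 0x0a10 (the VFP 32-bit
	 * transfer pattern): the fetched word is 0x0a10ee00, which is
	 * exactly what T32_VTRANS32_VAL above expands to.
	 */
	static const uint32_t t32_vtrans32_val = T32_INSTR(0xee00, 0x0a10);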
diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c
new file mode 100644
index 0000000..99c0a63
--- /dev/null
+++ b/core/arch/arm/kernel/asm-defines.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/thread.h>
+#include <sm/sm.h>
+#include <types_ext.h>
+#include "thread_private.h"
+
+#define DEFINES void __defines(void); void __defines(void)
+
+#define DEFINE(def, val) \
+ asm volatile("\n==>" #def " %0 " #val : : "i" (val))
+
+DEFINES
+{
+#ifdef ARM32
+ DEFINE(SM_NSEC_CTX_R0, offsetof(struct sm_nsec_ctx, r0));
+ DEFINE(SM_NSEC_CTX_R8, offsetof(struct sm_nsec_ctx, r8));
+ DEFINE(SM_SEC_CTX_R0, offsetof(struct sm_sec_ctx, r0));
+ DEFINE(SM_SEC_CTX_MON_LR, offsetof(struct sm_sec_ctx, mon_lr));
+ DEFINE(SM_CTX_SIZE, sizeof(struct sm_ctx));
+ DEFINE(SM_CTX_NSEC, offsetof(struct sm_ctx, nsec));
+ DEFINE(SM_CTX_SEC, offsetof(struct sm_ctx, sec));
+
+ DEFINE(THREAD_VECTOR_TABLE_FIQ_ENTRY,
+ offsetof(struct thread_vector_table, fiq_entry));
+
+ DEFINE(THREAD_SVC_REG_R0, offsetof(struct thread_svc_regs, r0));
+ DEFINE(THREAD_SVC_REG_R5, offsetof(struct thread_svc_regs, r5));
+ DEFINE(THREAD_SVC_REG_R6, offsetof(struct thread_svc_regs, r6));
+#endif /*ARM32*/
+
+#ifdef ARM64
+ DEFINE(THREAD_SMC_ARGS_X0, offsetof(struct thread_smc_args, a0));
+ DEFINE(THREAD_SMC_ARGS_SIZE, sizeof(struct thread_smc_args));
+
+ DEFINE(THREAD_SVC_REG_X0, offsetof(struct thread_svc_regs, x0));
+ DEFINE(THREAD_SVC_REG_X5, offsetof(struct thread_svc_regs, x5));
+ DEFINE(THREAD_SVC_REG_X6, offsetof(struct thread_svc_regs, x6));
+ DEFINE(THREAD_SVC_REG_X30, offsetof(struct thread_svc_regs, x30));
+ DEFINE(THREAD_SVC_REG_ELR, offsetof(struct thread_svc_regs, elr));
+ DEFINE(THREAD_SVC_REG_SPSR, offsetof(struct thread_svc_regs, spsr));
+ DEFINE(THREAD_SVC_REG_SP_EL0, offsetof(struct thread_svc_regs, sp_el0));
+ DEFINE(THREAD_SVC_REG_SIZE, sizeof(struct thread_svc_regs));
+
+ /* struct thread_abort_regs */
+ DEFINE(THREAD_ABT_REG_X0, offsetof(struct thread_abort_regs, x0));
+ DEFINE(THREAD_ABT_REG_X2, offsetof(struct thread_abort_regs, x2));
+ DEFINE(THREAD_ABT_REG_X30, offsetof(struct thread_abort_regs, x30));
+ DEFINE(THREAD_ABT_REG_SPSR, offsetof(struct thread_abort_regs, spsr));
+ DEFINE(THREAD_ABT_REGS_SIZE, sizeof(struct thread_abort_regs));
+
+ /* struct thread_ctx */
+ DEFINE(THREAD_CTX_KERN_SP, offsetof(struct thread_ctx, kern_sp));
+ DEFINE(THREAD_CTX_SIZE, sizeof(struct thread_ctx));
+
+ /* struct thread_ctx_regs */
+ DEFINE(THREAD_CTX_REGS_SP, offsetof(struct thread_ctx_regs, sp));
+ DEFINE(THREAD_CTX_REGS_X0, offsetof(struct thread_ctx_regs, x[0]));
+ DEFINE(THREAD_CTX_REGS_X1, offsetof(struct thread_ctx_regs, x[1]));
+ DEFINE(THREAD_CTX_REGS_X4, offsetof(struct thread_ctx_regs, x[4]));
+ DEFINE(THREAD_CTX_REGS_X19, offsetof(struct thread_ctx_regs, x[19]));
+
+ /* struct thread_user_mode_rec */
+ DEFINE(THREAD_USER_MODE_REC_EXIT_STATUS0_PTR,
+ offsetof(struct thread_user_mode_rec, exit_status0_ptr));
+ DEFINE(THREAD_USER_MODE_REC_X19,
+ offsetof(struct thread_user_mode_rec, x[0]));
+ DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));
+
+ /* struct thread_core_local */
+ DEFINE(THREAD_CORE_LOCAL_TMP_STACK_VA_END,
+ offsetof(struct thread_core_local, tmp_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_CURR_THREAD,
+ offsetof(struct thread_core_local, curr_thread));
+ DEFINE(THREAD_CORE_LOCAL_FLAGS,
+ offsetof(struct thread_core_local, flags));
+ DEFINE(THREAD_CORE_LOCAL_ABT_STACK_VA_END,
+ offsetof(struct thread_core_local, abt_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_X0, offsetof(struct thread_core_local, x[0]));
+ DEFINE(THREAD_CORE_LOCAL_X2, offsetof(struct thread_core_local, x[2]));
+#endif /*ARM64*/
+}
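[Editor's note] asm-defines.c above uses a common trick for exporting C-level constants (structure offsets and sizes) to assembly: each DEFINE() emits an "==>NAME value" marker into the compiler-generated assembly, and the build system scrapes those markers into a header of #define lines that the .S files can include. A minimal stand-alone sketch of the same idea, with made-up struct and constant names (the scraping step lives in the build scripts, not in this file):

	#include <stddef.h>

	/* Same macro as in asm-defines.c: emit a marker with an immediate. */
	#define DEFINE(def, val) \
		asm volatile("\n==>" #def " %0 " #val : : "i" (val))

	struct example_ctx {
		int status;
		long kern_sp;
	};

	void __defines(void);
	void __defines(void)
	{
		/* Compiling with -S yields lines such as "==>EXAMPLE_CTX_KERN_SP #8 ..." */
		DEFINE(EXAMPLE_CTX_KERN_SP, offsetof(struct example_ctx, kern_sp));
		DEFINE(EXAMPLE_CTX_SIZE, sizeof(struct example_ctx));
	}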
diff --git a/core/arch/arm/kernel/cache_helpers_a64.S b/core/arch/arm/kernel/cache_helpers_a64.S
new file mode 100644
index 0000000..d3a0248
--- /dev/null
+++ b/core/arch/arm/kernel/cache_helpers_a64.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm64.h>
+#include <asm.S>
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ /* ------------------------------------------
+ * Clean+Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+FUNC flush_dcache_range , :
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+flush_loop:
+ dc civac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo flush_loop
+ dsb sy
+ ret
+END_FUNC flush_dcache_range
+
+
+ /* ------------------------------------------
+ * Invalidate from base address till
+ * size. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+FUNC inv_dcache_range , :
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+inv_loop:
+ dc ivac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo inv_loop
+ dsb sy
+ ret
+END_FUNC inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ b do_dcsw_op
+ .endm
+
+LOCAL_FUNC do_dcsw_op , :
+ cbz x3, exit
+ mov x10, xzr
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache op
+ isb
+exit:
+ ret
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+END_FUNC do_dcsw_op
+
+
+FUNC dcsw_op_louis , :
+ dcsw_op #CLIDR_LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_louis
+
+
+FUNC dcsw_op_all , :
+ dcsw_op #CLIDR_LOC_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_all
diff --git a/core/arch/arm/kernel/elf32.h b/core/arch/arm/kernel/elf32.h
new file mode 100644
index 0000000..d374208
--- /dev/null
+++ b/core/arch/arm/kernel/elf32.h
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF32_H_
+#define _SYS_ELF32_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 32-bit architectures.
+ */
+
+typedef uint32_t Elf32_Addr;
+typedef uint16_t Elf32_Half;
+typedef uint32_t Elf32_Off;
+typedef int32_t Elf32_Sword;
+typedef uint32_t Elf32_Word;
+typedef uint64_t Elf32_Lword;
+
+typedef Elf32_Word Elf32_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf32_Word Elf32_Size;
+typedef Elf32_Sword Elf32_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* File identification. */
+ Elf32_Half e_type; /* File type. */
+ Elf32_Half e_machine; /* Machine architecture. */
+ Elf32_Word e_version; /* ELF format version. */
+ Elf32_Addr e_entry; /* Entry point. */
+ Elf32_Off e_phoff; /* Program header file offset. */
+ Elf32_Off e_shoff; /* Section header file offset. */
+ Elf32_Word e_flags; /* Architecture-specific flags. */
+ Elf32_Half e_ehsize; /* Size of ELF header in bytes. */
+ Elf32_Half e_phentsize; /* Size of program header entry. */
+ Elf32_Half e_phnum; /* Number of program header entries. */
+ Elf32_Half e_shentsize; /* Size of section header entry. */
+ Elf32_Half e_shnum; /* Number of section header entries. */
+ Elf32_Half e_shstrndx; /* Section name strings section. */
+} Elf32_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+ Elf32_Word sh_name; /* Section name (index into the
+ section header string table). */
+ Elf32_Word sh_type; /* Section type. */
+ Elf32_Word sh_flags; /* Section flags. */
+ Elf32_Addr sh_addr; /* Address in memory image. */
+ Elf32_Off sh_offset; /* Offset in file. */
+ Elf32_Word sh_size; /* Size in bytes. */
+ Elf32_Word sh_link; /* Index of a related section. */
+ Elf32_Word sh_info; /* Depends on section type. */
+ Elf32_Word sh_addralign; /* Alignment in bytes. */
+ Elf32_Word sh_entsize; /* Size of each entry in section. */
+} Elf32_Shdr;
+
+/*
+ * Program header.
+ */
+
+typedef struct {
+ Elf32_Word p_type; /* Entry type. */
+ Elf32_Off p_offset; /* File offset of contents. */
+ Elf32_Addr p_vaddr; /* Virtual address in memory image. */
+ Elf32_Addr p_paddr; /* Physical address (not used). */
+ Elf32_Word p_filesz; /* Size of contents in file. */
+ Elf32_Word p_memsz; /* Size of contents in memory. */
+ Elf32_Word p_flags; /* Access permission flags. */
+ Elf32_Word p_align; /* Alignment in memory and file. */
+} Elf32_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+ Elf32_Sword d_tag; /* Entry type. */
+ union {
+ Elf32_Word d_val; /* Integer value. */
+ Elf32_Addr d_ptr; /* Address value. */
+ } d_un;
+} Elf32_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+ Elf32_Addr r_offset; /* Location to be relocated. */
+ Elf32_Word r_info; /* Relocation type and symbol index. */
+} Elf32_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+ Elf32_Addr r_offset; /* Location to be relocated. */
+ Elf32_Word r_info; /* Relocation type and symbol index. */
+ Elf32_Sword r_addend; /* Addend. */
+} Elf32_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF32_R_SYM(info) ((info) >> 8)
+#define ELF32_R_TYPE(info) ((unsigned char)(info))
+
+/* Macro for constructing r_info from field values. */
+#define ELF32_R_INFO(sym, type) (((sym) << 8) + (unsigned char)(type))
+
+/*
+ * Note entry header
+ */
+typedef Elf_Note Elf32_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+ Elf32_Lword m_value; /* symbol value */
+ Elf32_Word m_info; /* size + index */
+ Elf32_Word m_poffset; /* symbol offset */
+ Elf32_Half m_repeat; /* repeat count */
+ Elf32_Half m_stride; /* stride info */
+} Elf32_Move;
+
+/*
+ * The macros compose and decompose values for Move.r_info
+ *
+ * sym = ELF32_M_SYM(M.m_info)
+ * size = ELF32_M_SIZE(M.m_info)
+ * M.m_info = ELF32_M_INFO(sym, size)
+ */
+#define ELF32_M_SYM(info) ((info)>>8)
+#define ELF32_M_SIZE(info) ((unsigned char)(info))
+#define ELF32_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+ Elf32_Word c_tag; /* how to interpret value */
+ union {
+ Elf32_Word c_val;
+ Elf32_Addr c_ptr;
+ } c_un;
+} Elf32_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+ Elf32_Word st_name; /* String table index of name. */
+ Elf32_Addr st_value; /* Symbol value. */
+ Elf32_Word st_size; /* Size of associated object. */
+ unsigned char st_info; /* Type and binding information. */
+ unsigned char st_other; /* Reserved (not used). */
+ Elf32_Half st_shndx; /* Section index of symbol. */
+} Elf32_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF32_ST_BIND(info) ((info) >> 4)
+#define ELF32_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF32_ST_VISIBILITY(oth) ((oth) & 0x3)
+
+/* Structures used by Sun & GNU symbol versioning. */
+typedef struct
+{
+ Elf32_Half vd_version;
+ Elf32_Half vd_flags;
+ Elf32_Half vd_ndx;
+ Elf32_Half vd_cnt;
+ Elf32_Word vd_hash;
+ Elf32_Word vd_aux;
+ Elf32_Word vd_next;
+} Elf32_Verdef;
+
+typedef struct
+{
+ Elf32_Word vda_name;
+ Elf32_Word vda_next;
+} Elf32_Verdaux;
+
+typedef struct
+{
+ Elf32_Half vn_version;
+ Elf32_Half vn_cnt;
+ Elf32_Word vn_file;
+ Elf32_Word vn_aux;
+ Elf32_Word vn_next;
+} Elf32_Verneed;
+
+typedef struct
+{
+ Elf32_Word vna_hash;
+ Elf32_Half vna_flags;
+ Elf32_Half vna_other;
+ Elf32_Word vna_name;
+ Elf32_Word vna_next;
+} Elf32_Vernaux;
+
+typedef Elf32_Half Elf32_Versym;
+
+typedef struct {
+ Elf32_Half si_boundto; /* direct bindings - symbol bound to */
+ Elf32_Half si_flags; /* per symbol flags */
+} Elf32_Syminfo;
+
+#endif /* !_SYS_ELF32_H_ */
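[Editor's note] As an illustration of how elf32.h and the included elf_common.h fit together (this helper is a hedged sketch, not part of the commit): a loader that only accepts little-endian 32-bit ARM images could validate the identification bytes and machine field like this.

	#include <stdbool.h>
	#include "elf32.h"	/* also pulls in elf_common.h */

	static bool is_arm_elf32(const Elf32_Ehdr *ehdr)
	{
		return IS_ELF(*ehdr) &&
		       ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
		       ehdr->e_ident[EI_DATA] == ELFDATA2LSB &&
		       ehdr->e_ident[EI_VERSION] == EV_CURRENT &&
		       ehdr->e_machine == EM_ARM;
	}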
diff --git a/core/arch/arm/kernel/elf64.h b/core/arch/arm/kernel/elf64.h
new file mode 100644
index 0000000..c468dcd
--- /dev/null
+++ b/core/arch/arm/kernel/elf64.h
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF64_H_
+#define _SYS_ELF64_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 64-bit architectures.
+ */
+
+typedef uint64_t Elf64_Addr;
+typedef uint16_t Elf64_Half;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Sword;
+typedef int64_t Elf64_Sxword;
+typedef uint32_t Elf64_Word;
+typedef uint64_t Elf64_Lword;
+typedef uint64_t Elf64_Xword;
+
+/*
+ * Types of dynamic symbol hash table bucket and chain elements.
+ *
+ * This is inconsistent among 64 bit architectures, so a machine dependent
+ * typedef is required.
+ */
+
+typedef Elf64_Word Elf64_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf64_Xword Elf64_Size;
+typedef Elf64_Sxword Elf64_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* File identification. */
+ Elf64_Half e_type; /* File type. */
+ Elf64_Half e_machine; /* Machine architecture. */
+ Elf64_Word e_version; /* ELF format version. */
+ Elf64_Addr e_entry; /* Entry point. */
+ Elf64_Off e_phoff; /* Program header file offset. */
+ Elf64_Off e_shoff; /* Section header file offset. */
+ Elf64_Word e_flags; /* Architecture-specific flags. */
+ Elf64_Half e_ehsize; /* Size of ELF header in bytes. */
+ Elf64_Half e_phentsize; /* Size of program header entry. */
+ Elf64_Half e_phnum; /* Number of program header entries. */
+ Elf64_Half e_shentsize; /* Size of section header entry. */
+ Elf64_Half e_shnum; /* Number of section header entries. */
+ Elf64_Half e_shstrndx; /* Section name strings section. */
+} Elf64_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+ Elf64_Word sh_name; /* Section name (index into the
+ section header string table). */
+ Elf64_Word sh_type; /* Section type. */
+ Elf64_Xword sh_flags; /* Section flags. */
+ Elf64_Addr sh_addr; /* Address in memory image. */
+ Elf64_Off sh_offset; /* Offset in file. */
+ Elf64_Xword sh_size; /* Size in bytes. */
+ Elf64_Word sh_link; /* Index of a related section. */
+ Elf64_Word sh_info; /* Depends on section type. */
+ Elf64_Xword sh_addralign; /* Alignment in bytes. */
+ Elf64_Xword sh_entsize; /* Size of each entry in section. */
+} Elf64_Shdr;
+
+/*
+ * Program header.
+ */
+
+typedef struct {
+ Elf64_Word p_type; /* Entry type. */
+ Elf64_Word p_flags; /* Access permission flags. */
+ Elf64_Off p_offset; /* File offset of contents. */
+ Elf64_Addr p_vaddr; /* Virtual address in memory image. */
+ Elf64_Addr p_paddr; /* Physical address (not used). */
+ Elf64_Xword p_filesz; /* Size of contents in file. */
+ Elf64_Xword p_memsz; /* Size of contents in memory. */
+ Elf64_Xword p_align; /* Alignment in memory and file. */
+} Elf64_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+ Elf64_Sxword d_tag; /* Entry type. */
+ union {
+ Elf64_Xword d_val; /* Integer value. */
+ Elf64_Addr d_ptr; /* Address value. */
+ } d_un;
+} Elf64_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+ Elf64_Addr r_offset; /* Location to be relocated. */
+ Elf64_Xword r_info; /* Relocation type and symbol index. */
+} Elf64_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+ Elf64_Addr r_offset; /* Location to be relocated. */
+ Elf64_Xword r_info; /* Relocation type and symbol index. */
+ Elf64_Sxword r_addend; /* Addend. */
+} Elf64_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF64_R_SYM(info) ((info) >> 32)
+#define ELF64_R_TYPE(info) ((info) & 0xffffffffL)
+
+/* Macro for constructing r_info from field values. */
+#define ELF64_R_INFO(sym, type) (((sym) << 32) + ((type) & 0xffffffffL))
+
+#define ELF64_R_TYPE_DATA(info) (((Elf64_Xword)(info)<<32)>>40)
+#define ELF64_R_TYPE_ID(info) (((Elf64_Xword)(info)<<56)>>56)
+#define ELF64_R_TYPE_INFO(data, type) \
+ (((Elf64_Xword)(data)<<8)+(Elf64_Xword)(type))
+
+/*
+ * Note entry header
+ */
+typedef Elf_Note Elf64_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+ Elf64_Lword m_value; /* symbol value */
+ Elf64_Xword m_info; /* size + index */
+ Elf64_Xword m_poffset; /* symbol offset */
+ Elf64_Half m_repeat; /* repeat count */
+ Elf64_Half m_stride; /* stride info */
+} Elf64_Move;
+
+#define ELF64_M_SYM(info) ((info)>>8)
+#define ELF64_M_SIZE(info) ((unsigned char)(info))
+#define ELF64_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+ Elf64_Xword c_tag; /* how to interpret value */
+ union {
+ Elf64_Xword c_val;
+ Elf64_Addr c_ptr;
+ } c_un;
+} Elf64_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+ Elf64_Word st_name; /* String table index of name. */
+ unsigned char st_info; /* Type and binding information. */
+ unsigned char st_other; /* Reserved (not used). */
+ Elf64_Half st_shndx; /* Section index of symbol. */
+ Elf64_Addr st_value; /* Symbol value. */
+ Elf64_Xword st_size; /* Size of associated object. */
+} Elf64_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF64_ST_BIND(info) ((info) >> 4)
+#define ELF64_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF64_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF64_ST_VISIBILITY(oth) ((oth) & 0x3)
+
+/* Structures used by Sun & GNU-style symbol versioning. */
+typedef struct {
+ Elf64_Half vd_version;
+ Elf64_Half vd_flags;
+ Elf64_Half vd_ndx;
+ Elf64_Half vd_cnt;
+ Elf64_Word vd_hash;
+ Elf64_Word vd_aux;
+ Elf64_Word vd_next;
+} Elf64_Verdef;
+
+typedef struct {
+ Elf64_Word vda_name;
+ Elf64_Word vda_next;
+} Elf64_Verdaux;
+
+typedef struct {
+ Elf64_Half vn_version;
+ Elf64_Half vn_cnt;
+ Elf64_Word vn_file;
+ Elf64_Word vn_aux;
+ Elf64_Word vn_next;
+} Elf64_Verneed;
+
+typedef struct {
+ Elf64_Word vna_hash;
+ Elf64_Half vna_flags;
+ Elf64_Half vna_other;
+ Elf64_Word vna_name;
+ Elf64_Word vna_next;
+} Elf64_Vernaux;
+
+typedef Elf64_Half Elf64_Versym;
+
+typedef struct {
+ Elf64_Half si_boundto; /* direct bindings - symbol bound to */
+ Elf64_Half si_flags; /* per symbol flags */
+} Elf64_Syminfo;
+
+#endif /* !_SYS_ELF64_H_ */
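[Editor's note] The 64-bit r_info layout in elf64.h differs from the 32-bit one: the symbol table index occupies the upper 32 bits and the relocation type the lower 32 bits. A small round trip through the macros, for illustration only (1027 is the AArch64 R_AARCH64_RELATIVE relocation type):

	#include <assert.h>
	#include "elf64.h"

	static void elf64_r_info_example(void)
	{
		Elf64_Xword info = ELF64_R_INFO((Elf64_Xword)7, 1027);

		assert(ELF64_R_SYM(info) == 7);		/* upper 32 bits */
		assert(ELF64_R_TYPE(info) == 1027);	/* lower 32 bits */
	}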
diff --git a/core/arch/arm/kernel/elf_common.h b/core/arch/arm/kernel/elf_common.h
new file mode 100644
index 0000000..dd8cd50
--- /dev/null
+++ b/core/arch/arm/kernel/elf_common.h
@@ -0,0 +1,1006 @@
+/*-
+ * Copyright (c) 2000, 2001, 2008, 2011, David E. O'Brien
+ * Copyright (c) 1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF_COMMON_H_
+#define _SYS_ELF_COMMON_H_ 1
+
+/*
+ * ELF definitions that are independent of architecture or word size.
+ */
+
+/*
+ * Note header. The ".note" section contains an array of notes. Each
+ * begins with this header, aligned to a word boundary. Immediately
+ * following the note header is n_namesz bytes of name, padded to the
+ * next word boundary. Then comes n_descsz bytes of descriptor, again
+ * padded to a word boundary. The values of n_namesz and n_descsz do
+ * not include the padding.
+ */
+
+typedef struct {
+ uint32_t n_namesz; /* Length of name. */
+ uint32_t n_descsz; /* Length of descriptor. */
+ uint32_t n_type; /* Type of this note. */
+} Elf_Note;
+
+/*
+ * The header for GNU-style hash sections.
+ */
+
+typedef struct {
+ uint32_t gh_nbuckets; /* Number of hash buckets. */
+ uint32_t gh_symndx; /* First visible symbol in .dynsym. */
+ uint32_t gh_maskwords; /* #maskwords used in bloom filter. */
+ uint32_t gh_shift2; /* Bloom filter shift count. */
+} Elf_GNU_Hash_Header;
+
+/* Indexes into the e_ident array. Keep synced with
+ http://www.sco.com/developers/gabi/latest/ch4.eheader.html */
+#define EI_MAG0 0 /* Magic number, byte 0. */
+#define EI_MAG1 1 /* Magic number, byte 1. */
+#define EI_MAG2 2 /* Magic number, byte 2. */
+#define EI_MAG3 3 /* Magic number, byte 3. */
+#define EI_CLASS 4 /* Class of machine. */
+#define EI_DATA 5 /* Data format. */
+#define EI_VERSION 6 /* ELF format version. */
+#define EI_OSABI 7 /* Operating system / ABI identification */
+#define EI_ABIVERSION 8 /* ABI version */
+#define OLD_EI_BRAND 8 /* Start of architecture identification. */
+#define EI_PAD 9 /* Start of padding (per SVR4 ABI). */
+#define EI_NIDENT 16 /* Size of e_ident array. */
+
+/* Values for the magic number bytes. */
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF" /* magic string */
+#define SELFMAG 4 /* magic string size */
+
+/* Values for e_ident[EI_VERSION] and e_version. */
+#define EV_NONE 0
+#define EV_CURRENT 1
+
+/* Values for e_ident[EI_CLASS]. */
+#define ELFCLASSNONE 0 /* Unknown class. */
+#define ELFCLASS32 1 /* 32-bit architecture. */
+#define ELFCLASS64 2 /* 64-bit architecture. */
+
+/* Values for e_ident[EI_DATA]. */
+#define ELFDATANONE 0 /* Unknown data format. */
+#define ELFDATA2LSB 1 /* 2's complement little-endian. */
+#define ELFDATA2MSB 2 /* 2's complement big-endian. */
+
+/* Values for e_ident[EI_OSABI]. */
+#define ELFOSABI_NONE 0 /* UNIX System V ABI */
+#define ELFOSABI_HPUX 1 /* HP-UX operating system */
+#define ELFOSABI_NETBSD 2 /* NetBSD */
+#define ELFOSABI_LINUX 3 /* GNU/Linux */
+#define ELFOSABI_HURD 4 /* GNU/Hurd */
+#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
+#define ELFOSABI_SOLARIS 6 /* Solaris */
+#define ELFOSABI_AIX 7 /* AIX */
+#define ELFOSABI_IRIX 8 /* IRIX */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD */
+#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD */
+#define ELFOSABI_OPENVMS 13 /* Open VMS */
+#define ELFOSABI_NSK 14 /* HP Non-Stop Kernel */
+#define ELFOSABI_AROS 15 /* Amiga Research OS */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+#define ELFOSABI_SYSV ELFOSABI_NONE /* symbol used in old spec */
+#define ELFOSABI_MONTEREY ELFOSABI_AIX /* Monterey */
+
+/* e_ident */
+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
+ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
+ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
+ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
+
+/* Values for e_type. */
+#define ET_NONE 0 /* Unknown type. */
+#define ET_REL 1 /* Relocatable. */
+#define ET_EXEC 2 /* Executable. */
+#define ET_DYN 3 /* Shared object. */
+#define ET_CORE 4 /* Core file. */
+#define ET_LOOS 0xfe00 /* First operating system specific. */
+#define ET_HIOS 0xfeff /* Last operating system-specific. */
+#define ET_LOPROC 0xff00 /* First processor-specific. */
+#define ET_HIPROC 0xffff /* Last processor-specific. */
+
+/* Values for e_machine. */
+#define EM_NONE 0 /* Unknown machine. */
+#define EM_M32 1 /* AT&T WE32100. */
+#define EM_SPARC 2 /* Sun SPARC. */
+#define EM_386 3 /* Intel i386. */
+#define EM_68K 4 /* Motorola 68000. */
+#define EM_88K 5 /* Motorola 88000. */
+#define EM_860 7 /* Intel i860. */
+#define EM_MIPS 8 /* MIPS R3000 Big-Endian only. */
+#define EM_S370 9 /* IBM System/370. */
+#define EM_MIPS_RS3_LE 10 /* MIPS R3000 Little-Endian. */
+#define EM_PARISC 15 /* HP PA-RISC. */
+#define EM_VPP500 17 /* Fujitsu VPP500. */
+#define EM_SPARC32PLUS 18 /* SPARC v8plus. */
+#define EM_960 19 /* Intel 80960. */
+#define EM_PPC 20 /* PowerPC 32-bit. */
+#define EM_PPC64 21 /* PowerPC 64-bit. */
+#define EM_S390 22 /* IBM System/390. */
+#define EM_V800 36 /* NEC V800. */
+#define EM_FR20 37 /* Fujitsu FR20. */
+#define EM_RH32 38 /* TRW RH-32. */
+#define EM_RCE 39 /* Motorola RCE. */
+#define EM_ARM 40 /* ARM. */
+#define EM_SH 42 /* Hitachi SH. */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit. */
+#define EM_TRICORE 44 /* Siemens TriCore embedded processor. */
+#define EM_ARC 45 /* Argonaut RISC Core. */
+#define EM_H8_300 46 /* Hitachi H8/300. */
+#define EM_H8_300H 47 /* Hitachi H8/300H. */
+#define EM_H8S 48 /* Hitachi H8S. */
+#define EM_H8_500 49 /* Hitachi H8/500. */
+#define EM_IA_64 50 /* Intel IA-64 Processor. */
+#define EM_MIPS_X 51 /* Stanford MIPS-X. */
+#define EM_COLDFIRE 52 /* Motorola ColdFire. */
+#define EM_68HC12 53 /* Motorola M68HC12. */
+#define EM_MMA 54 /* Fujitsu MMA. */
+#define EM_PCP 55 /* Siemens PCP. */
+#define EM_NCPU 56 /* Sony nCPU. */
+#define EM_NDR1 57 /* Denso NDR1 microprocessor. */
+#define EM_STARCORE 58 /* Motorola Star*Core processor. */
+#define EM_ME16 59 /* Toyota ME16 processor. */
+#define EM_ST100 60 /* STMicroelectronics ST100 processor. */
+#define EM_TINYJ 61 /* Advanced Logic Corp. TinyJ processor. */
+#define EM_X86_64 62 /* Advanced Micro Devices x86-64 */
+#define EM_AMD64 EM_X86_64 /* Advanced Micro Devices x86-64 (compat) */
+#define EM_PDSP 63 /* Sony DSP Processor. */
+#define EM_FX66 66 /* Siemens FX66 microcontroller. */
+#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16
+ microcontroller. */
+#define EM_ST7 68 /* STmicroelectronics ST7 8-bit
+ microcontroller. */
+#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller. */
+#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller. */
+#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller. */
+#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller. */
+#define EM_SVX 73 /* Silicon Graphics SVx. */
+#define EM_ST19 74 /* STMicroelectronics ST19 8-bit mc. */
+#define EM_VAX 75 /* Digital VAX. */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded
+ processor. */
+#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded
+ processor. */
+#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor. */
+#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor. */
+#define EM_MMIX 80 /* Donald Knuth's educational 64-bit proc. */
+#define EM_HUANY 81 /* Harvard University machine-independent
+ object files. */
+#define EM_PRISM 82 /* SiTera Prism. */
+#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller. */
+#define EM_FR30 84 /* Fujitsu FR30. */
+#define EM_D10V 85 /* Mitsubishi D10V. */
+#define EM_D30V 86 /* Mitsubishi D30V. */
+#define EM_V850 87 /* NEC v850. */
+#define EM_M32R 88 /* Mitsubishi M32R. */
+#define EM_MN10300 89 /* Matsushita MN10300. */
+#define EM_MN10200 90 /* Matsushita MN10200. */
+#define EM_PJ 91 /* picoJava. */
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor. */
+#define EM_ARC_A5 93 /* ARC Cores Tangent-A5. */
+#define EM_XTENSA 94 /* Tensilica Xtensa Architecture. */
+#define EM_VIDEOCORE 95 /* Alphamosaic VideoCore processor. */
+#define EM_TMM_GPP 96 /* Thompson Multimedia General Purpose
+ Processor. */
+#define EM_NS32K 97 /* National Semiconductor 32000 series. */
+#define EM_TPC 98 /* Tenor Network TPC processor. */
+#define EM_SNP1K 99 /* Trebia SNP 1000 processor. */
+#define EM_ST200 100 /* STMicroelectronics ST200 microcontroller. */
+#define EM_IP2K 101 /* Ubicom IP2xxx microcontroller family. */
+#define EM_MAX 102 /* MAX Processor. */
+#define EM_CR 103 /* National Semiconductor CompactRISC
+ microprocessor. */
+#define EM_F2MC16 104 /* Fujitsu F2MC16. */
+#define EM_MSP430 105 /* Texas Instruments embedded microcontroller
+ msp430. */
+#define EM_BLACKFIN 106 /* Analog Devices Blackfin (DSP) processor. */
+#define EM_SE_C33 107 /* S1C33 Family of Seiko Epson processors. */
+#define EM_SEP 108 /* Sharp embedded microprocessor. */
+#define EM_ARCA 109 /* Arca RISC Microprocessor. */
+#define EM_UNICORE 110 /* Microprocessor series from PKU-Unity Ltd.
+ and MPRC of Peking University */
+#define EM_AARCH64 183 /* AArch64 (64-bit ARM) */
+
+/* Non-standard or deprecated. */
+#define EM_486 6 /* Intel i486. */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
+#define EM_ALPHA_STD 41 /* Digital Alpha (standard value). */
+#define EM_ALPHA 0x9026 /* Alpha (written in the absence of an ABI) */
+
+/* e_flags for EM_ARM */
+#define EF_ARM_ABI_VERSION 0x05000000 /* ABI version 5 */
+#define EF_ARM_ABIMASK 0xFF000000
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_ABI_FLOAT_HARD 0x00000400 /* ABI version 5 and later */
+#define EF_ARM_ABI_FLOAT_SOFT 0x00000200 /* ABI version 5 and later */
+
+/* Special section indexes. */
+#define SHN_UNDEF 0 /* Undefined, missing, irrelevant. */
+#define SHN_LORESERVE 0xff00 /* First of reserved range. */
+#define SHN_LOPROC 0xff00 /* First processor-specific. */
+#define SHN_HIPROC 0xff1f /* Last processor-specific. */
+#define SHN_LOOS 0xff20 /* First operating system-specific. */
+#define SHN_HIOS 0xff3f /* Last operating system-specific. */
+#define SHN_ABS 0xfff1 /* Absolute values. */
+#define SHN_COMMON 0xfff2 /* Common data. */
+#define SHN_XINDEX 0xffff /* Escape -- index stored elsewhere. */
+#define SHN_HIRESERVE 0xffff /* Last of reserved range. */
+
+/* sh_type */
+#define SHT_NULL 0 /* inactive */
+#define SHT_PROGBITS 1 /* program defined information */
+#define SHT_SYMTAB 2 /* symbol table section */
+#define SHT_STRTAB 3 /* string table section */
+#define SHT_RELA 4 /* relocation section with addends */
+#define SHT_HASH 5 /* symbol hash table section */
+#define SHT_DYNAMIC 6 /* dynamic section */
+#define SHT_NOTE 7 /* note section */
+#define SHT_NOBITS 8 /* no space section */
+#define SHT_REL 9 /* relocation section - no addends */
+#define SHT_SHLIB 10 /* reserved - purpose unknown */
+#define SHT_DYNSYM 11 /* dynamic symbol table section */
+#define SHT_INIT_ARRAY 14 /* Initialization function pointers. */
+#define SHT_FINI_ARRAY 15 /* Termination function pointers. */
+#define SHT_PREINIT_ARRAY 16 /* Pre-initialization function ptrs. */
+#define SHT_GROUP 17 /* Section group. */
+#define SHT_SYMTAB_SHNDX 18 /* Section indexes (see SHN_XINDEX). */
+#define SHT_LOOS 0x60000000 /* First of OS specific semantics */
+#define SHT_LOSUNW 0x6ffffff4
+#define SHT_SUNW_dof 0x6ffffff4
+#define SHT_SUNW_cap 0x6ffffff5
+#define SHT_SUNW_SIGNATURE 0x6ffffff6
+#define SHT_GNU_HASH 0x6ffffff6
+#define SHT_GNU_LIBLIST 0x6ffffff7
+#define SHT_SUNW_ANNOTATE 0x6ffffff7
+#define SHT_SUNW_DEBUGSTR 0x6ffffff8
+#define SHT_SUNW_DEBUG 0x6ffffff9
+#define SHT_SUNW_move 0x6ffffffa
+#define SHT_SUNW_COMDAT 0x6ffffffb
+#define SHT_SUNW_syminfo 0x6ffffffc
+#define SHT_SUNW_verdef 0x6ffffffd
+#define SHT_GNU_verdef 0x6ffffffd /* Symbol versions provided */
+#define SHT_SUNW_verneed 0x6ffffffe
+#define SHT_GNU_verneed 0x6ffffffe /* Symbol versions required */
+#define SHT_SUNW_versym 0x6fffffff
+#define SHT_GNU_versym 0x6fffffff /* Symbol version table */
+#define SHT_HISUNW 0x6fffffff
+#define SHT_HIOS 0x6fffffff /* Last of OS specific semantics */
+#define SHT_LOPROC 0x70000000 /* reserved range for processor */
+#define SHT_AMD64_UNWIND 0x70000001 /* unwind information */
+#define SHT_ARM_EXIDX 0x70000001 /* Exception index table. */
+#define SHT_ARM_PREEMPTMAP 0x70000002 /* BPABI DLL dynamic linking
+ pre-emption map. */
+#define SHT_ARM_ATTRIBUTES 0x70000003 /* Object file compatibility
+ attributes. */
+#define SHT_ARM_DEBUGOVERLAY 0x70000004 /* See DBGOVL for details. */
+#define SHT_ARM_OVERLAYSECTION 0x70000005 /* See DBGOVL for details. */
+#define SHT_MIPS_REGINFO 0x70000006
+#define SHT_MIPS_OPTIONS 0x7000000d
+#define SHT_MIPS_DWARF 0x7000001e /* MIPS gcc uses MIPS_DWARF */
+#define SHT_HIPROC 0x7fffffff /* specific section header types */
+#define SHT_LOUSER 0x80000000 /* reserved range for application */
+#define SHT_HIUSER 0xffffffff /* specific indexes */
+
+/* Flags for sh_flags. */
+#define SHF_WRITE 0x1 /* Section contains writable data. */
+#define SHF_ALLOC 0x2 /* Section occupies memory. */
+#define SHF_EXECINSTR 0x4 /* Section contains instructions. */
+#define SHF_MERGE 0x10 /* Section may be merged. */
+#define SHF_STRINGS 0x20 /* Section contains strings. */
+#define SHF_INFO_LINK 0x40 /* sh_info holds section index. */
+#define SHF_LINK_ORDER 0x80 /* Special ordering requirements. */
+#define SHF_OS_NONCONFORMING 0x100 /* OS-specific processing required. */
+#define SHF_GROUP 0x200 /* Member of section group. */
+#define SHF_TLS 0x400 /* Section contains TLS data. */
+#define SHF_MASKOS 0x0ff00000 /* OS-specific semantics. */
+#define SHF_MASKPROC 0xf0000000 /* Processor-specific semantics. */
+
+/* Values for p_type. */
+#define PT_NULL 0 /* Unused entry. */
+#define PT_LOAD 1 /* Loadable segment. */
+#define PT_DYNAMIC 2 /* Dynamic linking information segment. */
+#define PT_INTERP 3 /* Pathname of interpreter. */
+#define PT_NOTE 4 /* Auxiliary information. */
+#define PT_SHLIB 5 /* Reserved (not used). */
+#define PT_PHDR 6 /* Location of program header itself. */
+#define PT_TLS 7 /* Thread local storage segment */
+#define PT_LOOS 0x60000000 /* First OS-specific. */
+#define PT_SUNW_UNWIND 0x6464e550 /* amd64 UNWIND program header */
+#define PT_GNU_EH_FRAME 0x6474e550
+#define PT_GNU_STACK 0x6474e551
+#define PT_GNU_RELRO 0x6474e552
+#define PT_LOSUNW 0x6ffffffa
+#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
+#define PT_SUNWSTACK 0x6ffffffb /* describes the stack segment */
+#define PT_SUNWDTRACE 0x6ffffffc /* private */
+#define PT_SUNWCAP 0x6ffffffd /* hard/soft capabilities segment */
+#define PT_HISUNW 0x6fffffff
+#define PT_HIOS 0x6fffffff /* Last OS-specific. */
+#define PT_LOPROC 0x70000000 /* First processor-specific type. */
+#define PT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for p_flags. */
+#define PF_X 0x1 /* Executable. */
+#define PF_W 0x2 /* Writable. */
+#define PF_R 0x4 /* Readable. */
+#define PF_MASKOS 0x0ff00000 /* Operating system-specific. */
+#define PF_MASKPROC 0xf0000000 /* Processor-specific. */
+
+/* Extended program header index. */
+#define PN_XNUM 0xffff
+
+/* Values for d_tag. */
+#define DT_NULL 0 /* Terminating entry. */
+#define DT_NEEDED 1 /* String table offset of a needed shared
+ library. */
+#define DT_PLTRELSZ 2 /* Total size in bytes of PLT relocations. */
+#define DT_PLTGOT 3 /* Processor-dependent address. */
+#define DT_HASH 4 /* Address of symbol hash table. */
+#define DT_STRTAB 5 /* Address of string table. */
+#define DT_SYMTAB 6 /* Address of symbol table. */
+#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */
+#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */
+#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */
+#define DT_STRSZ 10 /* Size of string table. */
+#define DT_SYMENT 11 /* Size of each symbol table entry. */
+#define DT_INIT 12 /* Address of initialization function. */
+#define DT_FINI 13 /* Address of finalization function. */
+#define DT_SONAME 14 /* String table offset of shared object
+ name. */
+#define DT_RPATH 15 /* String table offset of library path. [sup] */
+#define DT_SYMBOLIC 16 /* Indicates "symbolic" linking. [sup] */
+#define DT_REL 17 /* Address of ElfNN_Rel relocations. */
+#define DT_RELSZ 18 /* Total size of ElfNN_Rel relocations. */
+#define DT_RELENT 19 /* Size of each ElfNN_Rel relocation. */
+#define DT_PLTREL 20 /* Type of relocation used for PLT. */
+#define DT_DEBUG 21 /* Reserved (not used). */
+#define DT_TEXTREL 22 /* Indicates there may be relocations in
+ non-writable segments. [sup] */
+#define DT_JMPREL 23 /* Address of PLT relocations. */
+#define DT_BIND_NOW 24 /* [sup] */
+#define DT_INIT_ARRAY 25 /* Address of the array of pointers to
+ initialization functions */
+#define DT_FINI_ARRAY 26 /* Address of the array of pointers to
+ termination functions */
+#define DT_INIT_ARRAYSZ 27 /* Size in bytes of the array of
+ initialization functions. */
+#define DT_FINI_ARRAYSZ 28 /* Size in bytes of the array of
+ termination functions. */
+#define DT_RUNPATH 29 /* String table offset of a null-terminated
+ library search path string. */
+#define DT_FLAGS 30 /* Object specific flag values. */
+#define DT_ENCODING 32 /* Values greater than or equal to DT_ENCODING
+ and less than DT_LOOS follow the rules for
+ the interpretation of the d_un union
+ as follows: even == 'd_ptr', odd == 'd_val'
+ or none */
+#define DT_PREINIT_ARRAY 32 /* Address of the array of pointers to
+ pre-initialization functions. */
+#define DT_PREINIT_ARRAYSZ 33 /* Size in bytes of the array of
+ pre-initialization functions. */
+#define DT_MAXPOSTAGS 34 /* number of positive tags */
+#define DT_LOOS 0x6000000d /* First OS-specific */
+#define DT_SUNW_AUXILIARY 0x6000000d /* symbol auxiliary name */
+#define DT_SUNW_RTLDINF 0x6000000e /* ld.so.1 info (private) */
+#define DT_SUNW_FILTER 0x6000000f /* symbol filter name */
+#define DT_SUNW_CAP 0x60000010 /* hardware/software */
+#define DT_HIOS 0x6ffff000 /* Last OS-specific */
+
+/*
+ * DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
+ * Dyn.d_un.d_val field of the Elf*_Dyn structure.
+ */
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_CHECKSUM 0x6ffffdf8 /* elf checksum */
+#define DT_PLTPADSZ 0x6ffffdf9 /* pltpadding size */
+#define DT_MOVEENT 0x6ffffdfa /* move table entry size */
+#define DT_MOVESZ 0x6ffffdfb /* move table size */
+#define DT_FEATURE_1 0x6ffffdfc /* feature holder */
+#define DT_POSFLAG_1 0x6ffffdfd /* flags for DT_* entries, affecting */
+ /* the following DT_* entry. */
+ /* See DF_P1_* definitions */
+#define DT_SYMINSZ 0x6ffffdfe /* syminfo table size (in bytes) */
+#define DT_SYMINENT 0x6ffffdff /* syminfo entry size (in bytes) */
+#define DT_VALRNGHI 0x6ffffdff
+
+/*
+ * DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
+ * Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
+ *
+ * If any adjustment is made to the ELF object after it has been
+ * built, these entries will need to be adjusted.
+ */
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table */
+#define DT_CONFIG 0x6ffffefa /* configuration information */
+#define DT_DEPAUDIT 0x6ffffefb /* dependency auditing */
+#define DT_AUDIT 0x6ffffefc /* object auditing */
+#define DT_PLTPAD 0x6ffffefd /* pltpadding (sparcv9) */
+#define DT_MOVETAB 0x6ffffefe /* move table */
+#define DT_SYMINFO 0x6ffffeff /* syminfo table */
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0 /* Address of versym section. */
+#define DT_RELACOUNT 0x6ffffff9 /* number of RELATIVE relocations */
+#define DT_RELCOUNT 0x6ffffffa /* number of RELATIVE relocations */
+#define DT_FLAGS_1 0x6ffffffb /* state flags - see DF_1_* defs */
+#define DT_VERDEF 0x6ffffffc /* Address of verdef section. */
+#define DT_VERDEFNUM 0x6ffffffd /* Number of elems in verdef section */
+#define DT_VERNEED 0x6ffffffe /* Address of verneed section. */
+#define DT_VERNEEDNUM 0x6fffffff /* Number of elems in verneed section */
+
+#define DT_LOPROC 0x70000000 /* First processor-specific type. */
+#define DT_DEPRECATED_SPARC_REGISTER 0x7000001
+#define DT_AUXILIARY 0x7ffffffd /* shared library auxiliary name */
+#define DT_USED 0x7ffffffe /* ignored - same as needed */
+#define DT_FILTER 0x7fffffff /* shared library filter name */
+#define DT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for DT_FLAGS */
+#define DF_ORIGIN 0x0001 /* Indicates that the object being loaded may
+ make reference to the $ORIGIN substitution
+ string */
+#define DF_SYMBOLIC 0x0002 /* Indicates "symbolic" linking. */
+#define DF_TEXTREL 0x0004 /* Indicates there may be relocations in
+ non-writable segments. */
+#define DF_BIND_NOW 0x0008 /* Indicates that the dynamic linker should
+ process all relocations for the object
+ containing this entry before transferring
+ control to the program. */
+#define DF_STATIC_TLS 0x0010 /* Indicates that the shared object or
+ executable contains code using a static
+ thread-local storage scheme. */
+
+/* Values for DT_FLAGS_1 */
+#define DF_1_BIND_NOW 0x00000001 /* Same as DF_BIND_NOW */
+#define DF_1_GLOBAL 0x00000002 /* Set the RTLD_GLOBAL for object */
+#define DF_1_NODELETE 0x00000008 /* Set the RTLD_NODELETE for object */
+#define DF_1_LOADFLTR 0x00000010 /* Immediate loading of filtees */
+#define DF_1_NOOPEN 0x00000040 /* Do not allow loading on dlopen() */
+#define DF_1_ORIGIN 0x00000080 /* Process $ORIGIN */
+#define DF_1_INTERPOSE 0x00000400 /* Interpose all objects but main */
+#define DF_1_NODEFLIB 0x00000800 /* Do not search default paths */
+
+/* Values for n_type. Used in core files. */
+#define NT_PRSTATUS 1 /* Process status. */
+#define NT_FPREGSET 2 /* Floating point registers. */
+#define NT_PRPSINFO 3 /* Process state info. */
+#define NT_THRMISC 7 /* Thread miscellaneous info. */
+#define NT_PROCSTAT_PROC 8 /* Procstat proc data. */
+#define NT_PROCSTAT_FILES 9 /* Procstat files data. */
+#define NT_PROCSTAT_VMMAP 10 /* Procstat vmmap data. */
+#define NT_PROCSTAT_GROUPS 11 /* Procstat groups data. */
+#define NT_PROCSTAT_UMASK 12 /* Procstat umask data. */
+#define NT_PROCSTAT_RLIMIT 13 /* Procstat rlimit data. */
+#define NT_PROCSTAT_OSREL 14 /* Procstat osreldate data. */
+#define NT_PROCSTAT_PSSTRINGS 15 /* Procstat ps_strings data. */
+#define NT_PROCSTAT_AUXV 16 /* Procstat auxv data. */
+
+/* Symbol Binding - ELFNN_ST_BIND - st_info */
+#define STB_LOCAL 0 /* Local symbol */
+#define STB_GLOBAL 1 /* Global symbol */
+#define STB_WEAK 2 /* like global - lower precedence */
+#define STB_LOOS 10 /* Reserved range for operating system */
+#define STB_HIOS 12 /* specific semantics. */
+#define STB_LOPROC 13 /* reserved range for processor */
+#define STB_HIPROC 15 /* specific semantics. */
+
+/* Symbol type - ELFNN_ST_TYPE - st_info */
+#define STT_NOTYPE 0 /* Unspecified type. */
+#define STT_OBJECT 1 /* Data object. */
+#define STT_FUNC 2 /* Function. */
+#define STT_SECTION 3 /* Section. */
+#define STT_FILE 4 /* Source file. */
+#define STT_COMMON 5 /* Uninitialized common block. */
+#define STT_TLS 6 /* TLS object. */
+#define STT_NUM 7
+#define STT_LOOS 10 /* Reserved range for operating system */
+#define STT_GNU_IFUNC 10
+#define STT_HIOS 12 /* specific semantics. */
+#define STT_LOPROC 13 /* reserved range for processor */
+#define STT_HIPROC 15 /* specific semantics. */
+
+/* Symbol visibility - ELFNN_ST_VISIBILITY - st_other */
+#define STV_DEFAULT 0x0 /* Default visibility (see binding). */
+#define STV_INTERNAL 0x1 /* Special meaning in relocatable objects. */
+#define STV_HIDDEN 0x2 /* Not visible. */
+#define STV_PROTECTED 0x3 /* Visible but not preemptible. */
+#define STV_EXPORTED 0x4
+#define STV_SINGLETON 0x5
+#define STV_ELIMINATE 0x6
+
+/* Special symbol table indexes. */
+#define STN_UNDEF 0 /* Undefined symbol index. */
+
+/* Symbol versioning flags. */
+#define VER_DEF_CURRENT 1
+#define VER_DEF_IDX(x) VER_NDX(x)
+
+#define VER_FLG_BASE 0x01
+#define VER_FLG_WEAK 0x02
+
+#define VER_NEED_CURRENT 1
+#define VER_NEED_WEAK (1u << 15)
+#define VER_NEED_HIDDEN VER_NDX_HIDDEN
+#define VER_NEED_IDX(x) VER_NDX(x)
+
+#define VER_NDX_LOCAL 0
+#define VER_NDX_GLOBAL 1
+#define VER_NDX_GIVEN 2
+
+#define VER_NDX_HIDDEN (1u << 15)
+#define VER_NDX(x) ((x) & ~(1u << 15))
+
+#define CA_SUNW_NULL 0
+#define CA_SUNW_HW_1 1 /* first hardware capabilities entry */
+#define CA_SUNW_SF_1 2 /* first software capabilities entry */
+
+/*
+ * Syminfo flag values
+ */
+#define SYMINFO_FLG_DIRECT 0x0001 /* symbol ref has direct association */
+ /* to object containing defn. */
+#define SYMINFO_FLG_PASSTHRU 0x0002 /* ignored - see SYMINFO_FLG_FILTER */
+#define SYMINFO_FLG_COPY 0x0004 /* symbol is a copy-reloc */
+#define SYMINFO_FLG_LAZYLOAD 0x0008 /* object containing defn should be */
+ /* lazily-loaded */
+#define SYMINFO_FLG_DIRECTBIND 0x0010 /* ref should be bound directly to */
+ /* object containing defn. */
+#define SYMINFO_FLG_NOEXTDIRECT 0x0020 /* don't let an external reference */
+ /* directly bind to this symbol */
+#define SYMINFO_FLG_FILTER 0x0002 /* symbol ref is associated to a */
+#define SYMINFO_FLG_AUXILIARY 0x0040 /* standard or auxiliary filter */
+
+/*
+ * Syminfo.si_boundto values.
+ */
+#define SYMINFO_BT_SELF 0xffff /* symbol bound to self */
+#define SYMINFO_BT_PARENT 0xfffe /* symbol bound to parent */
+#define SYMINFO_BT_NONE 0xfffd /* no special symbol binding */
+#define SYMINFO_BT_EXTERN 0xfffc /* symbol defined as external */
+#define SYMINFO_BT_LOWRESERVE 0xff00 /* beginning of reserved entries */
+
+/*
+ * Syminfo version values.
+ */
+#define SYMINFO_NONE 0 /* Syminfo version */
+#define SYMINFO_CURRENT 1
+#define SYMINFO_NUM 2
+
+/*
+ * Relocation types.
+ *
+ * All machine architectures are defined here to allow tools on one to
+ * handle others.
+ */
+
+#define R_386_NONE 0 /* No relocation. */
+#define R_386_32 1 /* Add symbol value. */
+#define R_386_PC32 2 /* Add PC-relative symbol value. */
+#define R_386_GOT32 3 /* Add PC-relative GOT offset. */
+#define R_386_PLT32 4 /* Add PC-relative PLT offset. */
+#define R_386_COPY 5 /* Copy data from shared object. */
+#define R_386_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_386_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_386_RELATIVE 8 /* Add load address of shared object. */
+#define R_386_GOTOFF 9 /* Add GOT-relative symbol address. */
+#define R_386_GOTPC 10 /* Add PC-relative GOT table address. */
+#define R_386_TLS_TPOFF 14 /* Negative offset in static TLS block */
+#define R_386_TLS_IE 15 /* Absolute address of GOT for -ve static TLS */
+#define R_386_TLS_GOTIE 16 /* GOT entry for negative static TLS block */
+#define R_386_TLS_LE 17 /* Negative offset relative to static TLS */
+#define R_386_TLS_GD 18 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_LDM 19 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_GD_32 24 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_GD_PUSH 25 /* pushl instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_CALL 26 /* call instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_POP 27 /* popl instruction for Sun ABI GD sequence */
+#define R_386_TLS_LDM_32 28 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_LDM_PUSH 29 /* pushl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_CALL 30 /* call instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_POP 31 /* popl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDO_32 32 /* 32 bit offset from start of TLS block */
+#define R_386_TLS_IE_32 33 /* 32 bit offset to GOT static TLS offset entry */
+#define R_386_TLS_LE_32 34 /* 32 bit offset within static TLS block */
+#define R_386_TLS_DTPMOD32 35 /* GOT entry containing TLS index */
+#define R_386_TLS_DTPOFF32 36 /* GOT entry containing TLS offset */
+#define R_386_TLS_TPOFF32 37 /* GOT entry of -ve static TLS offset */
+#define R_386_IRELATIVE 42 /* PLT entry resolved indirectly at runtime */
+
+#define R_AARCH64_RELATIVE 1027
+
+#define R_ARM_NONE 0 /* No relocation. */
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5
+#define R_ARM_ABS12 6
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+/* TLS relocations */
+#define R_ARM_TLS_DTPMOD32 17 /* ID of module containing symbol */
+#define R_ARM_TLS_DTPOFF32 18 /* Offset in TLS block */
+#define R_ARM_TLS_TPOFF32 19 /* Offset in static TLS block */
+#define R_ARM_COPY 20 /* Copy data from shared object. */
+#define R_ARM_GLOB_DAT 21 /* Set GOT entry to data address. */
+#define R_ARM_JUMP_SLOT 22 /* Set GOT entry to code address. */
+#define R_ARM_RELATIVE 23 /* Add load address of shared object. */
+#define R_ARM_GOTOFF 24 /* Add GOT-relative symbol address. */
+#define R_ARM_GOTPC 25 /* Add PC-relative GOT table address. */
+#define R_ARM_GOT32 26 /* Add PC-relative GOT offset. */
+#define R_ARM_PLT32 27 /* Add PC-relative PLT offset. */
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS32 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+
+/* Name Value Field Calculation */
+#define R_IA_64_NONE 0 /* None */
+#define R_IA_64_IMM14 0x21 /* immediate14 S + A */
+#define R_IA_64_IMM22 0x22 /* immediate22 S + A */
+#define R_IA_64_IMM64 0x23 /* immediate64 S + A */
+#define R_IA_64_DIR32MSB 0x24 /* word32 MSB S + A */
+#define R_IA_64_DIR32LSB 0x25 /* word32 LSB S + A */
+#define R_IA_64_DIR64MSB 0x26 /* word64 MSB S + A */
+#define R_IA_64_DIR64LSB 0x27 /* word64 LSB S + A */
+#define R_IA_64_GPREL22 0x2a /* immediate22 @gprel(S + A) */
+#define R_IA_64_GPREL64I 0x2b /* immediate64 @gprel(S + A) */
+#define R_IA_64_GPREL32MSB 0x2c /* word32 MSB @gprel(S + A) */
+#define R_IA_64_GPREL32LSB 0x2d /* word32 LSB @gprel(S + A) */
+#define R_IA_64_GPREL64MSB 0x2e /* word64 MSB @gprel(S + A) */
+#define R_IA_64_GPREL64LSB 0x2f /* word64 LSB @gprel(S + A) */
+#define R_IA_64_LTOFF22 0x32 /* immediate22 @ltoff(S + A) */
+#define R_IA_64_LTOFF64I 0x33 /* immediate64 @ltoff(S + A) */
+#define R_IA_64_PLTOFF22 0x3a /* immediate22 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64I 0x3b /* immediate64 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64MSB 0x3e /* word64 MSB @pltoff(S + A) */
+#define R_IA_64_PLTOFF64LSB 0x3f /* word64 LSB @pltoff(S + A) */
+#define R_IA_64_FPTR64I 0x43 /* immediate64 @fptr(S + A) */
+#define R_IA_64_FPTR32MSB 0x44 /* word32 MSB @fptr(S + A) */
+#define R_IA_64_FPTR32LSB 0x45 /* word32 LSB @fptr(S + A) */
+#define R_IA_64_FPTR64MSB 0x46 /* word64 MSB @fptr(S + A) */
+#define R_IA_64_FPTR64LSB 0x47 /* word64 LSB @fptr(S + A) */
+#define R_IA_64_PCREL60B 0x48 /* immediate60 form1 S + A - P */
+#define R_IA_64_PCREL21B 0x49 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL21M 0x4a /* immediate21 form2 S + A - P */
+#define R_IA_64_PCREL21F 0x4b /* immediate21 form3 S + A - P */
+#define R_IA_64_PCREL32MSB 0x4c /* word32 MSB S + A - P */
+#define R_IA_64_PCREL32LSB 0x4d /* word32 LSB S + A - P */
+#define R_IA_64_PCREL64MSB 0x4e /* word64 MSB S + A - P */
+#define R_IA_64_PCREL64LSB 0x4f /* word64 LSB S + A - P */
+#define R_IA_64_LTOFF_FPTR22 0x52 /* immediate22 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64I 0x53 /* immediate64 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32MSB 0x54 /* word32 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32LSB 0x55 /* word32 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64MSB 0x56 /* word64 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64LSB 0x57 /* word64 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_SEGREL32MSB 0x5c /* word32 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL32LSB 0x5d /* word32 LSB @segrel(S + A) */
+#define R_IA_64_SEGREL64MSB 0x5e /* word64 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL64LSB 0x5f /* word64 LSB @segrel(S + A) */
+#define R_IA_64_SECREL32MSB 0x64 /* word32 MSB @secrel(S + A) */
+#define R_IA_64_SECREL32LSB 0x65 /* word32 LSB @secrel(S + A) */
+#define R_IA_64_SECREL64MSB 0x66 /* word64 MSB @secrel(S + A) */
+#define R_IA_64_SECREL64LSB 0x67 /* word64 LSB @secrel(S + A) */
+#define R_IA_64_REL32MSB 0x6c /* word32 MSB BD + A */
+#define R_IA_64_REL32LSB 0x6d /* word32 LSB BD + A */
+#define R_IA_64_REL64MSB 0x6e /* word64 MSB BD + A */
+#define R_IA_64_REL64LSB 0x6f /* word64 LSB BD + A */
+#define R_IA_64_LTV32MSB 0x74 /* word32 MSB S + A */
+#define R_IA_64_LTV32LSB 0x75 /* word32 LSB S + A */
+#define R_IA_64_LTV64MSB 0x76 /* word64 MSB S + A */
+#define R_IA_64_LTV64LSB 0x77 /* word64 LSB S + A */
+#define R_IA_64_PCREL21BI 0x79 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL22 0x7a /* immediate22 S + A - P */
+#define R_IA_64_PCREL64I 0x7b /* immediate64 S + A - P */
+#define R_IA_64_IPLTMSB 0x80 /* function descriptor MSB special */
+#define R_IA_64_IPLTLSB 0x81 /* function descriptor LSB special */
+#define R_IA_64_SUB 0x85 /* immediate64 A - S */
+#define R_IA_64_LTOFF22X 0x86 /* immediate22 special */
+#define R_IA_64_LDXMOV 0x87 /* immediate22 special */
+#define R_IA_64_TPREL14 0x91 /* imm14 @tprel(S + A) */
+#define R_IA_64_TPREL22 0x92 /* imm22 @tprel(S + A) */
+#define R_IA_64_TPREL64I 0x93 /* imm64 @tprel(S + A) */
+#define R_IA_64_TPREL64MSB 0x96 /* word64 MSB @tprel(S + A) */
+#define R_IA_64_TPREL64LSB 0x97 /* word64 LSB @tprel(S + A) */
+#define R_IA_64_LTOFF_TPREL22 0x9a /* imm22 @ltoff(@tprel(S+A)) */
+#define R_IA_64_DTPMOD64MSB 0xa6 /* word64 MSB @dtpmod(S + A) */
+#define R_IA_64_DTPMOD64LSB 0xa7 /* word64 LSB @dtpmod(S + A) */
+#define R_IA_64_LTOFF_DTPMOD22 0xaa /* imm22 @ltoff(@dtpmod(S+A)) */
+#define R_IA_64_DTPREL14 0xb1 /* imm14 @dtprel(S + A) */
+#define R_IA_64_DTPREL22 0xb2 /* imm22 @dtprel(S + A) */
+#define R_IA_64_DTPREL64I 0xb3 /* imm64 @dtprel(S + A) */
+#define R_IA_64_DTPREL32MSB 0xb4 /* word32 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL32LSB 0xb5 /* word32 LSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64MSB 0xb6 /* word64 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64LSB 0xb7 /* word64 LSB @dtprel(S + A) */
+#define R_IA_64_LTOFF_DTPREL22 0xba /* imm22 @ltoff(@dtprel(S+A)) */
+
+#define R_MIPS_NONE 0 /* No reloc */
+#define R_MIPS_16 1 /* Direct 16 bit */
+#define R_MIPS_32 2 /* Direct 32 bit */
+#define R_MIPS_REL32 3 /* PC relative 32 bit */
+#define R_MIPS_26 4 /* Direct 26 bit shifted */
+#define R_MIPS_HI16 5 /* High 16 bit */
+#define R_MIPS_LO16 6 /* Low 16 bit */
+#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
+#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
+#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
+#define R_MIPS_PC16 10 /* PC relative 16 bit */
+#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
+#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
+#define R_MIPS_64 18 /* Direct 64 bit */
+#define R_MIPS_GOTHI16 21 /* GOT HI 16 bit */
+#define R_MIPS_GOTLO16 22 /* GOT LO 16 bit */
+#define R_MIPS_CALLHI16 30 /* upper 16 bit GOT entry for function */
+#define R_MIPS_CALLLO16 31 /* lower 16 bit GOT entry for function */
+
+#define R_PPC_NONE 0 /* No relocation. */
+#define R_PPC_ADDR32 1
+#define R_PPC_ADDR24 2
+#define R_PPC_ADDR16 3
+#define R_PPC_ADDR16_LO 4
+#define R_PPC_ADDR16_HI 5
+#define R_PPC_ADDR16_HA 6
+#define R_PPC_ADDR14 7
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10
+#define R_PPC_REL14 11
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/*
+ * 64-bit relocations
+ */
+#define R_PPC64_ADDR64 38
+#define R_PPC64_ADDR16_HIGHER 39
+#define R_PPC64_ADDR16_HIGHERA 40
+#define R_PPC64_ADDR16_HIGHEST 41
+#define R_PPC64_ADDR16_HIGHESTA 42
+#define R_PPC64_UADDR64 43
+#define R_PPC64_REL64 44
+#define R_PPC64_PLT64 45
+#define R_PPC64_PLTREL64 46
+#define R_PPC64_TOC16 47
+#define R_PPC64_TOC16_LO 48
+#define R_PPC64_TOC16_HI 49
+#define R_PPC64_TOC16_HA 50
+#define R_PPC64_TOC 51
+#define R_PPC64_DTPMOD64 68
+#define R_PPC64_TPREL64 73
+#define R_PPC64_DTPREL64 78
+
+/*
+ * TLS relocations
+ */
+#define R_PPC_TLS 67
+#define R_PPC_DTPMOD32 68
+#define R_PPC_TPREL16 69
+#define R_PPC_TPREL16_LO 70
+#define R_PPC_TPREL16_HI 71
+#define R_PPC_TPREL16_HA 72
+#define R_PPC_TPREL32 73
+#define R_PPC_DTPREL16 74
+#define R_PPC_DTPREL16_LO 75
+#define R_PPC_DTPREL16_HI 76
+#define R_PPC_DTPREL16_HA 77
+#define R_PPC_DTPREL32 78
+#define R_PPC_GOT_TLSGD16 79
+#define R_PPC_GOT_TLSGD16_LO 80
+#define R_PPC_GOT_TLSGD16_HI 81
+#define R_PPC_GOT_TLSGD16_HA 82
+#define R_PPC_GOT_TLSLD16 83
+#define R_PPC_GOT_TLSLD16_LO 84
+#define R_PPC_GOT_TLSLD16_HI 85
+#define R_PPC_GOT_TLSLD16_HA 86
+#define R_PPC_GOT_TPREL16 87
+#define R_PPC_GOT_TPREL16_LO 88
+#define R_PPC_GOT_TPREL16_HI 89
+#define R_PPC_GOT_TPREL16_HA 90
+
+/*
+ * The remaining relocs are from the Embedded ELF ABI, and are not in the
+ * SVR4 ELF ABI.
+ */
+
+#define R_PPC_EMB_NADDR32 101
+#define R_PPC_EMB_NADDR16 102
+#define R_PPC_EMB_NADDR16_LO 103
+#define R_PPC_EMB_NADDR16_HI 104
+#define R_PPC_EMB_NADDR16_HA 105
+#define R_PPC_EMB_SDAI16 106
+#define R_PPC_EMB_SDA2I16 107
+#define R_PPC_EMB_SDA2REL 108
+#define R_PPC_EMB_SDA21 109
+#define R_PPC_EMB_MRKREF 110
+#define R_PPC_EMB_RELSEC16 111
+#define R_PPC_EMB_RELST_LO 112
+#define R_PPC_EMB_RELST_HI 113
+#define R_PPC_EMB_RELST_HA 114
+#define R_PPC_EMB_BIT_FLD 115
+#define R_PPC_EMB_RELSDA 116
+
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_PC_HH22 37
+#define R_SPARC_PC_HM10 38
+#define R_SPARC_PC_LM22 39
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_GLOB_JMP 42
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
+#define R_SPARC_DISP64 46
+#define R_SPARC_PLT64 47
+#define R_SPARC_HIX22 48
+#define R_SPARC_LOX10 49
+#define R_SPARC_H44 50
+#define R_SPARC_M44 51
+#define R_SPARC_L44 52
+#define R_SPARC_REGISTER 53
+#define R_SPARC_UA64 54
+#define R_SPARC_UA16 55
+#define R_SPARC_TLS_GD_HI22 56
+#define R_SPARC_TLS_GD_LO10 57
+#define R_SPARC_TLS_GD_ADD 58
+#define R_SPARC_TLS_GD_CALL 59
+#define R_SPARC_TLS_LDM_HI22 60
+#define R_SPARC_TLS_LDM_LO10 61
+#define R_SPARC_TLS_LDM_ADD 62
+#define R_SPARC_TLS_LDM_CALL 63
+#define R_SPARC_TLS_LDO_HIX22 64
+#define R_SPARC_TLS_LDO_LOX10 65
+#define R_SPARC_TLS_LDO_ADD 66
+#define R_SPARC_TLS_IE_HI22 67
+#define R_SPARC_TLS_IE_LO10 68
+#define R_SPARC_TLS_IE_LD 69
+#define R_SPARC_TLS_IE_LDX 70
+#define R_SPARC_TLS_IE_ADD 71
+#define R_SPARC_TLS_LE_HIX22 72
+#define R_SPARC_TLS_LE_LOX10 73
+#define R_SPARC_TLS_DTPMOD32 74
+#define R_SPARC_TLS_DTPMOD64 75
+#define R_SPARC_TLS_DTPOFF32 76
+#define R_SPARC_TLS_DTPOFF64 77
+#define R_SPARC_TLS_TPOFF32 78
+#define R_SPARC_TLS_TPOFF64 79
+
+#define R_X86_64_NONE 0 /* No relocation. */
+#define R_X86_64_64 1 /* Add 64 bit symbol value. */
+#define R_X86_64_PC32 2 /* PC-relative 32 bit signed sym value. */
+#define R_X86_64_GOT32 3 /* PC-relative 32 bit GOT offset. */
+#define R_X86_64_PLT32 4 /* PC-relative 32 bit PLT offset. */
+#define R_X86_64_COPY 5 /* Copy data from shared object. */
+#define R_X86_64_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_X86_64_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_X86_64_RELATIVE 8 /* Add load address of shared object. */
+#define R_X86_64_GOTPCREL 9 /* Add 32 bit signed pcrel offset to GOT. */
+#define R_X86_64_32 10 /* Add 32 bit zero extended symbol value */
+#define R_X86_64_32S 11 /* Add 32 bit sign extended symbol value */
+#define R_X86_64_16 12 /* Add 16 bit zero extended symbol value */
+#define R_X86_64_PC16 13 /* Add 16 bit sign extended pc relative symbol value */
+#define R_X86_64_8 14 /* Add 8 bit zero extended symbol value */
+#define R_X86_64_PC8 15 /* Add 8 bit sign extended pc relative symbol value */
+#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
+#define R_X86_64_DTPOFF64 17 /* Offset in TLS block */
+#define R_X86_64_TPOFF64 18 /* Offset in static TLS block */
+#define R_X86_64_TLSGD 19 /* PC relative offset to GD GOT entry */
+#define R_X86_64_TLSLD 20 /* PC relative offset to LD GOT entry */
+#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
+#define R_X86_64_GOTTPOFF 22 /* PC relative offset to IE GOT entry */
+#define R_X86_64_TPOFF32 23 /* Offset in static TLS block */
+#define R_X86_64_IRELATIVE 37
+
+
+#endif /* !_SYS_ELF_COMMON_H_ */
diff --git a/core/arch/arm/kernel/elf_load.c b/core/arch/arm/kernel/elf_load.c
new file mode 100644
index 0000000..420ba59
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <types_ext.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <kernel/tee_misc.h>
+#include <tee/tee_cryp_provider.h>
+#include <stdlib.h>
+#include <string.h>
+#include <util.h>
+#include <trace.h>
+#include "elf_load.h"
+#include "elf_common.h"
+#include "elf32.h"
+#include "elf64.h"
+
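+/*
+ * State used while loading a TA ELF. nwdata/nwdata_len is the TA image as
+ * supplied from the normal world and is only read sequentially; next_offs
+ * is the read cursor and hash_ctx/hash_algo hash every byte as it is
+ * read. ta_head holds a copy of .ta_head from the first PT_LOAD segment,
+ * ehdr/phdr/shdr hold copies of the ELF header, program headers and
+ * section headers, and vasize is the total virtual address span needed to
+ * map the TA.
+ */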
+struct elf_load_state {
+ bool is_32bit;
+
+ uint8_t *nwdata;
+ size_t nwdata_len;
+
+ void *hash_ctx;
+ uint32_t hash_algo;
+
+ size_t next_offs;
+
+ void *ta_head;
+ size_t ta_head_size;
+
+ void *ehdr;
+ void *phdr;
+
+ size_t vasize;
+ void *shdr;
+};
+
+/* Replicates the fields we need from Elf{32,64}_Ehdr */
+struct elf_ehdr {
+ size_t e_phoff;
+ size_t e_shoff;
+ uint32_t e_phentsize;
+ uint32_t e_phnum;
+ uint32_t e_shentsize;
+ uint32_t e_shnum;
+};
+
+/* Replicates the fields we need from Elf{32,64}_Phdr */
+struct elf_phdr {
+ uint32_t p_type;
+ uint32_t p_flags;
+ uintptr_t p_vaddr;
+ size_t p_filesz;
+ size_t p_memsz;
+ size_t p_offset;
+};
+
+#ifdef ARM64
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) \
+ do { \
+ if ((state)->is_32bit) { \
+ is_32bit_action; \
+ } else { \
+ is_64bit_action; \
+ } \
+ } while (0)
+#else
+/* No need to assert state->is_32bit since that is caught before this is used */
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) is_32bit_action
+#endif
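+/*
+ * Illustration: DO_ACTION(state, f32(), f64()) expands to just f32() on
+ * pure 32-bit builds; on ARM64 builds it selects f32() or f64() at
+ * runtime based on state->is_32bit.
+ */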
+
+#define COPY_EHDR(dst, src) \
+ do { \
+ (dst)->e_phoff = (src)->e_phoff; \
+ (dst)->e_shoff = (src)->e_shoff; \
+ (dst)->e_phentsize = (src)->e_phentsize; \
+ (dst)->e_phnum = (src)->e_phnum; \
+ (dst)->e_shentsize = (src)->e_shentsize; \
+ (dst)->e_shnum = (src)->e_shnum; \
+ } while (0)
+static void copy_ehdr(struct elf_ehdr *ehdr, struct elf_load_state *state)
+{
+ DO_ACTION(state, COPY_EHDR(ehdr, ((Elf32_Ehdr *)state->ehdr)),
+ COPY_EHDR(ehdr, ((Elf64_Ehdr *)state->ehdr)));
+}
+
+static uint32_t get_shdr_type(struct elf_load_state *state, size_t idx)
+{
+ DO_ACTION(state, return ((Elf32_Shdr *)state->shdr + idx)->sh_type,
+ return ((Elf64_Shdr *)state->shdr + idx)->sh_type);
+}
+
+#define COPY_PHDR(dst, src) \
+ do { \
+ (dst)->p_type = (src)->p_type; \
+ (dst)->p_vaddr = (src)->p_vaddr; \
+ (dst)->p_filesz = (src)->p_filesz; \
+ (dst)->p_memsz = (src)->p_memsz; \
+ (dst)->p_offset = (src)->p_offset; \
+ (dst)->p_flags = (src)->p_flags; \
+ } while (0)
+static void copy_phdr(struct elf_phdr *phdr, struct elf_load_state *state,
+ size_t idx)
+{
+ DO_ACTION(state, COPY_PHDR(phdr, ((Elf32_Phdr *)state->phdr + idx)),
+ COPY_PHDR(phdr, ((Elf64_Phdr *)state->phdr + idx)));
+}
+
+static TEE_Result advance_to(struct elf_load_state *state, size_t offs)
+{
+ TEE_Result res;
+
+ if (offs < state->next_offs)
+ return TEE_ERROR_BAD_STATE;
+ if (offs == state->next_offs)
+ return TEE_SUCCESS;
+
+ if (offs > state->nwdata_len)
+ return TEE_ERROR_SECURITY;
+
+ res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+ state->nwdata + state->next_offs,
+ offs - state->next_offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->next_offs = offs;
+ return res;
+}
+
+static TEE_Result copy_to(struct elf_load_state *state,
+ void *dst, size_t dst_size, size_t dst_offs,
+ size_t offs, size_t len)
+{
+ TEE_Result res;
+
+ res = advance_to(state, offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ if (!len)
+ return TEE_SUCCESS;
+
+ /* Check for integer overflow */
+ if ((len + dst_offs) < dst_offs || (len + dst_offs) > dst_size ||
+ (len + offs) < offs || (len + offs) > state->nwdata_len)
+ return TEE_ERROR_SECURITY;
+
+ memcpy((uint8_t *)dst + dst_offs, state->nwdata + offs, len);
+ res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+ (uint8_t *)dst + dst_offs, len);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->next_offs = offs + len;
+ return res;
+}
+
+static TEE_Result alloc_and_copy_to(void **p, struct elf_load_state *state,
+ size_t offs, size_t len)
+{
+ TEE_Result res;
+ void *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ res = copy_to(state, buf, len, 0, offs, len);
+ if (res == TEE_SUCCESS)
+ *p = buf;
+ else
+ free(buf);
+ return res;
+}
+
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+ size_t nwdata_len, struct elf_load_state **ret_state)
+{
+ struct elf_load_state *state;
+
+ state = calloc(1, sizeof(*state));
+ if (!state)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ state->hash_ctx = hash_ctx;
+ state->hash_algo = hash_algo;
+ state->nwdata = nwdata;
+ state->nwdata_len = nwdata_len;
+ *ret_state = state;
+ return TEE_SUCCESS;
+}
+
+static TEE_Result e32_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *ehdr)
+{
+ if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
+ ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
+ ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
+ ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+ ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
+ (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
+#ifndef CFG_WITH_VFP
+ (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
+#endif
+ ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
+ ehdr->e_shentsize != sizeof(Elf32_Shdr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ state->ehdr = malloc(sizeof(*ehdr));
+ if (!state->ehdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ memcpy(state->ehdr, ehdr, sizeof(*ehdr));
+ state->is_32bit = true;
+ return TEE_SUCCESS;
+}
+
+#ifdef ARM64
+static TEE_Result e64_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *eh32)
+{
+ TEE_Result res;
+ Elf64_Ehdr *ehdr = NULL;
+
+ if (eh32->e_ident[EI_VERSION] != EV_CURRENT ||
+ eh32->e_ident[EI_CLASS] != ELFCLASS64 ||
+ eh32->e_ident[EI_DATA] != ELFDATA2LSB ||
+ eh32->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+ eh32->e_type != ET_DYN || eh32->e_machine != EM_AARCH64)
+ return TEE_ERROR_BAD_FORMAT;
+
+ ehdr = malloc(sizeof(*ehdr));
+ if (!ehdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ state->ehdr = ehdr;
+ memcpy(ehdr, eh32, sizeof(*eh32));
+ res = copy_to(state, ehdr, sizeof(*ehdr), sizeof(*eh32),
+ sizeof(*eh32), sizeof(*ehdr) - sizeof(*eh32));
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
+ ehdr->e_shentsize != sizeof(Elf64_Shdr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ state->ehdr = ehdr;
+ state->is_32bit = false;
+ return TEE_SUCCESS;
+}
+#else /*ARM64*/
+static TEE_Result e64_load_ehdr(struct elf_load_state *state __unused,
+ Elf32_Ehdr *eh32 __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
+
+static TEE_Result load_head(struct elf_load_state *state, size_t head_size)
+{
+ TEE_Result res;
+ size_t n;
+ void *p;
+ struct elf_ehdr ehdr;
+ struct elf_phdr phdr;
+ struct elf_phdr phdr0;
+
+ copy_ehdr(&ehdr, state);
+ /*
+ * Program headers are supposed to be arranged as:
+ * PT_LOAD [0] : .ta_head ...
+ * ...
+ * PT_LOAD [n]
+ *
+ * .ta_head must be located first in the first program header,
+ * which also has to be of PT_LOAD type.
+ *
+ * A PT_DYNAMIC segment may appear, but is ignored. Any other
+ * segment except PT_LOAD and PT_DYNAMIC will cause an error. All
+ * sections not included by a PT_LOAD segment are ignored.
+ */
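+ /*
+ * Illustrative example only (the numbers are made up):
+ * PT_LOAD [0]: p_vaddr 0x0, p_filesz 0x6000 - starts with .ta_head
+ * PT_DYNAMIC : ignored
+ * PT_LOAD [1]: p_vaddr 0x8000, p_memsz 0x2000 - gives vasize 0xa000
+ */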
+ if (ehdr.e_phnum < 1)
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check for integer overflow */
+ if (((uint64_t)ehdr.e_phnum * ehdr.e_phentsize) > SIZE_MAX)
+ return TEE_ERROR_SECURITY;
+
+ res = alloc_and_copy_to(&p, state, ehdr.e_phoff,
+ ehdr.e_phnum * ehdr.e_phentsize);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->phdr = p;
+
+ /*
+ * Check that the first program header is a PT_LOAD (not strictly
+ * needed but our link script is supposed to arrange it that way)
+ * and that it starts at virtual address 0.
+ */
+ copy_phdr(&phdr0, state, 0);
+ if (phdr0.p_type != PT_LOAD || phdr0.p_vaddr != 0)
+ return TEE_ERROR_BAD_FORMAT;
+
+ /*
+ * Calculate the amount of virtual memory required for the TA. Find
+ * the max address used by a PT_LOAD segment. Note that the last
+ * PT_LOAD segment dictates the total amount of needed memory. Any
+ * holes in the memory will also be allocated.
+ *
+ * Note that this loop will terminate at n = 0 if not earlier
+ * as we already know from above that state->phdr[0].p_type == PT_LOAD
+ */
+ n = ehdr.e_phnum;
+ do {
+ n--;
+ copy_phdr(&phdr, state, n);
+ } while (phdr.p_type != PT_LOAD);
+ state->vasize = phdr.p_vaddr + phdr.p_memsz;
+
+ /* Check for integer overflow */
+ if (state->vasize < phdr.p_vaddr)
+ return TEE_ERROR_SECURITY;
+
+ /*
+ * Read .ta_head from the first segment and make sure the segment is
+ * large enough. We're only interested in seeing that the
+ * TA_FLAG_EXEC_DDR flag is set. If it is, we set that flag in the
+ * TA context to enable mapping the TA. Later, when this function
+ * has returned and the hash has been verified, the flags field is
+ * updated with any other flags.
+ */
+ if (phdr0.p_filesz < head_size)
+ return TEE_ERROR_BAD_FORMAT;
+ res = alloc_and_copy_to(&p, state, phdr0.p_offset, head_size);
+ if (res == TEE_SUCCESS) {
+ state->ta_head = p;
+ state->ta_head_size = head_size;
+ }
+ return res;
+}
+
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+ void **head, size_t *vasize, bool *is_32bit)
+{
+ TEE_Result res;
+ Elf32_Ehdr ehdr;
+
+ /*
+ * The ELF resides in shared memory; to avoid attacks based on
+ * modifying the ELF while we're parsing it here, we only read each
+ * byte from the ELF once. We're also hashing the ELF while reading,
+ * so we're limited to reading the ELF sequentially from start to
+ * end.
+ */
+
+ res = copy_to(state, &ehdr, sizeof(ehdr), 0, 0, sizeof(Elf32_Ehdr));
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (!IS_ELF(ehdr))
+ return TEE_ERROR_BAD_FORMAT;
+ res = e32_load_ehdr(state, &ehdr);
+ if (res == TEE_ERROR_BAD_FORMAT)
+ res = e64_load_ehdr(state, &ehdr);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = load_head(state, head_size);
+ if (res == TEE_SUCCESS) {
+ *head = state->ta_head;
+ *vasize = state->vasize;
+ *is_32bit = state->is_32bit;
+ }
+ return res;
+}
+
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+ vaddr_t *vaddr, size_t *size, uint32_t *flags)
+{
+ struct elf_ehdr ehdr;
+
+ copy_ehdr(&ehdr, state);
+ while (*idx < ehdr.e_phnum) {
+ struct elf_phdr phdr;
+
+ copy_phdr(&phdr, state, *idx);
+ (*idx)++;
+ if (phdr.p_type == PT_LOAD) {
+ if (vaddr)
+ *vaddr = phdr.p_vaddr;
+ if (size)
+ *size = phdr.p_memsz;
+ if (flags)
+ *flags = phdr.p_flags;
+ return TEE_SUCCESS;
+ }
+ }
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
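+
+/*
+ * Typical iteration over the PT_LOAD segments (illustrative sketch only;
+ * map_segment() is a placeholder for whatever the caller does with each
+ * segment):
+ *
+ * size_t idx = 0;
+ * vaddr_t va;
+ * size_t size;
+ * uint32_t flags;
+ *
+ * while (elf_load_get_next_segment(state, &idx, &va, &size,
+ * &flags) == TEE_SUCCESS)
+ * map_segment(va, size, flags);
+ */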
+
+static TEE_Result e32_process_rel(struct elf_load_state *state, size_t rel_sidx,
+ vaddr_t vabase)
+{
+ Elf32_Ehdr *ehdr = state->ehdr;
+ Elf32_Shdr *shdr = state->shdr;
+ Elf32_Rel *rel;
+ Elf32_Rel *rel_end;
+ size_t sym_tab_idx;
+ Elf32_Sym *sym_tab = NULL;
+ size_t num_syms = 0;
+
+ if (shdr[rel_sidx].sh_type != SHT_REL)
+ return TEE_ERROR_NOT_IMPLEMENTED;
+
+ if (shdr[rel_sidx].sh_entsize != sizeof(Elf32_Rel))
+ return TEE_ERROR_BAD_FORMAT;
+
+ sym_tab_idx = shdr[rel_sidx].sh_link;
+ if (sym_tab_idx) {
+ if (sym_tab_idx >= ehdr->e_shnum)
+ return TEE_ERROR_BAD_FORMAT;
+
+ if (shdr[sym_tab_idx].sh_entsize != sizeof(Elf32_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if (shdr[sym_tab_idx].sh_addr > state->vasize ||
+ (shdr[sym_tab_idx].sh_addr +
+ shdr[sym_tab_idx].sh_size) > state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ sym_tab = (Elf32_Sym *)(vabase + shdr[sym_tab_idx].sh_addr);
+ if (!ALIGNMENT_IS_OK(sym_tab, Elf32_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);
+ }
+
+ /* Check the address is inside TA memory */
+ if (shdr[rel_sidx].sh_addr >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rel = (Elf32_Rel *)(vabase + shdr[rel_sidx].sh_addr);
+ if (!ALIGNMENT_IS_OK(rel, Elf32_Rel))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
+ for (; rel < rel_end; rel++) {
+ Elf32_Addr *where;
+ size_t sym_idx;
+
+ /* Check the address is inside TA memory */
+ if (rel->r_offset >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ where = (Elf32_Addr *)(vabase + rel->r_offset);
+ if (!ALIGNMENT_IS_OK(where, Elf32_Addr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_ARM_ABS32:
+ sym_idx = ELF32_R_SYM(rel->r_info);
+ if (sym_idx >= num_syms)
+ return TEE_ERROR_BAD_FORMAT;
+
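+ /* REL format: the addend is the value already stored at *where,
+ * so the result is S + A with S = load base + st_value. */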
+ *where += vabase + sym_tab[sym_idx].st_value;
+ break;
+ case R_ARM_RELATIVE:
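+ /* Result is load base + addend, with the addend taken from *where */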
+ *where += vabase;
+ break;
+ default:
+ EMSG("Unknown relocation type %d",
+ ELF32_R_TYPE(rel->r_info));
+ return TEE_ERROR_BAD_FORMAT;
+ }
+ }
+ return TEE_SUCCESS;
+}
+
+#ifdef ARM64
+static TEE_Result e64_process_rel(struct elf_load_state *state,
+ size_t rel_sidx, vaddr_t vabase)
+{
+ Elf64_Shdr *shdr = state->shdr;
+ Elf64_Rela *rela;
+ Elf64_Rela *rela_end;
+
+ if (shdr[rel_sidx].sh_type != SHT_RELA)
+ return TEE_ERROR_NOT_IMPLEMENTED;
+
+ if (shdr[rel_sidx].sh_entsize != sizeof(Elf64_Rela))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if (shdr[rel_sidx].sh_addr >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rela = (Elf64_Rela *)(vabase + shdr[rel_sidx].sh_addr);
+ if (!ALIGNMENT_IS_OK(rela, Elf64_Rela))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
+ for (; rela < rela_end; rela++) {
+ Elf64_Addr *where;
+
+ /* Check the address is inside TA memory */
+ if (rela->r_offset >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ where = (Elf64_Addr *)(vabase + rela->r_offset);
+ if (!ALIGNMENT_IS_OK(where, Elf64_Addr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_AARCH64_RELATIVE:
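+ /* RELA format: result is load base + explicit addend */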
+ *where = rela->r_addend + vabase;
+ break;
+ default:
+ EMSG("Unknown relocation type %zd",
+ ELF64_R_TYPE(rela->r_info));
+ return TEE_ERROR_BAD_FORMAT;
+ }
+ }
+ return TEE_SUCCESS;
+}
+#else /*ARM64*/
+static TEE_Result e64_process_rel(struct elf_load_state *state __unused,
+ size_t rel_sidx __unused, vaddr_t vabase __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
+
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase)
+{
+ TEE_Result res;
+ size_t n;
+ void *p;
+ uint8_t *dst = (uint8_t *)vabase;
+ struct elf_ehdr ehdr;
+ size_t offs;
+
+ copy_ehdr(&ehdr, state);
+
+ /*
+ * Zero initialize everything to make sure that all memory not
+ * updated from the ELF is zero (covering .bss and any gaps).
+ */
+ memset(dst, 0, state->vasize);
+
+ /*
+ * Copy the segments
+ */
+ memcpy(dst, state->ta_head, state->ta_head_size);
+ offs = state->ta_head_size;
+ for (n = 0; n < ehdr.e_phnum; n++) {
+ struct elf_phdr phdr;
+
+ copy_phdr(&phdr, state, n);
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ res = copy_to(state, dst, state->vasize,
+ phdr.p_vaddr + offs,
+ phdr.p_offset + offs,
+ phdr.p_filesz - offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ offs = 0;
+ }
+
+ /*
+ * We have now loaded all segments into TA memory; next we need to
+ * process the relocation information. To find it we need to locate
+ * the section headers, which are located somewhere between the last
+ * segment and the end of the ELF.
+ */
+ if (ehdr.e_shoff) {
+ /* We have section headers */
+ res = alloc_and_copy_to(&p, state, ehdr.e_shoff,
+ ehdr.e_shnum * ehdr.e_shentsize);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->shdr = p;
+ }
+
+ /* Hash until end of ELF */
+ res = advance_to(state, state->nwdata_len);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (state->shdr) {
+ TEE_Result (*process_rel)(struct elf_load_state *state,
+ size_t rel_sidx, vaddr_t vabase);
+
+ if (state->is_32bit)
+ process_rel = e32_process_rel;
+ else
+ process_rel = e64_process_rel;
+
+ /* Process relocation */
+ for (n = 0; n < ehdr.e_shnum; n++) {
+ uint32_t sh_type = get_shdr_type(state, n);
+
+ if (sh_type == SHT_REL || sh_type == SHT_RELA) {
+ res = process_rel(state, n, vabase);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+void elf_load_final(struct elf_load_state *state)
+{
+ if (state) {
+ free(state->ta_head);
+ free(state->ehdr);
+ free(state->phdr);
+ free(state->shdr);
+ free(state);
+ }
+}
diff --git a/core/arch/arm/kernel/elf_load.h b/core/arch/arm/kernel/elf_load.h
new file mode 100644
index 0000000..4944e3a
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ELF_LOAD_H
+#define ELF_LOAD_H
+
+#include <types_ext.h>
+#include <tee_api_types.h>
+
+struct elf_load_state;
+
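+/*
+ * Expected call sequence (a sketch inferred from elf_load.c, not a formal
+ * contract): elf_load_init() creates the state, elf_load_head() parses the
+ * ELF and program headers and returns the required VA size,
+ * elf_load_get_next_segment() iterates over the PT_LOAD segments so the
+ * caller can map them, elf_load_body() copies the segments and applies
+ * relocations once the memory at vabase is mapped, and elf_load_final()
+ * frees the state.
+ */
+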
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+ size_t nwdata_len, struct elf_load_state **state);
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+ void **head, size_t *vasize, bool *is_32bit);
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase);
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+ vaddr_t *vaddr, size_t *size, uint32_t *flags);
+void elf_load_final(struct elf_load_state *state);
+
+#endif /*ELF_LOAD_H*/
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
new file mode 100644
index 0000000..8f13c36
--- /dev/null
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <compiler.h>
+#include <inttypes.h>
+#include <keep.h>
+#include <kernel/generic_boot.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <kernel/asan.h>
+#include <malloc.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <sm/tee_mon.h>
+#include <trace.h>
+#include <tee/tee_cryp_provider.h>
+#include <utee_defines.h>
+#include <util.h>
+#include <stdio.h>
+
+#include <platform_config.h>
+
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+#include <sm/sm.h>
+#endif
+
+#if defined(CFG_WITH_VFP)
+#include <kernel/vfp.h>
+#endif
+
+#if defined(CFG_DT)
+#include <libfdt.h>
+#endif
+
+/*
+ * In this file we're using unsigned long to represent physical pointers as
+ * they are received in a single register when OP-TEE is initially entered.
+ * This limits 32-bit systems to only make use of the lower 32 bits
+ * of a physical address for initial parameters.
+ *
+ * 64-bit systems on the other hand can use full 64-bit physical pointers.
+ */
+#define PADDR_INVALID ULONG_MAX
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+paddr_t ns_entry_addrs[CFG_TEE_CORE_NB_CORE] __early_bss;
+static uint32_t spin_table[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+#ifdef CFG_BOOT_SYNC_CPU
+/*
+ * Array used during boot to synchronize the CPUs.
+ * When 0, the CPU has not started.
+ * When 1, it has started.
+ */
+uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_late(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_late);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_early(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_early);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_init_gic(void)
+{
+}
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_secondary_init_gic(void)
+{
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+void init_sec_mon(unsigned long nsec_entry __maybe_unused)
+{
+ assert(nsec_entry == PADDR_INVALID);
+ /* Do nothing as we don't have a secure monitor */
+}
+#else
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void init_sec_mon(unsigned long nsec_entry)
+{
+ struct sm_nsec_ctx *nsec_ctx;
+
+ assert(nsec_entry != PADDR_INVALID);
+
+ /* Initialize secure monitor */
+ nsec_ctx = sm_get_nsec_ctx();
+ nsec_ctx->mon_lr = nsec_entry;
+ nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
+
+}
+#endif
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+static void init_vfp_nsec(void)
+{
+}
+#else
+static void init_vfp_nsec(void)
+{
+ /* Normal world can use CP10 and CP11 (SIMD/VFP) */
+ write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
+}
+#endif
+
+#if defined(CFG_WITH_VFP)
+
+#ifdef ARM32
+static void init_vfp_sec(void)
+{
+ uint32_t cpacr = read_cpacr();
+
+ /*
+ * Enable Advanced SIMD functionality.
+ * Enable use of D16-D31 of the Floating-point Extension register
+ * file.
+ */
+ cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
+ /*
+ * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and
+ * user mode.
+ */
+ cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
+ cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
+ write_cpacr(cpacr);
+}
+#endif /* ARM32 */
+
+#ifdef ARM64
+static void init_vfp_sec(void)
+{
+ /* Not using VFP until thread_kernel_enable_vfp() */
+ vfp_disable();
+}
+#endif /* ARM64 */
+
+#else /* CFG_WITH_VFP */
+
+static void init_vfp_sec(void)
+{
+ /* Not using VFP */
+}
+#endif
+
+#ifdef CFG_WITH_PAGER
+
+static size_t get_block_size(void)
+{
+ struct core_mmu_table_info tbl_info;
+ unsigned l;
+
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
+ panic("can't find mmu tables");
+
+ l = tbl_info.level - 1;
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
+ panic("can't find mmu table upper level");
+
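+ /*
+ * The returned size is the block size one level above the small
+ * page level, typically 1 MiB sections (short-descriptor) or
+ * 2 MiB blocks (LPAE/AArch64), depending on the platform's MMU
+ * configuration.
+ */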
+ return 1 << tbl_info.shift;
+}
+
+static void init_runtime(unsigned long pageable_part)
+{
+ size_t n;
+ size_t init_size = (size_t)__init_size;
+ size_t pageable_size = __pageable_end - __pageable_start;
+ size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
+ TEE_SHA256_HASH_SIZE;
+ tee_mm_entry_t *mm;
+ uint8_t *paged_store;
+ uint8_t *hashes;
+ size_t block_size;
+
+ assert(pageable_size % SMALL_PAGE_SIZE == 0);
+ assert(hash_size == (size_t)__tmp_hashes_size);
+
+ /*
+ * Zero the BSS area. Note that globals that would normally go
+ * into BSS but are used before this point have to be placed in
+ * .nozi.* to avoid being overwritten.
+ */
+ memset(__bss_start, 0, __bss_end - __bss_start);
+
+ /*
+ * This needs to be initialized early to support address lookup
+ * in MEM_AREA_TEE_RAM
+ */
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
+ &tee_pager_tbl_info))
+ panic("can't find mmu tables");
+
+ if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
+ panic("Unsupported page size in translation table");
+
+ thread_init_boot_thread();
+
+ malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+ malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
+
+ hashes = malloc(hash_size);
+ IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
+ assert(hashes);
+ memcpy(hashes, __tmp_hashes_start, hash_size);
+
+ /*
+ * Need tee_mm_sec_ddr initialized to be able to allocate secure
+ * DDR below.
+ */
+ teecore_init_ta_ram();
+
+ mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
+ assert(mm);
+ paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
+ /* Copy init part into pageable area */
+ memcpy(paged_store, __init_start, init_size);
+ /* Copy pageable part after init part into pageable area */
+ memcpy(paged_store + init_size,
+ phys_to_virt(pageable_part,
+ core_mmu_get_type_by_pa(pageable_part)),
+ __pageable_part_end - __pageable_part_start);
+
+ /* Check that hashes of what's in pageable area is OK */
+ DMSG("Checking hashes of pageable area");
+ for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
+ const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
+ const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
+ TEE_Result res;
+
+ DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
+ res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
+ if (res != TEE_SUCCESS) {
+ EMSG("Hash failed for page %zu at %p: res 0x%x",
+ n, page, res);
+ panic();
+ }
+ }
+
+ /*
+ * Copy what's not initialized in the last init page. Needed
+ * because we're not going to fault in the init pages again. We can't
+ * fault in pages until we've switched to the new vector by calling
+ * thread_init_handlers() below.
+ */
+ if (init_size % SMALL_PAGE_SIZE) {
+ uint8_t *p;
+
+ memcpy(__init_start + init_size, paged_store + init_size,
+ SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));
+
+ p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
+ ~SMALL_PAGE_MASK);
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
+ SMALL_PAGE_SIZE);
+ }
+
+ /*
+ * Initialize the virtual memory pool used for main_mmu_l2_ttb which
+ * is supplied to tee_pager_init() below.
+ */
+ block_size = get_block_size();
+ if (!tee_mm_init(&tee_mm_vcore,
+ ROUNDDOWN(CFG_TEE_RAM_START, block_size),
+ ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
+ block_size),
+ SMALL_PAGE_SHIFT, 0))
+ panic("tee_mm_vcore init failed");
+
+ /*
+ * Assign the pager's alias area at the end of the small page block
+ * that the rest of the binary is loaded into. We're taking more
+ * than needed, but we're guaranteed to not need more than the
+ * physical amount of TZSRAM.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore,
+ (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
+ assert(mm);
+ tee_pager_init(mm);
+
+ /*
+ * Claim virtual memory which isn't paged. Note that there might be
+ * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
+ * claimed so that later allocations don't get that memory.
+ * Linear memory (flat map core memory) ends there.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
+ (vaddr_t)(__pageable_start - tee_mm_vcore.lo));
+ assert(mm);
+
+ /*
+ * Allocate virtual memory for the pageable area and let the pager
+ * take charge of all the pages already assigned to that memory.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
+ pageable_size);
+ assert(mm);
+ if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ TEE_MATTR_PRX, paged_store, hashes))
+ panic("failed to add pageable to vcore");
+
+ tee_pager_add_pages((vaddr_t)__pageable_start,
+ ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
+ tee_pager_add_pages((vaddr_t)__pageable_start +
+ ROUNDUP(init_size, SMALL_PAGE_SIZE),
+ (pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
+ SMALL_PAGE_SIZE, true);
+
+}
+#else
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+static void init_run_constructors(void)
+{
+ vaddr_t *ctor;
+
+ for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
+ ((void (*)(void))(*ctor))();
+}
+
+static void init_asan(void)
+{
+
+ /*
+ * CFG_ASAN_SHADOW_OFFSET is also supplied as
+ * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
+ * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
+ * aren't available to make, we need to calculate it in advance and
+ * hard code it into the platform conf.mk. Here, where we do have
+ * all the needed values, we double check that the compiler was
+ * supplied the correct value.
+ */
+
+#define __ASAN_SHADOW_START \
+ ROUNDUP(CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
+ assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
+#define __CFG_ASAN_SHADOW_OFFSET \
+ (__ASAN_SHADOW_START - (CFG_TEE_RAM_START / 8))
+ COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
+#undef __ASAN_SHADOW_START
+#undef __CFG_ASAN_SHADOW_OFFSET
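+
+ /*
+ * In other words, each 8-byte granule at address A is tracked by
+ * the shadow byte at CFG_ASAN_SHADOW_OFFSET + A / 8, which maps
+ * CFG_TEE_RAM_START onto __asan_shadow_start.
+ */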
+
+ /*
+ * Assign area covered by the shadow area, everything from start up
+ * to the beginning of the shadow area.
+ */
+ asan_set_shadowed((void *)CFG_TEE_LOAD_ADDR, &__asan_shadow_start);
+
+ /*
+ * Add access to areas that aren't opened automatically by a
+ * constructor.
+ */
+ asan_tag_access(&__initcall_start, &__initcall_end);
+ asan_tag_access(&__ctor_list, &__ctor_end);
+ asan_tag_access(__rodata_start, __rodata_end);
+ asan_tag_access(__early_bss_start, __early_bss_end);
+ asan_tag_access(__nozi_start, __nozi_end);
+
+ init_run_constructors();
+
+ /* Everything is tagged correctly, let's start address sanitizing. */
+ asan_start();
+}
+#else /*CFG_CORE_SANITIZE_KADDRESS*/
+static void init_asan(void)
+{
+}
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+static void init_runtime(unsigned long pageable_part __unused)
+{
+ /*
+ * Zero the BSS area. Note that globals that would normally go
+ * into BSS but are used before this point have to be placed in
+ * .nozi.* to avoid being overwritten.
+ */
+ memset(__bss_start, 0, __bss_end - __bss_start);
+
+ thread_init_boot_thread();
+
+ init_asan();
+ malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+
+ /*
+ * Initialized at this stage in the pager version of this function
+ * above
+ */
+ teecore_init_ta_ram();
+}
+#endif
+
+#ifdef CFG_DT
+static int add_optee_dt_node(void *fdt)
+{
+ int offs;
+ int ret;
+
+ if (fdt_path_offset(fdt, "/firmware/optee") >= 0) {
+ IMSG("OP-TEE Device Tree node already exists!\n");
+ return 0;
+ }
+
+ offs = fdt_path_offset(fdt, "/firmware");
+ if (offs < 0) {
+ offs = fdt_path_offset(fdt, "/");
+ if (offs < 0)
+ return -1;
+ offs = fdt_add_subnode(fdt, offs, "firmware");
+ if (offs < 0)
+ return -1;
+ }
+
+ offs = fdt_add_subnode(fdt, offs, "optee");
+ if (offs < 0)
+ return -1;
+
+ ret = fdt_setprop_string(fdt, offs, "compatible", "linaro,optee-tz");
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop_string(fdt, offs, "method", "smc");
+ if (ret < 0)
+ return -1;
+ return 0;
+}
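+
+/*
+ * For illustration only, the node added above ends up looking roughly
+ * like this in device tree source form:
+ *
+ *	firmware {
+ *		optee {
+ *			compatible = "linaro,optee-tz";
+ *			method = "smc";
+ *		};
+ *	};
+ */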
+
+static int get_dt_cell_size(void *fdt, int offs, const char *cell_name,
+ uint32_t *cell_size)
+{
+ int len;
+ const uint32_t *cell = fdt_getprop(fdt, offs, cell_name, &len);
+
+ if (len != sizeof(*cell))
+ return -1;
+ *cell_size = fdt32_to_cpu(*cell);
+ if (*cell_size != 1 && *cell_size != 2)
+ return -1;
+ return 0;
+}
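+
+/*
+ * Example (illustrative): with "#address-cells = <2>;" in the parent
+ * node, get_dt_cell_size() returns 2, i.e. addresses in "reg" are
+ * encoded as two 32-bit cells (a 64-bit value).
+ */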
+
+static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
+{
+ if (cell_size == 1) {
+ uint32_t v = cpu_to_fdt32((uint32_t)val);
+
+ memcpy(data, &v, sizeof(v));
+ } else {
+ uint64_t v = cpu_to_fdt64(val);
+
+ memcpy(data, &v, sizeof(v));
+ }
+}
+
+static int add_optee_res_mem_dt_node(void *fdt)
+{
+ int offs;
+ int ret;
+ uint32_t addr_size = 2;
+ uint32_t len_size = 2;
+ vaddr_t shm_va_start;
+ vaddr_t shm_va_end;
+ paddr_t shm_pa;
+ char subnode_name[80];
+
+ offs = fdt_path_offset(fdt, "/reserved-memory");
+ if (offs >= 0) {
+ ret = get_dt_cell_size(fdt, offs, "#address-cells", &addr_size);
+ if (ret < 0)
+ return -1;
+ ret = get_dt_cell_size(fdt, offs, "#size-cells", &len_size);
+ if (ret < 0)
+ return -1;
+ } else {
+ offs = fdt_path_offset(fdt, "/");
+ if (offs < 0)
+ return -1;
+ offs = fdt_add_subnode(fdt, offs, "reserved-memory");
+ if (offs < 0)
+ return -1;
+ ret = fdt_setprop_cell(fdt, offs, "#address-cells", addr_size);
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop_cell(fdt, offs, "#size-cells", len_size);
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop(fdt, offs, "ranges", NULL, 0);
+ if (ret < 0)
+ return -1;
+ }
+
+ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
+ shm_pa = virt_to_phys((void *)shm_va_start);
+ snprintf(subnode_name, sizeof(subnode_name),
+ "optee@0x%" PRIxPA, shm_pa);
+ offs = fdt_add_subnode(fdt, offs, subnode_name);
+ if (offs >= 0) {
+ uint32_t data[addr_size + len_size];
+
+ set_dt_val(data, addr_size, shm_pa);
+ set_dt_val(data + addr_size, len_size,
+ shm_va_end - shm_va_start);
+ ret = fdt_setprop(fdt, offs, "reg", data, sizeof(data));
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop(fdt, offs, "no-map", NULL, 0);
+ if (ret < 0)
+ return -1;
+ } else {
+ return -1;
+ }
+ return 0;
+}
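+
+/*
+ * For illustration only (addresses and sizes below are hypothetical),
+ * the resulting fragment is roughly:
+ *
+ *	reserved-memory {
+ *		#address-cells = <2>;
+ *		#size-cells = <2>;
+ *		ranges;
+ *
+ *		optee@0x42000000 {
+ *			reg = <0x0 0x42000000 0x0 0x200000>;
+ *			no-map;
+ *		};
+ *	};
+ */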
+
+static void init_fdt(unsigned long phys_fdt)
+{
+ void *fdt;
+ int ret;
+
+ if (!phys_fdt) {
+ EMSG("Device Tree missing");
+ /*
+ * No need to panic as we're not using the DT in OP-TEE
+ * yet, we're only adding some nodes for normal world use.
+ * This makes the switch to using DT easier as we can boot
+ * a newer OP-TEE with older boot loaders. Once we start to
+ * initialize devices based on DT we'll likely panic
+ * instead of returning here.
+ */
+ return;
+ }
+
+ if (!core_mmu_add_mapping(MEM_AREA_IO_NSEC, phys_fdt, CFG_DTB_MAX_SIZE))
+ panic("failed to map fdt");
+
+ fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
+ if (!fdt)
+ panic();
+
+ ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
+ if (ret < 0) {
+ EMSG("Invalid Device Tree at 0x%" PRIxPA ": error %d",
+ phys_fdt, ret);
+ panic();
+ }
+
+ if (add_optee_dt_node(fdt))
+ panic("Failed to add OP-TEE Device Tree node");
+
+ if (add_optee_res_mem_dt_node(fdt))
+ panic("Failed to add OP-TEE reserved memory DT node");
+
+ ret = fdt_pack(fdt);
+ if (ret < 0) {
+ EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
+ phys_fdt, ret);
+ panic();
+ }
+}
+#else
+static void init_fdt(unsigned long phys_fdt __unused)
+{
+}
+#endif /*!CFG_DT*/
+
+static void init_primary_helper(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt)
+{
+ /*
+ * Mask asynchronous exceptions before switching to the thread vector
+ * as the thread handler requires those to be masked while
+ * executing with the temporary stack. The thread subsystem also
+ * asserts that IRQ is blocked when using most of its functions.
+ */
+ thread_set_exceptions(THREAD_EXCP_ALL);
+ init_vfp_sec();
+
+ init_runtime(pageable_part);
+
+ IMSG("Initializing (%s)\n", core_v_str);
+
+ thread_init_primary(generic_boot_get_handlers());
+ thread_init_per_cpu();
+ init_sec_mon(nsec_entry);
+ init_fdt(fdt);
+ main_init_gic();
+ init_vfp_nsec();
+
+ if (init_teecore() != TEE_SUCCESS)
+ panic();
+ DMSG("Primary CPU switching to normal world boot\n");
+}
+
+static void init_secondary_helper(unsigned long nsec_entry)
+{
+ /*
+ * Mask asynchronous exceptions before switching to the thread vector
+ * as the thread handler requires those to be masked while
+ * executing with the temporary stack. The thread subsystem also
+ * asserts that IRQ is blocked when using most of its functions.
+ */
+ thread_set_exceptions(THREAD_EXCP_ALL);
+
+ thread_init_per_cpu();
+ init_sec_mon(nsec_entry);
+ main_secondary_init_gic();
+ init_vfp_sec();
+ init_vfp_nsec();
+
+ DMSG("Secondary CPU Switching to normal world boot\n");
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long u __unused,
+ unsigned long fdt)
+{
+ init_primary_helper(pageable_part, PADDR_INVALID, fdt);
+ return &thread_vector_table;
+}
+
+unsigned long generic_boot_cpu_on_handler(unsigned long a0 __maybe_unused,
+ unsigned long a1 __unused)
+{
+ DMSG("cpu %zu: a0 0x%lx", get_core_pos(), a0);
+ init_secondary_helper(PADDR_INVALID);
+ return 0;
+}
+#else
+void generic_boot_init_primary(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt)
+{
+ init_primary_helper(pageable_part, nsec_entry, fdt);
+}
+
+void generic_boot_init_secondary(unsigned long nsec_entry)
+{
+ init_secondary_helper(nsec_entry);
+}
+#endif
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+int generic_boot_core_release(size_t core_idx, paddr_t entry)
+{
+ if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
+ return -1;
+
+ ns_entry_addrs[core_idx] = entry;
+ dmb();
+ spin_table[core_idx] = 1;
+ dsb();
+ sev();
+
+ return 0;
+}
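+
+/*
+ * Illustrative only: a platform PSCI/SMC handler would typically release
+ * a secondary core roughly like this (the names used here are
+ * hypothetical):
+ *
+ *	if (generic_boot_core_release(core_idx, ns_entry_pa))
+ *		return error_to_caller;
+ */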
+
+/*
+ * Spin until a secondary boot request arrives, then return
+ * the secondary core's entry address.
+ */
+paddr_t generic_boot_core_hpen(void)
+{
+#ifdef CFG_PSCI_ARM32
+ return ns_entry_addrs[get_core_pos()];
+#else
+ do {
+ wfe();
+ } while (!spin_table[get_core_pos()]);
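+ /*
+ * The dmb() below pairs with the dmb() in
+ * generic_boot_core_release() so that the entry address written
+ * there is visible before it is read.
+ */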
+ dmb();
+ return ns_entry_addrs[get_core_pos()];
+#endif
+}
+#endif
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
new file mode 100644
index 0000000..27717d5
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+#include <kernel/asan.h>
+
+.section .data
+.balign 4
+
+#ifdef CFG_BOOT_SYNC_CPU
+.equ SEM_CPU_READY, 1
+#endif
+
+#ifdef CFG_PL310
+.section .rodata.init
+panic_boot_file:
+ .asciz __FILE__
+
+/*
+ * void assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
+ */
+.section .text.init
+LOCAL_FUNC __assert_flat_mapped_range , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push { r4-r6, lr }
+ mov r4, r0
+ mov r5, r1
+ bl cpu_mmu_enabled
+ cmp r0, #0
+ beq 1f
+ mov r0, r4
+ bl virt_to_phys
+ cmp r0, r4
+ beq 1f
+ /*
+ * this must be compliant with the panic generic routine:
+ * __do_panic(__FILE__, __LINE__, __func__, str)
+ */
+ ldr r0, =panic_boot_file
+ mov r1, r5
+ mov r2, #0
+ mov r3, #0
+ bl __do_panic
+ b . /* should NOT return */
+1: pop { r4-r6, pc }
+UNWIND( .fnend)
+END_FUNC __assert_flat_mapped_range
+
+ /* panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
+ .macro assert_flat_mapped_range va, line
+ ldr r0, =(\va)
+ ldr r1, =\line
+ bl __assert_flat_mapped_range
+ .endm
+#endif /* CFG_PL310 */
+
+.section .text.boot
+FUNC _start , :
+ b reset
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b . /* FIQ */
+END_FUNC _start
+
+ .macro cpu_is_ready
+#ifdef CFG_BOOT_SYNC_CPU
+ bl get_core_pos
+ lsl r0, r0, #2
+ ldr r1,=sem_cpu_sync
+ ldr r2, =SEM_CPU_READY
+ str r2, [r1, r0]
+ dsb
+ sev
+#endif
+ .endm
+
+ .macro wait_primary
+#ifdef CFG_BOOT_SYNC_CPU
+ ldr r0, =sem_cpu_sync
+ mov r2, #SEM_CPU_READY
+ sev
+1:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne 1b
+#endif
+ .endm
+
+ .macro wait_secondary
+#ifdef CFG_BOOT_SYNC_CPU
+ ldr r0, =sem_cpu_sync
+ mov r3, #CFG_TEE_CORE_NB_CORE
+ mov r2, #SEM_CPU_READY
+ sev
+1:
+ subs r3, r3, #1
+ beq 3f
+ add r0, r0, #4
+2:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne 2b
+ b 1b
+3:
+#endif
+ .endm
+
+ /*
+ * Save boot arguments
+ * entry r0, saved r4: pagestore
+ * entry r1, saved r7: (ARMv7 standard bootarg #1)
+ * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
+ * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
+ */
+ .macro bootargs_entry
+#if defined(CFG_NS_ENTRY_ADDR)
+ ldr r5, =CFG_NS_ENTRY_ADDR
+#else
+ mov r5, lr
+#endif
+#if defined(CFG_PAGEABLE_ADDR)
+ ldr r4, =CFG_PAGEABLE_ADDR
+#else
+ mov r4, r0
+#endif
+#if defined(CFG_DT_ADDR)
+ ldr r6, =CFG_DT_ADDR
+#else
+ mov r6, r2
+#endif
+ mov r7, r1
+ .endm
+
+LOCAL_FUNC reset , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ bootargs_entry
+
+ /* Enable alignment checks and disable data and instruction cache. */
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ bic r0, r0, #SCTLR_C
+ bic r0, r0, #SCTLR_I
+ write_sctlr r0
+ isb
+
+ /* Early ARM secure MP specific configuration */
+ bl plat_cpu_reset_early
+
+ ldr r0, =_start
+ write_vbar r0
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ b reset_primary
+#else
+ bl get_core_pos
+ cmp r0, #0
+ beq reset_primary
+ b reset_secondary
+#endif
+UNWIND( .fnend)
+END_FUNC reset
+
+ /*
+ * Setup sp to point to the top of the tmp stack for the current CPU:
+ * sp is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ */
+ .macro set_sp
+ bl get_core_pos
+ cmp r0, #CFG_TEE_CORE_NB_CORE
+ /* Unsupported CPU, park it before it breaks something */
+ bge unhandled_cpu
+ add r0, r0, #1
+ ldr r2, =stack_tmp_stride
+ ldr r1, [r2]
+ mul r2, r0, r1
+ ldr r1, =stack_tmp
+ add r1, r1, r2
+ ldr r2, =stack_tmp_offset
+ ldr r2, [r2]
+ sub sp, r1, r2
+ .endm
+
+ /*
+ * Cache maintenance during entry: handle outer cache.
+ * End address is exclusive: first byte not to be changed.
+ * Note however arm_clX_inv/cleanbyva operate on full cache lines.
+ *
+ * Use ANSI #define to trap source file line number for PL310 assertion
+ */
+ .macro __inval_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_invbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_invbyva
+ .endm
+
+ .macro __flush_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleanbyva
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_cleaninvbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleaninvbyva
+ .endm
+
+#define inval_cache_vrange(vbase, vend) \
+ __inval_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#define flush_cache_vrange(vbase, vend) \
+ __flush_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#ifdef CFG_BOOT_SYNC_CPU
+#define flush_cpu_semaphores \
+ flush_cache_vrange(sem_cpu_sync, \
+ (sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
+#else
+#define flush_cpu_semaphores
+#endif
+
+LOCAL_FUNC reset_primary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ /* preserve r4-r7: bootargs */
+
+#ifdef CFG_WITH_PAGER
+ /*
+ * Move init code into correct location and move hashes to a
+ * temporary safe location until the heap is initialized.
+ *
+ * The binary is built as:
+ * [Pager code, rodata and data] : In correct location
+ * [Init code and rodata] : Should be copied to __text_init_start
+ * [Hashes] : Should be saved before initializing pager
+ *
+ */
+ ldr r0, =__text_init_start /* dst */
+ ldr r1, =__data_end /* src */
+ ldr r2, =__tmp_hashes_end /* dst limit */
+ /* Copy backwards (as memmove) in case we're overlapping */
+ sub r2, r2, r0 /* len */
+ add r0, r0, r2
+ add r1, r1, r2
+ ldr r2, =__text_init_start
+copy_init:
+ ldmdb r1!, {r3, r8-r12, sp}
+ stmdb r0!, {r3, r8-r12, sp}
+ cmp r0, r2
+ bgt copy_init
+#endif
+
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+ /* First initialize the entire shadow area with no access */
+ ldr r0, =__asan_shadow_start /* start */
+ ldr r1, =__asan_shadow_end /* limit */
+ mov r2, #ASAN_DATA_RED_ZONE
+shadow_no_access:
+ str r2, [r0], #4
+ cmp r0, r1
+ bls shadow_no_access
+
+ /* Mark the entire stack area as OK */
+ ldr r2, =CFG_ASAN_SHADOW_OFFSET
+ ldr r0, =__nozi_stack_start /* start */
+ lsr r0, r0, #ASAN_BLOCK_SHIFT
+ add r0, r0, r2
+ ldr r1, =__nozi_stack_end /* limit */
+ lsr r1, r1, #ASAN_BLOCK_SHIFT
+ add r1, r1, r2
+ mov r2, #0
+shadow_stack_access_ok:
+ strb r2, [r0], #1
+ cmp r0, r1
+ bls shadow_stack_access_ok
+#endif
+
+ set_sp
+
+ /* complete ARM secure MP common configuration */
+ bl plat_cpu_reset_late
+
+ /* Enable Console */
+ bl console_init
+
+#ifdef CFG_PL310
+ bl pl310_base
+ bl arm_cl2_config
+#endif
+
+ /*
+ * Invalidate dcache for all memory used during initialization to
+ * avoid nasty surprises when the cache is turned on. We must not
+ * invalidate memory not used by OP-TEE since we may invalidate
+ * entries used by, for instance, ARM Trusted Firmware.
+ */
+#ifdef CFG_WITH_PAGER
+ inval_cache_vrange(__text_start, __tmp_hashes_end)
+#else
+ inval_cache_vrange(__text_start, __end)
+#endif
+
+#ifdef CFG_PL310
+ /* Enable PL310 if not yet enabled */
+ bl pl310_base
+ bl arm_cl2_enable
+#endif
+
+ bl core_init_mmu_map
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov r0, r4 /* pageable part address */
+ mov r1, r5 /* ns-entry address */
+ mov r2, r6 /* DT address */
+ bl generic_boot_init_primary
+ mov r4, r0 /* save entry test vector */
+
+ /*
+ * In case we've touched memory that secondary CPUs will use before
+ * they have turned on their D-cache, clean and invalidate the
+ * D-cache before exiting to normal world.
+ */
+#ifdef CFG_WITH_PAGER
+ flush_cache_vrange(__text_start, __init_end)
+#else
+ flush_cache_vrange(__text_start, __end)
+#endif
+
+ /* release secondary boot cores and sync with them */
+ cpu_is_ready
+ flush_cpu_semaphores
+ wait_secondary
+
+#ifdef CFG_PL310_LOCKED
+ /* lock/invalidate all lines: pl310 behaves as if disable */
+ bl pl310_base
+ bl arm_cl2_lockallways
+ bl pl310_base
+ bl arm_cl2_cleaninvbyway
+#endif
+
+ /*
+ * Clear current thread id now to allow the thread to be reused on
+ * next entry. Matches the thread_init_boot_thread() in
+ * generic_boot.c.
+ */
+ bl thread_clr_boot_thread
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ /* Pass the vector address returned from main_init */
+ mov r1, r4
+#else
+ /* relay standard bootargs #1 and #2 to the non-secure entry */
+ mov r4, #0
+ mov r3, r6 /* std bootarg #2 for register R2 */
+ mov r2, r7 /* std bootarg #1 for register R1 */
+ mov r1, #0
+#endif /* CFG_WITH_ARM_TRUSTED_FW */
+
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC reset_primary
+
+
+LOCAL_FUNC unhandled_cpu , :
+UNWIND( .fnstart)
+ wfi
+ b unhandled_cpu
+UNWIND( .fnend)
+END_FUNC unhandled_cpu
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+FUNC cpu_on_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mov r4, r0
+ mov r5, r1
+ mov r6, lr
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ write_sctlr r0
+
+ ldr r0, =_start
+ write_vbar r0
+
+ mov r4, lr
+ set_sp
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov r0, r4
+ mov r1, r5
+ bl generic_boot_cpu_on_handler
+
+ bx r6
+UNWIND( .fnend)
+END_FUNC cpu_on_handler
+
+#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
+
+LOCAL_FUNC reset_secondary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ wait_primary
+
+ set_sp
+
+ bl plat_cpu_reset_late
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+ /* if L1 is not invalidated before, do it here */
+ bl arm_cl1_d_invbysetway
+#endif
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ cpu_is_ready
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+ /* generic_boot_core_hpen return value (r0) is ns entry point */
+ bl generic_boot_core_hpen
+#else
+ mov r0, r5 /* ns-entry address */
+#endif
+ bl generic_boot_init_secondary
+
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ mov r1, #0
+ mov r2, #0
+ mov r3, #0
+ mov r4, #0
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC reset_secondary
+#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
diff --git a/core/arch/arm/kernel/generic_entry_a64.S b/core/arch/arm/kernel/generic_entry_a64.S
new file mode 100644
index 0000000..5a5dd53
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a64.S
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+
+ /*
+ * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
+ * SP_EL0 is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ * SP_EL1 is assigned thread_core_local[cpu_id]
+ */
+ .macro set_sp
+ bl get_core_pos
+ cmp x0, #CFG_TEE_CORE_NB_CORE
+ /* Unsupported CPU, park it before it breaks something */
+ bge unhandled_cpu
+ add x0, x0, #1
+ adr x2, stack_tmp_stride
+ ldr w1, [x2]
+ mul x2, x0, x1
+ adrp x1, stack_tmp
+ add x1, x1, :lo12:stack_tmp
+ add x1, x1, x2
+ adr x2, stack_tmp_offset
+ ldr w2, [x2]
+ sub x1, x1, x2
+ msr spsel, #0
+ mov sp, x1
+ bl thread_get_core_local
+ msr spsel, #1
+ mov sp, x0
+ msr spsel, #0
+ .endm
+
+.section .text.boot
+FUNC _start , :
+ mov x19, x0 /* Save pageable part address */
+ mov x20, x2 /* Save DT address */
+
+ adr x0, reset_vect_table
+ msr vbar_el1, x0
+ isb
+
+ mrs x0, sctlr_el1
+ mov x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+ orr x0, x0, x1
+ msr sctlr_el1, x0
+ isb
+
+#ifdef CFG_WITH_PAGER
+ /*
+ * Move init code into correct location
+ *
+ * The binary is built as:
+ * [Pager code, rodata and data] : In correct location
+ * [Init code and rodata] : Should be copied to __text_init_start
+ * [Hashes] : Should be saved before clearing bss
+ *
+ * When we copy init code and rodata into correct location we don't
+ * need to worry about hashes being overwritten as size of .bss,
+ * .heap, .nozi and .heap3 is much larger than the size of init
+ * code and rodata and hashes.
+ */
+ adr x0, __text_init_start /* dst */
+ adr x1, __data_end /* src */
+ adr x2, __rodata_init_end /* dst limit */
+copy_init:
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ cmp x0, x2
+ b.lt copy_init
+#endif
+
+ /* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+ set_sp
+
+ /* Enable aborts now that we can receive exceptions */
+ msr daifclr, #DAIFBIT_ABT
+
+ adr x0, __text_start
+#ifdef CFG_WITH_PAGER
+ adrp x1, __init_end
+ add x1, x1, :lo12:__init_end
+#else
+ adrp x1, __end
+ add x1, x1, :lo12:__end
+#endif
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* Enable Console */
+ bl console_init
+
+ bl core_init_mmu_map
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov x0, x19 /* pageable part address */
+ mov x1, #-1
+ mov x2, x20 /* DT address */
+ bl generic_boot_init_primary
+
+ /*
+ * In case we've touched memory that secondary CPUs will use before
+ * they have turned on their D-cache, clean and invalidate the
+ * D-cache before exiting to normal world.
+ */
+ mov x19, x0
+ adr x0, __text_start
+#ifdef CFG_WITH_PAGER
+ adrp x1, __init_end
+ add x1, x1, :lo12:__init_end
+#else
+ adrp x1, __end
+ add x1, x1, :lo12:__end
+#endif
+ sub x1, x1, x0
+ bl flush_dcache_range
+
+
+ /*
+ * Clear current thread id now to allow the thread to be reused on
+ * next entry. Matches the thread_init_boot_thread in
+ * generic_boot.c.
+ */
+ bl thread_clr_boot_thread
+
+ /* Pass the vector address returned from main_init */
+ mov x1, x19
+ mov x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC _start
+
+
+.section .text.cpu_on_handler
+FUNC cpu_on_handler , :
+ mov x19, x0
+ mov x20, x1
+ mov x21, x30
+
+ adr x0, reset_vect_table
+ msr vbar_el1, x0
+ isb
+
+ mrs x0, sctlr_el1
+ mov x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+ orr x0, x0, x1
+ msr sctlr_el1, x0
+ isb
+
+ /* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+ set_sp
+
+ /* Enable aborts now that we can receive exceptions */
+ msr daifclr, #DAIFBIT_ABT
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov x0, x19
+ mov x1, x20
+ mov x30, x21
+ b generic_boot_cpu_on_handler
+END_FUNC cpu_on_handler
+
+LOCAL_FUNC unhandled_cpu , :
+ wfi
+ b unhandled_cpu
+END_FUNC unhandled_cpu
+
+ /*
+ * This macro verifies that a given vector doesn't exceed the
+ * architectural limit of 32 instructions. It is meant to be placed
+ * immediately after the last instruction in the vector. It takes the
+ * vector entry as the parameter.
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+ .align 11
+LOCAL_FUNC reset_vect_table , :
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+SynchronousExceptionSP0:
+ b SynchronousExceptionSP0
+ check_vector_size SynchronousExceptionSP0
+
+ .align 7
+IrqSP0:
+ b IrqSP0
+ check_vector_size IrqSP0
+
+ .align 7
+FiqSP0:
+ b FiqSP0
+ check_vector_size FiqSP0
+
+ .align 7
+SErrorSP0:
+ b SErrorSP0
+ check_vector_size SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionSPx:
+ b SynchronousExceptionSPx
+ check_vector_size SynchronousExceptionSPx
+
+ .align 7
+IrqSPx:
+ b IrqSPx
+ check_vector_size IrqSPx
+
+ .align 7
+FiqSPx:
+ b FiqSPx
+ check_vector_size FiqSPx
+
+ .align 7
+SErrorSPx:
+ b SErrorSPx
+ check_vector_size SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionA64:
+ b SynchronousExceptionA64
+ check_vector_size SynchronousExceptionA64
+
+ .align 7
+IrqA64:
+ b IrqA64
+ check_vector_size IrqA64
+
+ .align 7
+FiqA64:
+ b FiqA64
+ check_vector_size FiqA64
+
+ .align 7
+SErrorA64:
+ b SErrorA64
+ check_vector_size SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionA32:
+ b SynchronousExceptionA32
+ check_vector_size SynchronousExceptionA32
+
+ .align 7
+IrqA32:
+ b IrqA32
+ check_vector_size IrqA32
+
+ .align 7
+FiqA32:
+ b FiqA32
+ check_vector_size FiqA32
+
+ .align 7
+SErrorA32:
+ b SErrorA32
+ check_vector_size SErrorA32
+
+END_FUNC reset_vect_table
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
new file mode 100644
index 0000000..10dac6e
--- /dev/null
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2008-2010 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <platform_config.h>
+
+OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
+OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)
+
+ENTRY(_start)
+SECTIONS
+{
+ . = CFG_TEE_LOAD_ADDR;
+
+ /* text/read-only data */
+ .text : {
+ __text_start = .;
+ KEEP(*(.text.boot.vectab1))
+ KEEP(*(.text.boot.vectab2))
+ KEEP(*(.text.boot))
+
+ . = ALIGN(8);
+ __initcall_start = .;
+ KEEP(*(.initcall1))
+ KEEP(*(.initcall2))
+ KEEP(*(.initcall3))
+ KEEP(*(.initcall4))
+ __initcall_end = .;
+
+#ifdef CFG_WITH_PAGER
+ *(.text)
+/* Include list of sections needed for paging */
+#include <text_unpaged.ld.S>
+#else
+ *(.text .text.*)
+#endif
+ *(.sram.text.glue_7* .gnu.linkonce.t.*)
+ . = ALIGN(8);
+ __text_end = .;
+ }
+
+ .rodata : ALIGN(8) {
+ __rodata_start = .;
+ *(.gnu.linkonce.r.*)
+#ifdef CFG_WITH_PAGER
+ *(.rodata .rodata.__unpaged)
+#include <rodata_unpaged.ld.S>
+#else
+ *(.rodata .rodata.*)
+
+ /*
+ * 8 to avoid unwanted padding between __start_ta_head_section
+ * and the first structure in ta_head_section, in 64-bit
+ * builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+#endif
+ . = ALIGN(8);
+ __rodata_end = .;
+ }
+
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x9090
+ .plt : { *(.plt) }
+
+ /* .ARM.exidx is sorted, so has to go in its own output section. */
+ .ARM.exidx : {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ }
+
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
+ .data : ALIGN(8) {
+ /* writable data */
+ __data_start_rom = .;
+ /* in one segment binaries, the rom data address is on top
+ of the ram data address */
+ __early_bss_start = .;
+ *(.early_bss .early_bss.*)
+ . = ALIGN(8);
+ __early_bss_end = .;
+ __data_start = .;
+ *(.data .data.* .gnu.linkonce.d.*)
+ . = ALIGN(8);
+ }
+
+ .ctors : ALIGN(8) {
+ __ctor_list = .;
+ KEEP(*(.ctors .ctors.* .init_array .init_array.*))
+ __ctor_end = .;
+ }
+ .dtors : ALIGN(8) {
+ __dtor_list = .;
+ KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
+ __dtor_end = .;
+ }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+
+ __data_end = .;
+ /* uninitialized data */
+ .bss : ALIGN(8) {
+ __bss_start = .;
+ *(.bss .bss.*)
+ *(.gnu.linkonce.b.*)
+ *(COMMON)
+ . = ALIGN(8);
+ __bss_end = .;
+ }
+
+ .heap1 (NOLOAD) : {
+ /*
+ * We're keeping track of the padding added before the
+ * .nozi section so we can do something useful with
+ * this otherwise wasted memory.
+ */
+ __heap1_start = .;
+#ifndef CFG_WITH_PAGER
+ . += CFG_CORE_HEAP_SIZE;
+#endif
+ . = ALIGN(16 * 1024);
+ __heap1_end = .;
+ }
+
+ /*
+ * Uninitialized data that shouldn't be zero initialized at
+ * runtime.
+ *
+ * L1 mmu table requires 16 KiB alignment
+ */
+ .nozi (NOLOAD) : ALIGN(16 * 1024) {
+ __nozi_start = .;
+ KEEP(*(.nozi .nozi.*))
+ . = ALIGN(16);
+ __nozi_end = .;
+ __nozi_stack_start = .;
+ KEEP(*(.nozi_stack))
+ . = ALIGN(8);
+ __nozi_stack_end = .;
+ }
+
+#ifdef CFG_WITH_PAGER
+ .heap2 (NOLOAD) : {
+ __heap2_start = .;
+ /*
+ * Reserve additional memory for heap, the total should be
+ * at least CFG_CORE_HEAP_SIZE, but count what has already
+ * been reserved in .heap1
+ */
+ . += CFG_CORE_HEAP_SIZE - (__heap1_end - __heap1_start);
+ . = ALIGN(4 * 1024);
+ __heap2_end = .;
+ }
+
+ .text_init : ALIGN(4 * 1024) {
+ __text_init_start = .;
+/*
+ * Include the list of sections needed for boot initialization. This list
+ * overlaps with unpaged.ld.S, but since unpaged.ld.S comes first all those
+ * sections will go into the unpaged area.
+ */
+#include <text_init.ld.S>
+ . = ALIGN(8);
+ __text_init_end = .;
+ }
+
+ .rodata_init : ALIGN(8) {
+ __rodata_init_start = .;
+#include <rodata_init.ld.S>
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+ . = ALIGN(8);
+ __rodata_init_end = .;
+ }
+ __init_start = __text_init_start;
+ __init_end = .;
+ __init_size = __init_end - __text_init_start;
+
+ .text_pageable : ALIGN(8) {
+ __text_pageable_start = .;
+ *(.text*)
+ . = ALIGN(8);
+ __text_pageable_end = .;
+ }
+
+ .rodata_pageable : ALIGN(8) {
+ __rodata_pageable_start = .;
+ *(.rodata*)
+ /*
+ * 8 to avoid unwanted padding between __start_ta_head_section
+ * and the first structure in ta_head_section, in 64-bit
+ * builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(4 * 1024);
+ __rodata_pageable_end = .;
+ }
+
+ __pageable_part_start = __rodata_init_end;
+ __pageable_part_end = __rodata_pageable_end;
+ __pageable_start = __text_init_start;
+ __pageable_end = __pageable_part_end;
+
+ /*
+ * Assign a safe spot to store the hashes of the pages before
+ * heap is initialized.
+ */
+ __tmp_hashes_start = __rodata_init_end;
+ __tmp_hashes_size = ((__pageable_end - __pageable_start) /
+ (4 * 1024)) * 32;
+ __tmp_hashes_end = __tmp_hashes_start + __tmp_hashes_size;
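+
+ /*
+ * The sizing above assumes one SHA-256 hash (32 bytes) per 4 KiB
+ * small page of the pageable area; init_runtime() in generic_boot.c
+ * asserts that this matches the hash size it expects.
+ */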
+
+ __init_mem_usage = __tmp_hashes_end - CFG_TEE_LOAD_ADDR;
+
+ ASSERT(CFG_TEE_LOAD_ADDR >= CFG_TEE_RAM_START,
+ "Load address before start of physical memory")
+ ASSERT(CFG_TEE_LOAD_ADDR < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "Load address after end of physical memory")
+ ASSERT(__tmp_hashes_end < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "OP-TEE can't fit init part into available physical memory")
+ ASSERT((CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE - __init_end) >
+ 1 * 4096, "Too few free pages to initialize paging")
+
+
+#endif /*CFG_WITH_PAGER*/
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+ . = CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8;
+ . = ALIGN(8);
+ .asan_shadow : {
+ __asan_shadow_start = .;
+ . += CFG_TEE_RAM_VA_SIZE / 9;
+ __asan_shadow_end = .;
+ }
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+ __end = .;
+
+#ifndef CFG_WITH_PAGER
+ __init_size = __data_end - CFG_TEE_LOAD_ADDR;
+ __init_mem_usage = __end - CFG_TEE_LOAD_ADDR;
+#endif
+ . = CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE;
+ _end_of_ram = .;
+
+ /DISCARD/ : {
+ /* Strip unnecessary stuff */
+ *(.comment .note .eh_frame)
+ /* Strip meta variables */
+ *(__keep_meta_vars*)
+ }
+
+}
diff --git a/core/arch/arm/kernel/link.mk b/core/arch/arm/kernel/link.mk
new file mode 100644
index 0000000..4a7bd8e
--- /dev/null
+++ b/core/arch/arm/kernel/link.mk
@@ -0,0 +1,241 @@
+link-out-dir = $(out-dir)/core
+
+link-script = $(platform-dir)/kern.ld.S
+link-script-pp = $(link-out-dir)/kern.ld
+link-script-dep = $(link-out-dir)/.kern.ld.d
+
+AWK = awk
+
+
+link-ldflags = $(LDFLAGS)
+link-ldflags += -T $(link-script-pp) -Map=$(link-out-dir)/tee.map
+link-ldflags += --sort-section=alignment
+link-ldflags += --fatal-warnings
+link-ldflags += --gc-sections
+
+link-ldadd = $(LDADD)
+link-ldadd += $(addprefix -L,$(libdirs))
+link-ldadd += $(addprefix -l,$(libnames))
+ldargs-tee.elf := $(link-ldflags) $(objs) $(link-out-dir)/version.o \
+ $(link-ldadd) $(libgcccore)
+
+link-script-cppflags := -DASM=1 \
+ $(filter-out $(CPPFLAGS_REMOVE) $(cppflags-remove), \
+ $(nostdinccore) $(CPPFLAGS) \
+ $(addprefix -I,$(incdirscore) $(link-out-dir)) \
+ $(cppflagscore))
+
+entries-unpaged += thread_init_vbar
+entries-unpaged += sm_init
+entries-unpaged += core_init_mmu_regs
+entries-unpaged += sem_cpu_sync
+entries-unpaged += generic_boot_get_handlers
+
+ldargs-all_objs := -i $(objs) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/all_objs.o
+$(link-out-dir)/all_objs.o: $(objs) $(libdeps) $(MAKEFILE_LIST)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-all_objs) -o $@
+
+cleanfiles += $(link-out-dir)/unpaged_entries.txt
+$(link-out-dir)/unpaged_entries.txt: $(link-out-dir)/all_objs.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) $< | \
+ $(AWK) '/ ____keep_pager/ { printf "-u%s ", $$3 }' > $@
+
+objs-unpaged-rem += core/arch/arm/tee/entry_std.o
+objs-unpaged-rem += core/arch/arm/tee/arch_svc.o
+objs-unpaged := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-unpaged-rem)), $(objs))
+ldargs-unpaged = -i --gc-sections $(addprefix -u, $(entries-unpaged))
+ldargs-unpaged-objs := $(objs-unpaged) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/unpaged.o
+$(link-out-dir)/unpaged.o: $(link-out-dir)/unpaged_entries.txt
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-unpaged) \
+ `cat $(link-out-dir)/unpaged_entries.txt` \
+ $(ldargs-unpaged-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_unpaged.ld.S
+$(link-out-dir)/text_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_unpaged.ld.S
+$(link-out-dir)/rodata_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+
+cleanfiles += $(link-out-dir)/init_entries.txt
+$(link-out-dir)/init_entries.txt: $(link-out-dir)/all_objs.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) $< | \
+ $(AWK) '/ ____keep_init/ { printf "-u%s", $$3 }' > $@
+
+objs-init-rem += core/arch/arm/tee/arch_svc.o
+objs-init-rem += core/arch/arm/tee/arch_svc_asm.o
+objs-init-rem += core/arch/arm/tee/init.o
+objs-init-rem += core/arch/arm/tee/entry_std.o
+entries-init += _start
+objs-init := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-init-rem)), $(objs) \
+ $(link-out-dir)/version.o)
+ldargs-init := -i --gc-sections $(addprefix -u, $(entries-init))
+
+ldargs-init-objs := $(objs-init) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/init.o
+$(link-out-dir)/init.o: $(link-out-dir)/init_entries.txt
+ $(call gen-version-o)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-init) \
+ `cat $(link-out-dir)/init_entries.txt` \
+ $(ldargs-init-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_init.ld.S
+$(link-out-dir)/text_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_init.ld.S
+$(link-out-dir)/rodata_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+-include $(link-script-dep)
+
+link-script-extra-deps += $(link-out-dir)/text_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/text_init.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_init.ld.S
+link-script-extra-deps += $(conf-file)
+cleanfiles += $(link-script-pp) $(link-script-dep)
+$(link-script-pp): $(link-script) $(link-script-extra-deps)
+ @$(cmd-echo-silent) ' CPP $@'
+ @mkdir -p $(dir $@)
+ $(q)$(CPPcore) -Wp,-P,-MT,$@,-MD,$(link-script-dep) \
+ $(link-script-cppflags) $< > $@
+
+define update-buildcount
+ @$(cmd-echo-silent) ' UPD $(1)'
+ $(q)if [ ! -f $(1) ]; then \
+ mkdir -p $(dir $(1)); \
+ echo 1 >$(1); \
+ else \
+ expr 0`cat $(1)` + 1 >$(1); \
+ fi
+endef
+
+version-o-cflags = $(filter-out -g3,$(core-platform-cflags) \
+ $(platform-cflags)) # Workaround objdump warning
+DATE_STR = `date -u`
+BUILD_COUNT_STR = `cat $(link-out-dir)/.buildcount`
+define gen-version-o
+ $(call update-buildcount,$(link-out-dir)/.buildcount)
+ @$(cmd-echo-silent) ' GEN $(link-out-dir)/version.o'
+ $(q)echo -e "const char core_v_str[] =" \
+ "\"$(TEE_IMPL_VERSION) \"" \
+ "\"#$(BUILD_COUNT_STR) \"" \
+ "\"$(DATE_STR) \"" \
+ "\"$(CFG_KERN_LINKER_ARCH)\";\n" \
+ | $(CCcore) $(version-o-cflags) \
+ -xc - -c -o $(link-out-dir)/version.o
+endef
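+
+# For illustration only, the generated version.o then carries a string
+# such as "2.4.0 #1 Mon May 29 09:42:10 UTC 2017 arm32" (the actual values
+# depend on TEE_IMPL_VERSION, the build count, the date and the linker arch).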
+$(link-out-dir)/version.o:
+ $(call gen-version-o)
+
+all: $(link-out-dir)/tee.elf
+cleanfiles += $(link-out-dir)/tee.elf $(link-out-dir)/tee.map
+cleanfiles += $(link-out-dir)/version.o
+cleanfiles += $(link-out-dir)/.buildcount
+$(link-out-dir)/tee.elf: $(objs) $(libdeps) $(link-script-pp)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-tee.elf) -o $@
+
+all: $(link-out-dir)/tee.dmp
+cleanfiles += $(link-out-dir)/tee.dmp
+$(link-out-dir)/tee.dmp: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJDUMP $@'
+ $(q)$(OBJDUMPcore) -l -x -d $< > $@
+
+pageable_sections := .*_pageable
+init_sections := .*_init
+cleanfiles += $(link-out-dir)/tee-pager.bin
+$(link-out-dir)/tee-pager.bin: $(link-out-dir)/tee.elf \
+ $(link-out-dir)/tee-data_end.txt
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --remove-section="$(pageable_sections)" \
+ --remove-section="$(init_sections)" \
+ --pad-to `cat $(link-out-dir)/tee-data_end.txt` \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-pageable.bin
+$(link-out-dir)/tee-pageable.bin: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --only-section="$(init_sections)" \
+ --only-section="$(pageable_sections)" \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-data_end.txt
+$(link-out-dir)/tee-data_end.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __data_end | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_size.txt
+$(link-out-dir)/tee-init_size.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __init_size | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_load_addr.txt
+$(link-out-dir)/tee-init_load_addr.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' _start' | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_mem_usage.txt
+$(link-out-dir)/tee-init_mem_usage.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' __init_mem_usage' | sed 's/ .*$$//' >> $@
+
+all: $(link-out-dir)/tee.bin
+cleanfiles += $(link-out-dir)/tee.bin
+$(link-out-dir)/tee.bin: $(link-out-dir)/tee-pager.bin \
+ $(link-out-dir)/tee-pageable.bin \
+ $(link-out-dir)/tee-init_size.txt \
+ $(link-out-dir)/tee-init_load_addr.txt \
+ $(link-out-dir)/tee-init_mem_usage.txt \
+ ./scripts/gen_hashed_bin.py
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)load_addr=`cat $(link-out-dir)/tee-init_load_addr.txt` && \
+ ./scripts/gen_hashed_bin.py \
+ --arch $(if $(filter y,$(CFG_ARM64_core)),arm64,arm32) \
+ --init_size `cat $(link-out-dir)/tee-init_size.txt` \
+ --init_load_addr_hi $$(($$load_addr >> 32 & 0xffffffff)) \
+ --init_load_addr_lo $$(($$load_addr & 0xffffffff)) \
+ --init_mem_usage `cat $(link-out-dir)/tee-init_mem_usage.txt` \
+ --tee_pager_bin $(link-out-dir)/tee-pager.bin \
+ --tee_pageable_bin $(link-out-dir)/tee-pageable.bin \
+ --out $@
+
+
+all: $(link-out-dir)/tee.symb_sizes
+cleanfiles += $(link-out-dir)/tee.symb_sizes
+$(link-out-dir)/tee.symb_sizes: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) --print-size --reverse-sort --size-sort $< > $@
+
+cleanfiles += $(link-out-dir)/tee.mem_usage
+ifneq ($(filter mem_usage,$(MAKECMDGOALS)),)
+mem_usage: $(link-out-dir)/tee.mem_usage
+
+$(link-out-dir)/tee.mem_usage: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/mem_usage.awk > $@
+endif
diff --git a/core/arch/arm/kernel/misc_a32.S b/core/arch/arm/kernel/misc_a32.S
new file mode 100644
index 0000000..48fd8ba
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a32.S
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+FUNC get_core_pos , :
+UNWIND( .fnstart)
+ read_mpidr r0
+ /* Calculate CorePos = (ClusterId * 4) + CoreId */
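+	/*
+	 * The cluster ID field starts at MPIDR bit 8, so shifting the
+	 * masked value right by 6 leaves it multiplied by 4.
+	 */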
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ add r0, r1, r0, LSR #6
+ bx lr
+UNWIND( .fnend)
+END_FUNC get_core_pos
+
+/*
+ * uint32_t temp_set_mode(int cpu_mode)
+ * returns cpsr to be set
+ */
+LOCAL_FUNC temp_set_mode , :
+UNWIND( .fnstart)
+ mov r1, r0
+ cmp r1, #CPSR_MODE_USR /* update mode: usr -> sys */
+ moveq r1, #CPSR_MODE_SYS
+ cpsid aif /* disable interrupts */
+	mrs r0, cpsr		/* get cpsr with interrupts disabled */
+ bic r0, #CPSR_MODE_MASK /* clear mode */
+ orr r0, r1 /* set expected mode */
+ bx lr
+UNWIND( .fnend)
+END_FUNC temp_set_mode
+
+/* uint32_t read_mode_sp(int cpu_mode) */
+FUNC read_mode_sp , :
+UNWIND( .fnstart)
+ push {r4, lr}
+UNWIND( .save {r4, lr})
+ mrs r4, cpsr /* save cpsr */
+ bl temp_set_mode
+ msr cpsr, r0 /* set the new mode */
+ mov r0, sp /* get the function result */
+ msr cpsr, r4 /* back to the old mode */
+ pop {r4, pc}
+UNWIND( .fnend)
+END_FUNC read_mode_sp
+
+/* uint32_t read_mode_lr(int cpu_mode) */
+FUNC read_mode_lr , :
+UNWIND( .fnstart)
+ push {r4, lr}
+UNWIND( .save {r4, lr})
+ mrs r4, cpsr /* save cpsr */
+ bl temp_set_mode
+ msr cpsr, r0 /* set the new mode */
+ mov r0, lr /* get the function result */
+ msr cpsr, r4 /* back to the old mode */
+ pop {r4, pc}
+UNWIND( .fnend)
+END_FUNC read_mode_lr
diff --git a/core/arch/arm/kernel/misc_a64.S b/core/arch/arm/kernel/misc_a64.S
new file mode 100644
index 0000000..2b4da4a
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a64.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+FUNC get_core_pos , :
+ mrs x0, mpidr_el1
+ /* Calculate CorePos = (ClusterId * 4) + CoreId */
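+	/*
+	 * The cluster ID field starts at MPIDR_EL1 bit 8, so shifting the
+	 * masked value right by 6 leaves it multiplied by 4.
+	 */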
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+END_FUNC get_core_pos
diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
new file mode 100644
index 0000000..0e1b836
--- /dev/null
+++ b/core/arch/arm/kernel/mutex.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+void mutex_init(struct mutex *m)
+{
+ *m = (struct mutex)MUTEX_INITIALIZER;
+}
+
+static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
+{
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ while (true) {
+ uint32_t old_itr_status;
+ enum mutex_value old_value;
+ struct wait_queue_elem wqe;
+
+ /*
+ * If the mutex is locked we need to initialize the wqe
+ * before releasing the spinlock to guarantee that we don't
+ * miss the wakeup from mutex_unlock().
+ *
+ * If the mutex is unlocked we don't need to use the wqe at
+ * all.
+ */
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ old_value = m->value;
+ if (old_value == MUTEX_VALUE_LOCKED) {
+ wq_wait_init(&m->wq, &wqe);
+ } else {
+ m->value = MUTEX_VALUE_LOCKED;
+ thread_add_mutex(m);
+ }
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (old_value == MUTEX_VALUE_LOCKED) {
+ /*
+ * Someone else is holding the lock, wait in normal
+ * world for the lock to become available.
+ */
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+ } else
+ return;
+ }
+}
+
+static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ if (m->value != MUTEX_VALUE_LOCKED)
+ panic();
+
+ thread_rem_mutex(m);
+ m->value = MUTEX_VALUE_UNLOCKED;
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
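+	/* Wake one waiter, if any, now that the mutex has been released */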
+ wq_wake_one(&m->wq, m, fname, lineno);
+}
+
+static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
+ int lineno __unused)
+{
+ uint32_t old_itr_status;
+ enum mutex_value old_value;
+
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ old_value = m->value;
+ if (old_value == MUTEX_VALUE_UNLOCKED) {
+ m->value = MUTEX_VALUE_LOCKED;
+ thread_add_mutex(m);
+ }
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return old_value == MUTEX_VALUE_UNLOCKED;
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ __mutex_unlock(m, fname, lineno);
+}
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ __mutex_lock(m, fname, lineno);
+}
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ return __mutex_trylock(m, fname, lineno);
+}
+#else
+void mutex_unlock(struct mutex *m)
+{
+ __mutex_unlock(m, NULL, -1);
+}
+
+void mutex_lock(struct mutex *m)
+{
+ __mutex_lock(m, NULL, -1);
+}
+
+bool mutex_trylock(struct mutex *m)
+{
+ return __mutex_trylock(m, NULL, -1);
+}
+#endif
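+
+/*
+ * Typical usage (sketch; 'my_mutex' is only illustrative):
+ *
+ *	static struct mutex my_mutex = MUTEX_INITIALIZER;
+ *
+ *	mutex_lock(&my_mutex);
+ *	... access state shared with other threads ...
+ *	mutex_unlock(&my_mutex);
+ *
+ * These functions must be called from a thread context and never while
+ * holding a spinlock, see the asserts in __mutex_lock() and __mutex_unlock().
+ */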
+
+void mutex_destroy(struct mutex *m)
+{
+ /*
+ * Caller guarantees that no one will try to take the mutex so
+ * there's no need to take the spinlock before accessing it.
+ */
+ if (m->value != MUTEX_VALUE_UNLOCKED)
+ panic();
+ if (!wq_is_empty(&m->wq))
+ panic("waitqueue not empty");
+}
+
+void condvar_init(struct condvar *cv)
+{
+ *cv = (struct condvar)CONDVAR_INITIALIZER;
+}
+
+void condvar_destroy(struct condvar *cv)
+{
+ if (cv->m && wq_have_condvar(&cv->m->wq, cv))
+ panic();
+
+ condvar_init(cv);
+}
+
+static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
+ int lineno)
+{
+ uint32_t old_itr_status;
+ struct mutex *m;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&cv->spin_lock);
+ m = cv->m;
+ cpu_spin_unlock(&cv->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (m)
+ wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
+
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
+{
+ cv_signal(cv, true /* only one */, fname, lineno);
+}
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
+{
+ cv_signal(cv, false /* all */, fname, lineno);
+}
+
+#else
+void condvar_signal(struct condvar *cv)
+{
+ cv_signal(cv, true /* only one */, NULL, -1);
+}
+
+void condvar_broadcast(struct condvar *cv)
+{
+ cv_signal(cv, false /* all */, NULL, -1);
+}
+#endif /*CFG_MUTEX_DEBUG*/
+
+static void __condvar_wait(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem wqe;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ /* Link this condvar to this mutex until reinitialized */
+ cpu_spin_lock(&cv->spin_lock);
+ if (cv->m && cv->m != m)
+ panic("invalid mutex");
+
+ cv->m = m;
+ cpu_spin_unlock(&cv->spin_lock);
+
+ cpu_spin_lock(&m->spin_lock);
+
+ /* Add to mutex wait queue as a condvar waiter */
+ wq_wait_init_condvar(&m->wq, &wqe, cv);
+
+ /* Unlock the mutex */
+ if (m->value != MUTEX_VALUE_LOCKED)
+ panic();
+
+ thread_rem_mutex(m);
+ m->value = MUTEX_VALUE_UNLOCKED;
+
+ cpu_spin_unlock(&m->spin_lock);
+
+ thread_unmask_exceptions(old_itr_status);
+
+	/* Wake any waiters */
+ wq_wake_one(&m->wq, m, fname, lineno);
+
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+
+ mutex_lock(m);
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno)
+{
+ __condvar_wait(cv, m, fname, lineno);
+}
+#else
+void condvar_wait(struct condvar *cv, struct mutex *m)
+{
+ __condvar_wait(cv, m, NULL, -1);
+}
+#endif
diff --git a/core/arch/arm/kernel/pm_stubs.c b/core/arch/arm/kernel/pm_stubs.c
new file mode 100644
index 0000000..db77e7c
--- /dev/null
+++ b/core/arch/arm/kernel/pm_stubs.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <compiler.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+
+unsigned long pm_panic(unsigned long a0 __unused, unsigned long a1 __unused)
+{
+ panic();
+}
+
+unsigned long pm_do_nothing(unsigned long a0 __unused,
+ unsigned long a1 __unused)
+{
+ return 0;
+}
diff --git a/core/arch/arm/kernel/proc_a32.S b/core/arch/arm/kernel/proc_a32.S
new file mode 100644
index 0000000..f0446a6
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a32.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <keep.h>
+#include <kernel/unwind.h>
+
+/*
+ * void cpu_mmu_enable(void) - enable MMU
+ *
+ * TLBs are invalidated before MMU is enabled.
+ * A DSB and an ISB ensure the MMU is enabled before the routine returns.
+ */
+FUNC cpu_mmu_enable , :
+UNWIND( .fnstart)
+ /* Invalidate TLB */
+ write_tlbiall
+
+ /* Enable the MMU */
+ read_sctlr r0
+ orr r0, r0, #SCTLR_M
+ write_sctlr r0
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable
+KEEP_PAGER cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) - enable instruction cache */
+FUNC cpu_mmu_enable_icache , :
+UNWIND( .fnstart)
+ /* Invalidate instruction cache and branch predictor */
+ write_iciallu
+ write_bpiall
+
+ /* Enable the instruction cache */
+ read_sctlr r1
+ orr r1, r1, #SCTLR_I
+ write_sctlr r1
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable_icache
+KEEP_PAGER cpu_mmu_enable_icache
+
+/* void cpu_mmu_enable_dcache(void) - enable data cache */
+FUNC cpu_mmu_enable_dcache , :
+UNWIND( .fnstart)
+ read_sctlr r0
+ orr r0, r0, #SCTLR_C
+ write_sctlr r0
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable_dcache
+KEEP_PAGER cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/proc_a64.S b/core/arch/arm/kernel/proc_a64.S
new file mode 100644
index 0000000..5db895a
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a64.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm64.h>
+#include <asm.S>
+
+/* void cpu_mmu_enable(void) */
+FUNC cpu_mmu_enable , :
+ /* Invalidate TLB */
+ tlbi vmalle1
+
+ /*
+ * Make sure translation table writes have drained into memory and
+ * the TLB invalidation is complete.
+ */
+ dsb sy
+ isb
+
+ /* Enable the MMU */
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_M
+ msr sctlr_el1, x0
+ isb
+
+ ret
+END_FUNC cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) */
+FUNC cpu_mmu_enable_icache , :
+ /* Invalidate instruction cache and branch predictor */
+ ic iallu
+ isb
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_I
+ msr sctlr_el1, x0
+ isb
+ ret
+END_FUNC cpu_mmu_enable_icache
+
+
+/* void cpu_mmu_enable_dcache(void) */
+FUNC cpu_mmu_enable_dcache , :
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_C
+ msr sctlr_el1, x0
+ isb
+ ret
+END_FUNC cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/pseudo_ta.c b/core/arch/arm/kernel/pseudo_ta.c
new file mode 100644
index 0000000..6352a28
--- /dev/null
+++ b/core/arch/arm/kernel/pseudo_ta.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <initcall.h>
+#include <kernel/panic.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/tee_ta_manager.h>
+#include <mm/core_memprot.h>
+#include <mm/mobj.h>
+#include <sm/tee_mon.h>
+#include <stdlib.h>
+#include <string.h>
+#include <trace.h>
+#include <types_ext.h>
+
+/* Maps static TA params */
+static TEE_Result copy_in_param(struct tee_ta_param *param,
+ TEE_Param tee_param[TEE_NUM_PARAMS])
+{
+ size_t n;
+ void *va;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_VALUE_INPUT:
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ tee_param[n].value.a = param->u[n].val.a;
+ tee_param[n].value.b = param->u[n].val.b;
+ break;
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ va = mobj_get_va(param->u[n].mem.mobj,
+ param->u[n].mem.offs);
+ if (!va)
+ return TEE_ERROR_BAD_PARAMETERS;
+ tee_param[n].memref.buffer = va;
+ tee_param[n].memref.size = param->u[n].mem.size;
+ break;
+ default:
+ memset(tee_param + n, 0, sizeof(TEE_Param));
+ break;
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+static void update_out_param(TEE_Param tee_param[TEE_NUM_PARAMS],
+ struct tee_ta_param *param)
+{
+ size_t n;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ param->u[n].val.a = tee_param[n].value.a;
+ param->u[n].val.b = tee_param[n].value.b;
+ break;
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ param->u[n].mem.size = tee_param[n].memref.size;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static TEE_Result pseudo_ta_enter_open_session(struct tee_ta_session *s,
+ struct tee_ta_param *param, TEE_ErrorOrigin *eo)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+ TEE_Param tee_param[TEE_NUM_PARAMS];
+
+ tee_ta_push_current_session(s);
+ *eo = TEE_ORIGIN_TRUSTED_APP;
+
+ if ((s->ctx->ref_count == 1) && stc->pseudo_ta->create_entry_point) {
+ res = stc->pseudo_ta->create_entry_point();
+ if (res != TEE_SUCCESS)
+ goto out;
+ }
+
+ if (stc->pseudo_ta->open_session_entry_point) {
+ res = copy_in_param(param, tee_param);
+ if (res != TEE_SUCCESS) {
+ *eo = TEE_ORIGIN_TEE;
+ goto out;
+ }
+
+ res = stc->pseudo_ta->open_session_entry_point(param->types,
+ tee_param,
+ &s->user_ctx);
+ update_out_param(tee_param, param);
+ }
+
+out:
+ tee_ta_pop_current_session();
+ return res;
+}
+
+static TEE_Result pseudo_ta_enter_invoke_cmd(struct tee_ta_session *s,
+ uint32_t cmd, struct tee_ta_param *param,
+ TEE_ErrorOrigin *eo)
+{
+ TEE_Result res;
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+ TEE_Param tee_param[TEE_NUM_PARAMS];
+
+ tee_ta_push_current_session(s);
+ res = copy_in_param(param, tee_param);
+ if (res != TEE_SUCCESS) {
+ *eo = TEE_ORIGIN_TEE;
+ goto out;
+ }
+
+ *eo = TEE_ORIGIN_TRUSTED_APP;
+ res = stc->pseudo_ta->invoke_command_entry_point(s->user_ctx, cmd,
+ param->types,
+ tee_param);
+ update_out_param(tee_param, param);
+out:
+ tee_ta_pop_current_session();
+ return res;
+}
+
+static void pseudo_ta_enter_close_session(struct tee_ta_session *s)
+{
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+
+ tee_ta_push_current_session(s);
+
+ if (stc->pseudo_ta->close_session_entry_point)
+ stc->pseudo_ta->close_session_entry_point(s->user_ctx);
+
+ if ((s->ctx->ref_count == 1) && stc->pseudo_ta->destroy_entry_point)
+ stc->pseudo_ta->destroy_entry_point();
+
+ tee_ta_pop_current_session();
+}
+
+static void pseudo_ta_destroy(struct tee_ta_ctx *ctx)
+{
+ free(to_pseudo_ta_ctx(ctx));
+}
+
+static const struct tee_ta_ops pseudo_ta_ops = {
+ .enter_open_session = pseudo_ta_enter_open_session,
+ .enter_invoke_cmd = pseudo_ta_enter_invoke_cmd,
+ .enter_close_session = pseudo_ta_enter_close_session,
+ .destroy = pseudo_ta_destroy,
+};
+
+
+/* Defined in link script */
+extern const struct pseudo_ta_head __start_ta_head_section;
+extern const struct pseudo_ta_head __stop_ta_head_section;
+
+/* Ensures declared pseudo TAs conform with core expectations */
+static TEE_Result verify_pseudo_tas_conformance(void)
+{
+ const struct pseudo_ta_head *start = &__start_ta_head_section;
+ const struct pseudo_ta_head *end = &__stop_ta_head_section;
+ const struct pseudo_ta_head *pta;
+
+ for (pta = start; pta < end; pta++) {
+ const struct pseudo_ta_head *pta2;
+
+		/* PTAs must all have a unique UUID */
+ for (pta2 = pta + 1; pta2 < end; pta2++)
+ if (!memcmp(&pta->uuid, &pta2->uuid, sizeof(TEE_UUID)))
+ goto err;
+
+ if (!pta->name ||
+ (pta->flags & PTA_MANDATORY_FLAGS) != PTA_MANDATORY_FLAGS ||
+ pta->flags & ~PTA_ALLOWED_FLAGS ||
+ !pta->invoke_command_entry_point)
+ goto err;
+ }
+ return TEE_SUCCESS;
+err:
+ DMSG("pseudo TA error at %p", (void *)pta);
+ panic("pta");
+}
+
+service_init(verify_pseudo_tas_conformance);
+
+/*-----------------------------------------------------------------------------
+ * Initialises a session for the pseudo TA matching the given UUID.
+ * Fills in the supplied session's context (s->ctx) and returns a TEE_Result.
+ *---------------------------------------------------------------------------*/
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s)
+{
+ struct pseudo_ta_ctx *stc = NULL;
+ struct tee_ta_ctx *ctx;
+ const struct pseudo_ta_head *ta;
+
+ DMSG(" Lookup for Static TA %pUl", (void *)uuid);
+
+ ta = &__start_ta_head_section;
+ while (true) {
+ if (ta >= &__stop_ta_head_section)
+ return TEE_ERROR_ITEM_NOT_FOUND;
+ if (memcmp(&ta->uuid, uuid, sizeof(TEE_UUID)) == 0)
+ break;
+ ta++;
+ }
+
+ /* Load a new TA and create a session */
+ DMSG(" Open %s", ta->name);
+ stc = calloc(1, sizeof(struct pseudo_ta_ctx));
+ if (!stc)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ ctx = &stc->ctx;
+
+ ctx->ref_count = 1;
+ s->ctx = ctx;
+ ctx->flags = ta->flags;
+ stc->pseudo_ta = ta;
+ ctx->uuid = ta->uuid;
+ ctx->ops = &pseudo_ta_ops;
+ TAILQ_INSERT_TAIL(&tee_ctxes, ctx, link);
+
+ DMSG(" %s : %pUl", stc->pseudo_ta->name, (void *)&ctx->uuid);
+
+ return TEE_SUCCESS;
+}
diff --git a/core/arch/arm/kernel/spin_lock_a32.S b/core/arch/arm/kernel/spin_lock_a32.S
new file mode 100644
index 0000000..52d8e9f
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a32.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+#include <kernel/unwind.h>
+
+/* void __cpu_spin_lock(unsigned int *lock) */
+FUNC __cpu_spin_lock , :
+UNWIND( .fnstart)
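+	/*
+	 * Read the lock exclusively; if it is already taken, wait for an
+	 * event (wfene) before retrying.  Otherwise try to claim it with an
+	 * exclusive store and loop until the store succeeds.
+	 */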
+ mov r2, #SPINLOCK_LOCK
+1:
+ ldrex r1, [r0]
+ cmp r1, #SPINLOCK_UNLOCK
+ wfene
+ strexeq r1, r2, [r0]
+ cmpeq r1, #0
+ bne 1b
+ dmb
+ bx lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_lock
+
+/* int __cpu_spin_trylock(unsigned int *lock) - return 0 on success */
+FUNC __cpu_spin_trylock , :
+UNWIND( .fnstart)
+ mov r2, #SPINLOCK_LOCK
+ mov r1, r0
+1:
+ ldrex r0, [r1]
+ cmp r0, #0
+ bne 1f
+ strex r0, r2, [r1]
+ cmp r0, #0
+ bne 1b
+ dmb
+ bx lr
+1:
+ clrex
+ dmb
+ bx lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock) */
+FUNC __cpu_spin_unlock , :
+UNWIND( .fnstart)
+ dmb
+ mov r1, #SPINLOCK_UNLOCK
+ str r1, [r0]
+ dsb
+ sev
+ bx lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_unlock
diff --git a/core/arch/arm/kernel/spin_lock_a64.S b/core/arch/arm/kernel/spin_lock_a64.S
new file mode 100644
index 0000000..97fce42
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a64.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+
+/* void __cpu_spin_lock(unsigned int *lock); */
+FUNC __cpu_spin_lock , :
+ mov w2, #SPINLOCK_LOCK
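+	/*
+	 * sevl sets the local event register so the first wfe falls
+	 * straight through to the lock attempt below.
+	 */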
+ sevl
+l1: wfe
+l2: ldaxr w1, [x0]
+ cbnz w1, l1
+ stxr w1, w2, [x0]
+ cbnz w1, l2
+ ret
+END_FUNC __cpu_spin_lock
+
+/* unsigned int __cpu_spin_trylock(unsigned int *lock); */
+FUNC __cpu_spin_trylock , :
+ mov x1, x0
+ mov w2, #SPINLOCK_LOCK
+.loop: ldaxr w0, [x1]
+ cbnz w0, .cpu_spin_trylock_out
+ stxr w0, w2, [x1]
+ cbnz w0, .loop
+.cpu_spin_trylock_out:
+ ret
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock); */
+FUNC __cpu_spin_unlock , :
+ stlr wzr, [x0]
+ ret
+END_FUNC __cpu_spin_unlock
diff --git a/core/arch/arm/kernel/spin_lock_debug.c b/core/arch/arm/kernel/spin_lock_debug.c
new file mode 100644
index 0000000..2a450a5
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_debug.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/spinlock.h>
+#include "thread_private.h"
+
+void spinlock_count_incr(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ l->locked_count++;
+ assert(l->locked_count);
+}
+
+void spinlock_count_decr(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->locked_count);
+ l->locked_count--;
+}
+
+bool have_spinlock(void)
+{
+ struct thread_core_local *l;
+
+ if (!thread_irq_disabled()) {
+ /*
+ * Normally we can't be holding a spinlock since doing so would
+		 * imply IRQs are disabled (or the spinlock logic is flawed).
+ */
+ return false;
+ }
+
+ l = thread_get_core_local();
+
+ return !!l->locked_count;
+}
diff --git a/core/arch/arm/kernel/ssvce_a32.S b/core/arch/arm/kernel/ssvce_a32.S
new file mode 100644
index 0000000..e2850f1
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a32.S
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+/*
+ * Variable(s)
+ */
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+
+ .section .text.ssvce
+
+
+
+/*
+ * - MMU maintenance support ---------------------------------------------
+ */
+
+
+/*
+ * void secure_mmu_unifiedtlbinvall(void);
+ */
+FUNC secure_mmu_unifiedtlbinvall , :
+UNWIND( .fnstart)
+
+ write_tlbiallis
+
+ DSB
+ ISB
+
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/*
+ * void secure_mmu_unifiedtlbinvbymva(mva);
+ *
+ * Combine VA and current ASID, and invalidate matching TLB
+ */
+FUNC secure_mmu_unifiedtlbinvbymva , :
+UNWIND( .fnstart)
+
+ b . @ Wrong code to force fix/check the routine before using it
+
+ MRC p15, 0, R1, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
+ ANDS R1, R1, #0xFF /* Get current ASID */
+ ORR R1, R1, R0 /* Combine MVA and ASID */
+
+ MCR p15, 0, R1, c8, c7, 1 /* Invalidate Unified TLB entry by MVA */
+
+ DSB
+ ISB
+
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinvbymva
+
+/*
+ * void secure_mmu_unifiedtlbinv_curasid(void)
+ *
+ * Invalidate TLB matching current ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+UNWIND( .fnstart)
+ read_contextidr r0
+ and r0, r0, #0xff /* Get current ASID */
+ /* Invalidate unified TLB by ASID Inner Sharable */
+ write_tlbiasidis r0
+ dsb
+ isb
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/*
+ * void secure_mmu_unifiedtlbinv_byasid(unsigned int asid)
+ *
+ * Invalidate TLB entries matching the given ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+UNWIND( .fnstart)
+ and r0, r0, #0xff /* Get ASID */
+ /* Invalidate unified TLB by ASID Inner Sharable */
+ write_tlbiasidis r0
+ dsb
+ isb
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * void arm_cl1_d_cleanbysetway(void)
+ */
+FUNC arm_cl1_d_cleanbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cl_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cl_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c10, 2 @ ; DCCSW Clean data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cl_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cl_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleanbysetway
+
+FUNC arm_cl1_d_invbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_dcache_off:
+ MOV R0, #0 @ ; set way number to 0
+_inv_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_inv_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c6, 2 @ ; DCISW Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _inv_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _inv_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_invbysetway
+
+FUNC arm_cl1_d_cleaninvbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cli_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cli_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c14, 2 @ ; DCCISW Clean and Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cli_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cli_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/*
+ * void arm_cl1_d_cleanbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleanbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cl_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cl_area_nextLine:
+ MCR p15, 0, R0, c7, c10, 1 @ ; Clean data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cl_area_nextLine
+
+_cl_area_exit:
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleanbyva
+
+/*
+ * void arm_cl1_d_invbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_invbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _inv_area_dcache_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_area_dcache_off:
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_inv_area_dcache_nl:
+ MCR p15, 0, R0, c7, c6, 1 @ ; Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _inv_area_dcache_nl
+
+_inv_area_dcache_exit:
+ DSB
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_invbyva
+
+/*
+ * void arm_cl1_d_cleaninvbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleaninvbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cli_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cli_area_nextLine:
+ MCR p15, 0, R0, c7, c14, 1 @ ; Clean and Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cli_area_nextLine
+
+_cli_area_exit:
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/*
+ * void arm_cl1_i_inv_all( void );
+ *
+ * Invalidates the whole instruction cache.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv_all , :
+UNWIND( .fnstart)
+
+ /* Invalidate Entire Instruction Cache */
+ write_icialluis
+ DSB
+
+ /* Flush entire branch target cache */
+ write_bpiallis
+
+ DSB /* ensure that maintenance operations are seen */
+	ISB			/* by the instructions right after the ISB */
+
+ BX LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_i_inv_all
+
+/*
+ * void arm_cl1_i_inv(void *start, void *end);
+ *
+ * Invalidates instruction cache area whose limits are given in parameters.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 /* Check that end >= start. Otherwise return. */
+ BHI _inv_icache_exit
+
+ BIC R0, R0, #0x1F /* Mask 5 LSBits */
+_inv_icache_nextLine:
+ MCR p15, 0, R0, c7, c5, 1 /* Invalidate ICache single entry (MVA) */
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET /* Next cache line */
+ CMP R1, R0
+ BPL _inv_icache_nextLine
+ DSB
+
+ /* Flush entire branch target cache */
+ MOV R1, #0
+ MCR p15, 0, R1, c7, c5, 6 /* write to Cache operations register */
+ DSB /* ensure that maintenance operations are seen */
+	ISB			/* by the instructions right after the ISB */
+
+_inv_icache_exit:
+ BX LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/ssvce_a64.S b/core/arch/arm/kernel/ssvce_a64.S
new file mode 100644
index 0000000..6c9bbac
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a64.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tz_ssvce.h>
+#include <arm64.h>
+#include <asm.S>
+
+/* void secure_mmu_unifiedtlbinvall(void); */
+FUNC secure_mmu_unifiedtlbinvall , :
+ tlbi vmalle1
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/* void secure_mmu_unifiedtlbinv_curasid(void) */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+ mrs x0, ttbr0_el1
+ lsr x0, x0, #TTBR_ASID_SHIFT
+ b secure_mmu_unifiedtlbinv_byasid
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+ and x0, x0, #TTBR_ASID_MASK
+ tlbi aside1, x0
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * Compatibility wrappers to be used until the rest of the code stops caring
+ * about which cache level it operates on. CL1 -> Inner cache.
+ */
+
+/* void arm_cl1_d_cleanbysetway(void); */
+FUNC arm_cl1_d_cleanbysetway , :
+ mov x0, #DCCSW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleanbysetway
+
+/* void arm_cl1_d_invbysetway(void); */
+FUNC arm_cl1_d_invbysetway , :
+ mov x0, #DCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_invbysetway
+
+/* void arm_cl1_d_cleaninvbysetway(void); */
+FUNC arm_cl1_d_cleaninvbysetway , :
+ mov x0, #DCCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/* void arm_cl1_d_cleanbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleanbyva , :
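+	/*
+	 * Convert the (start, end) arguments into the (start, length)
+	 * form expected by flush_dcache_range().
+	 */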
+ sub x1, x1, x0
+ add x1, x1, #1
+ /*
+ * flush_dcache_range() does Clean+Invalidate, but that shouldn't
+ * matter to the caller.
+ */
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleanbyva
+
+/* void arm_cl1_d_invbyva(void *s, void *e); */
+FUNC arm_cl1_d_invbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b inv_dcache_range
+END_FUNC arm_cl1_d_invbyva
+
+/* void arm_cl1_d_cleaninvbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleaninvbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/* void arm_cl1_i_inv_all( void ); */
+FUNC arm_cl1_i_inv_all , :
+ ic ialluis
+ isb
+ ret
+END_FUNC arm_cl1_i_inv_all
+
+/* void arm_cl1_i_inv(void *start, void *end); */
+FUNC arm_cl1_i_inv , :
+ /*
+ * Invalidate the entire icache instead, it shouldn't matter to the
+ * caller.
+ */
+ b arm_cl1_i_inv_all
+END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk
new file mode 100644
index 0000000..cee3aee
--- /dev/null
+++ b/core/arch/arm/kernel/sub.mk
@@ -0,0 +1,45 @@
+srcs-$(CFG_WITH_USER_TA) += user_ta.c
+srcs-y += pseudo_ta.c
+srcs-y += elf_load.c
+srcs-y += tee_time.c
+
+srcs-$(CFG_SECURE_TIME_SOURCE_CNTPCT) += tee_time_arm_cntpct.c
+srcs-$(CFG_SECURE_TIME_SOURCE_REE) += tee_time_ree.c
+
+srcs-$(CFG_ARM32_core) += proc_a32.S
+srcs-$(CFG_ARM32_core) += spin_lock_a32.S
+srcs-$(CFG_ARM64_core) += proc_a64.S
+srcs-$(CFG_ARM64_core) += spin_lock_a64.S
+srcs-$(CFG_TEE_CORE_DEBUG) += spin_lock_debug.c
+srcs-$(CFG_ARM32_core) += ssvce_a32.S
+srcs-$(CFG_ARM64_core) += ssvce_a64.S
+srcs-$(CFG_ARM64_core) += cache_helpers_a64.S
+srcs-$(CFG_PL310) += tz_ssvce_pl310_a32.S
+srcs-$(CFG_PL310) += tee_l2cc_mutex.c
+
+srcs-$(CFG_ARM32_core) += thread_a32.S
+srcs-$(CFG_ARM64_core) += thread_a64.S
+srcs-y += thread.c
+srcs-y += abort.c
+srcs-$(CFG_WITH_VFP) += vfp.c
+ifeq ($(CFG_WITH_VFP),y)
+srcs-$(CFG_ARM32_core) += vfp_a32.S
+srcs-$(CFG_ARM64_core) += vfp_a64.S
+endif
+srcs-y += trace_ext.c
+srcs-$(CFG_ARM32_core) += misc_a32.S
+srcs-$(CFG_ARM64_core) += misc_a64.S
+srcs-y += mutex.c
+srcs-y += wait_queue.c
+srcs-$(CFG_PM_STUBS) += pm_stubs.c
+
+srcs-$(CFG_GENERIC_BOOT) += generic_boot.c
+ifeq ($(CFG_GENERIC_BOOT),y)
+srcs-$(CFG_ARM32_core) += generic_entry_a32.S
+srcs-$(CFG_ARM64_core) += generic_entry_a64.S
+endif
+
+ifeq ($(CFG_CORE_UNWIND),y)
+srcs-$(CFG_ARM32_core) += unwind_arm32.c
+srcs-$(CFG_ARM64_core) += unwind_arm64.c
+endif
diff --git a/core/arch/arm/kernel/tee_l2cc_mutex.c b/core/arch/arm/kernel/tee_l2cc_mutex.c
new file mode 100644
index 0000000..2afda4d
--- /dev/null
+++ b/core/arch/arm/kernel/tee_l2cc_mutex.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tee_common.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/spinlock.h>
+#include <mm/tee_mm.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <tee_api_defines.h>
+#include <trace.h>
+
+/*
+ * l2cc_mutex_va holds the teecore virtual address of the TZ L2CC mutex, or NULL.
+ *
+ * l2cc_mutex_pa holds the TZ L2CC mutex physical address. It is relevant only
+ * if 'l2cc_mutex_va' holds a non-NULL address.
+ */
+#define MUTEX_SZ sizeof(uint32_t)
+
+static uint32_t *l2cc_mutex_va;
+static uint32_t l2cc_mutex_pa;
+static uint32_t l2cc_mutex_boot_pa;
+static unsigned int *l2cc_mutex;
+
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa)
+{
+ l2cc_mutex_boot_pa = pa;
+}
+
+/*
+ * Allocate public RAM for an L2CC mutex shared with NSec.
+ * Return 0 on success.
+ */
+static int l2cc_mutex_alloc(void)
+{
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return -1;
+
+ l2cc_mutex_pa = l2cc_mutex_boot_pa;
+
+ va = phys_to_virt(l2cc_mutex_pa, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return -1;
+
+ *(uint32_t *)va = 0;
+ l2cc_mutex_va = va;
+ return 0;
+}
+
+static void l2cc_mutex_set(void *mutex)
+{
+ l2cc_mutex = (unsigned int *)mutex;
+}
+
+/*
+ * tee_xxx_l2cc_mutex(): Handle L2 mutex configuration requests from NSec
+ *
+ * Policy:
+ * - if NSec did not register an L2 mutex, allocate one in public RAM by default.
+ * - if NSec disables the L2 mutex, disable the current mutex and unregister it.
+ *
+ * Enable L2CC:  NSec allows teecore to run safe outer maintenance
+ *               with the shared mutex.
+ * Disable L2CC: NSec will run outer maintenance while locking the
+ *               shared mutex. teecore cannot run outer maintenance.
+ * Set L2CC:     NSec proposes a Shared Memory location for the outer
+ *               maintenance shared mutex.
+ * Get L2CC:     NSec requests the outer maintenance shared mutex
+ *               location. If NSec has successfully registered one,
+ *               return its location; otherwise allocate one in public
+ *               RAM and provide NSec with its physical location.
+ */
+TEE_Result tee_enable_l2cc_mutex(void)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ l2cc_mutex_set(l2cc_mutex_va);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_disable_l2cc_mutex(void)
+{
+ l2cc_mutex_va = NULL;
+ l2cc_mutex_set(NULL);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ *mutex = l2cc_mutex_pa;
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex)
+{
+ uint32_t addr;
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+ addr = *mutex;
+ if (core_pbuf_is(CORE_MEM_NSEC_SHM, addr, MUTEX_SZ) == false)
+ return TEE_ERROR_BAD_PARAMETERS;
+ va = phys_to_virt(addr, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return TEE_ERROR_BAD_PARAMETERS;
+ l2cc_mutex_pa = addr;
+ l2cc_mutex_va = va;
+ return TEE_SUCCESS;
+}
+
+void tee_l2cc_mutex_lock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_lock(l2cc_mutex);
+}
+
+void tee_l2cc_mutex_unlock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_unlock(l2cc_mutex);
+}
diff --git a/core/arch/arm/kernel/tee_time.c b/core/arch/arm/kernel/tee_time.c
new file mode 100644
index 0000000..671a8e9
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/thread.h>
+#include <optee_msg.h>
+#include <mm/core_mmu.h>
+
+struct time_source _time_source;
+
+TEE_Result tee_time_get_sys_time(TEE_Time *time)
+{
+ return _time_source.get_sys_time(time);
+}
+
+uint32_t tee_time_get_sys_time_protection_level(void)
+{
+ return _time_source.protection_level;
+}
+
+void tee_time_wait(uint32_t milliseconds_delay)
+{
+ struct optee_msg_param params;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params.u.value.a = milliseconds_delay;
+ thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SUSPEND, 1, &params);
+}
+
+/*
+ * tee_time_get_ree_time(): this function implements the GP Internal API
+ * function TEE_GetREETime().
+ * The goal is to get the time of the Rich Execution Environment (REE),
+ * which is why the time is obtained through the supplicant.
+ */
+TEE_Result tee_time_get_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+ struct optee_msg_param params;
+
+ if (!time)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT;
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_GET_TIME, 1, &params);
+ if (res == TEE_SUCCESS) {
+ time->seconds = params.u.value.a;
+ time->millis = params.u.value.b / 1000000;
+ }
+
+ return res;
+}
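+
+/*
+ * Minimal usage sketch (illustrative only, not part of the original
+ * sources): a caller that wants a single millisecond count can combine the
+ * two fields filled in above. The helper name is hypothetical.
+ */
+#if 0
+static TEE_Result example_ree_time_in_millis(uint64_t *millis)
+{
+	TEE_Time t;
+	TEE_Result res = tee_time_get_ree_time(&t);
+
+	if (res == TEE_SUCCESS)
+		*millis = (uint64_t)t.seconds * 1000 + t.millis;
+	return res;
+}
+#endif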
diff --git a/core/arch/arm/kernel/tee_time_arm_cntpct.c b/core/arch/arm/kernel/tee_time_arm_cntpct.c
new file mode 100644
index 0000000..90e7f20
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_arm_cntpct.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014, 2015 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/misc.h>
+#include <kernel/tee_time.h>
+#include <trace.h>
+#include <kernel/time_source.h>
+#include <mm/core_mmu.h>
+#include <utee_defines.h>
+
+#include <tee/tee_cryp_utl.h>
+
+#include <stdint.h>
+#include <mpa.h>
+#include <arm.h>
+
+static TEE_Result arm_cntpct_get_sys_time(TEE_Time *time)
+{
+ uint64_t cntpct = read_cntpct();
+ uint32_t cntfrq = read_cntfrq();
+
+ time->seconds = cntpct / cntfrq;
+ time->millis = (cntpct % cntfrq) / (cntfrq / TEE_TIME_MILLIS_BASE);
+
+ return TEE_SUCCESS;
+}
+
+static const struct time_source arm_cntpct_time_source = {
+ .name = "arm cntpct",
+ .protection_level = 1000,
+ .get_sys_time = arm_cntpct_get_sys_time,
+};
+
+REGISTER_TIME_SOURCE(arm_cntpct_time_source)
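+
+/*
+ * A platform providing its own system time source would register it the
+ * same way (illustrative sketch; typically only one source is compiled in,
+ * and the RTC-based source below is hypothetical).
+ */
+#if 0
+static TEE_Result example_rtc_get_sys_time(TEE_Time *time)
+{
+	/* Read a platform RTC here */
+	time->seconds = 0;
+	time->millis = 0;
+	return TEE_SUCCESS;
+}
+
+static const struct time_source example_rtc_time_source = {
+	.name = "example rtc",
+	.protection_level = 1000,
+	.get_sys_time = example_rtc_get_sys_time,
+};
+
+REGISTER_TIME_SOURCE(example_rtc_time_source)
+#endif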
+
+/*
+ * We collect jitter using cntpct in 32- or 64-bit mode, which is typically
+ * clocked at around 1 MHz.
+ *
+ * The first time we are called, we add the low 16 bits of the counter as
+ * entropy.
+ *
+ * Subsequently, we accumulate 2 low bits each time by:
+ *
+ * - rotating the accumulator by 2 bits
+ * - XORing it in 2-bit chunks with the whole CNTPCT contents
+ *
+ * and adding one byte of entropy when we reach 8 rotated bits.
+ */
+
+void plat_prng_add_jitter_entropy(void)
+{
+ uint64_t tsc = read_cntpct();
+ int bytes = 0, n;
+ static uint8_t first, bits;
+ static uint16_t acc;
+
+ if (!first) {
+ acc = tsc;
+ bytes = 2;
+ first = 1;
+ } else {
+ acc = (acc << 2) | ((acc >> 6) & 3);
+ for (n = 0; n < 64; n += 2)
+ acc ^= (tsc >> n) & 3;
+ bits += 2;
+ if (bits >= 8) {
+ bits = 0;
+ bytes = 1;
+ }
+ }
+ if (bytes) {
+ DMSG("%s: 0x%02X\n", __func__,
+ (int)acc & ((1 << (bytes * 8)) - 1));
+ tee_prng_add_entropy((uint8_t *)&acc, bytes);
+ }
+}
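+
+/*
+ * Isolated sketch of the folding step above (illustrative only): the XOR
+ * loop reduces a counter sample to the XOR of all its 2-bit chunks, which
+ * then lands in the low 2 bits of the rotated accumulator.
+ */
+#if 0
+static uint8_t example_fold_counter_to_2bits(uint64_t cnt)
+{
+	uint8_t v = 0;
+	int n;
+
+	for (n = 0; n < 64; n += 2)
+		v ^= (cnt >> n) & 3;
+	return v;
+}
+#endif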
diff --git a/core/arch/arm/kernel/tee_time_ree.c b/core/arch/arm/kernel/tee_time_ree.c
new file mode 100644
index 0000000..d2a9bb1
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_ree.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/mutex.h>
+
+static TEE_Time prev;
+
+static struct mutex time_mu = MUTEX_INITIALIZER;
+
+static TEE_Result get_monotonic_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+
+ res = tee_time_get_ree_time(time);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ mutex_lock(&time_mu);
+ if (time->seconds < prev.seconds ||
+ (time->seconds == prev.seconds &&
+ time->millis < prev.millis))
+ *time = prev; /* REE time was rolled back */
+ else
+ prev = *time;
+ mutex_unlock(&time_mu);
+
+ return res;
+}
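+
+/*
+ * The rollback check above compares (seconds, millis) pairs
+ * lexicographically. An equivalent helper, shown only for illustration:
+ */
+#if 0
+static bool example_time_is_before(const TEE_Time *a, const TEE_Time *b)
+{
+	return a->seconds < b->seconds ||
+	       (a->seconds == b->seconds && a->millis < b->millis);
+}
+#endif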
+
+static const struct time_source ree_time_source = {
+ .name = "ree",
+ .protection_level = 100,
+ .get_sys_time = get_monotonic_ree_time,
+};
+
+REGISTER_TIME_SOURCE(ree_time_source)
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
new file mode 100644
index 0000000..c988b65
--- /dev/null
+++ b/core/arch/arm/kernel/thread.c
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <arm.h>
+#include <assert.h>
+#include <keep.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread_defs.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <sm/sm.h>
+#include <tee/tee_fs_rpc.h>
+#include <tee/tee_cryp_utl.h>
+#include <trace.h>
+#include <util.h>
+
+#include "thread_private.h"
+
+#ifdef CFG_WITH_ARM_TRUSTED_FW
+#define STACK_TMP_OFFS 0
+#else
+#define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE
+#endif
+
+
+#ifdef ARM32
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS)
+#else
+#define STACK_TMP_SIZE (1024 + STACK_TMP_OFFS)
+#endif
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 2048
+#endif
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+
+#endif /*ARM32*/
+
+#ifdef ARM64
+#define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS)
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+#endif /*ARM64*/
+
+struct thread_ctx threads[CFG_NUM_THREADS];
+
+static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
+
+#ifdef CFG_WITH_STACK_CANARIES
+#ifdef ARM32
+#define STACK_CANARY_SIZE (4 * sizeof(uint32_t))
+#endif
+#ifdef ARM64
+#define STACK_CANARY_SIZE (8 * sizeof(uint32_t))
+#endif
+#define START_CANARY_VALUE 0xdededede
+#define END_CANARY_VALUE 0xabababab
+#define GET_START_CANARY(name, stack_num) name[stack_num][0]
+#define GET_END_CANARY(name, stack_num) \
+ name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
+#else
+#define STACK_CANARY_SIZE 0
+#endif
+
+#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
+linkage uint32_t name[num_stacks] \
+ [ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
+ sizeof(uint32_t)] \
+ __attribute__((section(".nozi_stack"), \
+ aligned(STACK_ALIGNMENT)))
+
+#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)
+
+#define GET_STACK(stack) \
+ ((vaddr_t)(stack) + STACK_SIZE(stack))
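+
+/*
+ * Resulting stack layout (a summary of the macros above, assuming
+ * CFG_WITH_STACK_CANARIES): the first word of each stack array holds the
+ * start canary (START_CANARY_VALUE) and the last word the end canary
+ * (END_CANARY_VALUE). GET_STACK() returns an address just below the end
+ * canary region, from which the stack grows downwards towards the start
+ * canary, so excessive stack usage is likely to overwrite the start canary
+ * checked in thread_check_canaries().
+ */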
+
+DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, /* global */);
+DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
+#ifndef CFG_WITH_PAGER
+DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
+#endif
+
+const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);
+const uint32_t stack_tmp_offset = STACK_TMP_OFFS + STACK_CANARY_SIZE / 2;
+
+/*
+ * This stack setup info is required by secondary boot cores before they
+ * each locally enable the pager (the MMU), hence it is kept in pager
+ * sections.
+ */
+KEEP_PAGER(stack_tmp);
+KEEP_PAGER(stack_tmp_stride);
+KEEP_PAGER(stack_tmp_offset);
+
+thread_smc_handler_t thread_std_smc_handler_ptr;
+static thread_smc_handler_t thread_fast_smc_handler_ptr;
+thread_fiq_handler_t thread_fiq_handler_ptr;
+thread_pm_handler_t thread_cpu_on_handler_ptr;
+thread_pm_handler_t thread_cpu_off_handler_ptr;
+thread_pm_handler_t thread_cpu_suspend_handler_ptr;
+thread_pm_handler_t thread_cpu_resume_handler_ptr;
+thread_pm_handler_t thread_system_off_handler_ptr;
+thread_pm_handler_t thread_system_reset_handler_ptr;
+
+
+static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
+static bool thread_prealloc_rpc_cache;
+
+static void init_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+#define INIT_CANARY(name) \
+ for (n = 0; n < ARRAY_SIZE(name); n++) { \
+ uint32_t *start_canary = &GET_START_CANARY(name, n); \
+ uint32_t *end_canary = &GET_END_CANARY(name, n); \
+ \
+ *start_canary = START_CANARY_VALUE; \
+ *end_canary = END_CANARY_VALUE; \
+ DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
+ #name, n, (void *)(end_canary - 1)); \
+ DMSG("watch *%p\n", (void *)end_canary); \
+ }
+
+ INIT_CANARY(stack_tmp);
+ INIT_CANARY(stack_abt);
+#ifndef CFG_WITH_PAGER
+ INIT_CANARY(stack_thread);
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+#define CANARY_DIED(stack, loc, n) \
+ do { \
+ EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
+ panic(); \
+ } while (0)
+
+void thread_check_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
+ if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, start, n);
+ if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, end, n);
+ }
+
+ for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
+ if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_abt, start, n);
+ if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_abt, end, n);
+
+ }
+#ifndef CFG_WITH_PAGER
+ for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
+ if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_thread, start, n);
+ if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_thread, end, n);
+ }
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+static void lock_global(void)
+{
+ cpu_spin_lock(&thread_global_lock);
+}
+
+static void unlock_global(void)
+{
+ cpu_spin_unlock(&thread_global_lock);
+}
+
+#ifdef ARM32
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t cpsr = read_cpsr();
+
+ return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t cpsr = read_cpsr();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
+ cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
+ write_cpsr(cpsr);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t daif = read_daif();
+
+ return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t daif = read_daif();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
+ daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
+ write_daif(daif);
+}
+#endif /*ARM64*/
+
+uint32_t thread_mask_exceptions(uint32_t exceptions)
+{
+ uint32_t state = thread_get_exceptions();
+
+ thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
+ return state;
+}
+
+void thread_unmask_exceptions(uint32_t state)
+{
+ thread_set_exceptions(state & THREAD_EXCP_ALL);
+}
+
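+/*
+ * Typical usage pattern of the two functions above (illustrative only),
+ * as used in several places further down in this file:
+ *
+ *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ *
+ *	... critical section that must not be preempted by IRQ ...
+ *
+ *	thread_unmask_exceptions(exceptions);
+ */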
+
+struct thread_core_local *thread_get_core_local(void)
+{
+ uint32_t cpu_id = get_core_pos();
+
+ /*
+ * IRQs must be disabled before playing with core_local since
+ * we otherwise may be rescheduled to a different core in the
+ * middle of this function.
+ */
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+
+ assert(cpu_id < CFG_TEE_CORE_NB_CORE);
+ return &thread_core_local[cpu_id];
+}
+
+static void thread_lazy_save_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ thr->vfp_state.ns_saved = false;
+#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
+ /*
+ * ARM TF saves and restores CPACR_EL1, so we must assume NS world
+ * uses VFP and always preserve the register file when secure world
+ * is about to use it
+ */
+ thr->vfp_state.ns.force_save = true;
+#endif
+ vfp_lazy_save_state_init(&thr->vfp_state.ns);
+#endif /*CFG_WITH_VFP*/
+}
+
+static void thread_lazy_restore_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
+
+ if (tuv && tuv->lazy_saved && !tuv->saved) {
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+
+ vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
+ thr->vfp_state.ns_saved = false;
+#endif /*CFG_WITH_VFP*/
+}
+
+#ifdef ARM32
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint32_t)thread_std_smc_entry;
+
+ /*
+	 * Stdcalls start in SVC mode with IRQ masked, asynchronous abort
+	 * masked and FIQ unmasked.
+ */
+ thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
+ thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
+ /* Enable thumb mode if it's a thumb instruction */
+ if (thread->regs.pc & 1)
+ thread->regs.cpsr |= CPSR_T;
+ /* Reinitialize stack pointer */
+ thread->regs.svc_sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in r0-r7 when thread is started.
+ */
+ thread->regs.r0 = args->a0;
+ thread->regs.r1 = args->a1;
+ thread->regs.r2 = args->a2;
+ thread->regs.r3 = args->a3;
+ thread->regs.r4 = args->a4;
+ thread->regs.r5 = args->a5;
+ thread->regs.r6 = args->a6;
+ thread->regs.r7 = args->a7;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint64_t)thread_std_smc_entry;
+
+ /*
+	 * Stdcalls start at EL1 with IRQ masked, asynchronous abort masked
+	 * and FIQ unmasked.
+ */
+ thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
+ DAIFBIT_IRQ | DAIFBIT_ABT);
+ /* Reinitialize stack pointer */
+ thread->regs.sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in x0-x7 when thread is started.
+ */
+ thread->regs.x[0] = args->a0;
+ thread->regs.x[1] = args->a1;
+ thread->regs.x[2] = args->a2;
+ thread->regs.x[3] = args->a3;
+ thread->regs.x[4] = args->a4;
+ thread->regs.x[5] = args->a5;
+ thread->regs.x[6] = args->a6;
+ thread->regs.x[7] = args->a7;
+
+	/* Set up frame pointer as per the AArch64 AAPCS */
+ thread->regs.x[29] = 0;
+}
+#endif /*ARM64*/
+
+void thread_init_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ size_t n;
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ TAILQ_INIT(&threads[n].mutexes);
+ TAILQ_INIT(&threads[n].tsd.sess_stack);
+#ifdef CFG_SMALL_PAGE_USER_TA
+ SLIST_INIT(&threads[n].tsd.pgt_cache);
+#endif
+ }
+
+ for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
+ thread_core_local[n].curr_thread = -1;
+
+ l->curr_thread = 0;
+ threads[0].state = THREAD_STATE_ACTIVE;
+}
+
+void thread_clr_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
+ assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
+ assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
+ threads[l->curr_thread].state = THREAD_STATE_FREE;
+ l->curr_thread = -1;
+}
+
+static void thread_alloc_and_run(struct thread_smc_args *args)
+{
+ size_t n;
+ struct thread_core_local *l = thread_get_core_local();
+ bool found_thread = false;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state == THREAD_STATE_FREE) {
+ threads[n].state = THREAD_STATE_ACTIVE;
+ found_thread = true;
+ break;
+ }
+ }
+
+ unlock_global();
+
+ if (!found_thread) {
+ args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ threads[n].flags = 0;
+ init_regs(threads + n, args);
+
+ /* Save Hypervisor Client ID */
+ threads[n].hyp_clnt_id = args->a7;
+
+ thread_lazy_save_ns_vfp();
+ thread_resume(&threads[n].regs);
+}
+
+#ifdef ARM32
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+ struct thread_smc_args *args)
+{
+ /*
+ * Update returned values from RPC, values will appear in
+ * r0-r3 when thread is resumed.
+ */
+ regs->r0 = args->a0;
+ regs->r1 = args->a1;
+ regs->r2 = args->a2;
+ regs->r3 = args->a3;
+ regs->r4 = args->a4;
+ regs->r5 = args->a5;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+ struct thread_smc_args *args)
+{
+ /*
+ * Update returned values from RPC, values will appear in
+ * x0-x3 when thread is resumed.
+ */
+ regs->x[0] = args->a0;
+ regs->x[1] = args->a1;
+ regs->x[2] = args->a2;
+ regs->x[3] = args->a3;
+ regs->x[4] = args->a4;
+ regs->x[5] = args->a5;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+static bool is_from_user(uint32_t cpsr)
+{
+ return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif
+
+#ifdef ARM64
+static bool is_from_user(uint32_t cpsr)
+{
+ if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+ return true;
+ if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+ SPSR_64_MODE_EL0)
+ return true;
+ return false;
+}
+#endif
+
+static bool is_user_mode(struct thread_ctx_regs *regs)
+{
+ return is_from_user((uint32_t)regs->cpsr);
+}
+
+static void thread_resume_from_rpc(struct thread_smc_args *args)
+{
+ size_t n = args->a3; /* thread id */
+ struct thread_core_local *l = thread_get_core_local();
+ uint32_t rv = 0;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ if (n < CFG_NUM_THREADS &&
+ threads[n].state == THREAD_STATE_SUSPENDED &&
+ args->a7 == threads[n].hyp_clnt_id)
+ threads[n].state = THREAD_STATE_ACTIVE;
+ else
+ rv = OPTEE_SMC_RETURN_ERESUME;
+
+ unlock_global();
+
+ if (rv) {
+ args->a0 = rv;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ if (is_user_mode(&threads[n].regs))
+ tee_ta_update_session_utime_resume();
+
+ if (threads[n].have_user_map)
+ core_mmu_set_user_map(&threads[n].user_map);
+
+ /*
+ * Return from RPC to request service of an IRQ must not
+ * get parameters from non-secure world.
+ */
+ if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
+ copy_a0_to_a5(&threads[n].regs, args);
+ threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+ }
+
+ thread_lazy_save_ns_vfp();
+ thread_resume(&threads[n].regs);
+}
+
+void thread_handle_fast_smc(struct thread_smc_args *args)
+{
+ thread_check_canaries();
+ thread_fast_smc_handler_ptr(args);
+ /* Fast handlers must not unmask any exceptions */
+ assert(thread_get_exceptions() == THREAD_EXCP_ALL);
+}
+
+void thread_handle_std_smc(struct thread_smc_args *args)
+{
+ thread_check_canaries();
+
+ if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
+ thread_resume_from_rpc(args);
+ else
+ thread_alloc_and_run(args);
+}
+
+/* Helper routine for the assembly function thread_std_smc_entry() */
+void __thread_std_smc_entry(struct thread_smc_args *args)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ if (!thr->rpc_arg) {
+ paddr_t parg;
+ uint64_t carg;
+ void *arg;
+
+ thread_rpc_alloc_arg(
+ OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS),
+ &parg, &carg);
+ if (!parg || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
+ !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) {
+ thread_rpc_free_arg(carg);
+ args->a0 = OPTEE_SMC_RETURN_ENOMEM;
+ return;
+ }
+
+ thr->rpc_arg = arg;
+ thr->rpc_carg = carg;
+ }
+
+ thread_std_smc_handler_ptr(args);
+
+ tee_fs_rpc_cache_clear(&thr->tsd);
+ if (!thread_prealloc_rpc_cache) {
+ thread_rpc_free_arg(thr->rpc_carg);
+ thr->rpc_carg = 0;
+ thr->rpc_arg = 0;
+ }
+}
+
+void *thread_get_tmp_sp(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ return (void *)l->tmp_stack_va_end;
+}
+
+#ifdef ARM64
+vaddr_t thread_get_saved_thread_sp(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+ return threads[ct].kern_sp;
+}
+#endif /*ARM64*/
+
+bool thread_addr_is_in_stack(vaddr_t va)
+{
+ struct thread_ctx *thr;
+ int ct = thread_get_id_may_fail();
+
+ if (ct == -1)
+ return false;
+
+ thr = threads + ct;
+ return va < thr->stack_va_end &&
+ va >= (thr->stack_va_end - STACK_THREAD_SIZE);
+}
+
+void thread_state_free(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+ assert(TAILQ_EMPTY(&threads[ct].mutexes));
+
+ thread_lazy_restore_ns_vfp();
+ tee_pager_release_phys(
+ (void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
+ STACK_THREAD_SIZE);
+
+ lock_global();
+
+ assert(threads[ct].state == THREAD_STATE_ACTIVE);
+ threads[ct].state = THREAD_STATE_FREE;
+ threads[ct].flags = 0;
+ l->curr_thread = -1;
+
+ unlock_global();
+}
+
+#ifdef CFG_WITH_PAGER
+static void release_unused_kernel_stack(struct thread_ctx *thr)
+{
+ vaddr_t sp = thr->regs.svc_sp;
+ vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
+ size_t len = sp - base;
+
+ tee_pager_release_phys((void *)base, len);
+}
+#else
+static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
+{
+}
+#endif
+
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+
+ thread_check_canaries();
+
+ release_unused_kernel_stack(threads + ct);
+
+ if (is_from_user(cpsr)) {
+ thread_user_save_vfp();
+ tee_ta_update_session_utime_suspend();
+ tee_ta_gprof_sample_pc(pc);
+ }
+ thread_lazy_restore_ns_vfp();
+
+ lock_global();
+
+ assert(threads[ct].state == THREAD_STATE_ACTIVE);
+ threads[ct].flags |= flags;
+ threads[ct].regs.cpsr = cpsr;
+ threads[ct].regs.pc = pc;
+ threads[ct].state = THREAD_STATE_SUSPENDED;
+
+ threads[ct].have_user_map = core_mmu_user_mapping_is_active();
+ if (threads[ct].have_user_map) {
+ core_mmu_get_user_map(&threads[ct].user_map);
+ core_mmu_set_user_map(NULL);
+ }
+
+ l->curr_thread = -1;
+
+ unlock_global();
+
+ return ct;
+}
+
+#ifdef ARM32
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ l->tmp_stack_va_end = sp;
+ thread_set_irq_sp(sp);
+ thread_set_fiq_sp(sp);
+}
+
+static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
+{
+ thread_set_abt_sp(sp);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ /*
+ * We're already using the tmp stack when this function is called
+ * so there's no need to assign it to any stack pointer. However,
+ * we'll need to restore it at different times so store it here.
+ */
+ l->tmp_stack_va_end = sp;
+}
+
+static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ l->abt_stack_va_end = sp;
+}
+#endif /*ARM64*/
+
+bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
+{
+ if (thread_id >= CFG_NUM_THREADS)
+ return false;
+ threads[thread_id].stack_va_end = sp;
+ return true;
+}
+
+int thread_get_id_may_fail(void)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ thread_unmask_exceptions(exceptions);
+ return ct;
+}
+
+int thread_get_id(void)
+{
+ int ct = thread_get_id_may_fail();
+
+ assert(ct >= 0 && ct < CFG_NUM_THREADS);
+ return ct;
+}
+
+static void init_handlers(const struct thread_handlers *handlers)
+{
+ thread_std_smc_handler_ptr = handlers->std_smc;
+ thread_fast_smc_handler_ptr = handlers->fast_smc;
+ thread_fiq_handler_ptr = handlers->fiq;
+ thread_cpu_on_handler_ptr = handlers->cpu_on;
+ thread_cpu_off_handler_ptr = handlers->cpu_off;
+ thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
+ thread_cpu_resume_handler_ptr = handlers->cpu_resume;
+ thread_system_off_handler_ptr = handlers->system_off;
+ thread_system_reset_handler_ptr = handlers->system_reset;
+}
+
+#ifdef CFG_WITH_PAGER
+static void init_thread_stacks(void)
+{
+ size_t n;
+
+ /*
+ * Allocate virtual memory for thread stacks.
+ */
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ tee_mm_entry_t *mm;
+ vaddr_t sp;
+
+ /* Find vmem for thread stack and its protection gap */
+ mm = tee_mm_alloc(&tee_mm_vcore,
+ SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
+ assert(mm);
+
+ /* Claim eventual physical page */
+ tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
+ true);
+
+ /* Add the area to the pager */
+ tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
+ tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
+ TEE_MATTR_PRW | TEE_MATTR_LOCKED,
+ NULL, NULL);
+
+ /* init effective stack */
+ sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
+ if (!thread_init_stack(n, sp))
+ panic("init stack failed");
+ }
+}
+#else
+static void init_thread_stacks(void)
+{
+ size_t n;
+
+ /* Assign the thread stacks */
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
+ panic("thread_init_stack failed");
+ }
+}
+#endif /*CFG_WITH_PAGER*/
+
+void thread_init_primary(const struct thread_handlers *handlers)
+{
+ init_handlers(handlers);
+
+ /* Initialize canaries around the stacks */
+ init_canaries();
+
+ init_thread_stacks();
+ pgt_init();
+}
+
+static void init_sec_mon(size_t pos __maybe_unused)
+{
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+ /* Initialize secure monitor */
+ sm_init(GET_STACK(stack_tmp[pos]));
+#endif
+}
+
+void thread_init_per_cpu(void)
+{
+ size_t pos = get_core_pos();
+ struct thread_core_local *l = thread_get_core_local();
+
+ init_sec_mon(pos);
+
+ set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
+ set_abt_stack(l, GET_STACK(stack_abt[pos]));
+
+ thread_init_vbar();
+}
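+
+/*
+ * Expected call order during boot (a summary, based on the functions
+ * above): thread_init_primary() is called once on the primary core to set
+ * up the handlers, the stack canaries and the thread stacks, after which
+ * every core (including the primary) calls thread_init_per_cpu() to set up
+ * its secure monitor (when not using ARM Trusted Firmware), its
+ * temporary/abort stacks and its vector base.
+ */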
+
+struct thread_specific_data *thread_get_tsd(void)
+{
+ return &threads[thread_get_id()].tsd;
+}
+
+struct thread_ctx_regs *thread_get_ctx_regs(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+ return &threads[l->curr_thread].regs;
+}
+
+void thread_set_irq(bool enable)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l;
+
+ l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (enable) {
+ threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+ } else {
+ /*
+ * No need to disable IRQ here since it's already disabled
+ * above.
+ */
+ threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
+ }
+}
+
+void thread_restore_irq(void)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l;
+
+ l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+uint32_t thread_kernel_enable_vfp(void)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(!vfp_is_enabled());
+
+ if (!thr->vfp_state.ns_saved) {
+ vfp_lazy_save_state_final(&thr->vfp_state.ns);
+ thr->vfp_state.ns_saved = true;
+ } else if (thr->vfp_state.sec_lazy_saved &&
+ !thr->vfp_state.sec_saved) {
+ /*
+ * This happens when we're handling an abort while the
+ * thread was using the VFP state.
+ */
+ vfp_lazy_save_state_final(&thr->vfp_state.sec);
+ thr->vfp_state.sec_saved = true;
+ } else if (tuv && tuv->lazy_saved && !tuv->saved) {
+ /*
+ * This can happen either during syscall or abort
+ * processing (while processing a syscall).
+ */
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+
+ vfp_enable();
+ return exceptions;
+}
+
+void thread_kernel_disable_vfp(uint32_t state)
+{
+ uint32_t exceptions;
+
+ assert(vfp_is_enabled());
+
+ vfp_disable();
+ exceptions = thread_get_exceptions();
+ assert(exceptions & THREAD_EXCP_IRQ);
+ exceptions &= ~THREAD_EXCP_IRQ;
+ exceptions |= state & THREAD_EXCP_IRQ;
+ thread_set_exceptions(exceptions);
+}
+
+void thread_kernel_save_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ if (vfp_is_enabled()) {
+ vfp_lazy_save_state_init(&thr->vfp_state.sec);
+ thr->vfp_state.sec_lazy_saved = true;
+ }
+}
+
+void thread_kernel_restore_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(!vfp_is_enabled());
+ if (thr->vfp_state.sec_lazy_saved) {
+ vfp_lazy_restore_state(&thr->vfp_state.sec,
+ thr->vfp_state.sec_saved);
+ thr->vfp_state.sec_saved = false;
+ thr->vfp_state.sec_lazy_saved = false;
+ }
+}
+
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(!vfp_is_enabled());
+
+ if (!thr->vfp_state.ns_saved) {
+ vfp_lazy_save_state_final(&thr->vfp_state.ns);
+ thr->vfp_state.ns_saved = true;
+ } else if (tuv && uvfp != tuv) {
+ if (tuv->lazy_saved && !tuv->saved) {
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+ }
+
+ if (uvfp->lazy_saved)
+ vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
+ uvfp->lazy_saved = false;
+ uvfp->saved = false;
+
+ thr->vfp_state.uvfp = uvfp;
+ vfp_enable();
+}
+
+void thread_user_save_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ if (!vfp_is_enabled())
+ return;
+
+ assert(tuv && !tuv->lazy_saved && !tuv->saved);
+ vfp_lazy_save_state_init(&tuv->vfp);
+ tuv->lazy_saved = true;
+}
+
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ if (uvfp == thr->vfp_state.uvfp)
+ thr->vfp_state.uvfp = NULL;
+ uvfp->lazy_saved = false;
+ uvfp->saved = false;
+}
+#endif /*CFG_WITH_VFP*/
+
+#ifdef ARM32
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+ uint32_t s;
+
+ if (!is_32bit)
+ return false;
+
+ s = read_spsr();
+ s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
+ s |= CPSR_MODE_USR;
+ if (entry_func & 1)
+ s |= CPSR_T;
+ *spsr = s;
+ return true;
+}
+#endif
+
+#ifdef ARM64
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+ uint32_t s;
+
+ if (is_32bit) {
+ s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
+ s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
+ s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
+ } else {
+ s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
+ }
+
+ *spsr = s;
+ return true;
+}
+#endif
+
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long entry_func, bool is_32bit,
+ uint32_t *exit_status0, uint32_t *exit_status1)
+{
+ uint32_t spsr;
+
+ tee_ta_update_session_utime_resume();
+
+ if (!get_spsr(is_32bit, entry_func, &spsr)) {
+ *exit_status0 = 1; /* panic */
+ *exit_status1 = 0xbadbadba;
+ return 0;
+ }
+ return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
+ spsr, exit_status0, exit_status1);
+}
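+
+/*
+ * Illustrative call shape (not part of the original sources): a TA manager
+ * would enter the TA roughly like this; all argument values below are
+ * hypothetical. exit_status0 is non-zero when the TA panicked, in which
+ * case exit_status1 carries the panic code.
+ */
+#if 0
+static uint32_t example_enter_ta(unsigned long entry, unsigned long usr_sp)
+{
+	uint32_t panicked = 0;
+	uint32_t panic_code = 0;
+	uint32_t ret;
+
+	ret = thread_enter_user_mode(0, 0, 0, 0, usr_sp, entry,
+				     true /* is_32bit */, &panicked,
+				     &panic_code);
+	if (panicked)
+		DMSG("TA panicked, code 0x%x", panic_code);
+	return ret;
+}
+#endif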
+
+void thread_add_mutex(struct mutex *m)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+ assert(m->owner_id == -1);
+ m->owner_id = ct;
+ TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
+}
+
+void thread_rem_mutex(struct mutex *m)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+ assert(m->owner_id == ct);
+ m->owner_id = -1;
+ TAILQ_REMOVE(&threads[ct].mutexes, m, link);
+}
+
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
+{
+ bool rv;
+ size_t n;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state != THREAD_STATE_FREE) {
+ rv = false;
+ goto out;
+ }
+ }
+
+ rv = true;
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].rpc_arg) {
+ *cookie = threads[n].rpc_carg;
+ threads[n].rpc_carg = 0;
+ threads[n].rpc_arg = NULL;
+ goto out;
+ }
+ }
+
+ *cookie = 0;
+ thread_prealloc_rpc_cache = false;
+out:
+ unlock_global();
+ thread_unmask_exceptions(exceptions);
+ return rv;
+}
+
+bool thread_enable_prealloc_rpc_cache(void)
+{
+ bool rv;
+ size_t n;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state != THREAD_STATE_FREE) {
+ rv = false;
+ goto out;
+ }
+ }
+
+ rv = true;
+ thread_prealloc_rpc_cache = true;
+out:
+ unlock_global();
+ thread_unmask_exceptions(exceptions);
+ return rv;
+}
+
+static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ const size_t params_size = sizeof(struct optee_msg_param) * num_params;
+ size_t n;
+
+ assert(arg && carg && num_params <= THREAD_RPC_MAX_NUM_PARAMS);
+
+ plat_prng_add_jitter_entropy();
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS));
+ arg->cmd = cmd;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = num_params;
+ memcpy(OPTEE_MSG_GET_PARAMS(arg), params, params_size);
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ for (n = 0; n < num_params; n++) {
+ switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ memcpy(params + n, OPTEE_MSG_GET_PARAMS(arg) + n,
+ sizeof(struct optee_msg_param));
+ break;
+ default:
+ break;
+ }
+ }
+ return arg->ret;
+}
+
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params)
+{
+ uint32_t ret;
+
+ ret = rpc_cmd_nolock(cmd, num_params, params);
+
+ return ret;
+}
+
+static bool check_alloced_shm(paddr_t pa, size_t len, size_t align)
+{
+ if (pa & (align - 1))
+ return false;
+ return core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len);
+}
+
+void thread_rpc_free_arg(uint64_t cookie)
+{
+ if (cookie) {
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ OPTEE_SMC_RETURN_RPC_FREE
+ };
+
+ reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ }
+}
+
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie)
+{
+ paddr_t pa;
+ uint64_t co;
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ OPTEE_SMC_RETURN_RPC_ALLOC, size
+ };
+
+ thread_rpc(rpc_args);
+
+ pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
+ co = reg_pair_to_64(rpc_args[4], rpc_args[5]);
+ if (!check_alloced_shm(pa, size, sizeof(uint64_t))) {
+ thread_rpc_free_arg(co);
+ pa = 0;
+ co = 0;
+ }
+
+ *arg = pa;
+ *cookie = co;
+}
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc()
+ *
+ * @cookie: cookie received when allocating the buffer
+ * @bt: must be the same as supplied when allocating
+ */
+static void thread_rpc_free(unsigned int bt, uint64_t cookie)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = 1;
+
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = bt;
+ params[0].u.value.b = cookie;
+ params[0].u.value.c = 0;
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+}
+
+/**
+ * Allocates shared memory buffer via RPC
+ *
+ * @size: size in bytes of shared memory buffer
+ * @align: required alignment of buffer
+ * @bt: buffer type OPTEE_MSG_RPC_SHM_TYPE_*
+ * @payload: returned physical pointer to buffer, 0 if allocation
+ * failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
+ paddr_t *payload, uint64_t *cookie)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = 1;
+
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = bt;
+ params[0].u.value.b = size;
+ params[0].u.value.c = align;
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ if (arg->ret != TEE_SUCCESS)
+ goto fail;
+
+ if (arg->num_params != 1)
+ goto fail;
+
+ if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
+ goto fail;
+
+ if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) {
+ thread_rpc_free(bt, params[0].u.tmem.shm_ref);
+ goto fail;
+ }
+
+ *payload = params[0].u.tmem.buf_ptr;
+ *cookie = params[0].u.tmem.shm_ref;
+ return;
+fail:
+ *payload = 0;
+ *cookie = 0;
+}
+
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie)
+{
+ thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, payload, cookie);
+}
+
+void thread_rpc_free_payload(uint64_t cookie)
+{
+ thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
+}
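+
+/*
+ * Minimal usage sketch of the payload allocator above (illustrative only,
+ * not part of the original sources): allocate a non-secure shared buffer,
+ * map it, use it and free it again.
+ */
+#if 0
+static void example_use_rpc_payload(size_t size)
+{
+	paddr_t pa = 0;
+	uint64_t cookie = 0;
+	void *va;
+
+	thread_rpc_alloc_payload(size, &pa, &cookie);
+	if (!pa)
+		return;	/* allocation failed or buffer wasn't in NSec SHM */
+	va = phys_to_virt(pa, MEM_AREA_NSEC_SHM);
+	if (va)
+		memset(va, 0, size);	/* ... use the buffer ... */
+	thread_rpc_free_payload(cookie);
+}
+#endif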
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
new file mode 100644
index 0000000..6d3ac35
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/abort.h>
+#include <kernel/thread_defs.h>
+#include <kernel/unwind.h>
+
+ .section .text.thread_asm
+
+LOCAL_FUNC vector_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7}
+ mov r0, sp
+ bl thread_handle_std_smc
+ /*
+	 * Normally thread_handle_std_smc() should return via
+	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
+	 * hasn't switched stack (an error was detected) it will do a
+	 * normal "C" return.
+ */
+ pop {r1-r8}
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_std_smc_entry
+
+LOCAL_FUNC vector_fast_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7}
+ mov r0, sp
+ bl thread_handle_fast_smc
+ pop {r1-r8}
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* Secure Monitor received a FIQ and passed control to us. */
+ bl thread_check_canaries
+ ldr lr, =thread_fiq_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_fiq_entry
+
+LOCAL_FUNC vector_cpu_on_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_on_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_ON_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_off_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_suspend_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_resume_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_system_off_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_system_reset_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization. Also used when compiled with the internal monitor, but
+ * the cpu_*_entry and system_*_entry are not used then.
+ *
+ * Note that ARM-TF depends on the layout of this vector table, any change
+ * in layout has to be synced with ARM-TF.
+ */
+FUNC thread_vector_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ b vector_std_smc_entry
+ b vector_fast_smc_entry
+ b vector_cpu_on_entry
+ b vector_cpu_off_entry
+ b vector_cpu_resume_entry
+ b vector_cpu_suspend_entry
+ b vector_fiq_entry
+ b vector_system_off_entry
+ b vector_system_reset_entry
+UNWIND( .fnend)
+END_FUNC thread_vector_table
+
+FUNC thread_set_abt_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_ABT
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_abt_sp
+
+FUNC thread_set_irq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_IRQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_irq_sp
+
+FUNC thread_set_fiq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_FIQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_fiq_sp
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ add r12, r0, #(13 * 4) /* Restore registers r0-r12 later */
+
+ cps #CPSR_MODE_SYS
+ ldm r12!, {sp, lr}
+
+ cps #CPSR_MODE_SVC
+ ldm r12!, {r1, sp, lr}
+ msr spsr_fsxc, r1
+
+ cps #CPSR_MODE_SVC
+ ldm r12, {r1, r2}
+ push {r1, r2}
+
+ ldm r0, {r0-r12}
+
+ /* Restore CPSR and jump to the instruction to resume at */
+ rfefd sp!
+UNWIND( .fnend)
+END_FUNC thread_resume
+
+/*
+ * Disables IRQ and FIQ and saves state of thread, returns original
+ * CPSR.
+ */
+LOCAL_FUNC thread_save_state , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r12, lr}
+ /*
+ * Uses stack for temporary storage, while storing needed
+ * context in the thread context struct.
+ */
+
+ mrs r12, cpsr
+
+ cpsid aif /* Disable Async abort, IRQ and FIQ */
+
+ push {r4-r7}
+ push {r0-r3}
+
+ mov r5, r12 /* Save CPSR in a preserved register */
+ mrs r6, cpsr /* Save current CPSR */
+
+ bl thread_get_ctx_regs
+
+ pop {r1-r4} /* r0-r3 pushed above */
+ stm r0!, {r1-r4}
+ pop {r1-r4} /* r4-r7 pushed above */
+ stm r0!, {r1-r4}
+ stm r0!, {r8-r11}
+
+ pop {r12, lr}
+ stm r0!, {r12}
+
+ cps #CPSR_MODE_SYS
+ stm r0!, {sp, lr}
+
+ cps #CPSR_MODE_SVC
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+
+ orr r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
+ msr cpsr, r6 /* Restore mode */
+
+ mov r0, r5 /* Return original CPSR */
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_save_state
+
+FUNC thread_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* Pass r0-r7 in a struct thread_smc_args */
+ push {r0-r7}
+ mov r0, sp
+ bl __thread_std_smc_entry
+ /*
+ * Load the returned r0-r3 into preserved registers and skip the
+ * "returned" r4-r7 since they will not be returned to normal
+ * world.
+ */
+ pop {r4-r7}
+ add sp, #(4 * 4)
+
+ /* Disable interrupts before switching to temporary stack */
+ cpsid aif
+ bl thread_get_tmp_sp
+ mov sp, r0
+
+ bl thread_state_free
+
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ mov r1, r4
+ mov r2, r5
+ mov r3, r6
+ mov r4, r7
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC thread_std_smc_entry
+
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+/*
+ * r0-r2 are used to pass parameters to normal world
+ * r0-r5 are used to pass the return value back from normal world
+ *
+ * Note that r3 is used to pass "resume information", that is, which
+ * thread it is that should resume.
+ *
+ * Since this function follows the AAPCS we need to preserve r4-r5,
+ * which are otherwise modified when returning back from normal world.
+ */
+UNWIND( .fnstart)
+ push {r4-r5, lr}
+UNWIND( .save {r4-r5, lr})
+ push {r0}
+UNWIND( .save {r0})
+
+ bl thread_save_state
+ mov r4, r0 /* Save original CPSR */
+
+ /*
+ * Switch to temporary stack and SVC mode. Save CPSR to resume into.
+ */
+ bl thread_get_tmp_sp
+ ldr r5, [sp] /* Get pointer to rv[] */
+ cps #CPSR_MODE_SVC /* Change to SVC mode */
+ mov sp, r0 /* Switch to tmp stack */
+
+ mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ mov r1, r4 /* CPSR to restore */
+ ldr r2, =.thread_rpc_return
+ bl thread_state_suspend
+ mov r4, r0 /* Supply thread index */
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ ldm r5, {r1-r3} /* Load rv[] into r0-r2 */
+ smc #0
+ b . /* SMC should not return */
+
+.thread_rpc_return:
+ /*
+	 * At this point the stack pointer has been restored to the value
+	 * it had when thread_save_state() was called above.
+	 *
+	 * Execution jumps here from thread_resume above when RPC has
+	 * returned. The IRQ and FIQ bits are restored to what they were
+	 * when this function was originally entered.
+ */
+ pop {r12} /* Get pointer to rv[] */
+ stm r12, {r0-r5} /* Store r0-r5 into rv[] */
+ pop {r4-r5, pc}
+UNWIND( .fnend)
+END_FUNC thread_rpc
+
+LOCAL_FUNC thread_fiq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ /*
+ * We're saving {r0-r3} and the banked fiq registers {r8-r12}. The
+ * banked fiq registers need to be saved because the secure monitor
+ * doesn't save those. The treatment of the banked fiq registers is
+ * somewhat analogous to the lazy save of VFP registers.
+ */
+ push {r0-r3, r8-r12, lr}
+ bl thread_check_canaries
+ ldr lr, =thread_fiq_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ pop {r0-r3, r8-r12, lr}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_fiq_handler
+
+LOCAL_FUNC thread_irq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * IRQ mode is set up to use tmp stack so FIQ has to be
+ * disabled before touching the stack. We can also assign
+ * SVC sp from IRQ sp to get SVC mode into the state we
+ * need when doing the SMC below.
+ */
+ cpsid f /* Disable FIQ also */
+ sub lr, lr, #4
+ push {lr}
+ push {r12}
+
+ bl thread_save_state
+
+ mov r0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mrs r1, spsr
+ pop {r12}
+ pop {r2}
+ blx thread_state_suspend
+ mov r4, r0 /* Supply thread index */
+
+ /*
+ * Switch to SVC mode and copy current stack pointer as it already
+ * is the tmp stack.
+ */
+ mov r0, sp
+ cps #CPSR_MODE_SVC
+ mov sp, r0
+
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ ldr r1, =OPTEE_SMC_RETURN_RPC_IRQ
+ mov r2, #0
+ mov r3, #0
+ /* r4 is already filled in above */
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC thread_irq_handler
+
+FUNC thread_init_vbar , :
+UNWIND( .fnstart)
+ /* Set vector (VBAR) */
+ ldr r0, =thread_vect_table
+ write_vbar r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_init_vbar
+
+/*
+ * Below are low level routines handling entry and return from user mode.
+ *
+ * thread_enter_user_mode() saves all the registers that user mode can
+ * change so that kernel mode can restore the needed registers when
+ * resuming execution after the call to thread_enter_user_mode() has
+ * returned. thread_enter_user_mode() doesn't return directly since it
+ * enters user mode instead; it's thread_unwind_user_mode() that does the
+ * returning by restoring the registers saved by thread_enter_user_mode().
+ *
+ * There are three ways for thread_enter_user_mode() to return to its
+ * caller: the user TA calls utee_return, the user TA calls utee_panic, or
+ * an abort occurs.
+ *
+ * Calls to utee_return or utee_panic are handled as:
+ * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
+ * calls syscall_return() or syscall_panic().
+ *
+ * These function calls return normally, except thread_svc_handler()
+ * which is an exception handling routine and therefore reads the return
+ * address and SPSR to restore from the stack. syscall_return() and
+ * syscall_panic() change the return address and SPSR used by
+ * thread_svc_handler() so that, instead of returning into user mode as
+ * with other syscalls, it returns into thread_unwind_user_mode() in
+ * kernel mode. When thread_svc_handler() returns, the stack pointer is at
+ * the point where thread_enter_user_mode() left it, so this is where
+ * thread_unwind_user_mode() can operate.
+ *
+ * Aborts are handled in a similar way but by thread_abort_handler()
+ * instead: when the pager sees that it's an abort from user mode that
+ * can't be handled, it updates the SPSR and return address used by
+ * thread_abort_handler() to return into thread_unwind_user_mode()
+ * instead.
+ */
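+
+/*
+ * Summary of the three return paths described above (illustrative):
+ *
+ *   utee_return -> thread_svc_handler -> syscall_return ---+
+ *   utee_panic  -> thread_svc_handler -> syscall_panic  ---+--> thread_unwind_user_mode
+ *   user abort  -> thread_abort_handler (unhandled)     ---+
+ */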
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ * unsigned long a2, unsigned long a3, unsigned long user_sp,
+ * unsigned long user_func, unsigned long spsr,
+ * uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * Save all registers to allow syscall_return() to resume execution
+ * as if this function would have returned. This is also used in
+ * syscall_panic().
+ *
+ * If stack usage of this function is changed
+ * thread_unwind_user_mode() has to be updated.
+ */
+ push {r4-r12,lr}
+
+ ldr r4, [sp, #(10 * 0x4)] /* user stack pointer */
+ ldr r5, [sp, #(11 * 0x4)] /* user function */
+ ldr r6, [sp, #(12 * 0x4)] /* spsr */
+
+ /*
+	 * Set the Saved Program Status Register (SPSR) to user mode to
+	 * allow entry of user mode through the movs below.
+ */
+ msr spsr_cxsf, r6
+
+ /*
+ * Save old user sp and set new user sp.
+ */
+ cps #CPSR_MODE_SYS
+ mov r6, sp
+ mov sp, r4
+ cps #CPSR_MODE_SVC
+ push {r6,r7}
+
+ /*
+ * Don't allow return from this function, return is done through
+ * thread_unwind_user_mode() below.
+ */
+ mov lr, #0
+ /* Call the user function with its arguments */
+ movs pc, r5
+UNWIND( .fnend)
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr ip, [sp, #(15 * 0x4)] /* &ctx->panicked */
+ str r1, [ip]
+ ldr ip, [sp, #(16 * 0x4)] /* &ctx->panic_code */
+ str r2, [ip]
+
+ /* Restore old user sp */
+ pop {r4,r7}
+ cps #CPSR_MODE_SYS
+ mov sp, r4
+ cps #CPSR_MODE_SVC
+
+ pop {r4-r12,pc} /* Match the push in thread_enter_user_mode()*/
+UNWIND( .fnend)
+END_FUNC thread_unwind_user_mode
+
+LOCAL_FUNC thread_abort_handler , :
+thread_abort_handler:
+thread_und_handler:
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * Switch to abort mode to use that stack instead.
+ */
+ cps #CPSR_MODE_ABT
+ push {r0-r11, ip}
+ cps #CPSR_MODE_UND
+ mrs r0, spsr
+ tst r0, #CPSR_T
+ subne r1, lr, #2
+ subeq r1, lr, #4
+ cps #CPSR_MODE_ABT
+ push {r0, r1}
+ msr spsr_fsxc, r0 /* In case some code reads spsr directly */
+ mov r0, #ABORT_TYPE_UNDEF
+ b .thread_abort_generic
+
+thread_dabort_handler:
+ push {r0-r11, ip}
+ sub r1, lr, #8
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #ABORT_TYPE_DATA
+ b .thread_abort_generic
+
+thread_pabort_handler:
+ push {r0-r11, ip}
+ sub r1, lr, #4
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #ABORT_TYPE_PREFETCH
+ b .thread_abort_generic
+
+.thread_abort_generic:
+ cps #CPSR_MODE_SYS
+ mov r1, sp
+ mov r2, lr
+ cps #CPSR_MODE_ABT
+ push {r1-r3}
+ mov r1, sp
+ bl abort_handler
+ pop {r1-r3}
+ cps #CPSR_MODE_SYS
+ mov sp, r1
+ mov lr, r2
+ cps #CPSR_MODE_ABT
+ pop {r0, r1}
+ mov lr, r1
+ msr spsr_fsxc, r0
+ pop {r0-r11, ip}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_abort_handler
+
+LOCAL_FUNC thread_svc_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7, lr}
+ mrs r0, spsr
+ push {r0}
+ mov r0, sp
+ bl tee_svc_handler
+ pop {r0}
+ msr spsr_fsxc, r0
+ pop {r0-r7, lr}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_svc_handler
+
+ .align 5
+LOCAL_FUNC thread_vect_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ b . /* Reset */
+ b thread_und_handler /* Undefined instruction */
+ b thread_svc_handler /* System call */
+ b thread_pabort_handler /* Prefetch abort */
+ b thread_dabort_handler /* Data abort */
+ b . /* Reserved */
+ b thread_irq_handler /* IRQ */
+ b thread_fiq_handler /* FIQ */
+UNWIND( .fnend)
+END_FUNC thread_vect_table
diff --git a/core/arch/arm/kernel/thread_a64.S b/core/arch/arm/kernel/thread_a64.S
new file mode 100644
index 0000000..abd482b
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a64.S
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm64_macros.S>
+#include <arm64.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <asm-defines.h>
+#include <kernel/thread_defs.h>
+#include "thread_private.h"
+
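+ /*
+ * get_thread_ctx computes a pointer to the context of the thread
+ * currently running on this core:
+ *   x<res> = &threads[core_local->curr_thread]
+ * using <tmp0> and <tmp1> as scratch registers.
+ */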
+ .macro get_thread_ctx core_local, res, tmp0, tmp1
+ ldr w\tmp0, [\core_local, \
+ #THREAD_CORE_LOCAL_CURR_THREAD]
+ adr x\res, threads
+ mov x\tmp1, #THREAD_CTX_SIZE
+ madd x\res, x\tmp0, x\tmp1, x\res
+ .endm
+
+ .section .text.thread_asm
+LOCAL_FUNC vector_std_smc_entry , :
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+ bl thread_handle_std_smc
+ /*
+ * Normally thread_handle_std_smc() should return via
+ * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
+ * hasn't switched stacks (an error was detected) it will do a
+ * normal "C" return.
+ */
+ load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_std_smc_entry
+
+LOCAL_FUNC vector_fast_smc_entry , :
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+ bl thread_handle_fast_smc
+ load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+ /* Secure Monitor received a FIQ and passed control to us. */
+ bl thread_check_canaries
+ adr x16, thread_fiq_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ ldr x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_fiq_entry
+
+LOCAL_FUNC vector_cpu_on_entry , :
+ adr x16, thread_cpu_on_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_ON_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+ adr x16, thread_cpu_off_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+ adr x16, thread_cpu_suspend_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+ adr x16, thread_cpu_resume_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+ adr x16, thread_system_off_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+ adr x16, thread_system_reset_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization.
+ *
+ * Note that ARM-TF depends on the layout of this vector table; any
+ * change in the layout has to be synced with ARM-TF.
+ */
+FUNC thread_vector_table , :
+ b vector_std_smc_entry
+ b vector_fast_smc_entry
+ b vector_cpu_on_entry
+ b vector_cpu_off_entry
+ b vector_cpu_resume_entry
+ b vector_cpu_suspend_entry
+ b vector_fiq_entry
+ b vector_system_off_entry
+ b vector_system_reset_entry
+END_FUNC thread_vector_table
+
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+ load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
+ mov sp, x1
+ msr elr_el1, x2
+ msr spsr_el1, x3
+ load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
+ ldr x0, [x0, THREAD_CTX_REGS_X0]
+ eret
+END_FUNC thread_resume
+
+FUNC thread_std_smc_entry , :
+ /* pass x0-x7 in a struct thread_smc_args */
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+
+ /* Call the registered handler */
+ bl __thread_std_smc_entry
+
+ /*
+ * Load the returned x0-x3 into preserved registers and skip the
+ * "returned" x4-x7 since they will not be returned to normal
+ * world.
+ */
+ load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ bl thread_get_tmp_sp
+ mov sp, x0
+
+ bl thread_state_free
+
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ mov x1, x20
+ mov x2, x21
+ mov x3, x22
+ mov x4, x23
+ smc #0
+ b . /* SMC should not return */
+END_FUNC thread_std_smc_entry
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+ /* Read daif and create an SPSR */
+ mrs x1, daif
+ orr x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ push x0, xzr
+ push x1, x30
+ bl thread_get_ctx_regs
+ ldr x30, [sp, #8]
+ store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
+ mov x19, x0
+
+ bl thread_get_tmp_sp
+ pop x1, xzr /* Match "push x1, x30" above */
+ mov x2, sp
+ str x2, [x19, #THREAD_CTX_REGS_SP]
+ ldr x20, [sp] /* Get pointer to rv[] */
+ mov sp, x0 /* Switch to tmp stack */
+
+ adr x2, .thread_rpc_return
+ mov w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ bl thread_state_suspend
+ mov x4, x0 /* Supply thread index */
+ ldr w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ load_wregs x20, 0, 1, 3 /* Load rv[] into w0-w2 */
+ smc #0
+ b . /* SMC should not return */
+
+.thread_rpc_return:
+ /*
+ * At this point the stack pointer has been restored to the value
+ * saved in THREAD_CTX_REGS_SP above.
+ *
+ * Execution resumes here from thread_resume above when the RPC
+ * has returned. The IRQ and FIQ bits are restored to what they
+ * were when this function was originally entered.
+ */
+ pop x16, xzr /* Get pointer to rv[] */
+ store_wregs x16, 0, 0, 5 /* Store w0-w5 into rv[] */
+ ret
+END_FUNC thread_rpc
+
+FUNC thread_init_vbar , :
+ adr x0, thread_vect_table
+ msr vbar_el1, x0
+ ret
+END_FUNC thread_init_vbar
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ * unsigned long a2, unsigned long a3, unsigned long user_sp,
+ * unsigned long user_func, unsigned long spsr,
+ * uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
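+ /*
+ * x0-x7 carry the first eight arguments; the ninth argument
+ * (exit_status1) is passed on the stack per the AAPCS64. Load it
+ * into x8 so it can be saved together with exit_status0 (x7).
+ */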
+ ldr x8, [sp]
+ /*
+ * Create and fill in the struct thread_user_mode_rec
+ */
+ sub sp, sp, #THREAD_USER_MODE_REC_SIZE
+ store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
+ store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+
+ /*
+ * Switch to SP_EL1
+ * Disable exceptions
+ * Save kern sp in x19
+ */
+ msr daifset, #DAIFBIT_ALL
+ mov x19, sp
+ msr spsel, #1
+
+ /*
+ * Save the kernel stack pointer in the thread context
+ */
+ /* get pointer to current thread context */
+ get_thread_ctx sp, 21, 20, 22
+ /*
+ * Save kernel stack pointer to ensure that el0_svc() uses
+ * correct stack pointer
+ */
+ str x19, [x21, #THREAD_CTX_KERN_SP]
+
+ /*
+ * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
+ */
+ msr spsr_el1, x6
+ /* Set user sp */
+ mov x13, x4 /* Used when running TA in AArch32 */
+ msr sp_el0, x4 /* Used when running TA in AArch64 */
+ /* Set user function */
+ msr elr_el1, x5
+
+ /* Jump into user mode */
+ eret
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+ /* Store the exit status */
+ ldp x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
+ str w1, [x3]
+ str w2, [x4]
+ /* Restore x19..x30 */
+ load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+ add sp, sp, #THREAD_USER_MODE_REC_SIZE
+ /* Return from the call of thread_enter_user_mode() */
+ ret
+END_FUNC thread_unwind_user_mode
+
+ /*
+ * This macro verifies that a given vector doesn't exceed the
+ * architectural limit of 32 instructions. It is meant to be placed
+ * immediately after the last instruction in the vector and takes the
+ * vector entry as its parameter.
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+
+ .align 11
+LOCAL_FUNC thread_vect_table , :
+ /* -----------------------------------------------------
+ * EL1 with SP0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b el1_sync_abort
+ check_vector_size sync_el1_sp0
+
+ .align 7
+irq_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size irq_el1_sp0
+
+ .align 7
+fiq_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size fiq_el1_sp0
+
+ .align 7
+SErrorSP0:
+ b SErrorSP0
+ check_vector_size SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionSPx:
+ b SynchronousExceptionSPx
+ check_vector_size SynchronousExceptionSPx
+
+ .align 7
+IrqSPx:
+ b IrqSPx
+ check_vector_size IrqSPx
+
+ .align 7
+FiqSPx:
+ b FiqSPx
+ check_vector_size FiqSPx
+
+ .align 7
+SErrorSPx:
+ b SErrorSPx
+ check_vector_size SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+el0_sync_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ mrs x2, esr_el1
+ mrs x3, sp_el0
+ lsr x2, x2, #ESR_EC_SHIFT
+ cmp x2, #ESR_EC_AARCH64_SVC
+ b.eq el0_svc
+ b el0_sync_abort
+ check_vector_size el0_sync_a64
+
+ .align 7
+el0_irq_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size el0_irq_a64
+
+ .align 7
+el0_fiq_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size el0_fiq_a64
+
+ .align 7
+SErrorA64:
+ b SErrorA64
+ check_vector_size SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x780
+ * -----------------------------------------------------
+ */
+ .align 7
+el0_sync_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ mrs x2, esr_el1
+ mrs x3, sp_el0
+ lsr x2, x2, #ESR_EC_SHIFT
+ cmp x2, #ESR_EC_AARCH32_SVC
+ b.eq el0_svc
+ b el0_sync_abort
+ check_vector_size el0_sync_a32
+
+ .align 7
+el0_irq_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size el0_irq_a32
+
+ .align 7
+el0_fiq_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size el0_fiq_a32
+
+ .align 7
+SErrorA32:
+ b SErrorA32
+ check_vector_size SErrorA32
+
+END_FUNC thread_vect_table
+
+LOCAL_FUNC el0_svc , :
+ /* get pointer to current thread context in x0 */
+ get_thread_ctx sp, 0, 1, 2
+ /* load saved kernel sp */
+ ldr x0, [x0, #THREAD_CTX_KERN_SP]
+ /* Keep pointer to initial record in x1 */
+ mov x1, sp
+ /* Switch to SP_EL0 and restore kernel sp */
+ msr spsel, #0
+ mov x2, sp /* Save SP_EL0 */
+ mov sp, x0
+
+ /* Make room for struct thread_svc_regs */
+ sub sp, sp, #THREAD_SVC_REG_SIZE
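+ /*
+ * Save x30 and the original SP_EL0 (x2); the stp below relies on
+ * the SP_EL0 field directly following X30 in struct thread_svc_regs.
+ */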
+ stp x30,x2, [sp, #THREAD_SVC_REG_X30]
+
+ /* Restore x0-x3 */
+ ldp x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
+ ldp x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
+
+ /* Prepare the argument for the handler */
+ store_xregs sp, THREAD_SVC_REG_X0, 0, 14
+ mrs x0, elr_el1
+ mrs x1, spsr_el1
+ store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+ mov x0, sp
+
+ /*
+ * Unmask FIQ, SError, and debug exceptions since we have nothing
+ * left on sp_el1. Note that the SVC handler is expected to
+ * re-enable IRQs by itself.
+ */
+ msr daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
+
+ /* Call the handler */
+ bl tee_svc_handler
+
+ /* Mask all maskable exceptions since we're switching back to sp_el1 */
+ msr daifset, #DAIFBIT_ALL
+
+ /*
+ * Save the kernel sp we had at the beginning of this function.
+ * This matters when this TA has called another TA, because
+ * __thread_enter_user_mode() also saves the stack pointer in this
+ * field.
+ */
+ msr spsel, #1
+ get_thread_ctx sp, 0, 1, 2
+ msr spsel, #0
+ add x1, sp, #THREAD_SVC_REG_SIZE
+ str x1, [x0, #THREAD_CTX_KERN_SP]
+
+ /* Restore registers to the required state and return */
+ load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+ msr elr_el1, x0
+ msr spsr_el1, x1
+ load_xregs sp, THREAD_SVC_REG_X0, 0, 14
+ mov x30, sp
+ ldr x0, [x30, #THREAD_SVC_REG_SP_EL0]
+ mov sp, x0
+ ldr x0, [x30, THREAD_SVC_REG_X0]
+ ldr x30, [x30, #THREAD_SVC_REG_X30]
+
+ eret
+END_FUNC el0_svc
+
+LOCAL_FUNC el1_sync_abort , :
+ mov x0, sp
+ msr spsel, #0
+ mov x3, sp /* Save original sp */
+
+ /*
+ * Update core local flags.
+ * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
+ */
+ ldr w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_ABORT
+ tbnz w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
+ .Lsel_tmp_sp
+
+ /* Select abort stack */
+ ldr x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+ b .Lset_sp
+
+.Lsel_tmp_sp:
+ /* Select tmp stack */
+ ldr x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ orr w1, w1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
+
+.Lset_sp:
+ mov sp, x2
+ str w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+
+ /*
+ * Save state on stack
+ */
+ sub sp, sp, #THREAD_ABT_REGS_SIZE
+ mrs x2, spsr_el1
+ /* Store spsr, sp_el0 */
+ stp x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+ /* Store original x0, x1 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+ stp x2, x3, [sp, #THREAD_ABT_REG_X0]
+ /* Store original x2, x3 and x4 to x29 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+ store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+ /* Store x30, elr_el1 */
+ mrs x0, elr_el1
+ stp x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+ /*
+ * Call handler
+ */
+ mov x0, #0
+ mov x1, sp
+ bl abort_handler
+
+ /*
+ * Restore state from stack
+ */
+ /* Load x30, elr_el1 */
+ ldp x30, x0, [sp, #THREAD_ABT_REG_X30]
+ msr elr_el1, x0
+ /* Load x0 to x29 */
+ load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ /* Save x0 to x3 in CORE_LOCAL */
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ /* Restore spsr_el1 and sp_el0 */
+ mrs x3, sp_el0
+ ldp x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+ msr spsr_el1, x0
+ msr sp_el0, x1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0 to x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC el1_sync_abort
+
+ /* sp_el0 in x3 */
+LOCAL_FUNC el0_sync_abort , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_ABORT
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /*
+ * Save state on stack
+ */
+
+ /* load abt_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+ /* Keep pointer to initial record in x0 */
+ mov x0, sp
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+ sub sp, sp, #THREAD_ABT_REGS_SIZE
+ mrs x2, spsr_el1
+ /* Store spsr, sp_el0 */
+ stp x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+ /* Store original x0, x1 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+ stp x2, x3, [sp, #THREAD_ABT_REG_X0]
+ /* Store original x2, x3 and x4 to x29 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+ store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+ /* Store x30, elr_el1 */
+ mrs x0, elr_el1
+ stp x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+ /*
+ * Call handler
+ */
+ mov x0, #0
+ mov x1, sp
+ bl abort_handler
+
+ /*
+ * Restore state from stack
+ */
+
+ /* Load x30, elr_el1 */
+ ldp x30, x0, [sp, #THREAD_ABT_REG_X30]
+ msr elr_el1, x0
+ /* Load x0 to x29 */
+ load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ /* Save x0 to x3 in CORE_LOCAL */
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ /* Restore spsr_el1 and sp_el0 */
+ mrs x3, sp_el0
+ ldp x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+ msr spsr_el1, x0
+ msr sp_el0, x1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0 to x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC el0_sync_abort
+
+LOCAL_FUNC elx_irq , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_TMP
+ orr w1, w1, #THREAD_CLF_IRQ
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* get pointer to current thread context in x0 */
+ get_thread_ctx sp, 0, 1, 2
+ /* Keep original SP_EL0 */
+ mrs x2, sp_el0
+
+ /* Store original sp_el0 */
+ str x2, [x0, #THREAD_CTX_REGS_SP]
+ /* store x4..x30 */
+ store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
+ /* Load original x0..x3 into x10..x13 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
+ /* Save original x0..x3 */
+ store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
+
+ /* load tmp_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+
+ /*
+ * Mark current thread as suspended
+ */
+ mov w0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mrs x1, spsr_el1
+ mrs x2, elr_el1
+ bl thread_state_suspend
+ mov w4, w0 /* Supply thread index */
+
+ /* Update core local flags */
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ msr spsel, #0
+
+ /*
+ * Note that we're exiting with SP_EL0 selected since the entry
+ * functions expect SP_EL0 to be selected with the tmp stack set.
+ */
+
+ ldr w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ ldr w1, =OPTEE_SMC_RETURN_RPC_IRQ
+ mov w2, #0
+ mov w3, #0
+ /* w4 is already filled in above */
+ smc #0
+ b . /* SMC should not return */
+END_FUNC elx_irq
+
+/*
+ * This struct is never used from C; it's only here to visualize the
+ * layout.
+ *
+ * struct elx_fiq_rec {
+ * uint64_t x[19 - 4]; x4..x18
+ * uint64_t lr;
+ * uint64_t sp_el0;
+ * };
+ */
+#define ELX_FIQ_REC_X(x) (8 * ((x) - 4))
+#define ELX_FIQ_REC_LR (8 + ELX_FIQ_REC_X(19))
+#define ELX_FIQ_REC_SP_EL0 (8 + ELX_FIQ_REC_LR)
+#define ELX_FIQ_REC_SIZE (8 + ELX_FIQ_REC_SP_EL0)
+
+LOCAL_FUNC elx_fiq , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_FIQ
+ orr w1, w1, #THREAD_CLF_TMP
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* load tmp_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ /* Keep original SP_EL0 */
+ mrs x2, sp_el0
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+
+ /*
+ * Save registers on stack that can be corrupted by a call to
+ * a C function
+ */
+ /* Make room for struct elx_fiq_rec */
+ sub sp, sp, #ELX_FIQ_REC_SIZE
+ /* Store x4..x18 */
+ store_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+ /* Store lr and original sp_el0 */
+ stp x30, x2, [sp, #ELX_FIQ_REC_LR]
+
+ bl thread_check_canaries
+ adr x16, thread_fiq_handler_ptr
+ ldr x16, [x16]
+ blr x16
+
+ /*
+ * Restore registers
+ */
+ /* Restore x4..x18 */
+ load_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+ /* Load lr and original sp_el0 */
+ ldp x30, x2, [sp, #ELX_FIQ_REC_LR]
+ /* Restore SP_EL0 */
+ mov sp, x2
+ /* Switch back to SP_EL1 */
+ msr spsel, #1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0..x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC elx_fiq
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
new file mode 100644
index 0000000..3d87c88
--- /dev/null
+++ b/core/arch/arm/kernel/thread_private.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_PRIVATE_H
+#define THREAD_PRIVATE_H
+
+#ifndef ASM
+
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <kernel/vfp.h>
+#include <kernel/mutex.h>
+#include <kernel/thread.h>
+
+enum thread_state {
+ THREAD_STATE_FREE,
+ THREAD_STATE_SUSPENDED,
+ THREAD_STATE_ACTIVE,
+};
+
+#ifdef ARM32
+struct thread_ctx_regs {
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t pc;
+ uint32_t cpsr;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+struct thread_ctx_regs {
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t cpsr;
+ uint64_t x[31];
+};
+#endif /*ARM64*/
+
+#ifdef ARM64
+struct thread_user_mode_rec {
+ uint64_t exit_status0_ptr;
+ uint64_t exit_status1_ptr;
+ uint64_t x[31 - 19]; /* x19..x30 */
+};
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+struct thread_vfp_state {
+ bool ns_saved;
+ bool sec_saved;
+ bool sec_lazy_saved;
+ struct vfp_state ns;
+ struct vfp_state sec;
+ struct thread_user_vfp_state *uvfp;
+};
+
+#endif /*CFG_WITH_VFP*/
+
+struct thread_ctx {
+ struct thread_ctx_regs regs;
+ enum thread_state state;
+ vaddr_t stack_va_end;
+ uint32_t hyp_clnt_id;
+ uint32_t flags;
+ struct core_mmu_user_map user_map;
+ bool have_user_map;
+#ifdef ARM64
+ vaddr_t kern_sp; /* Saved kernel SP during user TA execution */
+#endif
+#ifdef CFG_WITH_VFP
+ struct thread_vfp_state vfp_state;
+#endif
+ void *rpc_arg;
+ uint64_t rpc_carg;
+ struct mutex_head mutexes;
+ struct thread_specific_data tsd;
+};
+
+#ifdef ARM64
+/*
+ * struct thread_core_local needs to have an alignment suitable for a
+ * stack pointer since SP_EL1 points to it
+ */
+#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
+#else
+#define THREAD_CORE_LOCAL_ALIGNED
+#endif
+
+struct thread_core_local {
+ vaddr_t tmp_stack_va_end;
+ int curr_thread;
+#ifdef ARM64
+ uint32_t flags;
+ vaddr_t abt_stack_va_end;
+ uint64_t x[4];
+#endif
+#ifdef CFG_TEE_CORE_DEBUG
+ unsigned int locked_count; /* Number of spinlocks held */
+#endif
+} THREAD_CORE_LOCAL_ALIGNED;
+
+#endif /*ASM*/
+
+#ifdef ARM64
+#ifdef CFG_WITH_VFP
+#define THREAD_VFP_STATE_SIZE \
+ (16 + (16 * 32 + 16) * 2 + 16)
+#else
+#define THREAD_VFP_STATE_SIZE 0
+#endif
+
+/* Describes the flags field of struct thread_core_local */
+#define THREAD_CLF_SAVED_SHIFT 4
+#define THREAD_CLF_CURR_SHIFT 0
+#define THREAD_CLF_MASK 0xf
+#define THREAD_CLF_TMP_SHIFT 0
+#define THREAD_CLF_ABORT_SHIFT 1
+#define THREAD_CLF_IRQ_SHIFT 2
+#define THREAD_CLF_FIQ_SHIFT 3
+
+#define THREAD_CLF_TMP (1 << THREAD_CLF_TMP_SHIFT)
+#define THREAD_CLF_ABORT (1 << THREAD_CLF_ABORT_SHIFT)
+#define THREAD_CLF_IRQ (1 << THREAD_CLF_IRQ_SHIFT)
+#define THREAD_CLF_FIQ (1 << THREAD_CLF_FIQ_SHIFT)
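+
+/*
+ * On exception entry the handlers in thread_a64.S shift the flags left
+ * by THREAD_CLF_SAVED_SHIFT and set the bit for the new state (TMP,
+ * ABORT, IRQ or FIQ) in the low bits; on exception return the flags are
+ * shifted back down. This lets a handler detect nesting, e.g. an abort
+ * taken while already handling an abort selects the tmp stack instead
+ * of the abort stack.
+ */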
+
+#endif /*ARM64*/
+
+#ifndef ASM
+/*
+ * Initializes VBAR for the current CPU (called by thread_init_per_cpu())
+ */
+void thread_init_vbar(void);
+
+/* Handles a stdcall, r0-r7 hold the parameters */
+void thread_std_smc_entry(void);
+
+struct thread_core_local *thread_get_core_local(void);
+
+/*
+ * Resumes execution of the currently active thread by restoring its
+ * context and jumping to the instruction where execution continues.
+ *
+ * Arguments supplied by non-secure world will be copied into the saved
+ * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
+ * in the flags field in the thread context.
+ */
+void thread_resume(struct thread_ctx_regs *regs);
+
+uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long user_func, unsigned long spsr,
+ uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * Private functions made available for thread_asm.S
+ */
+
+/* Returns the temp stack for current CPU */
+void *thread_get_tmp_sp(void);
+
+/*
+ * Marks the current thread as suspended and updates the flags
+ * of the thread context (see thread_resume() for the use of the flags).
+ * Returns the thread index of the thread that was suspended.
+ */
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
+
+/*
+ * Marks the current thread as free.
+ */
+void thread_state_free(void);
+
+/* Returns a pointer to the saved registers in current thread context. */
+struct thread_ctx_regs *thread_get_ctx_regs(void);
+
+#ifdef ARM32
+/* Sets sp for abort mode */
+void thread_set_abt_sp(vaddr_t sp);
+
+/* Sets sp for irq mode */
+void thread_set_irq_sp(vaddr_t sp);
+
+/* Sets sp for fiq mode */
+void thread_set_fiq_sp(vaddr_t sp);
+#endif /*ARM32*/
+
+/* Handles a fast SMC by dispatching it to the registered fast SMC handler */
+void thread_handle_fast_smc(struct thread_smc_args *args);
+
+/* Handles a std SMC by dispatching it to the registered std SMC handler */
+void thread_handle_std_smc(struct thread_smc_args *args);
+
+/*
+ * Suspends the current thread and temporarily exits to the non-secure
+ * world. This function returns later when the non-secure world returns.
+ *
+ * The purpose of this function is to request services from non-secure
+ * world.
+ */
+#define THREAD_RPC_NUM_ARGS 6
+void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
+
+/* Checks stack canaries */
+void thread_check_canaries(void);
+
+void __thread_std_smc_entry(struct thread_smc_args *args);
+
+#endif /*ASM*/
+
+#endif /*THREAD_PRIVATE_H*/
diff --git a/core/arch/arm/kernel/trace_ext.c b/core/arch/arm/kernel/trace_ext.c
new file mode 100644
index 0000000..8b8454c
--- /dev/null
+++ b/core/arch/arm/kernel/trace_ext.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdbool.h>
+#include <trace.h>
+#include <console.h>
+#include <kernel/thread.h>
+
+const char trace_ext_prefix[] = "TEE-CORE";
+int trace_level = TRACE_LEVEL;
+
+void trace_ext_puts(const char *str)
+{
+ const char *p;
+
+ console_flush();
+
+ for (p = str; *p; p++)
+ console_putc(*p);
+
+ console_flush();
+}
+
+int trace_ext_get_thread_id(void)
+{
+ return thread_get_id_may_fail();
+}
diff --git a/core/arch/arm/kernel/tz_ssvce_pl310_a32.S b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
new file mode 100644
index 0000000..184e936
--- /dev/null
+++ b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+#define PL310_LOCKDOWN_NBREGS 8
+#define PL310_LOCKDOWN_SZREG 4
+
+#define PL310_8WAYS_MASK 0x00FF
+#define PL310_16WAYS_UPPERMASK 0xFF00
+
+/*
+ * void arm_cl2_lockallways(vaddr_t base)
+ *
+ * Lock all L2 cache ways for both data and instructions.
+ */
+FUNC arm_cl2_lockallways , :
+UNWIND( .fnstart)
+ add r1, r0, #PL310_DCACHE_LOCKDOWN_BASE
+ ldr r2, [r0, #PL310_AUX_CTRL]
+ tst r2, #PL310_AUX_16WAY_BIT
+ mov r2, #PL310_8WAYS_MASK
+ orrne r2, #PL310_16WAYS_UPPERMASK
+ mov r0, #PL310_LOCKDOWN_NBREGS
+1: /* lock Dcache and Icache */
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ subs r0, r0, #1
+ bne 1b
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_lockallways
+
+/*
+ * Set the sync operation mask according to the cache way associativity.
+ * Preserves r0 = pl310 iomem base address.
+ */
+.macro syncbyway_set_mask reg
+ ldr \reg, [r0, #PL310_AUX_CTRL]
+ tst \reg, #PL310_AUX_16WAY_BIT
+ mov \reg, #PL310_8WAYS_MASK
+ orrne \reg, \reg, #PL310_16WAYS_UPPERMASK
+.endm
+
+/*
+ * void arm_cl2_cleaninvbyway(vaddr_t base)
+ * clean & invalidate the whole L2 cache.
+ */
+FUNC arm_cl2_cleaninvbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_FLUSH_BY_WAY]
+
+ /* Wait for all cache ways to be cleaned and invalidated */
+loop_cli_way_done:
+ ldr r2, [r0, #PL310_FLUSH_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cli_way_done
+
+ /* Cache Sync */
+
+ /* Wait for any pending cache sync operation to complete */
+loop_cli_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_cli_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbyway
+
+/* void arm_cl2_invbyway(vaddr_t base) */
+FUNC arm_cl2_invbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_INV_BY_WAY]
+
+loop_inv_way_done:
+ ldr r2, [r0, #PL310_INV_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_inv_way_done
+
+loop_inv_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_inv_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbyway
+
+/* void arm_cl2_cleanbyway(vaddr_t base) */
+FUNC arm_cl2_cleanbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_CLEAN_BY_WAY]
+
+loop_cl_way_done:
+ ldr r2, [r0, #PL310_CLEAN_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cl_way_done
+
+loop_cl_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_cl_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbyway
+
+/*
+ * void _arm_cl2_xxxbypa(vaddr_t pl310_base, paddr_t start, paddr_t end,
+ * int pl310value);
+ * pl310value is one of PL310_CLEAN_BY_PA, PL310_INV_BY_PA or PL310_FLUSH_BY_PA
+ */
+LOCAL_FUNC _arm_cl2_xxxbypa , :
+UNWIND( .fnstart)
+ /* Align start address on PL310 line size */
+ and r1, #(~(PL310_LINE_SIZE - 1))
+
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+ /*
+ * NOTE:
+ * We're assuming that if mmu is enabled PL310_BASE and SCU_BASE
+ * still have the same relative offsets from each other.
+ */
+ sub r0, r0, #(PL310_BASE - SCU_BASE)
+ mov r12, #1
+ str r12, [r0, #SCU_ERRATA744369]
+ dsb
+ add r0, r0, #(PL310_BASE - SCU_BASE)
+
+loop_cl2_xxxbypa:
+ str r1, [r0, r3]
+
+loop_xxx_pa_done:
+ ldr r12, [r0, r3]
+ and r12, r12, r1
+ cmp r12, #0
+ bne loop_xxx_pa_done
+
+ add r1, r1, #PL310_LINE_SIZE
+ cmp r2, r1
+ bpl loop_cl2_xxxbypa
+
+loop_xxx_pa_sync:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync
+
+ mov r12, #1
+ str r12, [r0, #PL310_SYNC]
+
+loop_xxx_pa_sync_done:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC _arm_cl2_xxxbypa
+
+/*
+ * void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleanbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_CLEAN_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbypa
+
+/*
+ * void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_invbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_INV_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbypa
+
+/*
+ * void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean and invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleaninvbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_FLUSH_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbypa
+
diff --git a/core/arch/arm/kernel/unwind_arm32.c b/core/arch/arm/kernel/unwind_arm32.c
new file mode 100644
index 0000000..7efe94b
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm32.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2015 Linaro Limited
+ * Copyright 2013-2014 Andrew Turner.
+ * Copyright 2013-2014 Ian Lepore.
+ * Copyright 2013-2014 Rui Paulo.
+ * Copyright 2013 Eitan Adler.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/misc.h>
+#include <kernel/unwind.h>
+#include <string.h>
+#include <trace.h>
+
+/* The register names */
+#define FP 11
+#define SP 13
+#define LR 14
+#define PC 15
+
+/*
+ * Definitions for the instruction interpreter.
+ *
+ * The ARM EABI specifies how to perform the frame unwinding in the
+ * Exception Handling ABI for the ARM Architecture document. To perform
+ * the unwind we need to know the initial frame pointer, stack pointer,
+ * link register and program counter. We then find the entry within the
+ * index table that points to the function the program counter is within.
+ * This gives us either a list of three instructions to process, a 31-bit
+ * relative offset to a table of instructions, or a value telling us
+ * we can't unwind any further.
+ *
+ * When we have the instructions to process we need to decode them
+ * following table 4 in section 9.3. This describes a collection of bit
+ * patterns encoding the steps to take to restore the stack pointer and
+ * link register to the values they had at the start of the function.
+ */
+
+/* A special case when we are unable to unwind past this function */
+#define EXIDX_CANTUNWIND 1
+
+/*
+ * Entry types.
+ * These are the only entry types that have been seen in the kernel.
+ */
+#define ENTRY_MASK 0xff000000
+#define ENTRY_ARM_SU16 0x80000000
+#define ENTRY_ARM_LU16 0x81000000
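+
+/*
+ * ENTRY_ARM_SU16 ("Su16") packs up to three unwind opcodes into the
+ * remaining bytes of the index word itself, while ENTRY_ARM_LU16
+ * ("Lu16") stores a count of additional opcode words in byte 2 of the
+ * word. unwind_tab() below sets up state->byte and state->entries
+ * accordingly.
+ */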
+
+/* Instruction masks. */
+#define INSN_VSP_MASK 0xc0
+#define INSN_VSP_SIZE_MASK 0x3f
+#define INSN_STD_MASK 0xf0
+#define INSN_STD_DATA_MASK 0x0f
+#define INSN_POP_TYPE_MASK 0x08
+#define INSN_POP_COUNT_MASK 0x07
+#define INSN_VSP_LARGE_INC_MASK 0xff
+
+/* Instruction definitions */
+#define INSN_VSP_INC 0x00
+#define INSN_VSP_DEC 0x40
+#define INSN_POP_MASKED 0x80
+#define INSN_VSP_REG 0x90
+#define INSN_POP_COUNT 0xa0
+#define INSN_FINISH 0xb0
+#define INSN_POP_REGS 0xb1
+#define INSN_VSP_LARGE_INC 0xb2
+
+/* An item in the exception index table */
+struct unwind_idx {
+ uint32_t offset;
+ uint32_t insn;
+};
+
+/*
+ * These are set in the linker script. Their addresses will be
+ * either the start or end of the exception table or index.
+ */
+extern struct unwind_idx __exidx_start;
+extern struct unwind_idx __exidx_end;
+
+/* Expand a 31-bit signed value to a 32-bit signed value */
+static int32_t expand_prel31(uint32_t prel31)
+{
+
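+	/*
+	 * Shift bit 30 into the sign bit, then divide by two to shift
+	 * back; the shifted value is even so the division is exact and
+	 * acts as an arithmetic right shift, sign-extending the 31-bit
+	 * value.
+	 */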
+ return ((int32_t)(prel31 & 0x7fffffffu) << 1) / 2;
+}
+
+/*
+ * Perform a binary search of the index table to find the function
+ * with the largest address that doesn't exceed addr.
+ */
+static struct unwind_idx *find_index(uint32_t addr)
+{
+ vaddr_t idx_start, idx_end;
+ unsigned int min, mid, max;
+ struct unwind_idx *start;
+ struct unwind_idx *item;
+ int32_t prel31_addr;
+ uint32_t func_addr;
+
+ start = &__exidx_start;
+ idx_start = (vaddr_t)&__exidx_start;
+ idx_end = (vaddr_t)&__exidx_end;
+
+ min = 0;
+ max = (idx_end - idx_start) / sizeof(struct unwind_idx);
+
+ while (min != max) {
+ mid = min + (max - min + 1) / 2;
+
+ item = &start[mid];
+
+ prel31_addr = expand_prel31(item->offset);
+ func_addr = (uint32_t)&item->offset + prel31_addr;
+
+ if (func_addr <= addr) {
+ min = mid;
+ } else {
+ max = mid - 1;
+ }
+ }
+
+ return &start[min];
+}
+
+/* Reads the next byte from the instruction list */
+static uint8_t unwind_exec_read_byte(struct unwind_state *state)
+{
+ uint8_t insn;
+
+ /* Read the unwind instruction */
+ insn = (*state->insn) >> (state->byte * 8);
+
+ /* Update the location of the next instruction */
+ if (state->byte == 0) {
+ state->byte = 3;
+ state->insn++;
+ state->entries--;
+ } else
+ state->byte--;
+
+ return insn;
+}
+
+/* Executes the next instruction on the list */
+static bool unwind_exec_insn(struct unwind_state *state)
+{
+ unsigned int insn;
+ uint32_t *vsp = (uint32_t *)state->registers[SP];
+ int update_vsp = 0;
+
+ /* This should never happen */
+ if (state->entries == 0)
+ return false;
+
+ /* Read the next instruction */
+ insn = unwind_exec_read_byte(state);
+
+ if ((insn & INSN_VSP_MASK) == INSN_VSP_INC) {
+ state->registers[SP] += ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_VSP_MASK) == INSN_VSP_DEC) {
+ state->registers[SP] -= ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_MASKED) {
+ unsigned int mask, reg;
+
+ /* Load the mask */
+ mask = unwind_exec_read_byte(state);
+ mask |= (insn & INSN_STD_DATA_MASK) << 8;
+
+ /* A mask of zero is a "refuse to unwind" instruction */
+ if (mask == 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 4; mask && reg < 16; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+
+ /* If we have updated SP, keep its value */
+ if (reg == SP)
+ update_vsp = 0;
+ }
+ }
+
+ } else if ((insn & INSN_STD_MASK) == INSN_VSP_REG &&
+ ((insn & INSN_STD_DATA_MASK) != 13) &&
+ ((insn & INSN_STD_DATA_MASK) != 15)) {
+ /* sp = register */
+ state->registers[SP] =
+ state->registers[insn & INSN_STD_DATA_MASK];
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_COUNT) {
+ unsigned int count, reg;
+
+ /* Read how many registers to load */
+ count = insn & INSN_POP_COUNT_MASK;
+
+ /* Update sp */
+ update_vsp = 1;
+
+ /* Pop the registers */
+ for (reg = 4; reg <= 4 + count; reg++) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+
+ /* Check if we are in the pop r14 version */
+ if ((insn & INSN_POP_TYPE_MASK) != 0) {
+ state->registers[14] = *vsp++;
+ }
+
+ } else if (insn == INSN_FINISH) {
+ /* Stop processing */
+ state->entries = 0;
+
+ } else if (insn == INSN_POP_REGS) {
+ unsigned int mask, reg;
+
+ mask = unwind_exec_read_byte(state);
+ if (mask == 0 || (mask & 0xf0) != 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 0; mask && reg < 4; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+ }
+
+ } else if ((insn & INSN_VSP_LARGE_INC_MASK) == INSN_VSP_LARGE_INC) {
+ unsigned int uleb128;
+
+ /* Read the increment value */
+ uleb128 = unwind_exec_read_byte(state);
+
+ state->registers[SP] += 0x204 + (uleb128 << 2);
+
+ } else {
+ /* We hit a new instruction that needs to be implemented */
+ DMSG("Unhandled instruction %.2x\n", insn);
+ return false;
+ }
+
+ if (update_vsp) {
+ state->registers[SP] = (uint32_t)vsp;
+ }
+
+ return true;
+}
+
+/* Performs the unwind of a function */
+static bool unwind_tab(struct unwind_state *state)
+{
+ uint32_t entry;
+
+ /* Set PC to a known value */
+ state->registers[PC] = 0;
+
+ /* Read the personality */
+ entry = *state->insn & ENTRY_MASK;
+
+ if (entry == ENTRY_ARM_SU16) {
+ state->byte = 2;
+ state->entries = 1;
+ } else if (entry == ENTRY_ARM_LU16) {
+ state->byte = 1;
+ state->entries = ((*state->insn >> 16) & 0xFF) + 1;
+ } else {
+ DMSG("Unknown entry: %x\n", entry);
+ return true;
+ }
+
+ while (state->entries > 0) {
+ if (!unwind_exec_insn(state))
+ return true;
+ }
+
+ /*
+ * The program counter was not updated, load it from the link register.
+ */
+ if (state->registers[PC] == 0) {
+ state->registers[PC] = state->registers[LR];
+
+ /*
+ * If the program counter changed, flag it in the update mask.
+ */
+ if (state->start_pc != state->registers[PC])
+ state->update_mask |= 1 << PC;
+ }
+
+ return false;
+}
+
+bool unwind_stack(struct unwind_state *state)
+{
+ struct unwind_idx *index;
+ bool finished;
+
+ /* Reset the mask of updated registers */
+ state->update_mask = 0;
+
+ /* The pc value is correct and will be overwritten, save it */
+ state->start_pc = state->registers[PC];
+
+ /* Find the item to run */
+ index = find_index(state->start_pc);
+
+ finished = false;
+ if (index->insn != EXIDX_CANTUNWIND) {
+ if (index->insn & (1U << 31)) {
+ /* The data is within the instruction */
+ state->insn = &index->insn;
+ } else {
+ /* A prel31 offset to the unwind table */
+ state->insn = (uint32_t *)
+ ((uintptr_t)&index->insn +
+ expand_prel31(index->insn));
+ }
+ /* Run the unwind function */
+ finished = unwind_tab(state);
+ }
+
+ /* This is the top of the stack, finish */
+ if (index->insn == EXIDX_CANTUNWIND)
+ finished = true;
+
+ return !finished;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(state.registers, 0, sizeof(state.registers));
+ /* r7: Thumb-style frame pointer */
+ state.registers[7] = read_r7();
+ /* r11: ARM-style frame pointer */
+ state.registers[FP] = read_fp();
+ state.registers[SP] = read_sp();
+ state.registers[LR] = read_lr();
+ state.registers[PC] = (uint32_t)print_stack;
+
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
+
+/*
+ * These functions are referenced but never used
+ */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void);
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
diff --git a/core/arch/arm/kernel/unwind_arm64.c b/core/arch/arm/kernel/unwind_arm64.c
new file mode 100644
index 0000000..10b70ef
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm64.c
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/unwind.h>
+#include <kernel/thread.h>
+#include <string.h>
+#include <trace.h>
+
+bool unwind_stack(struct unwind_state *frame)
+{
+ uint64_t fp;
+
+ fp = frame->fp;
+ if (!thread_addr_is_in_stack(fp))
+ return false;
+
+ frame->sp = fp + 0x10;
+ /* FP to previous frame (X29) */
+ frame->fp = *(uint64_t *)(fp);
+ /* LR (X30) */
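+	/*
+	 * Subtract the size of one instruction so that pc points at the
+	 * branch-and-link instruction rather than at the return address.
+	 */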
+ frame->pc = *(uint64_t *)(fp + 8) - 4;
+
+ return true;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = read_pc();
+ state.fp = read_fp();
+
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
new file mode 100644
index 0000000..a63fb22
--- /dev/null
+++ b/core/arch/arm/kernel/user_ta.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015-2017 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include <keep.h>
+#include <kernel/panic.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <kernel/user_ta.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg_supplicant.h>
+#include <signed_hdr.h>
+#include <stdlib.h>
+#include <ta_pub_key.h>
+#include <tee/tee_cryp_provider.h>
+#include <tee/tee_cryp_utl.h>
+#include <tee/tee_obj.h>
+#include <tee/tee_svc_cryp.h>
+#include <tee/tee_svc.h>
+#include <tee/tee_svc_storage.h>
+#include <tee/uuid.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <utee_defines.h>
+#include <util.h>
+
+#include "elf_load.h"
+#include "elf_common.h"
+
+#define STACK_ALIGNMENT (sizeof(long) * 2)
+
+static TEE_Result load_header(const struct shdr *signed_ta,
+ struct shdr **sec_shdr)
+{
+ size_t s;
+
+ if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
+ return TEE_ERROR_SECURITY;
+
+ s = SHDR_GET_SIZE(signed_ta);
+ if (!tee_vbuf_is_non_sec(signed_ta, s))
+ return TEE_ERROR_SECURITY;
+
+ /* Copy signed header into secure memory */
+ *sec_shdr = malloc(s);
+ if (!*sec_shdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ memcpy(*sec_shdr, signed_ta, s);
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result check_shdr(struct shdr *shdr)
+{
+ struct rsa_public_key key;
+ TEE_Result res;
+ uint32_t e = TEE_U32_TO_BIG_ENDIAN(ta_pub_key_exponent);
+ size_t hash_size;
+
+ if (shdr->magic != SHDR_MAGIC || shdr->img_type != SHDR_TA)
+ return TEE_ERROR_SECURITY;
+
+ if (TEE_ALG_GET_MAIN_ALG(shdr->algo) != TEE_MAIN_ALGO_RSA)
+ return TEE_ERROR_SECURITY;
+
+ res = tee_hash_get_digest_size(TEE_DIGEST_HASH_TO_ALGO(shdr->algo),
+ &hash_size);
+ if (res != TEE_SUCCESS)
+ return res;
+ if (hash_size != shdr->hash_size)
+ return TEE_ERROR_SECURITY;
+
+ if (!crypto_ops.acipher.alloc_rsa_public_key ||
+ !crypto_ops.acipher.free_rsa_public_key ||
+ !crypto_ops.acipher.rsassa_verify ||
+ !crypto_ops.bignum.bin2bn)
+ return TEE_ERROR_NOT_SUPPORTED;
+
+ res = crypto_ops.acipher.alloc_rsa_public_key(&key, shdr->sig_size);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = crypto_ops.bignum.bin2bn((uint8_t *)&e, sizeof(e), key.e);
+ if (res != TEE_SUCCESS)
+ goto out;
+ res = crypto_ops.bignum.bin2bn(ta_pub_key_modulus,
+ ta_pub_key_modulus_size, key.n);
+ if (res != TEE_SUCCESS)
+ goto out;
+
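+ /* Check the signature over the TA hash stored in the signed header */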
+ res = crypto_ops.acipher.rsassa_verify(shdr->algo, &key, -1,
+ SHDR_GET_HASH(shdr), shdr->hash_size,
+ SHDR_GET_SIG(shdr), shdr->sig_size);
+out:
+ crypto_ops.acipher.free_rsa_public_key(&key);
+ if (res != TEE_SUCCESS)
+ return TEE_ERROR_SECURITY;
+ return TEE_SUCCESS;
+}
+
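+/*
+ * During the initial load all segments are mapped privileged read/write so
+ * the kernel can copy in the ELF image. The final mapping derives user-mode
+ * permissions from the ELF segment flags instead.
+ */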
+static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
+{
+ uint32_t mattr = 0;
+
+ if (init_attrs)
+ mattr = TEE_MATTR_PRW;
+ else {
+ if (flags & PF_X)
+ mattr |= TEE_MATTR_UX;
+ if (flags & PF_W)
+ mattr |= TEE_MATTR_UW;
+ if (flags & PF_R)
+ mattr |= TEE_MATTR_UR;
+ }
+
+ return mattr;
+}
+
+#ifdef CFG_PAGED_USER_TA
+static TEE_Result config_initial_paging(struct user_ta_ctx *utc)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ if (!tee_pager_add_uta_area(utc, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size))
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+
+static TEE_Result config_final_paging(struct user_ta_ctx *utc)
+{
+ size_t n;
+ uint32_t flags;
+
+ tee_pager_assign_uta_tables(utc);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ flags = utc->mmu->regions[n].attr &
+ (TEE_MATTR_PRW | TEE_MATTR_URWX);
+ if (!tee_pager_set_uta_area_attr(utc, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size,
+ flags))
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+#else /*!CFG_PAGED_USER_TA*/
+static TEE_Result config_initial_paging(struct user_ta_ctx *utc __unused)
+{
+ return TEE_SUCCESS;
+}
+
+static TEE_Result config_final_paging(struct user_ta_ctx *utc)
+{
+ void *va = (void *)utc->mmu->ta_private_vmem_start;
+ size_t vasize = utc->mmu->ta_private_vmem_end -
+ utc->mmu->ta_private_vmem_start;
+
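+ /*
+ * Clean the D-cache and invalidate the I-cache over the TA image so the
+ * newly written code is fetched correctly.
+ */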
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va, vasize);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va, vasize);
+ return TEE_SUCCESS;
+}
+#endif /*!CFG_PAGED_USER_TA*/
+
+static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
+ struct elf_load_state *elf_state, bool init_attrs)
+{
+ TEE_Result res;
+ uint32_t mattr;
+ size_t idx = 0;
+
+ tee_mmu_map_clear(utc);
+
+ /*
+ * Add stack segment
+ */
+ tee_mmu_map_stack(utc, utc->mobj_stack);
+
+ /*
+ * Add code segment
+ */
+ while (true) {
+ vaddr_t offs;
+ size_t size;
+ uint32_t flags;
+
+ res = elf_load_get_next_segment(elf_state, &idx, &offs, &size,
+ &flags);
+ if (res == TEE_ERROR_ITEM_NOT_FOUND)
+ break;
+ if (res != TEE_SUCCESS)
+ return res;
+
+ mattr = elf_flags_to_mattr(flags, init_attrs);
+ res = tee_mmu_map_add_segment(utc, utc->mobj_code, offs, size,
+ mattr);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ if (init_attrs)
+ return config_initial_paging(utc);
+ else
+ return config_final_paging(utc);
+}
+
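+/*
+ * With CFG_PAGED_USER_TA the TA memory is backed by the pager, otherwise
+ * it is allocated from the secure DDR pool.
+ */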
+static struct mobj *alloc_ta_mem(size_t size)
+{
+#ifdef CFG_PAGED_USER_TA
+ return mobj_paged_alloc(size);
+#else
+ return mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr);
+#endif
+}
+
+static TEE_Result load_elf(struct user_ta_ctx *utc, struct shdr *shdr,
+ const struct shdr *nmem_shdr)
+{
+ TEE_Result res;
+ size_t hash_ctx_size;
+ void *hash_ctx = NULL;
+ uint32_t hash_algo;
+ uint8_t *nwdata = (uint8_t *)nmem_shdr + SHDR_GET_SIZE(shdr);
+ size_t nwdata_len = shdr->img_size;
+ void *digest = NULL;
+ struct elf_load_state *elf_state = NULL;
+ struct ta_head *ta_head;
+ void *p;
+ size_t vasize;
+
+ if (!tee_vbuf_is_non_sec(nwdata, nwdata_len))
+ return TEE_ERROR_SECURITY;
+
+ if (!crypto_ops.hash.get_ctx_size || !crypto_ops.hash.init ||
+ !crypto_ops.hash.update || !crypto_ops.hash.final) {
+ res = TEE_ERROR_NOT_IMPLEMENTED;
+ goto out;
+ }
+ hash_algo = TEE_DIGEST_HASH_TO_ALGO(shdr->algo);
+ res = crypto_ops.hash.get_ctx_size(hash_algo, &hash_ctx_size);
+ if (res != TEE_SUCCESS)
+ goto out;
+ hash_ctx = malloc(hash_ctx_size);
+ if (!hash_ctx) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+ res = crypto_ops.hash.init(hash_ctx, hash_algo);
+ if (res != TEE_SUCCESS)
+ goto out;
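+ /*
+ * The digest covers the fixed part of the signed header followed by
+ * the ELF image streamed in by the elf_load_*() calls below.
+ */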
+ res = crypto_ops.hash.update(hash_ctx, hash_algo,
+ (uint8_t *)shdr, sizeof(struct shdr));
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = elf_load_init(hash_ctx, hash_algo, nwdata, nwdata_len,
+ &elf_state);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = elf_load_head(elf_state, sizeof(struct ta_head), &p, &vasize,
+ &utc->is_32bit);
+ if (res != TEE_SUCCESS)
+ goto out;
+ ta_head = p;
+
+ utc->mobj_code = alloc_ta_mem(vasize);
+ if (!utc->mobj_code) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ /* Currently all TAs must execute from DDR */
+ if (!(ta_head->flags & TA_FLAG_EXEC_DDR)) {
+ res = TEE_ERROR_BAD_FORMAT;
+ goto out;
+ }
+ /* Temporary assignment to setup memory mapping */
+ utc->ctx.flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+
+ /* Ensure proper alignment of the stack */
+ utc->mobj_stack = alloc_ta_mem(ROUNDUP(ta_head->stack_size,
+ STACK_ALIGNMENT));
+ if (!utc->mobj_stack) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ /*
+ * Map physical memory into TA virtual memory
+ */
+
+ res = tee_mmu_init(utc);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = load_elf_segments(utc, elf_state, true /* init attrs */);
+ if (res != TEE_SUCCESS)
+ goto out;
+
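+ /* Activate the TA mapping so elf_load_body() can write straight into it */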
+ tee_mmu_set_ctx(&utc->ctx);
+
+ res = elf_load_body(elf_state, tee_mmu_get_load_addr(&utc->ctx));
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ digest = malloc(shdr->hash_size);
+ if (!digest) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ res = crypto_ops.hash.final(hash_ctx, hash_algo, digest,
+ shdr->hash_size);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ if (memcmp(digest, SHDR_GET_HASH(shdr), shdr->hash_size) != 0) {
+ res = TEE_ERROR_SECURITY;
+ goto out;
+ }
+
+ /*
+ * Replace the init attributes with attributes used when the TA is
+ * running.
+ */
+ res = load_elf_segments(utc, elf_state, false /* final attrs */);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+out:
+ elf_load_final(elf_state);
+ free(digest);
+ free(hash_ctx);
+ return res;
+}
+
+/*-----------------------------------------------------------------------------
+ * Loads TA header and hashes.
+ * Verifies the TA signature.
+ * Returns context ptr and TEE_Result.
+ *---------------------------------------------------------------------------*/
+static TEE_Result ta_load(const TEE_UUID *uuid, const struct shdr *signed_ta,
+ struct tee_ta_ctx **ta_ctx)
+{
+ TEE_Result res;
+ /* man_flags: mandatory flags */
+ uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+ /* opt_flags: optional flags */
+ uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
+ TA_FLAG_MULTI_SESSION | TA_FLAG_UNSAFE_NW_PARAMS |
+ TA_FLAG_INSTANCE_KEEP_ALIVE | TA_FLAG_CACHE_MAINTENANCE;
+ struct user_ta_ctx *utc = NULL;
+ struct shdr *sec_shdr = NULL;
+ struct ta_head *ta_head;
+
+ res = load_header(signed_ta, &sec_shdr);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ res = check_shdr(sec_shdr);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ /*
+ * Register context: alloc and init the TA context structure, alloc
+ * physical/virtual memories to store/map the TA.
+ */
+
+ /* code below must be protected by mutex (multi-threaded) */
+ utc = calloc(1, sizeof(struct user_ta_ctx));
+ if (!utc) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+ TAILQ_INIT(&utc->open_sessions);
+ TAILQ_INIT(&utc->cryp_states);
+ TAILQ_INIT(&utc->objects);
+ TAILQ_INIT(&utc->storage_enums);
+#if defined(CFG_SE_API)
+ utc->se_service = NULL;
+#endif
+
+ res = load_elf(utc, sec_shdr, signed_ta);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ utc->load_addr = tee_mmu_get_load_addr(&utc->ctx);
+ ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;
+
+ if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
+ res = TEE_ERROR_SECURITY;
+ goto error_return;
+ }
+
+ /* check input flags bitmask consistency and save flags */
+ if ((ta_head->flags & opt_flags) != ta_head->flags ||
+ (ta_head->flags & man_flags) != man_flags) {
+ EMSG("TA flag issue: flags=%x opt=%X man=%X",
+ ta_head->flags, opt_flags, man_flags);
+ res = TEE_ERROR_BAD_FORMAT;
+ goto error_return;
+ }
+
+ utc->ctx.flags = ta_head->flags;
+ utc->ctx.uuid = ta_head->uuid;
+ utc->entry_func = ta_head->entry.ptr64;
+
+ utc->ctx.ref_count = 1;
+
+ condvar_init(&utc->ctx.busy_cv);
+ TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link);
+ *ta_ctx = &utc->ctx;
+
+ DMSG("ELF load address 0x%x", utc->load_addr);
+
+ tee_mmu_set_ctx(NULL);
+ /* end thread protection (multi-threaded) */
+
+ free(sec_shdr);
+ return TEE_SUCCESS;
+
+error_return:
+ free(sec_shdr);
+ tee_mmu_set_ctx(NULL);
+ if (utc) {
+ pgt_flush_ctx(&utc->ctx);
+ tee_pager_rem_uta_areas(utc);
+ tee_mmu_final(utc);
+ mobj_free(utc->mobj_code);
+ mobj_free(utc->mobj_stack);
+ free(utc);
+ }
+ return res;
+}
+
+static void init_utee_param(struct utee_params *up,
+ const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
+{
+ size_t n;
+
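+ /*
+ * Each parameter occupies two consecutive slots in up->vals: buffer
+ * address and size for memrefs, value.a and value.b for values.
+ */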
+ up->types = p->types;
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uintptr_t a;
+ uintptr_t b;
+
+ switch (TEE_PARAM_TYPE_GET(p->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ a = (uintptr_t)va[n];
+ b = p->u[n].mem.size;
+ break;
+ case TEE_PARAM_TYPE_VALUE_INPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ a = p->u[n].val.a;
+ b = p->u[n].val.b;
+ break;
+ default:
+ a = 0;
+ b = 0;
+ break;
+ }
+ /* See comment for struct utee_params in utee_types.h */
+ up->vals[n * 2] = a;
+ up->vals[n * 2 + 1] = b;
+ }
+}
+
+static void update_from_utee_param(struct tee_ta_param *p,
+ const struct utee_params *up)
+{
+ size_t n;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(p->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ /* See comment for struct utee_params in utee_types.h */
+ p->u[n].mem.size = up->vals[n * 2 + 1];
+ break;
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ /* See comment for struct utee_params in utee_types.h */
+ p->u[n].val.a = up->vals[n * 2];
+ p->u[n].val.b = up->vals[n * 2 + 1];
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void clear_vfp_state(struct user_ta_ctx *utc __unused)
+{
+#ifdef CFG_WITH_VFP
+ thread_user_clear_vfp(&utc->vfp);
+#endif
+}
+
+static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
+ struct tee_ta_session *session,
+ enum utee_entry_func func, uint32_t cmd,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+ struct utee_params *usr_params;
+ uaddr_t usr_stack;
+ struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
+ TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
+ struct tee_ta_session *s __maybe_unused;
+ void *param_va[TEE_NUM_PARAMS] = { NULL };
+
+ if (!(utc->ctx.flags & TA_FLAG_EXEC_DDR))
+ panic("TA does not exec in DDR");
+
+ /* Map user space memory */
+ res = tee_mmu_map_param(utc, param, param_va);
+ if (res != TEE_SUCCESS)
+ goto cleanup_return;
+
+ /* Switch to user ctx */
+ tee_ta_push_current_session(session);
+
+ /* Make room for usr_params at top of stack */
+ usr_stack = (uaddr_t)utc->mmu->regions[0].va + utc->mobj_stack->size;
+ usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
+ usr_params = (struct utee_params *)usr_stack;
+ init_utee_param(usr_params, param, param_va);
+
+ res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session),
+ (vaddr_t)usr_params, cmd, usr_stack,
+ utc->entry_func, utc->is_32bit,
+ &utc->ctx.panicked, &utc->ctx.panic_code);
+
+ clear_vfp_state(utc);
+ /*
+ * According to the GP spec the origin should always be set to the
+ * TA after TA execution
+ */
+ serr = TEE_ORIGIN_TRUSTED_APP;
+
+ if (utc->ctx.panicked) {
+ DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
+ utc->ctx.panic_code);
+ serr = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_TARGET_DEAD;
+ }
+
+ /* Copy out value results */
+ update_from_utee_param(param, usr_params);
+
+ s = tee_ta_pop_current_session();
+ assert(s == session);
+cleanup_return:
+
+ /*
+ * Clear the cancel state now that the user TA has returned. The next
+ * time the TA will be invoked will be with a new operation and should
+ * not have an old cancellation pending.
+ */
+ session->cancel = false;
+
+ /*
+ * Can't update *err until now since it may point to an address
+ * mapped for the user mode TA.
+ */
+ *err = serr;
+
+ return res;
+}
+
+/*
+ * Load a TA via RPC with the UUID given by the input parameter uuid. The
+ * virtual address of the TA is received in the out parameter ta.
+ *
+ * This function is not thread safe.
+ */
+static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
+ uint64_t *cookie_ta)
+{
+ TEE_Result res;
+ struct optee_msg_param params[2];
+ paddr_t phta = 0;
+ uint64_t cta = 0;
+
+ if (!uuid || !ta || !cookie_ta)
+ return TEE_ERROR_BAD_PARAMETERS;
+
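+ /*
+ * First pass: ask tee-supplicant for the size of the TA binary by
+ * passing a zero-sized output buffer.
+ */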
+ memset(params, 0, sizeof(params));
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ tee_uuid_to_octets((void *)&params[0].u.value, uuid);
+ params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ params[1].u.tmem.buf_ptr = 0;
+ params[1].u.tmem.size = 0;
+ params[1].u.tmem.shm_ref = 0;
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ thread_rpc_alloc_payload(params[1].u.tmem.size, &phta, &cta);
+ if (!phta)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ *ta = phys_to_virt(phta, MEM_AREA_NSEC_SHM);
+ if (!*ta) {
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+ *cookie_ta = cta;
+
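+ /*
+ * Second pass: shared memory of the right size is now allocated, let
+ * tee-supplicant copy the TA binary into it.
+ */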
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ tee_uuid_to_octets((void *)&params[0].u.value, uuid);
+ params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ params[1].u.tmem.buf_ptr = phta;
+ params[1].u.tmem.shm_ref = cta;
+ /* Note that params[1].u.tmem.size is already assigned */
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
+out:
+ if (res != TEE_SUCCESS)
+ thread_rpc_free_payload(cta);
+ return res;
+}
+
+static TEE_Result init_session_with_signed_ta(const TEE_UUID *uuid,
+ const struct shdr *signed_ta,
+ struct tee_ta_session *s)
+{
+ TEE_Result res;
+
+ DMSG(" Load dynamic TA");
+ /* load and verify */
+ res = ta_load(uuid, signed_ta, &s->ctx);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ DMSG(" dyn TA : %pUl", (void *)&s->ctx->uuid);
+
+ return res;
+}
+
+static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s,
+ struct tee_ta_param *param, TEE_ErrorOrigin *eo)
+{
+ return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param);
+}
+
+static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s,
+ uint32_t cmd, struct tee_ta_param *param,
+ TEE_ErrorOrigin *eo)
+{
+ return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param);
+}
+
+static void user_ta_enter_close_session(struct tee_ta_session *s)
+{
+ TEE_ErrorOrigin eo;
+ struct tee_ta_param param = { 0 };
+
+ user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, &param);
+}
+
+static void user_ta_dump_state(struct tee_ta_ctx *ctx)
+{
+ struct user_ta_ctx *utc __maybe_unused = to_user_ta_ctx(ctx);
+ size_t n;
+
+ EMSG_RAW("- load addr : 0x%x ctx-idr: %d",
+ utc->load_addr, utc->context);
+ EMSG_RAW("- stack: 0x%" PRIxVA " %zu",
+ utc->mmu->regions[0].va, utc->mobj_stack->size);
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ paddr_t pa = 0;
+
+ if (utc->mmu->regions[n].mobj)
+ mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset, 0, &pa);
+
+ EMSG_RAW("sect %zu : va %#" PRIxVA " pa %#" PRIxPA " %#zx",
+ n, utc->mmu->regions[n].va, pa,
+ utc->mmu->regions[n].size);
+ }
+}
+KEEP_PAGER(user_ta_dump_state);
+
+static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
+{
+ struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
+
+ tee_pager_rem_uta_areas(utc);
+
+ /*
+ * Clean all traces of the TA, both RO and RW data.
+ * No L2 cache maintenance to avoid sync problems
+ */
+ if (ctx->flags & TA_FLAG_EXEC_DDR) {
+ void *va;
+
+ if (utc->mobj_code) {
+ va = mobj_get_va(utc->mobj_code, 0);
+ if (va) {
+ memset(va, 0, utc->mobj_code->size);
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ utc->mobj_code->size);
+ }
+ }
+
+ if (utc->mobj_stack) {
+ va = mobj_get_va(utc->mobj_stack, 0);
+ if (va) {
+ memset(va, 0, utc->mobj_stack->size);
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ utc->mobj_stack->size);
+ }
+ }
+ }
+
+ /*
+ * Close sessions opened by this TA
+ * Note that tee_ta_close_session() removes the item
+ * from the utc->open_sessions list.
+ */
+ while (!TAILQ_EMPTY(&utc->open_sessions)) {
+ tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
+ &utc->open_sessions, KERN_IDENTITY);
+ }
+
+ tee_mmu_final(utc);
+ mobj_free(utc->mobj_code);
+ mobj_free(utc->mobj_stack);
+
+ /* Free cryp states created by this TA */
+ tee_svc_cryp_free_states(utc);
+ /* Close cryp objects opened by this TA */
+ tee_obj_close_all(utc);
+ /* Free enums created by this TA */
+ tee_svc_storage_close_all_enum(utc);
+ free(utc);
+}
+
+static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
+{
+ return to_user_ta_ctx(ctx)->context;
+}
+
+static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
+ .enter_open_session = user_ta_enter_open_session,
+ .enter_invoke_cmd = user_ta_enter_invoke_cmd,
+ .enter_close_session = user_ta_enter_close_session,
+ .dump_state = user_ta_dump_state,
+ .destroy = user_ta_ctx_destroy,
+ .get_instance_id = user_ta_get_instance_id,
+};
+
+TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s)
+{
+ TEE_Result res;
+ struct shdr *ta = NULL;
+ uint64_t cookie_ta = 0;
+
+ /* Request TA from tee-supplicant */
+ res = rpc_load(uuid, &ta, &cookie_ta);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = init_session_with_signed_ta(uuid, ta, s);
+ /*
+ * Free normal world shared memory now that the TA either has been
+ * copied into secure memory or the TA failed to be initialized.
+ */
+ thread_rpc_free_payload(cookie_ta);
+
+ if (res == TEE_SUCCESS)
+ s->ctx->ops = &user_ta_ops;
+ return res;
+}
diff --git a/core/arch/arm/kernel/vfp.c b/core/arch/arm/kernel/vfp.c
new file mode 100644
index 0000000..9903642
--- /dev/null
+++ b/core/arch/arm/kernel/vfp.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/vfp.h>
+#include "vfp_private.h"
+
+#ifdef ARM32
+bool vfp_is_enabled(void)
+{
+ return !!(vfp_read_fpexc() & FPEXC_EN);
+}
+
+void vfp_enable(void)
+{
+ vfp_write_fpexc(vfp_read_fpexc() | FPEXC_EN);
+}
+
+void vfp_disable(void)
+{
+ vfp_write_fpexc(vfp_read_fpexc() & ~FPEXC_EN);
+}
+
+void vfp_lazy_save_state_init(struct vfp_state *state)
+{
+ uint32_t fpexc = vfp_read_fpexc();
+
+ state->fpexc = fpexc;
+ vfp_write_fpexc(fpexc & ~FPEXC_EN);
+}
+
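+/*
+ * Only save the register bank if VFP was enabled when
+ * vfp_lazy_save_state_init() ran, i.e. if the context being saved may
+ * actually have used VFP.
+ */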
+void vfp_lazy_save_state_final(struct vfp_state *state)
+{
+ if (state->fpexc & FPEXC_EN) {
+ uint32_t fpexc = vfp_read_fpexc();
+
+ assert(!(fpexc & FPEXC_EN));
+ vfp_write_fpexc(fpexc | FPEXC_EN);
+ state->fpscr = vfp_read_fpscr();
+ vfp_save_extension_regs(state->reg);
+ vfp_write_fpexc(fpexc);
+ }
+}
+
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state)
+{
+ if (full_state) {
+ /*
+ * Only restore the VFP registers if they have been touched, since
+ * they are otherwise intact.
+ */
+
+ /* FPEXC is restored to what's in state->fpexc below */
+ vfp_write_fpexc(vfp_read_fpexc() | FPEXC_EN);
+
+ vfp_write_fpscr(state->fpscr);
+ vfp_restore_extension_regs(state->reg);
+ }
+ vfp_write_fpexc(state->fpexc);
+}
+#endif /* ARM32 */
+
+#ifdef ARM64
+bool vfp_is_enabled(void)
+{
+ return (CPACR_EL1_FPEN(read_cpacr_el1()) & CPACR_EL1_FPEN_EL0EL1);
+}
+
+void vfp_enable(void)
+{
+ uint32_t val = read_cpacr_el1();
+
+ val |= (CPACR_EL1_FPEN_EL0EL1 << CPACR_EL1_FPEN_SHIFT);
+ write_cpacr_el1(val);
+ isb();
+}
+
+void vfp_disable(void)
+{
+ uint32_t val = read_cpacr_el1();
+
+ val &= ~(CPACR_EL1_FPEN_MASK << CPACR_EL1_FPEN_SHIFT);
+ write_cpacr_el1(val);
+ isb();
+}
+
+void vfp_lazy_save_state_init(struct vfp_state *state)
+{
+ state->cpacr_el1 = read_cpacr_el1();
+ vfp_disable();
+}
+
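+/*
+ * Only save the register bank if FP/SIMD was enabled when
+ * vfp_lazy_save_state_init() ran, or if a save is explicitly forced.
+ */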
+void vfp_lazy_save_state_final(struct vfp_state *state)
+{
+ if ((CPACR_EL1_FPEN(state->cpacr_el1) & CPACR_EL1_FPEN_EL0EL1) ||
+ state->force_save) {
+ assert(!vfp_is_enabled());
+ vfp_enable();
+ state->fpcr = read_fpcr();
+ state->fpsr = read_fpsr();
+ vfp_save_extension_regs(state->reg);
+ vfp_disable();
+ }
+}
+
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state)
+{
+ if (full_state) {
+ /*
+ * Only restore the VFP registers if they have been touched, since
+ * they are otherwise intact.
+ */
+
+ /* CPACR_EL1 is restored to what's in state->cpacr_el1 below */
+ vfp_enable();
+ write_fpcr(state->fpcr);
+ write_fpsr(state->fpsr);
+ vfp_restore_extension_regs(state->reg);
+ }
+ write_cpacr_el1(state->cpacr_el1);
+ isb();
+}
+#endif /* ARM64 */
diff --git a/core/arch/arm/kernel/vfp_a32.S b/core/arch/arm/kernel/vfp_a32.S
new file mode 100644
index 0000000..6cc3e77
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_a32.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/unwind.h>
+
+ .section .text.vfp_asm
+
+/* void vfp_save_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
+FUNC vfp_save_extension_regs , :
+UNWIND( .fnstart)
+ vstm r0!, {d0-d15}
+ vstm r0, {d16-d31}
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_save_extension_regs
+
+/* void vfp_restore_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
+FUNC vfp_restore_extension_regs , :
+UNWIND( .fnstart)
+ vldm r0!, {d0-d15}
+ vldm r0, {d16-d31}
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_restore_extension_regs
+
+/* void vfp_write_fpexc(uint32_t fpexc) */
+FUNC vfp_write_fpexc , :
+UNWIND( .fnstart)
+ vmsr fpexc, r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_write_fpexc
+
+/* uint32_t vfp_read_fpexc(void) */
+FUNC vfp_read_fpexc , :
+UNWIND( .fnstart)
+ vmrs r0, fpexc
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_read_fpexc
+
+/* void vfp_write_fpscr(uint32_t fpscr) */
+FUNC vfp_write_fpscr , :
+UNWIND( .fnstart)
+ vmsr fpscr, r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_write_fpscr
+
+/* uint32_t vfp_read_fpscr(void) */
+FUNC vfp_read_fpscr , :
+UNWIND( .fnstart)
+ vmrs r0, fpscr
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_read_fpscr
diff --git a/core/arch/arm/kernel/vfp_a64.S b/core/arch/arm/kernel/vfp_a64.S
new file mode 100644
index 0000000..53210c5
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_a64.S
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+
+ .section .text.vfp_asm
+
+/* void vfp_save_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]); */
+FUNC vfp_save_extension_regs , :
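+ /* Store all 32 Q registers (512 bytes in total), two registers per stp */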
+ stp q0, q1, [x0, #16 * 0]
+ stp q2, q3, [x0, #16 * 2]
+ stp q4, q5, [x0, #16 * 4]
+ stp q6, q7, [x0, #16 * 6]
+ stp q8, q9, [x0, #16 * 8]
+ stp q10, q11, [x0, #16 * 10]
+ stp q12, q13, [x0, #16 * 12]
+ stp q14, q15, [x0, #16 * 14]
+ stp q16, q17, [x0, #16 * 16]
+ stp q18, q19, [x0, #16 * 18]
+ stp q20, q21, [x0, #16 * 20]
+ stp q22, q23, [x0, #16 * 22]
+ stp q24, q25, [x0, #16 * 24]
+ stp q26, q27, [x0, #16 * 26]
+ stp q28, q29, [x0, #16 * 28]
+ stp q30, q31, [x0, #16 * 30]
+ ret
+END_FUNC vfp_save_extension_regs
+
+/* void vfp_restore_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]); */
+FUNC vfp_restore_extension_regs , :
+ ldp q0, q1, [x0, #16 * 0]
+ ldp q2, q3, [x0, #16 * 2]
+ ldp q4, q5, [x0, #16 * 4]
+ ldp q6, q7, [x0, #16 * 6]
+ ldp q8, q9, [x0, #16 * 8]
+ ldp q10, q11, [x0, #16 * 10]
+ ldp q12, q13, [x0, #16 * 12]
+ ldp q14, q15, [x0, #16 * 14]
+ ldp q16, q17, [x0, #16 * 16]
+ ldp q18, q19, [x0, #16 * 18]
+ ldp q20, q21, [x0, #16 * 20]
+ ldp q22, q23, [x0, #16 * 22]
+ ldp q24, q25, [x0, #16 * 24]
+ ldp q26, q27, [x0, #16 * 26]
+ ldp q28, q29, [x0, #16 * 28]
+ ldp q30, q31, [x0, #16 * 30]
+ ret
+END_FUNC vfp_restore_extension_regs
diff --git a/core/arch/arm/kernel/vfp_private.h b/core/arch/arm/kernel/vfp_private.h
new file mode 100644
index 0000000..0c0ffba
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_private.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VFP_PRIVATE
+#define VFP_PRIVATE
+
+#include <kernel/vfp.h>
+
+void vfp_save_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]);
+void vfp_restore_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]);
+void vfp_clear_extension_regs(void);
+
+#ifdef ARM32
+
+#define FPEXC_EN (1 << 30)
+
+/*
+ * These functions can't be implemented in inline assembly when compiling
+ * for Thumb mode, so to keep things simple they are always implemented in
+ * ARM assembly as ordinary functions.
+ */
+void vfp_write_fpexc(uint32_t fpexc);
+uint32_t vfp_read_fpexc(void);
+void vfp_write_fpscr(uint32_t fpscr);
+uint32_t vfp_read_fpscr(void);
+
+#endif /* ARM32 */
+
+#endif /*VFP_PRIVATE*/
diff --git a/core/arch/arm/kernel/wait_queue.c b/core/arch/arm/kernel/wait_queue.c
new file mode 100644
index 0000000..a96e0fe
--- /dev/null
+++ b/core/arch/arm/kernel/wait_queue.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <types_ext.h>
+#include <tee_api_defines.h>
+#include <string.h>
+#include <optee_msg.h>
+#include <kernel/spinlock.h>
+#include <kernel/wait_queue.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+static unsigned wq_spin_lock;
+
+void wq_init(struct wait_queue *wq)
+{
+ *wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
+}
+
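+/*
+ * Ask normal world via RPC to put the calling thread to sleep or to wake a
+ * sleeping one; blocking is delegated to the normal world scheduler.
+ */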
+static void wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
+ const char *fname, int lineno __maybe_unused)
+{
+ uint32_t ret;
+ struct optee_msg_param params;
+ const char *cmd_str __maybe_unused =
+ func == OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
+
+ if (fname)
+ DMSG("%s thread %u %p %s:%d", cmd_str, id,
+ sync_obj, fname, lineno);
+ else
+ DMSG("%s thread %u %p", cmd_str, id, sync_obj);
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params.u.value.a = func;
+ params.u.value.b = id;
+
+ ret = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_WAIT_QUEUE, 1, &params);
+ if (ret != TEE_SUCCESS)
+ DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
+}
+
+static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
+{
+ struct wait_queue_elem *wqe_iter;
+
+ /* Add elem to end of wait queue */
+ wqe_iter = SLIST_FIRST(wq);
+ if (wqe_iter) {
+ while (SLIST_NEXT(wqe_iter, link))
+ wqe_iter = SLIST_NEXT(wqe_iter, link);
+ SLIST_INSERT_AFTER(wqe_iter, wqe, link);
+ } else
+ SLIST_INSERT_HEAD(wq, wqe, link);
+}
+
+void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ struct condvar *cv)
+{
+ uint32_t old_itr_status;
+
+ wqe->handle = thread_get_id();
+ wqe->done = false;
+ wqe->cv = cv;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ slist_add_tail(wq, wqe);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+}
+
+void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ const void *sync_obj, const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ unsigned done;
+
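+ /*
+ * Sleep via RPC until wq_wake_one() has marked this element done;
+ * loop to tolerate spurious wake-ups.
+ */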
+ do {
+ wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
+ sync_obj, fname, lineno);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ done = wqe->done;
+ if (done)
+ SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+ } while (!done);
+}
+
+void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
+ const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+ int handle = -1;
+ bool do_wakeup = false;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
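+ /* Find the first active (non-condvar) waiter and wake it unless already done */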
+ SLIST_FOREACH(wqe, wq, link) {
+ if (!wqe->cv) {
+ do_wakeup = !wqe->done;
+ wqe->done = true;
+ handle = wqe->handle;
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (do_wakeup)
+ wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP, handle,
+ sync_obj, fname, lineno);
+}
+
+void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
+ bool only_one, const void *sync_obj __unused,
+ const char *fname, int lineno __maybe_unused)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+
+ if (!cv)
+ return;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ /*
+ * Find condvar waiter(s) and promote each to an active waiter.
+ * This is a bit unfair to any other active waiters as a condvar
+ * waiter is added to the queue when it starts waiting for the
+ * condvar.
+ */
+ SLIST_FOREACH(wqe, wq, link) {
+ if (wqe->cv == cv) {
+ if (fname)
+ FMSG("promote thread %u %p %s:%d",
+ wqe->handle, (void *)cv->m, fname, lineno);
+ else
+ FMSG("promote thread %u %p",
+ wqe->handle, (void *)cv->m);
+
+ wqe->cv = NULL;
+ if (only_one)
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+}
+
+bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+ bool rc = false;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ SLIST_FOREACH(wqe, wq, link) {
+ if (wqe->cv == cv) {
+ rc = true;
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return rc;
+}
+
+bool wq_is_empty(struct wait_queue *wq)
+{
+ uint32_t old_itr_status;
+ bool ret;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ ret = SLIST_EMPTY(wq);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return ret;
+}