summaryrefslogtreecommitdiff
path: root/core/arch/arm/include/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'core/arch/arm/include/kernel')
-rw-r--r--core/arch/arm/include/kernel/abort.h57
-rw-r--r--core/arch/arm/include/kernel/generic_boot.h101
-rw-r--r--core/arch/arm/include/kernel/misc.h53
-rw-r--r--core/arch/arm/include/kernel/mutex.h98
-rw-r--r--core/arch/arm/include/kernel/pm_stubs.h37
-rw-r--r--core/arch/arm/include/kernel/pseudo_ta.h84
-rw-r--r--core/arch/arm/include/kernel/spinlock.h86
-rw-r--r--core/arch/arm/include/kernel/tee_l2cc_mutex.h72
-rw-r--r--core/arch/arm/include/kernel/thread.h559
-rw-r--r--core/arch/arm/include/kernel/thread_defs.h35
-rw-r--r--core/arch/arm/include/kernel/time_source.h44
-rw-r--r--core/arch/arm/include/kernel/tz_proc_def.h110
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce.h73
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_def.h141
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_pl310.h46
-rw-r--r--core/arch/arm/include/kernel/unwind.h77
-rw-r--r--core/arch/arm/include/kernel/user_ta.h93
-rw-r--r--core/arch/arm/include/kernel/vfp.h127
-rw-r--r--core/arch/arm/include/kernel/wait_queue.h85
19 files changed, 1978 insertions, 0 deletions
diff --git a/core/arch/arm/include/kernel/abort.h b/core/arch/arm/include/kernel/abort.h
new file mode 100644
index 0000000..0480f43
--- /dev/null
+++ b/core/arch/arm/include/kernel/abort.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_ABORT_H
+#define KERNEL_ABORT_H
+
+#define ABORT_TYPE_UNDEF 0
+#define ABORT_TYPE_PREFETCH 1
+#define ABORT_TYPE_DATA 2
+
+#ifndef ASM
+
+#include <compiler.h>
+#include <types_ext.h>
+
+struct abort_info {
+ uint32_t abort_type;
+ uint32_t fault_descr; /* only valid for data of prefetch abort */
+ vaddr_t va;
+ uint32_t pc;
+ struct thread_abort_regs *regs;
+};
+
+void abort_print(struct abort_info *ai);
+void abort_print_error(struct abort_info *ai);
+
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs);
+
+bool abort_is_user_exception(struct abort_info *ai);
+
+#endif /*ASM*/
+#endif /*KERNEL_ABORT_H*/
+
diff --git a/core/arch/arm/include/kernel/generic_boot.h b/core/arch/arm/include/kernel/generic_boot.h
new file mode 100644
index 0000000..622c6ff
--- /dev/null
+++ b/core/arch/arm/include/kernel/generic_boot.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_GENERIC_BOOT_H
+#define KERNEL_GENERIC_BOOT_H
+
+#include <initcall.h>
+#include <types_ext.h>
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+unsigned long cpu_on_handler(unsigned long a0, unsigned long a1);
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long unused,
+ unsigned long fdt);
+unsigned long generic_boot_cpu_on_handler(unsigned long a0, unsigned long a1);
+#else
+void generic_boot_init_primary(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt);
+void generic_boot_init_secondary(unsigned long nsec_entry);
+#endif
+
+void main_init_gic(void);
+void main_secondary_init_gic(void);
+
+void init_sec_mon(unsigned long nsec_entry);
+
+const struct thread_handlers *generic_boot_get_handlers(void);
+
+/* weak routines eventually overridden by platform */
+void plat_cpu_reset_early(void);
+void plat_cpu_reset_late(void);
+void arm_cl2_config(vaddr_t pl310);
+void arm_cl2_enable(vaddr_t pl310);
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+extern paddr_t ns_entry_addrs[] __early_bss;
+int generic_boot_core_release(size_t core_idx, paddr_t entry);
+paddr_t generic_boot_core_hpen(void);
+#endif
+
+extern uint8_t __text_init_start[];
+extern uint8_t __text_start[];
+extern initcall_t __initcall_start;
+extern initcall_t __initcall_end;
+extern uint8_t __data_start[];
+extern uint8_t __data_end[];
+extern uint8_t __rodata_start[];
+extern uint8_t __rodata_end[];
+extern uint8_t __early_bss_start[];
+extern uint8_t __early_bss_end[];
+extern uint8_t __bss_start[];
+extern uint8_t __bss_end[];
+extern uint8_t __nozi_start[];
+extern uint8_t __nozi_end[];
+extern uint8_t __nozi_stack_start[];
+extern uint8_t __nozi_stack_end[];
+extern uint8_t __init_start[];
+extern uint8_t __init_size[];
+extern uint8_t __tmp_hashes_start[];
+extern uint8_t __tmp_hashes_size[];
+extern uint8_t __heap1_start[];
+extern uint8_t __heap1_end[];
+extern uint8_t __heap2_start[];
+extern uint8_t __heap2_end[];
+extern uint8_t __pageable_part_start[];
+extern uint8_t __pageable_part_end[];
+extern uint8_t __pageable_start[];
+extern uint8_t __pageable_end[];
+extern uint8_t __asan_shadow_start[];
+extern uint8_t __asan_shadow_end[];
+extern vaddr_t __ctor_list;
+extern vaddr_t __ctor_end;
+extern uint8_t __end[];
+
+/* Generated by core/arch/arm/kernel/link.mk */
+extern const char core_v_str[];
+
+#endif /* KERNEL_GENERIC_BOOT_H */
diff --git a/core/arch/arm/include/kernel/misc.h b/core/arch/arm/include/kernel/misc.h
new file mode 100644
index 0000000..a9174a8
--- /dev/null
+++ b/core/arch/arm/include/kernel/misc.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_MISC_H
+#define KERNEL_MISC_H
+
+#include <types_ext.h>
+#include <arm.h>
+#include <kernel/thread.h>
+
+size_t get_core_pos(void);
+
+uint32_t read_mode_sp(int cpu_mode);
+uint32_t read_mode_lr(int cpu_mode);
+
+static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
+{
+ return (uint64_t)reg0 << 32 | reg1;
+}
+
+static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
+ uint32_t *reg1)
+{
+ *reg0 = val >> 32;
+ *reg1 = val;
+}
+
+#endif /*KERNEL_MISC_H*/
+
diff --git a/core/arch/arm/include/kernel/mutex.h b/core/arch/arm/include/kernel/mutex.h
new file mode 100644
index 0000000..1698b35
--- /dev/null
+++ b/core/arch/arm/include/kernel/mutex.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_MUTEX_H
+#define KERNEL_MUTEX_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+#include <kernel/wait_queue.h>
+
+enum mutex_value {
+ MUTEX_VALUE_UNLOCKED,
+ MUTEX_VALUE_LOCKED,
+};
+
+struct mutex {
+ enum mutex_value value;
+ unsigned spin_lock; /* used when operating on this struct */
+ struct wait_queue wq;
+ int owner_id;
+ TAILQ_ENTRY(mutex) link;
+};
+#define MUTEX_INITIALIZER \
+ { .value = MUTEX_VALUE_UNLOCKED, .owner_id = -1, \
+ .wq = WAIT_QUEUE_INITIALIZER, }
+
+TAILQ_HEAD(mutex_head, mutex);
+
+void mutex_init(struct mutex *m);
+void mutex_destroy(struct mutex *m);
+
+#ifdef CFG_MUTEX_DEBUG
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_unlock(m) mutex_unlock_debug((m), __FILE__, __LINE__)
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_lock(m) mutex_lock_debug((m), __FILE__, __LINE__)
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_trylock(m) mutex_trylock_debug((m), __FILE__, __LINE__)
+
+#else
+void mutex_unlock(struct mutex *m);
+void mutex_lock(struct mutex *m);
+bool mutex_trylock(struct mutex *m);
+#endif
+
+
+struct condvar {
+ unsigned spin_lock;
+ struct mutex *m;
+};
+#define CONDVAR_INITIALIZER { .m = NULL }
+
+void condvar_init(struct condvar *cv);
+void condvar_destroy(struct condvar *cv);
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_signal(cv) condvar_signal_debug((cv), __FILE__, __LINE__)
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_broadcast(cv) condvar_broadcast_debug((cv), __FILE__, __LINE__)
+
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno);
+#define condvar_wait(cv, m) condvar_wait_debug((cv), (m), __FILE__, __LINE__)
+#else
+void condvar_signal(struct condvar *cv);
+void condvar_broadcast(struct condvar *cv);
+void condvar_wait(struct condvar *cv, struct mutex *m);
+#endif
+
+#endif /*KERNEL_MUTEX_H*/
+
diff --git a/core/arch/arm/include/kernel/pm_stubs.h b/core/arch/arm/include/kernel/pm_stubs.h
new file mode 100644
index 0000000..6cbe897
--- /dev/null
+++ b/core/arch/arm/include/kernel/pm_stubs.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PM_STUBS_H
+#define PM_STUBS_H
+
+#include <stdint.h>
+#include <compiler.h>
+
+unsigned long pm_panic(unsigned long a0, unsigned long a1) __noreturn;
+unsigned long pm_do_nothing(unsigned long a0, unsigned long a1);
+
+#endif /* PM_STUBS_H */
diff --git a/core/arch/arm/include/kernel/pseudo_ta.h b/core/arch/arm/include/kernel/pseudo_ta.h
new file mode 100644
index 0000000..98316bd
--- /dev/null
+++ b/core/arch/arm/include/kernel/pseudo_ta.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_PSEUDO_TA_H
+#define KERNEL_PSEUDO_TA_H
+
+#include <assert.h>
+#include <compiler.h>
+#include <kernel/tee_ta_manager.h>
+#include <tee_api_types.h>
+#include <user_ta_header.h>
+#include <util.h>
+
+#define PTA_MANDATORY_FLAGS (TA_FLAG_SINGLE_INSTANCE | \
+ TA_FLAG_MULTI_SESSION | \
+ TA_FLAG_INSTANCE_KEEP_ALIVE)
+
+#define PTA_ALLOWED_FLAGS PTA_MANDATORY_FLAGS
+#define PTA_DEFAULT_FLAGS PTA_MANDATORY_FLAGS
+
+struct pseudo_ta_head {
+ TEE_UUID uuid;
+ const char *name;
+ uint32_t flags;
+
+ TEE_Result (*create_entry_point)(void);
+ void (*destroy_entry_point)(void);
+ TEE_Result (*open_session_entry_point)(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS],
+ void **ppSessionContext);
+ void (*close_session_entry_point)(void *pSessionContext);
+ TEE_Result (*invoke_command_entry_point)(void *pSessionContext,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS]);
+};
+
+#define pseudo_ta_register(...) static const struct pseudo_ta_head __head \
+ __used __section("ta_head_section") = { __VA_ARGS__ }
+
+
+struct pseudo_ta_ctx {
+ const struct pseudo_ta_head *pseudo_ta;
+ struct tee_ta_ctx ctx;
+};
+
+static inline bool is_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+static inline struct pseudo_ta_ctx *to_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_pseudo_ta_ctx(ctx));
+ return container_of(ctx, struct pseudo_ta_ctx, ctx);
+}
+
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+
+#endif /* KERNEL_PSEUDO_TA_H */
+
diff --git a/core/arch/arm/include/kernel/spinlock.h b/core/arch/arm/include/kernel/spinlock.h
new file mode 100644
index 0000000..c248673
--- /dev/null
+++ b/core/arch/arm/include/kernel/spinlock.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_SPINLOCK_H
+#define KERNEL_SPINLOCK_H
+
+#define SPINLOCK_LOCK 1
+#define SPINLOCK_UNLOCK 0
+
+#ifndef ASM
+#include <assert.h>
+#include <compiler.h>
+#include <stdbool.h>
+#include <kernel/thread.h>
+
+#ifdef CFG_TEE_CORE_DEBUG
+void spinlock_count_incr(void);
+void spinlock_count_decr(void);
+bool have_spinlock(void);
+static inline void assert_have_no_spinlock(void)
+{
+ assert(!have_spinlock());
+}
+#else
+static inline void spinlock_count_incr(void) { }
+static inline void spinlock_count_decr(void) { }
+static inline void assert_have_no_spinlock(void) { }
+#endif
+
+void __cpu_spin_lock(unsigned int *lock);
+void __cpu_spin_unlock(unsigned int *lock);
+/* returns 0 on locking success, non zero on failure */
+unsigned int __cpu_spin_trylock(unsigned int *lock);
+
+static inline void cpu_spin_lock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_lock(lock);
+ spinlock_count_incr();
+}
+
+static inline bool cpu_spin_trylock(unsigned int *lock)
+{
+ unsigned int rc;
+
+ assert(thread_irq_disabled());
+ rc = __cpu_spin_trylock(lock);
+ if (!rc)
+ spinlock_count_incr();
+ return !rc;
+}
+
+static inline void cpu_spin_unlock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_unlock(lock);
+ spinlock_count_decr();
+}
+#endif /* ASM */
+
+#endif /* KERNEL_SPINLOCK_H */
diff --git a/core/arch/arm/include/kernel/tee_l2cc_mutex.h b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
new file mode 100644
index 0000000..508a510
--- /dev/null
+++ b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_L2CC_MUTEX_H
+#define TEE_L2CC_MUTEX_H
+#include <inttypes.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <compiler.h>
+
+#if defined(CFG_PL310)
+TEE_Result tee_enable_l2cc_mutex(void);
+TEE_Result tee_disable_l2cc_mutex(void);
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+void tee_l2cc_mutex_lock(void);
+void tee_l2cc_mutex_unlock(void);
+
+/*
+ * Store the pa of a mutex used for l2cc
+ * It is allocated from the boot
+ */
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa);
+
+#else
+static TEE_Result tee_enable_l2cc_mutex(void);
+static TEE_Result tee_disable_l2cc_mutex(void);
+static TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+static TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+
+static inline TEE_Result tee_enable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_disable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_get_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_set_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+#endif /* TEE_L2CC_MUTEX_H */
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
new file mode 100644
index 0000000..175ba77
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_H
+#define KERNEL_THREAD_H
+
+#ifndef ASM
+#include <types_ext.h>
+#include <compiler.h>
+#include <optee_msg.h>
+#include <kernel/mutex.h>
+#include <kernel/vfp.h>
+#include <mm/pgt_cache.h>
+#endif
+
+#define THREAD_ID_0 0
+#define THREAD_ID_INVALID -1
+
+#define THREAD_RPC_MAX_NUM_PARAMS 4
+
+#ifndef ASM
+struct thread_vector_table {
+ uint32_t std_smc_entry;
+ uint32_t fast_smc_entry;
+ uint32_t cpu_on_entry;
+ uint32_t cpu_off_entry;
+ uint32_t cpu_resume_entry;
+ uint32_t cpu_suspend_entry;
+ uint32_t fiq_entry;
+ uint32_t system_off_entry;
+ uint32_t system_reset_entry;
+};
+extern struct thread_vector_table thread_vector_table;
+
+struct thread_specific_data {
+ TAILQ_HEAD(, tee_ta_session) sess_stack;
+ struct tee_ta_ctx *ctx;
+#ifdef CFG_SMALL_PAGE_USER_TA
+ struct pgt_cache pgt_cache;
+#endif
+ void *rpc_fs_payload;
+ paddr_t rpc_fs_payload_pa;
+ uint64_t rpc_fs_payload_cookie;
+ size_t rpc_fs_payload_size;
+};
+
+struct thread_user_vfp_state {
+ struct vfp_state vfp;
+ bool lazy_saved;
+ bool saved;
+};
+
+#ifdef ARM32
+struct thread_smc_args {
+ uint32_t a0; /* SMC function ID */
+ uint32_t a1; /* Parameter */
+ uint32_t a2; /* Parameter */
+ uint32_t a3; /* Thread ID when returning from RPC */
+ uint32_t a4; /* Not used */
+ uint32_t a5; /* Not used */
+ uint32_t a6; /* Not used */
+ uint32_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_smc_args {
+ uint64_t a0; /* SMC function ID */
+ uint64_t a1; /* Parameter */
+ uint64_t a2; /* Parameter */
+ uint64_t a3; /* Thread ID when returning from RPC */
+ uint64_t a4; /* Not used */
+ uint64_t a5; /* Not used */
+ uint64_t a6; /* Not used */
+ uint64_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_abort_regs {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t pad;
+ uint32_t spsr;
+ uint32_t elr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t ip;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_abort_regs {
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x15;
+ uint64_t x16;
+ uint64_t x17;
+ uint64_t x18;
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+ uint64_t x22;
+ uint64_t x23;
+ uint64_t x24;
+ uint64_t x25;
+ uint64_t x26;
+ uint64_t x27;
+ uint64_t x28;
+ uint64_t x29;
+ uint64_t x30;
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t sp_el0;
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_svc_regs {
+ uint32_t spsr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t lr;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_svc_regs {
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x30;
+ uint64_t sp_el0;
+ uint64_t pad;
+} __aligned(16);
+#endif /*ARM64*/
+#endif /*ASM*/
+
+#ifndef ASM
+typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
+typedef void (*thread_fiq_handler_t)(void);
+typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
+ unsigned long a1);
+struct thread_handlers {
+ /*
+ * stdcall and fastcall are called as regular functions and
+ * normal ARM Calling Convention applies. Return values are passed
+ * args->param{1-3} and forwarded into r0-r3 when returned to
+ * non-secure world.
+ *
+ * stdcall handles calls which can be preemted from non-secure
+ * world. This handler is executed with a large stack.
+ *
+ * fastcall handles fast calls which can't be preemted. This
+ * handler is executed with a limited stack. This handler must not
+ * cause any aborts or reenenable FIQs which are temporarily masked
+ * while executing this handler.
+ *
+ * TODO investigate if we should execute fastcalls and FIQs on
+ * different stacks allowing FIQs to be enabled during a fastcall.
+ */
+ thread_smc_handler_t std_smc;
+ thread_smc_handler_t fast_smc;
+
+ /*
+ * fiq is called as a regular function and normal ARM Calling
+ * Convention applies.
+ *
+ * This handler handles FIQs which can't be preemted. This handler
+ * is executed with a limited stack. This handler must not cause
+ * any aborts or reenenable FIQs which are temporarily masked while
+ * executing this handler.
+ */
+ thread_fiq_handler_t fiq;
+
+ /*
+ * Power management handlers triggered from ARM Trusted Firmware.
+ * Not used when using internal monitor.
+ */
+ thread_pm_handler_t cpu_on;
+ thread_pm_handler_t cpu_off;
+ thread_pm_handler_t cpu_suspend;
+ thread_pm_handler_t cpu_resume;
+ thread_pm_handler_t system_off;
+ thread_pm_handler_t system_reset;
+};
+void thread_init_primary(const struct thread_handlers *handlers);
+void thread_init_per_cpu(void);
+
+/*
+ * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
+ * first stack, THREAD_ID_0 + 1 for the next and so on.
+ *
+ * Returns true on success and false on errors.
+ */
+bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
+
+/*
+ * Initializes a thread to be used during boot
+ */
+void thread_init_boot_thread(void);
+
+/*
+ * Clears the current thread id
+ * Only supposed to be used during initialization.
+ */
+void thread_clr_boot_thread(void);
+
+/*
+ * Returns current thread id.
+ */
+int thread_get_id(void);
+
+/*
+ * Returns current thread id, return -1 on failure.
+ */
+int thread_get_id_may_fail(void);
+
+/* Returns Thread Specific Data (TSD) pointer. */
+struct thread_specific_data *thread_get_tsd(void);
+
+/*
+ * Sets IRQ status for current thread, must only be called from an
+ * active thread context.
+ *
+ * enable == true -> enable IRQ
+ * enable == false -> disable IRQ
+ */
+void thread_set_irq(bool enable);
+
+/*
+ * Restores the IRQ status (in CPSR) for current thread, must only be called
+ * from an active thread context.
+ */
+void thread_restore_irq(void);
+
+/*
+ * Defines the bits for the exception mask used the the
+ * thread_*_exceptions() functions below.
+ */
+#define THREAD_EXCP_FIQ (1 << 0)
+#define THREAD_EXCP_IRQ (1 << 1)
+#define THREAD_EXCP_ABT (1 << 2)
+#define THREAD_EXCP_ALL (THREAD_EXCP_FIQ | THREAD_EXCP_IRQ | THREAD_EXCP_ABT)
+
+/*
+ * thread_get_exceptions() - return current exception mask
+ */
+uint32_t thread_get_exceptions(void);
+
+/*
+ * thread_set_exceptions() - set exception mask
+ * @exceptions: exception mask to set
+ *
+ * Any previous exception mask is replaced by this exception mask, that is,
+ * old bits are cleared and replaced by these.
+ */
+void thread_set_exceptions(uint32_t exceptions);
+
+/*
+ * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
+ * @exceptions exceptions to mask
+ * @returns old exception state
+ */
+uint32_t thread_mask_exceptions(uint32_t exceptions);
+
+/*
+ * thread_unmask_exceptions() - Unmasks asynchronous exceptions
+ * @state Old asynchronous exception state to restore (returned by
+ * thread_mask_exceptions())
+ */
+void thread_unmask_exceptions(uint32_t state);
+
+
+static inline bool thread_irq_disabled(void)
+{
+ return !!(thread_get_exceptions() & THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+/*
+ * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
+ *
+ * IRQ is masked while VFP is enabled. User space must not be entered before
+ * thread_kernel_disable_vfp() has been called to disable VFP and restore the
+ * IRQ status.
+ *
+ * This function may only be called from an active thread context and may
+ * not be called again before thread_kernel_disable_vfp() has been called.
+ *
+ * VFP state is saved as needed.
+ *
+ * Returns a state variable that should be passed to
+ * thread_kernel_disable_vfp().
+ */
+uint32_t thread_kernel_enable_vfp(void);
+
+/*
+ * thread_kernel_disable_vfp() - Disables usage of VFP
+ * @state: state variable returned by thread_kernel_enable_vfp()
+ *
+ * Disables usage of VFP and restores IRQ status after a call to
+ * thread_kernel_enable_vfp().
+ *
+ * This function may only be called after a call to
+ * thread_kernel_enable_vfp().
+ */
+void thread_kernel_disable_vfp(uint32_t state);
+
+/*
+ * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
+ */
+void thread_kernel_save_vfp(void);
+
+/*
+ * thread_kernel_save_vfp() - Restores kernel vfp state
+ */
+void thread_kernel_restore_vfp(void);
+
+/*
+ * thread_user_enable_vfp() - Enables vfp for user mode usage
+ * @uvfp: pointer to where to save the vfp state if needed
+ */
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
+#else /*CFG_WITH_VFP*/
+static inline void thread_kernel_save_vfp(void)
+{
+}
+
+static inline void thread_kernel_restore_vfp(void)
+{
+}
+#endif /*CFG_WITH_VFP*/
+
+/*
+ * thread_user_save_vfp() - Saves the user vfp state if enabled
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_save_vfp(void);
+#else
+static inline void thread_user_save_vfp(void)
+{
+}
+#endif
+
+/*
+ * thread_user_clear_vfp() - Clears the vfp state
+ * @uvfp: pointer to saved state to clear
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp);
+#else
+static inline void thread_user_clear_vfp(
+ struct thread_user_vfp_state *uvfp __unused)
+{
+}
+#endif
+
+
+/*
+ * thread_enter_user_mode() - Enters user mode
+ * @a0: Passed in r/x0 for user_func
+ * @a1: Passed in r/x1 for user_func
+ * @a2: Passed in r/x2 for user_func
+ * @a3: Passed in r/x3 for user_func
+ * @user_sp: Assigned sp value in user mode
+ * @user_func: Function to execute in user mode
+ * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
+ * @exit_status0: Pointer to opaque exit staus 0
+ * @exit_status1: Pointer to opaque exit staus 1
+ *
+ * This functions enters user mode with the argument described above,
+ * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
+ * when returning back to the caller of this function through an exception
+ * handler.
+ *
+ * @Returns what's passed in "ret" to thread_unwind_user_mode()
+ */
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long entry_func, bool is_32bit,
+ uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * thread_unwind_user_mode() - Unwinds kernel stack from user entry
+ * @ret: Value to return from thread_enter_user_mode()
+ * @exit_status0: Exit status 0
+ * @exit_status1: Exit status 1
+ *
+ * This is the function that exception handlers can return into
+ * to resume execution in kernel mode instead of user mode.
+ *
+ * This function is closely coupled with thread_enter_user_mode() since it
+ * need to restore registers saved by thread_enter_user_mode() and when it
+ * returns make it look like thread_enter_user_mode() just returned. It is
+ * expected that the stack pointer is where thread_enter_user_mode() left
+ * it. The stack will be unwound and the function will return to where
+ * thread_enter_user_mode() was called from. Exit_status0 and exit_status1
+ * are filled in the corresponding pointers supplied to
+ * thread_enter_user_mode().
+ */
+void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ uint32_t exit_status1);
+
+#ifdef ARM64
+/*
+ * thread_get_saved_thread_sp() - Returns the saved sp of current thread
+ *
+ * When switching from the thread stack pointer the value is stored
+ * separately in the current thread context. This function returns this
+ * saved value.
+ *
+ * @returns stack pointer
+ */
+vaddr_t thread_get_saved_thread_sp(void);
+#endif /*ARM64*/
+
+bool thread_addr_is_in_stack(vaddr_t va);
+
+/*
+ * Adds a mutex to the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_add_mutex(struct mutex *m);
+
+/*
+ * Removes a mutex from the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_rem_mutex(struct mutex *m);
+
+/*
+ * Disables and empties the prealloc RPC cache one reference at a time. If
+ * all threads are idle this function returns true and a cookie of one shm
+ * object which was removed from the cache. When the cache is empty *cookie
+ * is set to 0 and the cache is disabled else a valid cookie value. If one
+ * thread isn't idle this function returns false.
+ */
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
+
+/*
+ * Enabled the prealloc RPC cache. If all threads are idle the cache is
+ * enabled and this function returns true. If one thread isn't idle this
+ * function return false.
+ */
+bool thread_enable_prealloc_rpc_cache(void);
+
+/**
+ * Allocates data for struct optee_msg_arg.
+ *
+ * @size: size in bytes of struct optee_msg_arg
+ * @arg: returned physcial pointer to a struct optee_msg_arg buffer,
+ * 0 if allocation failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_arg()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_arg(uint64_t cookie);
+
+/**
+ * Allocates data for payload buffers.
+ *
+ * @size: size in bytes of payload buffer
+ * @payload: returned physcial pointer to payload buffer, 0 if allocation
+ * failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_payload()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_payload(uint64_t cookie);
+
+/**
+ * Does an RPC using a preallocated argument buffer
+ * @cmd: RPC cmd
+ * @num_params: number of parameters (max 2)
+ * @params: RPC parameters
+ * @returns RPC return value
+ */
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params);
+
+#endif /*ASM*/
+
+#endif /*KERNEL_THREAD_H*/
diff --git a/core/arch/arm/include/kernel/thread_defs.h b/core/arch/arm/include/kernel/thread_defs.h
new file mode 100644
index 0000000..0f54569
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread_defs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_DEFS_H
+#define KERNEL_THREAD_DEFS_H
+
+#define THREAD_FLAGS_COPY_ARGS_ON_RETURN (1 << 0)
+#define THREAD_FLAGS_IRQ_ENABLE (1 << 1)
+#define THREAD_FLAGS_EXIT_ON_IRQ (1 << 2)
+
+#endif /*KERNEL_THREAD_DEFS_H*/
diff --git a/core/arch/arm/include/kernel/time_source.h b/core/arch/arm/include/kernel/time_source.h
new file mode 100644
index 0000000..ddabfe9
--- /dev/null
+++ b/core/arch/arm/include/kernel/time_source.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+
+struct time_source {
+ const char *name;
+ uint32_t protection_level;
+ TEE_Result (*get_sys_time)(TEE_Time *time);
+};
+void time_source_init(void);
+
+#define REGISTER_TIME_SOURCE(source) \
+ void time_source_init(void) { \
+ _time_source = source; \
+ }
+
+extern struct time_source _time_source;
+
+void arm_prng_add_jitter_entropy(void);
diff --git a/core/arch/arm/include/kernel/tz_proc_def.h b/core/arch/arm/include/kernel/tz_proc_def.h
new file mode 100644
index 0000000..abe281b
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_proc_def.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * General constants
+ */
+
+/*
+ * CP15 Multiprocessor Affinity register (MPIDR)
+ */
+#define CP15_CONFIG_CPU_ID_MASK 0x00000003
+#define CPU_ID0 0x00000000
+#define CPU_ID1 0x00000001
+
+/*
+ * CP15 Secure configuration register
+ */
+#define CP15_CONFIG_NS_MASK 0x00000001
+#define CP15_CONFIG_IRQ_MASK 0x00000002
+#define CP15_CONFIG_FIQ_MASK 0x00000004
+#define CP15_CONFIG_EA_MASK 0x00000008
+#define CP15_CONFIG_FW_MASK 0x00000010
+#define CP15_CONFIG_AW_MASK 0x00000020
+#define CP15_CONFIG_nET_MASK 0x00000040
+
+/*
+ * CP15 Control register
+ */
+#define CP15_CONTROL_M_MASK 0x00000001
+#define CP15_CONTROL_C_MASK 0x00000004
+#define CP15_CONTROL_Z_MASK 0x00000800
+#define CP15_CONTROL_I_MASK 0x00001000
+#define CP15_CONTROL_V_MASK 0x00002000
+#define CP15_CONTROL_HA_MASK 0x00020000
+#define CP15_CONTROL_EE_MASK 0x02000000
+#define CP15_CONTROL_NMFI_MASK 0x08000000
+#define CP15_CONTROL_TRE_MASK 0x10000000
+#define CP15_CONTROL_AFE_MASK 0x20000000
+#define CP15_CONTROL_TE_MASK 0x40000000
+
+/*
+ * CP15 Auxiliary Control register
+ */
+#define CP15_CONTROL_SMP_MASK 0x00000040
+#define CP15_CONTROL_EXCL_MASK 0x00000080
+
+/*
+ * CP15 Non secure access control register
+ */
+#define CP15_NSAC_TL_MASK 0x10000
+#define CP15_NSAC_CL_MASK 0x20000
+#define CP15_NSAC_CPN_MASK 0x3FFF
+
+/*
+ * CP15 Cache register
+ */
+#define CP15_CACHE_ADDR_R_BIT 12
+#define CP15_CACHE_ADDR_L_BIT (32-CP15_CACHE_ADDR_R_BIT)
+#define CP15_CACHE_RESULT_MASK 0x00000001
+
+/*
+ * CP15 TCM register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_TCM_ENABLE_MASK 0x00000001
+#define CP15_TCM_INSTR_TCM 0x2010000C
+#define CP15_TCM_DATA_TCM 0x2010100C
+
+/*
+ * CP15 cache lockdown register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_CACHE_LOCK_ALLWAYS_MASK 0x0000000F
+
+/*
+ * CP15 cache cleaning constant definition
+ */
+/* start of line number field offset in way/index format */
+#define LINE_FIELD_OFFSET 5
+/* Warning: this assumes a 256 lines/way cache (32kB cache) */
+#define LINE_FIELD_OVERFLOW 13
+/* start of way number field offset in way/index format */
+#define WAY_FIELD_OFFSET 30
diff --git a/core/arch/arm/include/kernel/tz_ssvce.h b/core/arch/arm/include/kernel/tz_ssvce.h
new file mode 100644
index 0000000..a886f9d
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_H
+#define TZ_SSVCE_H
+
+#ifndef ASM
+
+#include <types_ext.h>
+
+unsigned int secure_get_cpu_id(void);
+
+void arm_cl1_d_cleanbysetway(void);
+void arm_cl1_d_invbysetway(void);
+void arm_cl1_d_cleaninvbysetway(void);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_cleanbyva(void *start, void *end);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_invbyva(void *start, void *end);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_cleaninvbyva(void *start, void *end);
+void arm_cl1_i_inv_all(void);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_i_inv(void *start, void *end);
+
+void secure_mmu_datatlbinvall(void);
+void secure_mmu_unifiedtlbinvall(void);
+void secure_mmu_unifiedtlbinvbymva(unsigned long addr);
+void secure_mmu_unifiedtlbinv_curasid(void);
+void secure_mmu_unifiedtlbinv_byasid(unsigned long asid);
+
+void secure_mmu_disable(void);
+#endif /*!ASM*/
+
+#ifdef ARM64
+/* D$ set/way op type defines */
+#define DCISW 0x0
+#define DCCISW 0x1
+#define DCCSW 0x2
+
+#ifndef ASM
+void flush_dcache_range(vaddr_t va, size_t len);
+void inv_dcache_range(vaddr_t va, size_t len);
+void dcsw_op_louis(uint32_t op);
+void dcsw_op_all(uint32_t op);
+#endif /*!ASM*/
+#endif /*ARM64*/
+
+#endif
diff --git a/core/arch/arm/include/kernel/tz_ssvce_def.h b/core/arch/arm/include/kernel/tz_ssvce_def.h
new file mode 100644
index 0000000..3e9f9fc
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_def.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TZ_SSVCE_DEF_H
+#define TZ_SSVCE_DEF_H
+
+#include <util.h>
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+#define CPSR_OFFSET 0x00
+#define STACK_INT_USAGE 0x04
+
+/*
+ * tee service IDs (TODO: align with the service ID list).
+ * Set by NSec in R4 before SMC to request a TEE service.
+ */
+#define SSAPI_RET_FROM_INT_SERV 4
+#define SSAPI_RET_FROM_RPC_SERV 5
+
+/*
+ * TEE monitor: status returned by the routine that checks the entry
+ * reason (valid Service ID / secure context).
+ */
+#define SEC_INVALID_ENTRY 0
+#define SEC_PRE_INIT_ENTRY 1
+#define SEC_RET_FROM_INT_ENTRY 2
+#define SEC_RET_FROM_RPC_ENTRY 3
+#define SEC_NORMAL_ENTRY 4
+
+/*
+ * teecore exit reason.
+ * Set by Secure in R4 before SMC to request a switch to NSec.
+ */
+#define SEC_EXIT_NORMAL 1
+#define SEC_EXIT_START_EXT_CODE 2
+#define SEC_EXIT_INT 3
+#define SEC_EXIT_RPC_CALL 4
+#define SEC_EXIT_FIRST 5
+#define SEC_EXIT_DEEP_SLEEP 6
+
+/* misc */
+
+#define SEC_UNDEF_STACK_OFFSET 4
+#define SEC_ABORT_STACK_OFFSET 12
+
+#define SEC_ENTRY_STATUS_NOK 0
+#define SEC_ENTRY_STATUS_OK 1
+
+/*
+ * Outer cache iomem
+ */
+#define PL310_LINE_SIZE 32
+#define PL310_8_WAYS 8
+
+/* reg1 */
+#define PL310_CTRL 0x100
+#define PL310_AUX_CTRL 0x104
+#define PL310_TAG_RAM_CTRL 0x108
+#define PL310_DATA_RAM_CTRL 0x10C
+/* reg7 */
+#define PL310_SYNC 0x730
+#define PL310_INV_BY_WAY 0x77C
+#define PL310_CLEAN_BY_WAY 0x7BC
+#define PL310_FLUSH_BY_WAY 0x7FC
+#define PL310_INV_BY_PA 0x770
+#define PL310_CLEAN_BY_PA 0x7B0
+#define PL310_FLUSH_BY_PA 0x7F0
+#define PL310_FLUSH_BY_INDEXWAY 0x7F8
+/* reg9 */
+#define PL310_DCACHE_LOCKDOWN_BASE 0x900
+#define PL310_ICACHE_LOCKDOWN_BASE 0x904
+/* reg12 */
+#define PL310_ADDR_FILT_START 0xC00
+#define PL310_ADDR_FILT_END 0xC04
+/* reg15 */
+#define PL310_DEBUG_CTRL 0xF40
+#define PL310_PREFETCH_CTRL 0xF60
+#define PL310_POWER_CTRL 0xF80
+
+#define PL310_CTRL_ENABLE_BIT BIT32(0)
+#define PL310_AUX_16WAY_BIT BIT32(16)
+
+/*
+ * SCU iomem
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_POWER 0x08
+#define SCU_INV_SEC 0x0C
+#define SCU_FILT_SA 0x40
+#define SCU_FILT_EA 0x44
+#define SCU_SAC 0x50
+#define SCU_NSAC 0x54
+#define SCU_ERRATA744369 0x30
+
+#define SCU_ACCESS_CONTROL_CPU0 BIT32(0)
+#define SCU_ACCESS_CONTROL_CPU1 BIT32(1)
+#define SCU_ACCESS_CONTROL_CPU2 BIT32(2)
+#define SCU_ACCESS_CONTROL_CPU3 BIT32(3)
+#define SCU_NSAC_SCU_SHIFT 0
+#define SCU_NSAC_PTIMER_SHIFT 4
+#define SCU_NSAC_GTIMER_SHIFT 8
+
+/*
+ * GIC iomem
+ */
+#define GIC_DIST_ISR0 0x080
+#define GIC_DIST_ISR1 0x084
+
+/*
+ * CPU iomem
+ */
+#define CORE_ICC_ICCPMR 0x0004
+
+#endif /* TZ_SSVCE_DEF_H */
diff --git a/core/arch/arm/include/kernel/tz_ssvce_pl310.h b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
new file mode 100644
index 0000000..88b91e1
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_PL310_H
+#define TZ_SSVCE_PL310_H
+
+#include <util.h>
+#include <kernel/tz_ssvce_def.h>
+#include <types_ext.h>
+
+vaddr_t pl310_base(void);
+/*
+ * End address is included in the range (last address in range)
+ */
+void arm_cl2_cleaninvbyway(vaddr_t pl310_base);
+void arm_cl2_invbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+
+#endif /* TZ_SSVCE_PL310_H */
diff --git a/core/arch/arm/include/kernel/unwind.h b/core/arch/arm/include/kernel/unwind.h
new file mode 100644
index 0000000..cc5ff5a
--- /dev/null
+++ b/core/arch/arm/include/kernel/unwind.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef KERNEL_UNWIND
+#define KERNEL_UNWIND
+
+#ifndef ASM
+#include <types_ext.h>
+
+#ifdef ARM32
+/* The state of the unwind process */
+struct unwind_state {
+ uint32_t registers[16];
+ uint32_t start_pc;
+ uint32_t *insn;
+ unsigned entries;
+ unsigned byte;
+ uint16_t update_mask;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+struct unwind_state {
+ uint64_t fp;
+ uint64_t sp;
+ uint64_t pc;
+};
+#endif /*ARM64*/
+
+bool unwind_stack(struct unwind_state *state);
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+void print_stack(int level);
+#else
+static inline void print_stack(int level __unused)
+{
+}
+#endif
+
+#endif /*ASM*/
+
+#ifdef CFG_CORE_UNWIND
+#define UNWIND(...) __VA_ARGS__
+#else
+#define UNWIND(...)
+#endif
+
+#endif /*KERNEL_UNWIND*/
diff --git a/core/arch/arm/include/kernel/user_ta.h b/core/arch/arm/include/kernel/user_ta.h
new file mode 100644
index 0000000..196c0af
--- /dev/null
+++ b/core/arch/arm/include/kernel/user_ta.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_USER_TA_H
+#define KERNEL_USER_TA_H
+
+#include <assert.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <mm/tee_mm.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+TAILQ_HEAD(tee_cryp_state_head, tee_cryp_state);
+TAILQ_HEAD(tee_obj_head, tee_obj);
+TAILQ_HEAD(tee_storage_enum_head, tee_storage_enum);
+
+struct user_ta_ctx {
+ uaddr_t entry_func;
+ bool is_32bit; /* true if 32-bit ta, false if 64-bit ta */
+ /* list of sessions opened by this TA */
+ struct tee_ta_session_head open_sessions;
+ /* List of cryp states created by this TA */
+ struct tee_cryp_state_head cryp_states;
+ /* List of storage objects opened by this TA */
+ struct tee_obj_head objects;
+ /* List of storage enumerators opened by this TA */
+ struct tee_storage_enum_head storage_enums;
+ struct mobj *mobj_code; /* secure world memory */
+ struct mobj *mobj_stack; /* stack */
+ uint32_t load_addr; /* elf load addr (from TAs address space) */
+ uint32_t context; /* Context ID of the process */
+ struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
+ void *ta_time_offs; /* Time reference used by the TA */
+ struct tee_pager_area_head *areas;
+#if defined(CFG_SE_API)
+ struct tee_se_service *se_service;
+#endif
+#if defined(CFG_WITH_VFP)
+ struct thread_user_vfp_state vfp;
+#endif
+ struct tee_ta_ctx ctx;
+
+};
+
+static inline bool is_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !!(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+static inline struct user_ta_ctx *to_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_user_ta_ctx(ctx));
+ return container_of(ctx, struct user_ta_ctx, ctx);
+}
+
+#ifdef CFG_WITH_USER_TA
+TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+#else
+static inline TEE_Result tee_ta_init_user_ta_session(
+ const TEE_UUID *uuid __unused,
+ struct tee_ta_session *s __unused)
+{
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
+#endif
+
+#endif /*KERNEL_USER_TA_H*/
diff --git a/core/arch/arm/include/kernel/vfp.h b/core/arch/arm/include/kernel/vfp.h
new file mode 100644
index 0000000..267dee2
--- /dev/null
+++ b/core/arch/arm/include/kernel/vfp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_VFP_H
+#define KERNEL_VFP_H
+
+#include <types_ext.h>
+#include <compiler.h>
+
+#ifdef ARM32
+/*
+ * Advanced SIMD/floating point state on ARMv7-A or ARMv8-A AArch32 has:
+ * - 32 64-bit data registers
+ * - FPSCR (32 bits)
+ * - FPEXC (32 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+struct vfp_reg {
+ uint64_t v;
+};
+
+struct vfp_state {
+ uint32_t fpexc;
+ uint32_t fpscr;
+ struct vfp_reg reg[VFP_NUM_REGS];
+};
+#endif
+
+#ifdef ARM64
+/*
+ * Advanced SIMD/floating point state on ARMv8-A AArch64 has:
+ * - 32 128-bit data registers
+ * - FPSR (32 bits)
+ * - FPCR (32 bits)
+ * - CPACR_EL1.FPEN (2 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+struct vfp_reg {
+ uint8_t v[16];
+} __aligned(16);
+
+struct vfp_state {
+ struct vfp_reg reg[VFP_NUM_REGS];
+ uint32_t fpsr;
+ uint32_t fpcr;
+ uint32_t cpacr_el1;
+ bool force_save; /* Save to reg even if VFP was not enabled */
+};
+#endif
+
+#ifdef CFG_WITH_VFP
+/* vfp_is_enabled() - Returns true if VFP is enabled */
+bool vfp_is_enabled(void);
+
+/* vfp_enable() - Enables vfp */
+void vfp_enable(void);
+
+/* vfp_disable() - Disables vfp */
+void vfp_disable(void);
+#else
+static inline bool vfp_is_enabled(void)
+{
+ return false;
+}
+
+static inline void vfp_enable(void)
+{
+}
+
+static inline void vfp_disable(void)
+{
+}
+#endif
+
+/*
+ * vfp_lazy_save_state_init() - Saves VFP enable status and disables VFP
+ * @state: VFP state structure to initialize
+ */
+void vfp_lazy_save_state_init(struct vfp_state *state);
+
+/*
+ * vfp_lazy_save_state_final() - Saves rest of VFP state
+ * @state: VFP state to save to
+ *
+ * If VFP was enabled when vfp_lazy_save_state_init() was called: save rest
+ * of state and disable VFP. Otherwise, do nothing.
+ */
+void vfp_lazy_save_state_final(struct vfp_state *state);
+
+/*
+ * vfp_lazy_restore_state() - Lazy restore VFP state
+ * @state: VFP state to restore
+ *
+ * Restores VFP enable status and also restores rest of VFP state if
+ * vfp_lazy_save_state_final() was called on this state.
+ */
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state);
+
+#endif /*KERNEL_VFP_H*/
diff --git a/core/arch/arm/include/kernel/wait_queue.h b/core/arch/arm/include/kernel/wait_queue.h
new file mode 100644
index 0000000..eb8f881
--- /dev/null
+++ b/core/arch/arm/include/kernel/wait_queue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_WAIT_QUEUE_H
+#define KERNEL_WAIT_QUEUE_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+
+struct wait_queue_elem;
+SLIST_HEAD(wait_queue, wait_queue_elem);
+
+#define WAIT_QUEUE_INITIALIZER { .slh_first = NULL }
+
+struct condvar;
+struct wait_queue_elem {
+ short handle;
+ bool done;
+ struct condvar *cv;
+ SLIST_ENTRY(wait_queue_elem) link;
+};
+
+/*
+ * Initializes a wait queue
+ */
+void wq_init(struct wait_queue *wq);
+
+/*
+ * Initializes a wait queue element and adds it to the wait queue. This
+ * function is supposed to be called before the lock that protects the
+ * resource we need to wait for is released.
+ *
+ * One call to this function must be followed by one call to wq_wait_final()
+ * on the same wait queue element.
+ */
+void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ struct condvar *cv);
+
+static inline void wq_wait_init(struct wait_queue *wq,
+ struct wait_queue_elem *wqe)
+{
+ wq_wait_init_condvar(wq, wqe, NULL);
+}
+
+/* Waits for the wait queue element to the awakened. */
+void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ const void *sync_obj, const char *fname, int lineno);
+
+/* Wakes up the first wait queue element in the wait queue, if there is one */
+void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
+ const char *fname, int lineno);
+
+/* Returns true if the wait queue doesn't contain any elements */
+bool wq_is_empty(struct wait_queue *wq);
+
+void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
+ bool only_one, const void *sync_obj, const char *fname,
+ int lineno);
+bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv);
+
+#endif /*KERNEL_WAIT_QUEUE_H*/
+