author    r.tyminski <r.tyminski@partner.samsung.com>  2017-06-05 12:44:25 +0200
committer r.tyminski <r.tyminski@partner.samsung.com>  2017-06-05 12:44:25 +0200
commit    146aec115cd05a164a88e6d7b07435c57a33817f
tree      d8099075c92576b1928069af274f9b833aca996e
parent    f9a43781767007462965b21f3f518c4cfc0744c7
Update from upstream to 2.4.0 version (tag: upstream/2.4.0, branch: upstream)
Change-Id: I2b3a30f20684d6629fe379d9cd7895aff759c301
Diffstat (limited to 'core/arch/arm/kernel')
-rw-r--r--  core/arch/arm/kernel/elf_common.h            1
-rw-r--r--  core/arch/arm/kernel/elf_load.c             33
-rw-r--r--  core/arch/arm/kernel/generic_boot.c         17
-rw-r--r--  core/arch/arm/kernel/generic_entry_a32.S    18
-rw-r--r--  core/arch/arm/kernel/kern.ld.S               8
-rw-r--r--  core/arch/arm/kernel/mutex.c                 7
-rw-r--r--  core/arch/arm/kernel/pseudo_ta.c            45
-rw-r--r--  core/arch/arm/kernel/spin_lock_debug.c       5
-rw-r--r--  core/arch/arm/kernel/tee_time_arm_cntpct.c   2
-rw-r--r--  core/arch/arm/kernel/thread.c              108
-rw-r--r--  core/arch/arm/kernel/thread_a32.S            8
-rw-r--r--  core/arch/arm/kernel/thread_a64.S           14
-rw-r--r--  core/arch/arm/kernel/trace_ext.c            19
-rw-r--r--  core/arch/arm/kernel/user_ta.c              14
-rw-r--r--  core/arch/arm/kernel/wait_queue.c           16
15 files changed, 214 insertions, 101 deletions
diff --git a/core/arch/arm/kernel/elf_common.h b/core/arch/arm/kernel/elf_common.h
index dd8cd50..497a902 100644
--- a/core/arch/arm/kernel/elf_common.h
+++ b/core/arch/arm/kernel/elf_common.h
@@ -645,6 +645,7 @@ typedef struct {
#define R_386_TLS_TPOFF32 37 /* GOT entry of -ve static TLS offset */
#define R_386_IRELATIVE 42 /* PLT entry resolved indirectly at runtime */
+#define R_AARCH64_ABS64 257
#define R_AARCH64_RELATIVE 1027
#define R_ARM_NONE 0 /* No relocation. */
diff --git a/core/arch/arm/kernel/elf_load.c b/core/arch/arm/kernel/elf_load.c
index 420ba59..b1d6102 100644
--- a/core/arch/arm/kernel/elf_load.c
+++ b/core/arch/arm/kernel/elf_load.c
@@ -499,9 +499,13 @@ static TEE_Result e32_process_rel(struct elf_load_state *state, size_t rel_sidx,
static TEE_Result e64_process_rel(struct elf_load_state *state,
size_t rel_sidx, vaddr_t vabase)
{
+ Elf64_Ehdr *ehdr = state->ehdr;
Elf64_Shdr *shdr = state->shdr;
Elf64_Rela *rela;
Elf64_Rela *rela_end;
+ size_t sym_tab_idx;
+ Elf64_Sym *sym_tab = NULL;
+ size_t num_syms = 0;
if (shdr[rel_sidx].sh_type != SHT_RELA)
return TEE_ERROR_NOT_IMPLEMENTED;
@@ -509,6 +513,27 @@ static TEE_Result e64_process_rel(struct elf_load_state *state,
if (shdr[rel_sidx].sh_entsize != sizeof(Elf64_Rela))
return TEE_ERROR_BAD_FORMAT;
+ sym_tab_idx = shdr[rel_sidx].sh_link;
+ if (sym_tab_idx) {
+ if (sym_tab_idx >= ehdr->e_shnum)
+ return TEE_ERROR_BAD_FORMAT;
+
+ if (shdr[sym_tab_idx].sh_entsize != sizeof(Elf64_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if (shdr[sym_tab_idx].sh_addr > state->vasize ||
+ (shdr[sym_tab_idx].sh_addr +
+ shdr[sym_tab_idx].sh_size) > state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ sym_tab = (Elf64_Sym *)(vabase + shdr[sym_tab_idx].sh_addr);
+ if (!ALIGNMENT_IS_OK(sym_tab, Elf64_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf64_Sym);
+ }
+
/* Check the address is inside TA memory */
if (shdr[rel_sidx].sh_addr >= state->vasize)
return TEE_ERROR_BAD_FORMAT;
@@ -522,6 +547,7 @@ static TEE_Result e64_process_rel(struct elf_load_state *state,
rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
for (; rela < rela_end; rela++) {
Elf64_Addr *where;
+ size_t sym_idx;
/* Check the address is inside TA memory */
if (rela->r_offset >= state->vasize)
@@ -532,6 +558,13 @@ static TEE_Result e64_process_rel(struct elf_load_state *state,
return TEE_ERROR_BAD_FORMAT;
switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_AARCH64_ABS64:
+ sym_idx = ELF64_R_SYM(rela->r_info);
+ if (sym_idx > num_syms)
+ return TEE_ERROR_BAD_FORMAT;
+ *where = rela->r_addend + sym_tab[sym_idx].st_value +
+ vabase;
+ break;
case R_AARCH64_RELATIVE:
*where = rela->r_addend + vabase;
break;
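A minimal sketch of the two relocation computations handled above (hypothetical helper name, simplified: no loader-state or bounds plumbing; Elf64 types as used by elf_load.c). R_AARCH64_ABS64 resolves through the symbol table, while R_AARCH64_RELATIVE only rebases the addend:

static int apply_rela(const Elf64_Rela *rela, const Elf64_Sym *sym_tab,
                      size_t num_syms, vaddr_t vabase)
{
        Elf64_Addr *where = (Elf64_Addr *)(vabase + rela->r_offset);
        size_t sym_idx;

        switch (ELF64_R_TYPE(rela->r_info)) {
        case R_AARCH64_ABS64:
                sym_idx = ELF64_R_SYM(rela->r_info);
                if (sym_idx >= num_syms) /* valid indices: 0 .. num_syms - 1 */
                        return -1;
                /* S + A, with st_value rebased onto the load address */
                *where = sym_tab[sym_idx].st_value + rela->r_addend + vabase;
                return 0;
        case R_AARCH64_RELATIVE:
                /* B + A: no symbol lookup needed */
                *where = rela->r_addend + vabase;
                return 0;
        default:
                return -1; /* not handled in this sketch */
        }
}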
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
index 8f13c36..0d78d40 100644
--- a/core/arch/arm/kernel/generic_boot.c
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -93,12 +93,6 @@ __weak void plat_cpu_reset_late(void)
KEEP_PAGER(plat_cpu_reset_late);
/* May be overridden in plat-$(PLATFORM)/main.c */
-__weak void plat_cpu_reset_early(void)
-{
-}
-KEEP_PAGER(plat_cpu_reset_early);
-
-/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}
@@ -289,9 +283,8 @@ static void init_runtime(unsigned long pageable_part)
p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
~SMALL_PAGE_MASK);
- cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
- cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
- SMALL_PAGE_SIZE);
+ cache_op_inner(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
+ cache_op_inner(ICACHE_AREA_INVALIDATE, p, SMALL_PAGE_SIZE);
}
/*
@@ -607,7 +600,8 @@ static void init_primary_helper(unsigned long pageable_part,
* Mask asynchronous exceptions before switch to the thread vector
* as the thread handler requires those to be masked while
* executing with the temporary stack. The thread subsystem also
- * asserts that IRQ is blocked when using most if its functions.
+ * asserts that the foreign interrupts are blocked when using most of
+ * its functions.
*/
thread_set_exceptions(THREAD_EXCP_ALL);
init_vfp_sec();
@@ -634,7 +628,8 @@ static void init_secondary_helper(unsigned long nsec_entry)
* Mask asynchronous exceptions before switch to the thread vector
* as the thread handler requires those to be masked while
* executing with the temporary stack. The thread subsystem also
- * asserts that IRQ is blocked when using most if its functions.
+ * asserts that the foreign interrupts are blocked when using most of
+ * its functions.
*/
thread_set_exceptions(THREAD_EXCP_ALL);
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
index 27717d5..9c2ef41 100644
--- a/core/arch/arm/kernel/generic_entry_a32.S
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -25,16 +25,16 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <platform_config.h>
-
-#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
+#include <asm.S>
+#include <platform_config.h>
+#include <keep.h>
+#include <kernel/asan.h>
+#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
-#include <kernel/unwind.h>
-#include <kernel/asan.h>
.section .data
.balign 4
@@ -87,6 +87,14 @@ END_FUNC __assert_flat_mapped_range
.endm
#endif /* CFG_PL310 */
+.weak plat_cpu_reset_early
+FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
+ bx lr
+UNWIND( .fnend)
+END_FUNC plat_cpu_reset_early
+KEEP_PAGER plat_cpu_reset_early
+
.section .text.boot
FUNC _start , :
b reset
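The weak default of plat_cpu_reset_early() now lives here in assembly (a plain "bx lr") instead of in generic_boot.c, so it is callable this early in the reset path. A platform can still override it; a minimal hypothetical C override (platform name made up):

/* core/arch/arm/plat-foo/main.c -- hypothetical platform */
void plat_cpu_reset_early(void)
{
        /*
         * Early, pre-MMU CPU set-up goes here. Keep it minimal: this
         * runs before most boot-time state is initialized.
         */
}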
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
index 10dac6e..b761aea 100644
--- a/core/arch/arm/kernel/kern.ld.S
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -107,6 +107,10 @@ SECTIONS
__start_phys_mem_map_section = . ;
KEEP(*(phys_mem_map_section))
__end_phys_mem_map_section = . ;
+ . = ALIGN(8);
+ __start_phys_sdp_mem_section = . ;
+ KEEP(*(phys_sdp_mem_section))
+ __end_phys_sdp_mem_section = . ;
#endif
. = ALIGN(8);
__rodata_end = .;
@@ -254,6 +258,10 @@ SECTIONS
KEEP(*(phys_mem_map_section))
__end_phys_mem_map_section = . ;
. = ALIGN(8);
+ __start_phys_sdp_mem_section = . ;
+ KEEP(*(phys_sdp_mem_section))
+ __end_phys_sdp_mem_section = . ;
+ . = ALIGN(8);
__rodata_init_end = .;
}
__init_start = __text_init_start;
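Both the runtime and the init regions now collect phys_sdp_mem_section entries between start/end symbols, the same pattern as phys_mem_map_section next to it. A sketch of walking such a linker-delimited array (assuming the entries are struct core_mmu_phys_mem, as for the neighbouring section; the symbol names match the script above):

extern const struct core_mmu_phys_mem __start_phys_sdp_mem_section;
extern const struct core_mmu_phys_mem __end_phys_sdp_mem_section;

static void dump_sdp_mem(void)
{
        const struct core_mmu_phys_mem *mem;

        for (mem = &__start_phys_sdp_mem_section;
             mem < &__end_phys_sdp_mem_section; mem++)
                DMSG("SDP mem %s: pa 0x%" PRIxPA " size 0x%zx",
                     mem->name, mem->addr, mem->size);
}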
diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
index 0e1b836..a25ca12 100644
--- a/core/arch/arm/kernel/mutex.c
+++ b/core/arch/arm/kernel/mutex.c
@@ -45,6 +45,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
uint32_t old_itr_status;
enum mutex_value old_value;
struct wait_queue_elem wqe;
+ int owner = MUTEX_OWNER_ID_NONE;
/*
* If the mutex is locked we need to initialize the wqe
@@ -61,6 +62,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
old_value = m->value;
if (old_value == MUTEX_VALUE_LOCKED) {
wq_wait_init(&m->wq, &wqe);
+ owner = m->owner_id;
} else {
m->value = MUTEX_VALUE_LOCKED;
thread_add_mutex(m);
@@ -74,7 +76,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
* Someone else is holding the lock, wait in normal
* world for the lock to become available.
*/
- wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+ wq_wait_final(&m->wq, &wqe, m, owner, fname, lineno);
} else
return;
}
@@ -260,7 +262,8 @@ static void __condvar_wait(struct condvar *cv, struct mutex *m,
/* Wake eventual waiters */
wq_wake_one(&m->wq, m, fname, lineno);
- wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+ wq_wait_final(&m->wq, &wqe,
+ m, MUTEX_OWNER_ID_CONDVAR_SLEEP, fname, lineno);
mutex_lock(m);
}
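The extra argument threads the owner id of a contended mutex out to the wait-queue RPC, so the normal-world log can show who holds the lock a thread is about to sleep on. The sentinels used here are assumed to come from kernel/mutex.h in this tree (MUTEX_OWNER_ID_NONE is -1, per the thread.c hunk below; the other two values are illustrative):

#define MUTEX_OWNER_ID_NONE             -1 /* mutex is unowned */
#define MUTEX_OWNER_ID_CONDVAR_SLEEP    -2 /* sleeping in a condvar, no owner */
#define MUTEX_OWNER_ID_MUTEX_UNLOCK     -3 /* wake-up issued by mutex_unlock() */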
diff --git a/core/arch/arm/kernel/pseudo_ta.c b/core/arch/arm/kernel/pseudo_ta.c
index 6352a28..78b2bfd 100644
--- a/core/arch/arm/kernel/pseudo_ta.c
+++ b/core/arch/arm/kernel/pseudo_ta.c
@@ -37,9 +37,41 @@
#include <trace.h>
#include <types_ext.h>
+#ifdef CFG_SECURE_DATA_PATH
+static bool client_is_secure(struct tee_ta_session *s)
+{
+ /* rely on core entry to have constrained client IDs */
+ if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP)
+ return true;
+
+ return false;
+}
+
+static bool validate_in_param(struct tee_ta_session *s, struct mobj *mobj)
+{
+ /* for secure clients, core entry always holds valid memref objects */
+ if (client_is_secure(s))
+ return true;
+
+ /* all non-secure memory references are handled by pTAs */
+ if (mobj_is_nonsec(mobj))
+ return true;
+
+ return false;
+}
+#else
+static bool validate_in_param(struct tee_ta_session *s __unused,
+ struct mobj *mobj __unused)
+{
+ /* At this point, core has filled only valid accessible memref mobj */
+ return true;
+}
+#endif
+
/* Maps static TA params */
-static TEE_Result copy_in_param(struct tee_ta_param *param,
- TEE_Param tee_param[TEE_NUM_PARAMS])
+static TEE_Result copy_in_param(struct tee_ta_session *s __maybe_unused,
+ struct tee_ta_param *param,
+ TEE_Param tee_param[TEE_NUM_PARAMS])
{
size_t n;
void *va;
@@ -55,6 +87,9 @@ static TEE_Result copy_in_param(struct tee_ta_param *param,
case TEE_PARAM_TYPE_MEMREF_INPUT:
case TEE_PARAM_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_TYPE_MEMREF_INOUT:
+ if (!validate_in_param(s, param->u[n].mem.mobj))
+ return TEE_ERROR_BAD_PARAMETERS;
+
va = mobj_get_va(param->u[n].mem.mobj,
param->u[n].mem.offs);
if (!va)
@@ -110,7 +145,7 @@ static TEE_Result pseudo_ta_enter_open_session(struct tee_ta_session *s,
}
if (stc->pseudo_ta->open_session_entry_point) {
- res = copy_in_param(param, tee_param);
+ res = copy_in_param(s, param, tee_param);
if (res != TEE_SUCCESS) {
*eo = TEE_ORIGIN_TEE;
goto out;
@@ -136,7 +171,7 @@ static TEE_Result pseudo_ta_enter_invoke_cmd(struct tee_ta_session *s,
TEE_Param tee_param[TEE_NUM_PARAMS];
tee_ta_push_current_session(s);
- res = copy_in_param(param, tee_param);
+ res = copy_in_param(s, param, tee_param);
if (res != TEE_SUCCESS) {
*eo = TEE_ORIGIN_TEE;
goto out;
@@ -224,7 +259,7 @@ TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
struct tee_ta_ctx *ctx;
const struct pseudo_ta_head *ta;
- DMSG(" Lookup for Static TA %pUl", (void *)uuid);
+ DMSG(" Lookup for pseudo TA %pUl", (void *)uuid);
ta = &__start_ta_head_section;
while (true) {
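For context, a minimal pseudo TA registration (UUID, name and command id are made up; assumes the pseudo_ta_register() helper and PTA_DEFAULT_FLAGS from kernel/pseudo_ta.h in this tree). The TEE_Param array its entry points receive is the one copy_in_param() validated above:

#define PTA_EXAMPLE_UUID { 0x8aaaf200, 0x2450, 0x11e4, \
        { 0xab, 0xe2, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }

static TEE_Result invoke_command(void *sess_ctx __unused, uint32_t cmd_id,
                                 uint32_t param_types __unused,
                                 TEE_Param params[TEE_NUM_PARAMS] __unused)
{
        switch (cmd_id) {
        case 0:
                return TEE_SUCCESS;
        default:
                return TEE_ERROR_NOT_SUPPORTED;
        }
}

pseudo_ta_register(.uuid = PTA_EXAMPLE_UUID, .name = "example.pta",
                   .flags = PTA_DEFAULT_FLAGS,
                   .invoke_command_entry_point = invoke_command);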
diff --git a/core/arch/arm/kernel/spin_lock_debug.c b/core/arch/arm/kernel/spin_lock_debug.c
index 2a450a5..00a2a00 100644
--- a/core/arch/arm/kernel/spin_lock_debug.c
+++ b/core/arch/arm/kernel/spin_lock_debug.c
@@ -49,10 +49,11 @@ bool have_spinlock(void)
{
struct thread_core_local *l;
- if (!thread_irq_disabled()) {
+ if (!thread_foreign_intr_disabled()) {
/*
* Normally we can't be holding a spinlock since doing so would
- * imply IRQ are disabled (or the spinlock logic is flawed).
+ * imply foreign interrupts are disabled (or the spinlock
+ * logic is flawed).
*/
return false;
}
diff --git a/core/arch/arm/kernel/tee_time_arm_cntpct.c b/core/arch/arm/kernel/tee_time_arm_cntpct.c
index 90e7f20..59d6ea4 100644
--- a/core/arch/arm/kernel/tee_time_arm_cntpct.c
+++ b/core/arch/arm/kernel/tee_time_arm_cntpct.c
@@ -93,7 +93,7 @@ void plat_prng_add_jitter_entropy(void)
}
}
if (bytes) {
- DMSG("%s: 0x%02X\n", __func__,
+ FMSG("%s: 0x%02X\n", __func__,
(int)acc & ((1 << (bytes * 8)) - 1));
tee_prng_add_entropy((uint8_t *)&acc, bytes);
}
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index c988b65..2aaa0e6 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -66,15 +66,11 @@
#endif
#define STACK_THREAD_SIZE 8192
-#if TRACE_LEVEL > 0
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE 3072
#else
#define STACK_ABT_SIZE 2048
#endif
-#else
-#define STACK_ABT_SIZE 1024
-#endif
#endif /*ARM32*/
@@ -140,7 +136,7 @@ KEEP_PAGER(stack_tmp_offset);
thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
-thread_fiq_handler_t thread_fiq_handler_ptr;
+thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
@@ -234,8 +230,8 @@ void thread_set_exceptions(uint32_t exceptions)
{
uint32_t cpsr = read_cpsr();
- /* IRQ must not be unmasked while holding a spinlock */
- if (!(exceptions & THREAD_EXCP_IRQ))
+ /* Foreign interrupts must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
assert_have_no_spinlock();
cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
@@ -256,8 +252,8 @@ void thread_set_exceptions(uint32_t exceptions)
{
uint32_t daif = read_daif();
- /* IRQ must not be unmasked while holding a spinlock */
- if (!(exceptions & THREAD_EXCP_IRQ))
+ /* Foreign interrupts must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
assert_have_no_spinlock();
daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
@@ -285,11 +281,11 @@ struct thread_core_local *thread_get_core_local(void)
uint32_t cpu_id = get_core_pos();
/*
- * IRQs must be disabled before playing with core_local since
- * we otherwise may be rescheduled to a different core in the
+ * Foreign interrupts must be disabled before playing with core_local
+ * since we otherwise may be rescheduled to a different core in the
* middle of this function.
*/
- assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
assert(cpu_id < CFG_TEE_CORE_NB_CORE);
return &thread_core_local[cpu_id];
@@ -338,11 +334,12 @@ static void init_regs(struct thread_ctx *thread,
thread->regs.pc = (uint32_t)thread_std_smc_entry;
/*
- * Stdcalls starts in SVC mode with masked IRQ, masked Asynchronous
- * abort and unmasked FIQ.
- */
+ * Stdcalls start in SVC mode with masked foreign interrupts, masked
+ * Asynchronous abort and unmasked native interrupts.
+ */
thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
- thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
+ thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
+ (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
/* Enable thumb mode if it's a thumb instruction */
if (thread->regs.pc & 1)
thread->regs.cpsr |= CPSR_T;
@@ -371,11 +368,11 @@ static void init_regs(struct thread_ctx *thread,
thread->regs.pc = (uint64_t)thread_std_smc_entry;
/*
- * Stdcalls starts in SVC mode with masked IRQ, masked Asynchronous
- * abort and unmasked FIQ.
- */
+ * Stdcalls start in SVC mode with masked foreign interrupts, masked
+ * Asynchronous abort and unmasked native interrupts.
+ */
thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
- DAIFBIT_IRQ | DAIFBIT_ABT);
+ THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
/* Reinitialize stack pointer */
thread->regs.sp = thread->stack_va_end;
@@ -556,7 +553,7 @@ static void thread_resume_from_rpc(struct thread_smc_args *args)
core_mmu_set_user_map(&threads[n].user_map);
/*
- * Return from RPC to request service of an IRQ must not
+ * Return from RPC to request service of a foreign interrupt must not
* get parameters from non-secure world.
*/
if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
@@ -769,8 +766,10 @@ bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
int thread_get_id_may_fail(void)
{
- /* thread_get_core_local() requires IRQs to be disabled */
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ /*
+ * thread_get_core_local() requires foreign interrupts to be disabled
+ */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
struct thread_core_local *l = thread_get_core_local();
int ct = l->curr_thread;
@@ -790,7 +789,7 @@ static void init_handlers(const struct thread_handlers *handlers)
{
thread_std_smc_handler_ptr = handlers->std_smc;
thread_fast_smc_handler_ptr = handlers->fast_smc;
- thread_fiq_handler_ptr = handlers->fiq;
+ thread_nintr_handler_ptr = handlers->nintr;
thread_cpu_on_handler_ptr = handlers->cpu_on;
thread_cpu_off_handler_ptr = handlers->cpu_off;
thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
@@ -890,10 +889,10 @@ struct thread_ctx_regs *thread_get_ctx_regs(void)
return &threads[l->curr_thread].regs;
}
-void thread_set_irq(bool enable)
+void thread_set_foreign_intr(bool enable)
{
- /* thread_get_core_local() requires IRQs to be disabled */
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ /* thread_get_core_local() requires foreign interrupts to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
struct thread_core_local *l;
l = thread_get_core_local();
@@ -901,35 +900,37 @@ void thread_set_irq(bool enable)
assert(l->curr_thread != -1);
if (enable) {
- threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
- thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+ threads[l->curr_thread].flags |=
+ THREAD_FLAGS_FOREIGN_INTR_ENABLE;
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
} else {
/*
- * No need to disable IRQ here since it's already disabled
- * above.
+ * No need to disable foreign interrupts here since they're
+ * already disabled above.
*/
- threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
+ threads[l->curr_thread].flags &=
+ ~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
}
}
-void thread_restore_irq(void)
+void thread_restore_foreign_intr(void)
{
- /* thread_get_core_local() requires IRQs to be disabled */
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ /* thread_get_core_local() requires foreign interrupts to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
struct thread_core_local *l;
l = thread_get_core_local();
assert(l->curr_thread != -1);
- if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
- thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+ if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}
#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
struct thread_ctx *thr = threads + thread_get_id();
struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
@@ -967,9 +968,9 @@ void thread_kernel_disable_vfp(uint32_t state)
vfp_disable();
exceptions = thread_get_exceptions();
- assert(exceptions & THREAD_EXCP_IRQ);
- exceptions &= ~THREAD_EXCP_IRQ;
- exceptions |= state & THREAD_EXCP_IRQ;
+ assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
+ exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
+ exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
thread_set_exceptions(exceptions);
}
@@ -977,7 +978,7 @@ void thread_kernel_save_vfp(void)
{
struct thread_ctx *thr = threads + thread_get_id();
- assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
if (vfp_is_enabled()) {
vfp_lazy_save_state_init(&thr->vfp_state.sec);
thr->vfp_state.sec_lazy_saved = true;
@@ -988,7 +989,7 @@ void thread_kernel_restore_vfp(void)
{
struct thread_ctx *thr = threads + thread_get_id();
- assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
assert(!vfp_is_enabled());
if (thr->vfp_state.sec_lazy_saved) {
vfp_lazy_restore_state(&thr->vfp_state.sec,
@@ -1003,7 +1004,7 @@ void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
struct thread_ctx *thr = threads + thread_get_id();
struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
- assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
assert(!vfp_is_enabled());
if (!thr->vfp_state.ns_saved) {
@@ -1030,7 +1031,7 @@ void thread_user_save_vfp(void)
struct thread_ctx *thr = threads + thread_get_id();
struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
- assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
if (!vfp_is_enabled())
return;
@@ -1110,7 +1111,7 @@ void thread_add_mutex(struct mutex *m)
int ct = l->curr_thread;
assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
- assert(m->owner_id == -1);
+ assert(m->owner_id == MUTEX_OWNER_ID_NONE);
m->owner_id = ct;
TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}
@@ -1122,7 +1123,7 @@ void thread_rem_mutex(struct mutex *m)
assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
assert(m->owner_id == ct);
- m->owner_id = -1;
+ m->owner_id = MUTEX_OWNER_ID_NONE;
TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}
@@ -1130,7 +1131,7 @@ bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
bool rv;
size_t n;
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
lock_global();
@@ -1163,7 +1164,7 @@ bool thread_enable_prealloc_rpc_cache(void)
{
bool rv;
size_t n;
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
lock_global();
@@ -1194,7 +1195,14 @@ static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
assert(arg && carg && num_params <= THREAD_RPC_MAX_NUM_PARAMS);
- plat_prng_add_jitter_entropy();
+
+ /*
+ * Break recursion in case plat_prng_add_jitter_entropy_norpc()
+ * sleeps on a mutex or unlocks a mutex with a sleeper (contended
+ * mutex).
+ */
+ if (cmd != OPTEE_MSG_RPC_CMD_WAIT_QUEUE)
+ plat_prng_add_jitter_entropy_norpc();
memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS));
arg->cmd = cmd;
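The guard matters because adding jitter entropy may take a mutex, and sleeping on a contended mutex is itself implemented with an OPTEE_MSG_RPC_CMD_WAIT_QUEUE RPC through this very function. A sketch of the recursion being broken (call chain as comments; plat_prng_add_jitter_entropy_norpc() is the non-RPC variant named above, but its body here is hypothetical):

/*
 * rpc_cmd_nolock(OPTEE_MSG_RPC_CMD_WAIT_QUEUE, ...)  <- wait-queue sleep
 *   -> plat_prng_add_jitter_entropy()                <- old code
 *     -> mutex_lock() on a contended mutex
 *       -> wq_wait_final() -> rpc_cmd_nolock(...)    <- recursion
 */
void plat_prng_add_jitter_entropy_norpc(void)
{
        /* ARM generic timer counter, as in tee_time_arm_cntpct.c */
        uint64_t cnt = read_cntpct();

        /* assumption: feeding the pool here never sleeps on a mutex */
        tee_prng_add_entropy((uint8_t *)&cnt, sizeof(cnt));
}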
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
index 6d3ac35..1a1c696 100644
--- a/core/arch/arm/kernel/thread_a32.S
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -75,7 +75,7 @@ UNWIND( .fnstart)
UNWIND( .cantunwind)
/* Secure Monitor received a FIQ and passed control to us. */
bl thread_check_canaries
- ldr lr, =thread_fiq_handler_ptr
+ ldr lr, =thread_nintr_handler_ptr
ldr lr, [lr]
blx lr
mov r1, r0
@@ -392,7 +392,7 @@ UNWIND( .cantunwind)
*/
push {r0-r3, r8-r12, lr}
bl thread_check_canaries
- ldr lr, =thread_fiq_handler_ptr
+ ldr lr, =thread_nintr_handler_ptr
ldr lr, [lr]
blx lr
pop {r0-r3, r8-r12, lr}
@@ -416,7 +416,7 @@ UNWIND( .cantunwind)
bl thread_save_state
- mov r0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mov r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
mrs r1, spsr
pop {r12}
pop {r2}
@@ -432,7 +432,7 @@ UNWIND( .cantunwind)
mov sp, r0
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
- ldr r1, =OPTEE_SMC_RETURN_RPC_IRQ
+ ldr r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
mov r2, #0
mov r3, #0
/* r4 is already filled in above */
diff --git a/core/arch/arm/kernel/thread_a64.S b/core/arch/arm/kernel/thread_a64.S
index abd482b..241868a 100644
--- a/core/arch/arm/kernel/thread_a64.S
+++ b/core/arch/arm/kernel/thread_a64.S
@@ -77,7 +77,7 @@ END_FUNC vector_fast_smc_entry
LOCAL_FUNC vector_fiq_entry , :
/* Secure Monitor received a FIQ and passed control to us. */
bl thread_check_canaries
- adr x16, thread_fiq_handler_ptr
+ adr x16, thread_nintr_handler_ptr
ldr x16, [x16]
blr x16
ldr x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
@@ -487,9 +487,9 @@ LOCAL_FUNC el0_svc , :
mov x0, sp
/*
- * Unmask FIQ, Serror, and debug exceptions since we have nothing
- * left in sp_el1. Note that the SVC handler is excepted to
- * re-enable IRQs by itself.
+ * Unmask native interrupts, Serror, and debug exceptions since we have
+ * nothing left in sp_el1. Note that the SVC handler is expected to
+ * re-enable foreign interrupts by itself.
*/
msr daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
@@ -713,7 +713,7 @@ LOCAL_FUNC elx_irq , :
/*
* Mark current thread as suspended
*/
- mov w0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mov w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
mrs x1, spsr_el1
mrs x2, elr_el1
bl thread_state_suspend
@@ -734,7 +734,7 @@ LOCAL_FUNC elx_irq , :
*/
ldr w0, =TEESMC_OPTEED_RETURN_CALL_DONE
- ldr w1, =OPTEE_SMC_RETURN_RPC_IRQ
+ ldr w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
mov w2, #0
mov w3, #0
/* w4 is already filled in above */
@@ -787,7 +787,7 @@ LOCAL_FUNC elx_fiq , :
stp x30, x2, [sp, #ELX_FIQ_REC_LR]
bl thread_check_canaries
- adr x16, thread_fiq_handler_ptr
+ adr x16, thread_nintr_handler_ptr
ldr x16, [x16]
blr x16
diff --git a/core/arch/arm/kernel/trace_ext.c b/core/arch/arm/kernel/trace_ext.c
index 8b8454c..6cedba3 100644
--- a/core/arch/arm/kernel/trace_ext.c
+++ b/core/arch/arm/kernel/trace_ext.c
@@ -27,21 +27,40 @@
#include <stdbool.h>
#include <trace.h>
#include <console.h>
+#include <kernel/spinlock.h>
#include <kernel/thread.h>
+#include <mm/core_mmu.h>
const char trace_ext_prefix[] = "TEE-CORE";
int trace_level = TRACE_LEVEL;
+static unsigned int puts_lock = SPINLOCK_UNLOCK;
void trace_ext_puts(const char *str)
{
+ uint32_t itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ bool mmu_enabled = cpu_mmu_enabled();
+ bool was_contended = false;
const char *p;
+ if (mmu_enabled && !cpu_spin_trylock(&puts_lock)) {
+ was_contended = true;
+ cpu_spin_lock(&puts_lock);
+ }
+
console_flush();
+ if (was_contended)
+ console_putc('*');
+
for (p = str; *p; p++)
console_putc(*p);
console_flush();
+
+ if (mmu_enabled)
+ cpu_spin_unlock(&puts_lock);
+
+ thread_unmask_exceptions(itr_status);
}
int trace_ext_get_thread_id(void)
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
index a63fb22..5c9aae8 100644
--- a/core/arch/arm/kernel/user_ta.c
+++ b/core/arch/arm/kernel/user_ta.c
@@ -194,8 +194,8 @@ static TEE_Result config_final_paging(struct user_ta_ctx *utc)
size_t vasize = utc->mmu->ta_private_vmem_end -
utc->mmu->ta_private_vmem_start;
- cache_maintenance_l1(DCACHE_AREA_CLEAN, va, vasize);
- cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va, vasize);
+ cache_op_inner(DCACHE_AREA_CLEAN, va, vasize);
+ cache_op_inner(ICACHE_AREA_INVALIDATE, va, vasize);
return TEE_SUCCESS;
}
#endif /*!CFG_PAGED_USER_TA*/
@@ -386,7 +386,7 @@ static TEE_Result ta_load(const TEE_UUID *uuid, const struct shdr *signed_ta,
uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
/* opt_flags: optional flags */
uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
- TA_FLAG_MULTI_SESSION | TA_FLAG_UNSAFE_NW_PARAMS |
+ TA_FLAG_MULTI_SESSION | TA_FLAG_SECURE_DATA_PATH |
TA_FLAG_INSTANCE_KEEP_ALIVE | TA_FLAG_CACHE_MAINTENANCE;
struct user_ta_ctx *utc = NULL;
struct shdr *sec_shdr = NULL;
@@ -748,8 +748,8 @@ static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
va = mobj_get_va(utc->mobj_code, 0);
if (va) {
memset(va, 0, utc->mobj_code->size);
- cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
- utc->mobj_code->size);
+ cache_op_inner(DCACHE_AREA_CLEAN, va,
+ utc->mobj_code->size);
}
}
@@ -757,8 +757,8 @@ static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
va = mobj_get_va(utc->mobj_stack, 0);
if (va) {
memset(va, 0, utc->mobj_stack->size);
- cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
- utc->mobj_stack->size);
+ cache_op_inner(DCACHE_AREA_CLEAN, va,
+ utc->mobj_stack->size);
}
}
}
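In config_final_paging() the renamed cache_op_inner() keeps the clean-then-invalidate pair needed whenever freshly written code must become fetchable; the destroy paths above only clean after wiping. The first pattern as a helper (a sketch, using the same opcodes as in this diff):

static void sync_icache_for_exec(void *va, size_t len)
{
        /* push the new bytes out of the D-cache ... */
        cache_op_inner(DCACHE_AREA_CLEAN, va, len);
        /* ... then drop any stale lines from the I-cache */
        cache_op_inner(ICACHE_AREA_INVALIDATE, va, len);
}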
diff --git a/core/arch/arm/kernel/wait_queue.c b/core/arch/arm/kernel/wait_queue.c
index a96e0fe..6fb4456 100644
--- a/core/arch/arm/kernel/wait_queue.c
+++ b/core/arch/arm/kernel/wait_queue.c
@@ -43,7 +43,8 @@ void wq_init(struct wait_queue *wq)
}
static void wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
- const char *fname, int lineno __maybe_unused)
+ int owner __maybe_unused, const char *fname,
+ int lineno __maybe_unused)
{
uint32_t ret;
struct optee_msg_param params;
@@ -51,10 +52,10 @@ static void wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
func == OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
if (fname)
- DMSG("%s thread %u %p %s:%d", cmd_str, id,
- sync_obj, fname, lineno);
+ DMSG("%s thread %u %p %d %s:%d", cmd_str, id,
+ sync_obj, owner, fname, lineno);
else
- DMSG("%s thread %u %p", cmd_str, id, sync_obj);
+ DMSG("%s thread %u %p %d", cmd_str, id, sync_obj, owner);
memset(&params, 0, sizeof(params));
params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
@@ -99,14 +100,15 @@ void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
}
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
- const void *sync_obj, const char *fname, int lineno)
+ const void *sync_obj, int owner, const char *fname,
+ int lineno)
{
uint32_t old_itr_status;
unsigned done;
do {
wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
- sync_obj, fname, lineno);
+ sync_obj, owner, fname, lineno);
old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
cpu_spin_lock(&wq_spin_lock);
@@ -145,7 +147,7 @@ void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
if (do_wakeup)
wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP, handle,
- sync_obj, fname, lineno);
+ sync_obj, MUTEX_OWNER_ID_MUTEX_UNLOCK, fname, lineno);
}
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,