path: root/core/arch/arm/include
author    r.tyminski <r.tyminski@partner.samsung.com>  2017-06-05 12:44:25 +0200
committer r.tyminski <r.tyminski@partner.samsung.com>  2017-06-05 12:44:25 +0200
commit    146aec115cd05a164a88e6d7b07435c57a33817f (patch)
tree      d8099075c92576b1928069af274f9b833aca996e /core/arch/arm/include
parent    f9a43781767007462965b21f3f518c4cfc0744c7 (diff)
Update from upstream to 2.4.0 version (tag: upstream/2.4.0, branch: upstream)
Change-Id: I2b3a30f20684d6629fe379d9cd7895aff759c301
Diffstat (limited to 'core/arch/arm/include')
-rw-r--r--  core/arch/arm/include/kernel/mutex.h        11
-rw-r--r--  core/arch/arm/include/kernel/pseudo_ta.h     4
-rw-r--r--  core/arch/arm/include/kernel/spinlock.h      6
-rw-r--r--  core/arch/arm/include/kernel/thread.h       64
-rw-r--r--  core/arch/arm/include/kernel/thread_defs.h   4
-rw-r--r--  core/arch/arm/include/kernel/wait_queue.h    3
-rw-r--r--  core/arch/arm/include/mm/core_memprot.h     17
-rw-r--r--  core/arch/arm/include/mm/core_mmu.h         68
-rw-r--r--  core/arch/arm/include/mm/mobj.h              5
-rw-r--r--  core/arch/arm/include/sm/optee_smc.h        12
-rw-r--r--  core/arch/arm/include/sm/sm.h               14
11 files changed, 128 insertions(+), 80 deletions(-)
diff --git a/core/arch/arm/include/kernel/mutex.h b/core/arch/arm/include/kernel/mutex.h
index 1698b35..893313e 100644
--- a/core/arch/arm/include/kernel/mutex.h
+++ b/core/arch/arm/include/kernel/mutex.h
@@ -36,6 +36,15 @@ enum mutex_value {
MUTEX_VALUE_LOCKED,
};
+/*
+ * Positive owner ids signify actual threads, negative ids have special
+ * meanings according to the defines below. Note that only the first of the
+ * defines is allowed in struct mutex::owner_id.
+ */
+#define MUTEX_OWNER_ID_NONE -1
+#define MUTEX_OWNER_ID_CONDVAR_SLEEP -2
+#define MUTEX_OWNER_ID_MUTEX_UNLOCK -3
+
struct mutex {
enum mutex_value value;
unsigned spin_lock; /* used when operating on this struct */
@@ -44,7 +53,7 @@ struct mutex {
TAILQ_ENTRY(mutex) link;
};
#define MUTEX_INITIALIZER \
- { .value = MUTEX_VALUE_UNLOCKED, .owner_id = -1, \
+ { .value = MUTEX_VALUE_UNLOCKED, .owner_id = MUTEX_OWNER_ID_NONE, \
.wq = WAIT_QUEUE_INITIALIZER, }
TAILQ_HEAD(mutex_head, mutex);
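
For context, a minimal sketch of how the new owner ids might be checked on the unlock path (the helper below is invented for illustration; per the comment above, only MUTEX_OWNER_ID_NONE may actually be stored in mutex::owner_id):

    static void mutex_unlock_owner_check(struct mutex *m)
    {
            /* The unlocking thread must be the recorded owner */
            assert(m->owner_id == thread_get_id());
            m->owner_id = MUTEX_OWNER_ID_NONE;
    }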
diff --git a/core/arch/arm/include/kernel/pseudo_ta.h b/core/arch/arm/include/kernel/pseudo_ta.h
index 98316bd..55d5e2b 100644
--- a/core/arch/arm/include/kernel/pseudo_ta.h
+++ b/core/arch/arm/include/kernel/pseudo_ta.h
@@ -38,7 +38,9 @@
TA_FLAG_MULTI_SESSION | \
TA_FLAG_INSTANCE_KEEP_ALIVE)
-#define PTA_ALLOWED_FLAGS PTA_MANDATORY_FLAGS
+#define PTA_ALLOWED_FLAGS (PTA_MANDATORY_FLAGS | \
+ TA_FLAG_SECURE_DATA_PATH)
+
#define PTA_DEFAULT_FLAGS PTA_MANDATORY_FLAGS
struct pseudo_ta_head {
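
With TA_FLAG_SECURE_DATA_PATH now among the allowed flags, a pseudo TA can opt into secure data path access at registration time. A hedged sketch using the standard pseudo_ta_register macro (UUID, name and entry point are invented):

    pseudo_ta_register(.uuid = EXAMPLE_PTA_UUID, .name = "example.pta",
                       .flags = PTA_DEFAULT_FLAGS | TA_FLAG_SECURE_DATA_PATH,
                       .invoke_command_entry_point = invoke_command);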
diff --git a/core/arch/arm/include/kernel/spinlock.h b/core/arch/arm/include/kernel/spinlock.h
index c248673..a19b764 100644
--- a/core/arch/arm/include/kernel/spinlock.h
+++ b/core/arch/arm/include/kernel/spinlock.h
@@ -59,7 +59,7 @@ unsigned int __cpu_spin_trylock(unsigned int *lock);
static inline void cpu_spin_lock(unsigned int *lock)
{
- assert(thread_irq_disabled());
+ assert(thread_foreign_intr_disabled());
__cpu_spin_lock(lock);
spinlock_count_incr();
}
@@ -68,7 +68,7 @@ static inline bool cpu_spin_trylock(unsigned int *lock)
{
unsigned int rc;
- assert(thread_irq_disabled());
+ assert(thread_foreign_intr_disabled());
rc = __cpu_spin_trylock(lock);
if (!rc)
spinlock_count_incr();
@@ -77,7 +77,7 @@ static inline bool cpu_spin_trylock(unsigned int *lock)
static inline void cpu_spin_unlock(unsigned int *lock)
{
- assert(thread_irq_disabled());
+ assert(thread_foreign_intr_disabled());
__cpu_spin_unlock(lock);
spinlock_count_decr();
}
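
The reworded asserts encode the rule that a CPU must mask foreign interrupts before taking a spinlock, so the normal world cannot preempt it while the lock is held. The usual pattern, using the thread API renamed in this same commit (my_lock is any unsigned int spinlock word, initialized to 0, i.e. unlocked):

    uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

    cpu_spin_lock(&my_lock);        /* assert passes: foreign intrs masked */
    /* ... short critical section ... */
    cpu_spin_unlock(&my_lock);

    thread_unmask_exceptions(excep);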
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
index 175ba77..831b5d6 100644
--- a/core/arch/arm/include/kernel/thread.h
+++ b/core/arch/arm/include/kernel/thread.h
@@ -30,6 +30,7 @@
#define KERNEL_THREAD_H
#ifndef ASM
+#include <arm.h>
#include <types_ext.h>
#include <compiler.h>
#include <optee_msg.h>
@@ -203,7 +204,7 @@ struct thread_svc_regs {
#ifndef ASM
typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
-typedef void (*thread_fiq_handler_t)(void);
+typedef void (*thread_nintr_handler_t)(void);
typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
unsigned long a1);
struct thread_handlers {
@@ -218,11 +219,12 @@ struct thread_handlers {
*
* fastcall handles fast calls which can't be preempted. This
* handler is executed with a limited stack. This handler must not
- * cause any aborts or reenenable FIQs which are temporarily masked
- * while executing this handler.
+ * cause any aborts or re-enable native interrupts which are
+ * temporarily masked while executing this handler.
*
- * TODO investigate if we should execute fastcalls and FIQs on
- * different stacks allowing FIQs to be enabled during a fastcall.
+ * TODO investigate if we should execute fastcalls and native interrupts
+ * on different stacks allowing native interrupts to be enabled during
+ * a fastcall.
*/
thread_smc_handler_t std_smc;
thread_smc_handler_t fast_smc;
@@ -231,12 +233,12 @@ struct thread_handlers {
* fiq is called as a regular function and normal ARM Calling
* Convention applies.
*
- * This handler handles FIQs which can't be preemted. This handler
- * is executed with a limited stack. This handler must not cause
- * any aborts or reenenable FIQs which are temporarily masked while
- * executing this handler.
+ * This handler handles native interrupts which can't be preempted. This
+ * handler is executed with a limited stack. This handler must not cause
+ * any aborts or re-enable native interrupts which are temporarily
+ * masked while executing this handler.
*/
- thread_fiq_handler_t fiq;
+ thread_nintr_handler_t nintr;
/*
* Power management handlers triggered from ARM Trusted Firmware.
@@ -285,28 +287,30 @@ int thread_get_id_may_fail(void);
struct thread_specific_data *thread_get_tsd(void);
/*
- * Sets IRQ status for current thread, must only be called from an
- * active thread context.
+ * Sets foreign interrupts status for current thread, must only be called
+ * from an active thread context.
*
- * enable == true -> enable IRQ
- * enable == false -> disable IRQ
+ * enable == true -> enable foreign interrupts
+ * enable == false -> disable foreign interrupts
*/
-void thread_set_irq(bool enable);
+void thread_set_foreign_intr(bool enable);
/*
- * Restores the IRQ status (in CPSR) for current thread, must only be called
- * from an active thread context.
+ * Restores the foreign interrupts status (in CPSR) for current thread, must
+ * only be called from an active thread context.
*/
-void thread_restore_irq(void);
+void thread_restore_foreign_intr(void);
/*
* Defines the bits for the exception mask used by the
* thread_*_exceptions() functions below.
+ * These definitions are compatible with both ARM32 and ARM64.
*/
-#define THREAD_EXCP_FIQ (1 << 0)
-#define THREAD_EXCP_IRQ (1 << 1)
-#define THREAD_EXCP_ABT (1 << 2)
-#define THREAD_EXCP_ALL (THREAD_EXCP_FIQ | THREAD_EXCP_IRQ | THREAD_EXCP_ABT)
+#define THREAD_EXCP_FOREIGN_INTR (ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
+#define THREAD_EXCP_NATIVE_INTR (ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
+#define THREAD_EXCP_ALL (THREAD_EXCP_FOREIGN_INTR \
+ | THREAD_EXCP_NATIVE_INTR \
+ | (ARM32_CPSR_A >> ARM32_CPSR_F_SHIFT))
/*
* thread_get_exceptions() - return current exception mask
@@ -337,18 +341,18 @@ uint32_t thread_mask_exceptions(uint32_t exceptions);
void thread_unmask_exceptions(uint32_t state);
-static inline bool thread_irq_disabled(void)
+static inline bool thread_foreign_intr_disabled(void)
{
- return !!(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}
#ifdef CFG_WITH_VFP
/*
* thread_kernel_enable_vfp() - Temporarily enables usage of VFP
*
- * IRQ is masked while VFP is enabled. User space must not be entered before
- * thread_kernel_disable_vfp() has been called to disable VFP and restore the
- * IRQ status.
+ * Foreign interrupts are masked while VFP is enabled. User space must not be
+ * entered before thread_kernel_disable_vfp() has been called to disable VFP
+ * and restore the foreign interrupt status.
*
* This function may only be called from an active thread context and may
* not be called again before thread_kernel_disable_vfp() has been called.
@@ -364,7 +368,7 @@ uint32_t thread_kernel_enable_vfp(void);
* thread_kernel_disable_vfp() - Disables usage of VFP
* @state: state variable returned by thread_kernel_enable_vfp()
*
- * Disables usage of VFP and restores IRQ status after a call to
+ * Disables usage of VFP and restores foreign interrupt status after a call to
* thread_kernel_enable_vfp().
*
* This function may only be called after a call to
@@ -484,13 +488,13 @@ bool thread_addr_is_in_stack(vaddr_t va);
/*
* Adds a mutex to the list of held mutexes for current thread
- * Requires IRQs to be disabled.
+ * Requires foreign interrupts to be disabled.
*/
void thread_add_mutex(struct mutex *m);
/*
* Removes a mutex from the list of held mutexes for current thread
- * Requires IRQs to be disabled.
+ * Requires foreign interrupts to be disabled.
*/
void thread_rem_mutex(struct mutex *m);
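
The VFP documentation above keeps the same contract under the new terminology: foreign interrupts stay masked between the two calls. A sketch of the intended pairing (kernel context only):

    #ifdef CFG_WITH_VFP
            uint32_t vfp = thread_kernel_enable_vfp(); /* masks foreign intrs */

            /* ... use VFP/NEON registers, without entering user space ... */

            thread_kernel_disable_vfp(vfp); /* restores foreign intr status */
    #endif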
diff --git a/core/arch/arm/include/kernel/thread_defs.h b/core/arch/arm/include/kernel/thread_defs.h
index 0f54569..e081895 100644
--- a/core/arch/arm/include/kernel/thread_defs.h
+++ b/core/arch/arm/include/kernel/thread_defs.h
@@ -29,7 +29,7 @@
#define KERNEL_THREAD_DEFS_H
#define THREAD_FLAGS_COPY_ARGS_ON_RETURN (1 << 0)
-#define THREAD_FLAGS_IRQ_ENABLE (1 << 1)
-#define THREAD_FLAGS_EXIT_ON_IRQ (1 << 2)
+#define THREAD_FLAGS_FOREIGN_INTR_ENABLE (1 << 1)
+#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR (1 << 2)
#endif /*KERNEL_THREAD_DEFS_H*/
diff --git a/core/arch/arm/include/kernel/wait_queue.h b/core/arch/arm/include/kernel/wait_queue.h
index eb8f881..bb53cb6 100644
--- a/core/arch/arm/include/kernel/wait_queue.h
+++ b/core/arch/arm/include/kernel/wait_queue.h
@@ -67,7 +67,8 @@ static inline void wq_wait_init(struct wait_queue *wq,
/* Waits for the wait queue element to be awakened. */
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
- const void *sync_obj, const char *fname, int lineno);
+ const void *sync_obj, int owner, const char *fname,
+ int lineno);
/* Wakes up the first wait queue element in the wait queue, if there is one */
void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
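
The extra owner parameter lets wq_wait_final() record who owns the synchronization object being slept on, pairing with the owner ids added to kernel/mutex.h above. A hedged sketch of a condvar-style sleep (m and wqe as prepared by the mutex code):

    /* Sketch: park the caller as a condvar sleeper rather than a lock waiter */
    wq_wait_final(&m->wq, &wqe, m, MUTEX_OWNER_ID_CONDVAR_SLEEP,
                  fname, lineno);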
diff --git a/core/arch/arm/include/mm/core_memprot.h b/core/arch/arm/include/mm/core_memprot.h
index b7ccd21..99514fd 100644
--- a/core/arch/arm/include/mm/core_memprot.h
+++ b/core/arch/arm/include/mm/core_memprot.h
@@ -45,14 +45,14 @@
/* memory attributes */
enum buf_is_attr {
- CORE_MEM_SEC,
+ CORE_MEM_CACHED,
+ CORE_MEM_EXTRAM,
+ CORE_MEM_NSEC_SHM,
CORE_MEM_NON_SEC,
+ CORE_MEM_SEC,
CORE_MEM_TEE_RAM,
CORE_MEM_TA_RAM,
- CORE_MEM_NSEC_SHM,
- CORE_MEM_EXTRAM,
- CORE_MEM_INTRAM,
- CORE_MEM_CACHED,
+ CORE_MEM_SDP_MEM,
};
/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
@@ -95,6 +95,13 @@ bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m);
/*
+ * Translate physical address to virtual address trying MEM_AREA_IO_SEC
+ * first then MEM_AREA_IO_NSEC if not found.
+ * Returns NULL on failure or a valid virtual address on success.
+ */
+void *phys_to_virt_io(paddr_t pa);
+
+/*
* Translate virtual address to physical address
* Returns 0 on failure or a valid physical address on success.
*/
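
The new helper suits drivers whose registers may be mapped either secure or non-secure depending on the platform. A sketch with an invented base address:

    /* Sketch: look up a UART mapped at a hypothetical physical base */
    vaddr_t base = (vaddr_t)phys_to_virt_io(0x09040000);

    if (!base)
            panic();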
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
index 03ad93d..70be5ab 100644
--- a/core/arch/arm/include/mm/core_mmu.h
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -100,6 +100,7 @@ enum teecore_memtypes {
MEM_AREA_IO_SEC,
MEM_AREA_RES_VASPACE,
MEM_AREA_TA_VASPACE,
+ MEM_AREA_SDP_MEM,
MEM_AREA_MAXTYPE
};
@@ -115,6 +116,13 @@ struct core_mmu_phys_mem {
__used __section("phys_mem_map_section") = \
{ #addr, (type), (addr), (size) }
+#define __register_sdp_mem2(pa, sz, id) \
+ static const struct core_mmu_phys_mem __phys_sdp_mem_ ## id \
+ __used __section("phys_sdp_mem_section") = \
+ { .type = MEM_AREA_SDP_MEM, .addr = (pa), .size = (sz), }
+
+#define __register_sdp_mem1(pa, sz, id) __register_sdp_mem2(pa, sz, id)
+#define register_sdp_mem(pa, sz) __register_sdp_mem1(pa, sz, __COUNTER__)
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
@@ -350,20 +358,6 @@ bool core_mmu_is_shm_cached(void);
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
-/* L1/L2 cache maintenance (op: refer to ???) */
-unsigned int cache_maintenance_l1(int op, void *va, size_t len);
-#ifdef CFG_PL310
-unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len);
-#else
-static inline unsigned int cache_maintenance_l2(int op __unused,
- paddr_t pa __unused,
- size_t len __unused)
-{
- /* Nothing to do about L2 Cache Maintenance when no PL310 */
- return TEE_SUCCESS;
-}
-#endif
-
/* various invalidate secure TLB */
enum teecore_tlb_op {
TLBINV_UNIFIEDTLB, /* invalidate unified tlb */
@@ -375,25 +369,37 @@ enum teecore_tlb_op {
int core_tlb_maintenance(int op, unsigned int a);
/* Cache maintenance operation type */
-typedef enum {
- DCACHE_CLEAN = 0x1,
- DCACHE_AREA_CLEAN = 0x2,
- DCACHE_INVALIDATE = 0x3,
- DCACHE_AREA_INVALIDATE = 0x4,
- ICACHE_INVALIDATE = 0x5,
- ICACHE_AREA_INVALIDATE = 0x6,
- WRITE_BUFFER_DRAIN = 0x7,
- DCACHE_CLEAN_INV = 0x8,
- DCACHE_AREA_CLEAN_INV = 0x9,
- L2CACHE_INVALIDATE = 0xA,
- L2CACHE_AREA_INVALIDATE = 0xB,
- L2CACHE_CLEAN = 0xC,
- L2CACHE_AREA_CLEAN = 0xD,
- L2CACHE_CLEAN_INV = 0xE,
- L2CACHE_AREA_CLEAN_INV = 0xF
-} t_cache_operation_id;
+enum cache_op {
+ DCACHE_CLEAN,
+ DCACHE_AREA_CLEAN,
+ DCACHE_INVALIDATE,
+ DCACHE_AREA_INVALIDATE,
+ ICACHE_INVALIDATE,
+ ICACHE_AREA_INVALIDATE,
+ DCACHE_CLEAN_INV,
+ DCACHE_AREA_CLEAN_INV,
+};
+
+/* L1/L2 cache maintenance */
+TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);
+#ifdef CFG_PL310
+TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len);
+#else
+static inline TEE_Result cache_op_outer(enum cache_op op __unused,
+ paddr_t pa __unused,
+ size_t len __unused)
+{
+ /* Nothing to do about L2 Cache Maintenance when no PL310 */
+ return TEE_SUCCESS;
+}
+#endif
/* Check cpu mmu enabled or not */
bool cpu_mmu_enabled(void);
+#ifdef CFG_SECURE_DATA_PATH
+/* Alloc and fill SDP memory objects table - table is NULL terminated */
+struct mobj **core_sdp_mem_create_mobjs(void);
+#endif
+
#endif /* CORE_MMU_H */
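
Two usage sketches for the additions above. A platform enabling CFG_SECURE_DATA_PATH declares its SDP pool with the new macro (the __COUNTER__ indirection merely manufactures a unique symbol name; the range below is invented):

    /* Sketch: 8 MiB of secure data path memory at a platform-specific base */
    register_sdp_mem(0x82000000, 0x00800000);

And the renamed cache maintenance API in a typical clean-before-DMA sequence, assuming virt_to_phys() from mm/core_memprot.h:

    TEE_Result res = cache_op_inner(DCACHE_AREA_CLEAN, va, len);

    if (res == TEE_SUCCESS)
            res = cache_op_outer(DCACHE_AREA_CLEAN, virt_to_phys(va), len);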
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
index d5eeb69..1a76149 100644
--- a/core/arch/arm/include/mm/mobj.h
+++ b/core/arch/arm/include/mm/mobj.h
@@ -108,6 +108,11 @@ static inline bool mobj_is_secure(struct mobj *mobj)
return mobj_matches(mobj, CORE_MEM_SEC);
}
+static inline bool mobj_is_sdp_mem(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_SDP_MEM);
+}
+
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
tee_mm_pool_t *pool);
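
A predicate like this is typically used to fence SDP buffers off from clients that must not touch them. A hedged sketch (ta_flags is an invented stand-in for the calling session's TA flags):

    if (mobj_is_sdp_mem(mobj) && !(ta_flags & TA_FLAG_SECURE_DATA_PATH))
            return TEE_ERROR_ACCESS_DENIED;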
diff --git a/core/arch/arm/include/sm/optee_smc.h b/core/arch/arm/include/sm/optee_smc.h
index b6fcd65..c369708 100644
--- a/core/arch/arm/include/sm/optee_smc.h
+++ b/core/arch/arm/include/sm/optee_smc.h
@@ -385,7 +385,7 @@
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
/*
- * Resume from RPC (for example after processing an IRQ)
+ * Resume from RPC (for example after processing a foreign interrupt)
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
@@ -470,19 +470,19 @@
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
/*
- * Deliver an IRQ in normal world.
+ * Deliver a foreign interrupt in normal world.
*
* "Call" register usage:
- * a0 OPTEE_SMC_RETURN_RPC_IRQ
+ * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
* a1-7 Resume information, must be preserved
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1-7 Preserved
*/
-#define OPTEE_SMC_RPC_FUNC_IRQ 4
-#define OPTEE_SMC_RETURN_RPC_IRQ \
- OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4
+#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
/*
* Do an RPC request. The supplied struct optee_msg_arg tells which
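
Nothing has to be delivered explicitly for this RPC: merely returning to the normal world lets the kernel take the pending interrupt, after which the secure call is resumed. A hedged sketch of how a normal world driver might dispatch it (res holds the returned SMC registers; surrounding names are illustrative):

    switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(res.a0)) {
    case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
            /* The interrupt fires now that we are back in normal world;
             * resume the secure call with a1-a7 preserved. */
            break;
    default:
            /* OPTEE_SMC_RPC_FUNC_ALLOC, _FREE, _CMD, ... */
            break;
    }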
diff --git a/core/arch/arm/include/sm/sm.h b/core/arch/arm/include/sm/sm.h
index 6368359..3446506 100644
--- a/core/arch/arm/include/sm/sm.h
+++ b/core/arch/arm/include/sm/sm.h
@@ -29,6 +29,7 @@
#ifndef SM_SM_H
#define SM_SM_H
+#include <compiler.h>
#include <types_ext.h>
struct sm_mode_regs {
@@ -120,4 +121,17 @@ void *sm_get_sp(void);
*/
void sm_init(vaddr_t stack_pointer);
+#ifndef CFG_SM_PLATFORM_HANDLER
+/*
+ * Returns false if we handled the monitor service and should now return
+ * to the non-secure state
+ */
+static inline bool sm_platform_handler(__unused struct sm_ctx *ctx)
+{
+ return true;
+}
+#else
+bool sm_platform_handler(struct sm_ctx *ctx);
+#endif
+
#endif /*SM_SM_H*/
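
A platform that wants to intercept monitor calls defines CFG_SM_PLATFORM_HANDLER and supplies the real function; returning false makes the secure monitor return straight to the non-secure state. A sketch (the function id is hypothetical; register fields follow struct sm_nsec_ctx in this header):

    /* Sketch: built only when CFG_SM_PLATFORM_HANDLER is set */
    bool sm_platform_handler(struct sm_ctx *ctx)
    {
            if (ctx->nsec.r0 != MY_PLAT_SMC_FUNC_ID) /* hypothetical id */
                    return true;    /* not ours, let the SM proceed */

            ctx->nsec.r0 = 0;       /* service the call */
            return false;           /* return to non-secure state */
    }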