summaryrefslogtreecommitdiff
path: root/core/arch/arm/include/mm
diff options
context:
space:
mode:
Diffstat (limited to 'core/arch/arm/include/mm')
-rw-r--r--  core/arch/arm/include/mm/core_memprot.h | 17
-rw-r--r--  core/arch/arm/include/mm/core_mmu.h     | 68
-rw-r--r--  core/arch/arm/include/mm/mobj.h         |  5
3 files changed, 54 insertions, 36 deletions
diff --git a/core/arch/arm/include/mm/core_memprot.h b/core/arch/arm/include/mm/core_memprot.h
index b7ccd21..99514fd 100644
--- a/core/arch/arm/include/mm/core_memprot.h
+++ b/core/arch/arm/include/mm/core_memprot.h
@@ -45,14 +45,14 @@
/* memory attributes */
enum buf_is_attr {
- CORE_MEM_SEC,
+ CORE_MEM_CACHED,
+ CORE_MEM_EXTRAM,
+ CORE_MEM_NSEC_SHM,
CORE_MEM_NON_SEC,
+ CORE_MEM_SEC,
CORE_MEM_TEE_RAM,
CORE_MEM_TA_RAM,
- CORE_MEM_NSEC_SHM,
- CORE_MEM_EXTRAM,
- CORE_MEM_INTRAM,
- CORE_MEM_CACHED,
+ CORE_MEM_SDP_MEM,
};
/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
@@ -95,6 +95,13 @@ bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m);
/*
+ * Translate physical address to virtual address trying MEM_AREA_IO_SEC
+ * first then MEM_AREA_IO_NSEC if not found.
+ * Returns NULL on failure or a valid virtual address on success.
+ */
+void *phys_to_virt_io(paddr_t pa);
+
+/*
* Translate virtual address to physical address
* Returns 0 on failure or a valid physical address on success.
*/
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
index 03ad93d..70be5ab 100644
--- a/core/arch/arm/include/mm/core_mmu.h
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -100,6 +100,7 @@ enum teecore_memtypes {
MEM_AREA_IO_SEC,
MEM_AREA_RES_VASPACE,
MEM_AREA_TA_VASPACE,
+ MEM_AREA_SDP_MEM,
MEM_AREA_MAXTYPE
};
@@ -115,6 +116,13 @@ struct core_mmu_phys_mem {
__used __section("phys_mem_map_section") = \
{ #addr, (type), (addr), (size) }
+#define __register_sdp_mem2(pa, sz, id) \
+ static const struct core_mmu_phys_mem __phys_sdp_mem_ ## id \
+ __used __section("phys_sdp_mem_section") = \
+ { .type = MEM_AREA_SDP_MEM, .addr = (pa), .size = (sz), }
+
+#define __register_sdp_mem1(pa, sz, id) __register_sdp_mem2(pa, sz, id)
+#define register_sdp_mem(pa, sz) __register_sdp_mem1(pa, sz, __COUNTER__)
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
@@ -350,20 +358,6 @@ bool core_mmu_is_shm_cached(void);
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
-/* L1/L2 cache maintenance (op: refer to ???) */
-unsigned int cache_maintenance_l1(int op, void *va, size_t len);
-#ifdef CFG_PL310
-unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len);
-#else
-static inline unsigned int cache_maintenance_l2(int op __unused,
- paddr_t pa __unused,
- size_t len __unused)
-{
- /* Nothing to do about L2 Cache Maintenance when no PL310 */
- return TEE_SUCCESS;
-}
-#endif
-
/* various invalidate secure TLB */
enum teecore_tlb_op {
TLBINV_UNIFIEDTLB, /* invalidate unified tlb */
@@ -375,25 +369,37 @@ enum teecore_tlb_op {
int core_tlb_maintenance(int op, unsigned int a);
/* Cache maintenance operation type */
-typedef enum {
- DCACHE_CLEAN = 0x1,
- DCACHE_AREA_CLEAN = 0x2,
- DCACHE_INVALIDATE = 0x3,
- DCACHE_AREA_INVALIDATE = 0x4,
- ICACHE_INVALIDATE = 0x5,
- ICACHE_AREA_INVALIDATE = 0x6,
- WRITE_BUFFER_DRAIN = 0x7,
- DCACHE_CLEAN_INV = 0x8,
- DCACHE_AREA_CLEAN_INV = 0x9,
- L2CACHE_INVALIDATE = 0xA,
- L2CACHE_AREA_INVALIDATE = 0xB,
- L2CACHE_CLEAN = 0xC,
- L2CACHE_AREA_CLEAN = 0xD,
- L2CACHE_CLEAN_INV = 0xE,
- L2CACHE_AREA_CLEAN_INV = 0xF
-} t_cache_operation_id;
+enum cache_op {
+ DCACHE_CLEAN,
+ DCACHE_AREA_CLEAN,
+ DCACHE_INVALIDATE,
+ DCACHE_AREA_INVALIDATE,
+ ICACHE_INVALIDATE,
+ ICACHE_AREA_INVALIDATE,
+ DCACHE_CLEAN_INV,
+ DCACHE_AREA_CLEAN_INV,
+};
+
+/* L1/L2 cache maintenance */
+TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);
+#ifdef CFG_PL310
+TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len);
+#else
+static inline TEE_Result cache_op_outer(enum cache_op op __unused,
+ paddr_t pa __unused,
+ size_t len __unused)
+{
+ /* Nothing to do about L2 Cache Maintenance when no PL310 */
+ return TEE_SUCCESS;
+}
+#endif
/* Check cpu mmu enabled or not */
bool cpu_mmu_enabled(void);
+#ifdef CFG_SECURE_DATA_PATH
+/* Alloc and fill SDP memory objects table - table is NULL terminated */
+struct mobj **core_sdp_mem_create_mobjs(void);
+#endif
+
#endif /* CORE_MMU_H */
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
index d5eeb69..1a76149 100644
--- a/core/arch/arm/include/mm/mobj.h
+++ b/core/arch/arm/include/mm/mobj.h
@@ -108,6 +108,11 @@ static inline bool mobj_is_secure(struct mobj *mobj)
return mobj_matches(mobj, CORE_MEM_SEC);
}
+static inline bool mobj_is_sdp_mem(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_SDP_MEM);
+}
+
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
tee_mm_pool_t *pool);