Diffstat (limited to 'core/arch/arm/mm/core_mmu.c')
 core/arch/arm/mm/core_mmu.c | 152
 1 file changed, 137 insertions(+), 15 deletions(-)
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index 62dda73..f85e496 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -101,6 +101,10 @@ static struct memaccess_area nsec_shared[] = {
MEMACCESS_AREA(CFG_SHMEM_START, CFG_SHMEM_SIZE),
};
+#ifdef CFG_TEE_SDP_MEM_BASE
+register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
+#endif
+
register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
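For context, a platform opts in by providing the SDP pool, typically in its platform_config.h or conf.mk; the sketch below uses placeholder values that are not taken from this patch. register_sdp_mem() then records the area in the phys_sdp_mem_section table consumed by the CFG_SECURE_DATA_PATH code further down.

/* Hypothetical platform_config.h excerpt (placeholder carve-out) */
#define CFG_TEE_SDP_MEM_BASE	0xbfc00000	/* hypothetical DRAM carve-out base */
#define CFG_TEE_SDP_MEM_SIZE	0x00400000	/* 4 MiB, hypothetical size */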
@@ -222,6 +226,107 @@ static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
return NULL;
}
+#ifdef CFG_SECURE_DATA_PATH
+extern const struct core_mmu_phys_mem __start_phys_sdp_mem_section;
+extern const struct core_mmu_phys_mem __end_phys_sdp_mem_section;
+
+static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
+{
+ const struct core_mmu_phys_mem *mem;
+
+ for (mem = &__start_phys_sdp_mem_section;
+ mem < &__end_phys_sdp_mem_section; mem++)
+ if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
+ return true;
+
+ return false;
+}
+
+#define MSG_SDP_INTERSECT(pa1, sz1, pa2, sz2) \
+ EMSG("[%" PRIxPA " %" PRIxPA "] intersects [%" PRIxPA " %" PRIxPA "]", \
+ pa1, pa1 + sz1, pa2, pa2 + sz2)
+
+/* Check that SDP memory areas do not overlap each other or any mapped memory */
+static void verify_sdp_mem_areas(struct tee_mmap_region *mem_map, size_t len)
+{
+ const struct core_mmu_phys_mem *mem;
+ const struct core_mmu_phys_mem *mem2;
+ const struct core_mmu_phys_mem *start = &__start_phys_sdp_mem_section;
+ const struct core_mmu_phys_mem *end = &__end_phys_sdp_mem_section;
+ struct tee_mmap_region *mmap;
+ size_t n;
+
+ if (start == end) {
+ IMSG("Secure data path enabled without any SDP memory area");
+ return;
+ }
+
+ for (mem = start; mem < end; mem++)
+ DMSG("SDP memory [%" PRIxPA " %" PRIxPA "]",
+ mem->addr, mem->addr + mem->size);
+
+ /* Check SDP memories do not intersect each other */
+ for (mem = start; mem < end - 1; mem++) {
+ for (mem2 = mem + 1; mem2 < end; mem2++) {
+ if (core_is_buffer_intersect(mem2->addr, mem2->size,
+ mem->addr, mem->size)) {
+ MSG_SDP_INTERSECT(mem2->addr, mem2->size,
+ mem->addr, mem->size);
+ panic("SDP memory intersection");
+ }
+ }
+ }
+
+ /*
+ * Check SDP memories do not intersect any mapped memory.
+ * This is called before reserved VA space is loaded in mem_map.
+ */
+ for (mem = start; mem < end; mem++) {
+ for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
+ if (core_is_buffer_intersect(mem->addr, mem->size,
+ mmap->pa, mmap->size)) {
+ MSG_SDP_INTERSECT(mem->addr, mem->size,
+ mmap->pa, mmap->size);
+ panic("SDP memory intersection");
+ }
+ }
+ }
+}
+
+struct mobj **core_sdp_mem_create_mobjs(void)
+{
+ const struct core_mmu_phys_mem *mem;
+ struct mobj **mobj_base;
+ struct mobj **mobj;
+ int cnt = &__end_phys_sdp_mem_section - &__start_phys_sdp_mem_section;
+
+ /* SDP mobjs table must end with a NULL entry */
+ mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
+ if (!mobj_base)
+ panic("Out of memory");
+
+ for (mem = &__start_phys_sdp_mem_section, mobj = mobj_base;
+ mem < &__end_phys_sdp_mem_section; mem++, mobj++) {
+ *mobj = mobj_phys_alloc(mem->addr, mem->size,
+ TEE_MATTR_CACHE_CACHED,
+ CORE_MEM_SDP_MEM);
+ if (!*mobj)
+ panic("can't create SDP physical memory object");
+ }
+ return mobj_base;
+}
+#else /* CFG_SECURE_DATA_PATH */
+static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
+{
+ return false;
+}
+
+static void verify_sdp_mem_areas(struct tee_mmap_region *mem_map __unused,
+ size_t len __unused)
+{
+}
+#endif /* CFG_SECURE_DATA_PATH */
+
extern const struct core_mmu_phys_mem __start_phys_mem_map_section;
extern const struct core_mmu_phys_mem __end_phys_mem_map_section;
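A minimal sketch of how a caller could walk the NULL-terminated table returned by core_sdp_mem_create_mobjs(); the real consumer lives elsewhere in the tree, and the function below is purely illustrative, assuming the declaration is visible via mm/core_mmu.h.

#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <trace.h>

static void dump_sdp_mobjs(void)
{
	struct mobj **mobjs = core_sdp_mem_create_mobjs();
	size_t n;

	/* The table is terminated by a NULL entry, as noted in the patch */
	for (n = 0; mobjs[n]; n++)
		DMSG("SDP pool %zu: %zu bytes", n, mobjs[n]->size);
}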
@@ -353,6 +458,8 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
add_phys_mem(memory_map, num_elems, &m, &last);
}
+ verify_sdp_mem_areas(memory_map, num_elems);
+
add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
RES_VASPACE_SIZE, &last);
@@ -382,7 +489,7 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
assert(map->type == MEM_AREA_TEE_RAM);
map->va = map->pa;
#ifdef CFG_WITH_PAGER
- map->region_size = SMALL_PAGE_SIZE,
+ map->region_size = SMALL_PAGE_SIZE;
#endif
map->attr = core_mmu_type_to_attr(map->type);
@@ -393,6 +500,7 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
while (map->type != MEM_AREA_NOTYPE) {
map->attr = core_mmu_type_to_attr(map->type);
va -= map->size;
+ va = ROUNDDOWN(va, map->region_size);
map->va = va;
map++;
}
@@ -413,6 +521,7 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
map++;
while (map->type != MEM_AREA_NOTYPE) {
map->attr = core_mmu_type_to_attr(map->type);
+ va = ROUNDUP(va, map->region_size);
map->va = va;
va += map->size;
map++;
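The two ROUNDDOWN/ROUNDUP additions ensure each region's virtual base is aligned to that region's mapping granularity rather than just packed back-to-back. A standalone sketch of the arithmetic with made-up numbers; OP-TEE's own ROUNDDOWN lives in <util.h> and is redefined here only to keep the sketch self-contained.

#include <stdio.h>

#define ROUNDDOWN(v, size)	((v) & ~((unsigned long)(size) - 1))

int main(void)
{
	unsigned long va = 0x2ef6000;	/* candidate VA after "va -= map->size" */

	/* A region mapped with 1 MiB granularity must start on a 1 MiB boundary */
	printf("%#lx\n", ROUNDDOWN(va, 0x100000));	/* prints 0x2e00000 */
	return 0;
}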
@@ -541,6 +650,8 @@ bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
return pbuf_inside_map_area(pbuf, len, map_ta_ram);
case CORE_MEM_NSEC_SHM:
return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
+ case CORE_MEM_SDP_MEM:
+ return pbuf_is_sdp_mem(pbuf, len);
case CORE_MEM_EXTRAM:
return pbuf_is_inside(ddr, pbuf, len);
case CORE_MEM_CACHED:
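A sketch of how the new CORE_MEM_SDP_MEM attribute might then be queried to validate that a client-supplied physical buffer really lies inside a registered SDP pool; the wrapper function is illustrative and not part of this patch.

#include <mm/core_memprot.h>
#include <tee_api_types.h>
#include <tee_api_defines.h>
#include <types_ext.h>

static TEE_Result check_sdp_buffer(paddr_t pa, size_t len)
{
	/* Rejects anything outside the registered SDP pools */
	if (!core_pbuf_is(CORE_MEM_SDP_MEM, pa, len))
		return TEE_ERROR_ACCESS_DENIED;

	return TEE_SUCCESS;
}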
@@ -648,7 +759,7 @@ int core_tlb_maintenance(int op, unsigned int a)
return 0;
}
-unsigned int cache_maintenance_l1(int op, void *va, size_t len)
+TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len)
{
switch (op) {
case DCACHE_CLEAN:
@@ -672,10 +783,6 @@ unsigned int cache_maintenance_l1(int op, void *va, size_t len)
if (len)
arm_cl1_i_inv(va, (char *)va + len - 1);
break;
- case WRITE_BUFFER_DRAIN:
- DMSG("unsupported operation 0x%X (WRITE_BUFFER_DRAIN)",
- (unsigned int)op);
- return -1;
case DCACHE_CLEAN_INV:
arm_cl1_d_cleaninvbysetway();
break;
@@ -690,31 +797,31 @@ unsigned int cache_maintenance_l1(int op, void *va, size_t len)
}
#ifdef CFG_PL310
-unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
+TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len)
{
- unsigned int ret = TEE_SUCCESS;
- uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ TEE_Result ret = TEE_SUCCESS;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
tee_l2cc_mutex_lock();
switch (op) {
- case L2CACHE_INVALIDATE:
+ case DCACHE_INVALIDATE:
arm_cl2_invbyway(pl310_base());
break;
- case L2CACHE_AREA_INVALIDATE:
+ case DCACHE_AREA_INVALIDATE:
if (len)
arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
break;
- case L2CACHE_CLEAN:
+ case DCACHE_CLEAN:
arm_cl2_cleanbyway(pl310_base());
break;
- case L2CACHE_AREA_CLEAN:
+ case DCACHE_AREA_CLEAN:
if (len)
arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
break;
- case L2CACHE_CLEAN_INV:
+ case DCACHE_CLEAN_INV:
arm_cl2_cleaninvbyway(pl310_base());
break;
- case L2CACHE_AREA_CLEAN_INV:
+ case DCACHE_AREA_CLEAN_INV:
if (len)
arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
break;
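With the rename, inner (L1) and outer (PL310) maintenance share the same enum cache_op values instead of the old L2CACHE_* set. A sketch of a caller cleaning a buffer by range before handing it to a non-secure master; the function and its error handling are illustrative, and the cache_op_inner()/cache_op_outer() declarations are assumed to be in scope since their header is not part of this hunk.

#include <mm/core_memprot.h>
#include <tee_api_types.h>

static TEE_Result clean_buf_for_nonsec(void *va, size_t len)
{
	TEE_Result res = cache_op_inner(DCACHE_AREA_CLEAN, va, len);

	if (res != TEE_SUCCESS)
		return res;
#ifdef CFG_PL310
	/* The outer cache operates on physical addresses */
	res = cache_op_outer(DCACHE_AREA_CLEAN, virt_to_phys(va), len);
#endif
	return res;
}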
@@ -1163,6 +1270,21 @@ void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
return va;
}
+void *phys_to_virt_io(paddr_t pa)
+{
+ struct tee_mmap_region *map;
+ void *va;
+
+ map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa);
+ if (!map)
+ map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa);
+ if (!map)
+ return NULL;
+ va = map_pa2va(map, pa);
+ check_va_matches_pa(pa, va);
+ return va;
+}
+
bool cpu_mmu_enabled(void)
{
uint32_t sctlr;
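phys_to_virt_io() gives drivers a single lookup that covers both MEM_AREA_IO_SEC and MEM_AREA_IO_NSEC mappings. A sketch of the usual driver pattern follows; the UART base address is a placeholder, not taken from this patch.

#include <mm/core_memprot.h>
#include <types_ext.h>

#define CONSOLE_UART_PA_BASE	0x1c090000	/* hypothetical device base */

static vaddr_t console_base(void)
{
	/* Before the MMU is up, the physical address is used directly */
	if (!cpu_mmu_enabled())
		return CONSOLE_UART_PA_BASE;

	return (vaddr_t)phys_to_virt_io(CONSOLE_UART_PA_BASE);
}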