author    r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:42:10 +0200
committer r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:49:50 +0200
commit    f9a43781767007462965b21f3f518c4cfc0744c7 (patch)
tree      201509439b1d9798256227794dae6774345adf43 /core/arch/arm/mm
parent    1fed20f5471aa0dad5e4b4f79d1f2843ac88734f (diff)
download  tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.gz
          tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.bz2
          tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.zip
Initial commit with upstream sources
Change-Id: Ie9460111f21fc955102fd8732a0173b2d0499a4a
Diffstat (limited to 'core/arch/arm/mm')
-rw-r--r--  core/arch/arm/mm/core_mmu.c          | 1177
-rw-r--r--  core/arch/arm/mm/core_mmu_lpae.c     |  890
-rw-r--r--  core/arch/arm/mm/core_mmu_private.h  |   43
-rw-r--r--  core/arch/arm/mm/core_mmu_v7.c       |  790
-rw-r--r--  core/arch/arm/mm/mobj.c              |  439
-rw-r--r--  core/arch/arm/mm/pager_aes_gcm.c     |  348
-rw-r--r--  core/arch/arm/mm/pager_private.h     |   45
-rw-r--r--  core/arch/arm/mm/pgt_cache.c         |  567
-rw-r--r--  core/arch/arm/mm/sub.mk              |   12
-rw-r--r--  core/arch/arm/mm/tee_mm.c            |  354
-rw-r--r--  core/arch/arm/mm/tee_mmu.c           |  896
-rw-r--r--  core/arch/arm/mm/tee_pager.c         | 1473
12 files changed, 7034 insertions, 0 deletions
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
new file mode 100644
index 0000000..62dda73
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu.c
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This core mmu supports static section mapping (1MByte) and finer mapping
+ * with 4k pages.
+ * It should also allow the core to map/unmap (and translate va/pa) at run-time.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <kernel/tz_ssvce.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <platform_config.h>
+#include <stdlib.h>
+#include <trace.h>
+#include <util.h>
+
+#include "core_mmu_private.h"
+
+#define MAX_MMAP_REGIONS 10
+#define RES_VASPACE_SIZE (CORE_MMU_PGDIR_SIZE * 10)
+
+/*
+ * These variables are initialized before .bss is cleared. To avoid
+ * resetting them when .bss is cleared we're storing them in .data instead,
+ * even if they initially are zero.
+ */
+
+/* Default NSec shared memory allocated from NSec world */
+unsigned long default_nsec_shm_size __early_bss;
+unsigned long default_nsec_shm_paddr __early_bss;
+
+static struct tee_mmap_region
+ static_memory_map[MAX_MMAP_REGIONS + 1] __early_bss;
+static bool mem_map_inited __early_bss;
+
+static struct tee_mmap_region *map_tee_ram __early_bss;
+static struct tee_mmap_region *map_ta_ram __early_bss;
+static struct tee_mmap_region *map_nsec_shm __early_bss;
+
+/* Define the platform's memory layout. */
+struct memaccess_area {
+ paddr_t paddr;
+ size_t size;
+};
+#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
+
+static struct memaccess_area ddr[] = {
+ MEMACCESS_AREA(DRAM0_BASE, DRAM0_SIZE),
+#ifdef DRAM1_BASE
+ MEMACCESS_AREA(DRAM1_BASE, DRAM1_SIZE),
+#endif
+};
+
+static struct memaccess_area secure_only[] = {
+#ifdef TZSRAM_BASE
+ MEMACCESS_AREA(TZSRAM_BASE, TZSRAM_SIZE),
+#endif
+ MEMACCESS_AREA(TZDRAM_BASE, TZDRAM_SIZE),
+};
+
+static struct memaccess_area nsec_shared[] = {
+ MEMACCESS_AREA(CFG_SHMEM_START, CFG_SHMEM_SIZE),
+};
+
+register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
+register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
+register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
+#ifdef DEVICE0_PA_BASE
+register_phys_mem(DEVICE0_TYPE, DEVICE0_PA_BASE, DEVICE0_SIZE);
+#endif
+#ifdef DEVICE1_PA_BASE
+register_phys_mem(DEVICE1_TYPE, DEVICE1_PA_BASE, DEVICE1_SIZE);
+#endif
+#ifdef DEVICE2_PA_BASE
+register_phys_mem(DEVICE2_TYPE, DEVICE2_PA_BASE, DEVICE2_SIZE);
+#endif
+#ifdef DEVICE3_PA_BASE
+register_phys_mem(DEVICE3_TYPE, DEVICE3_PA_BASE, DEVICE3_SIZE);
+#endif
+#ifdef DEVICE4_PA_BASE
+register_phys_mem(DEVICE4_TYPE, DEVICE4_PA_BASE, DEVICE4_SIZE);
+#endif
+#ifdef DEVICE5_PA_BASE
+register_phys_mem(DEVICE5_TYPE, DEVICE5_PA_BASE, DEVICE5_SIZE);
+#endif
+#ifdef DEVICE6_PA_BASE
+register_phys_mem(DEVICE6_TYPE, DEVICE6_PA_BASE, DEVICE6_SIZE);
+#endif
+
+static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
+ paddr_t pa, size_t size)
+{
+ size_t n;
+
+ for (n = 0; n < alen; n++)
+ if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
+ return true;
+ return false;
+}
+#define pbuf_intersects(a, pa, size) \
+ _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
+
+static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
+ paddr_t pa, size_t size)
+{
+ size_t n;
+
+ for (n = 0; n < alen; n++)
+ if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
+ return true;
+ return false;
+}
+#define pbuf_is_inside(a, pa, size) \
+ _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
+
+static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa)
+{
+ if (!map)
+ return false;
+ return (pa >= map->pa && pa <= (map->pa + map->size - 1));
+}
+
+static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
+{
+ if (!map)
+ return false;
+ return (va >= map->va && va <= (map->va + map->size - 1));
+}
+
+/* check if target buffer fits in a core default map area */
+static bool pbuf_inside_map_area(unsigned long p, size_t l,
+ struct tee_mmap_region *map)
+{
+ return core_is_buffer_inside(p, l, map->pa, map->size);
+}
+
+static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
+{
+ struct tee_mmap_region *map;
+
+ for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++)
+ if (map->type == type)
+ return map;
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_type_and_pa(
+ enum teecore_memtypes type, paddr_t pa)
+{
+ struct tee_mmap_region *map;
+
+ for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ if (map->type != type)
+ continue;
+ if (pa_is_in_map(map, pa))
+ return map;
+ }
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_va(void *va)
+{
+ struct tee_mmap_region *map = static_memory_map;
+ unsigned long a = (unsigned long)va;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((a >= map->va) && (a <= (map->va - 1 + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
+{
+ struct tee_mmap_region *map = static_memory_map;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((pa >= map->pa) && (pa < (map->pa + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+extern const struct core_mmu_phys_mem __start_phys_mem_map_section;
+extern const struct core_mmu_phys_mem __end_phys_mem_map_section;
+
+static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
+ const struct core_mmu_phys_mem *mem, size_t *last)
+{
+ size_t n = 0;
+ paddr_t pa;
+ size_t size;
+
+ /*
+ * When all entries are added we'd like to have it in a sorted
+ * array first based on memory type and secondly on physical
+ * address. If some ranges of memory of the same type overlap or
+ * are next to each other, they are coalesced into one entry. This
+ * makes it easier later when building the translation tables.
+ *
+ * Note that it's valid to have the same physical memory as several
+ * different memory types, for instance the same device memory
+ * mapped as both secure and non-secure. This will probably not
+ * happen often in practice.
+ */
+ DMSG("%s %d 0x%08" PRIxPA " size 0x%08zx",
+ mem->name, mem->type, mem->addr, mem->size);
+ while (true) {
+ if (n >= (num_elems - 1)) {
+ EMSG("Out of entries (%zu) in memory_map", num_elems);
+ panic();
+ }
+ if (n == *last)
+ break;
+ pa = memory_map[n].pa;
+ size = memory_map[n].size;
+ if (mem->addr >= pa && mem->addr <= (pa + (size - 1)) &&
+ mem->type == memory_map[n].type) {
+ DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr);
+ memory_map[n].pa = MIN(pa, mem->addr);
+ memory_map[n].size = MAX(size, mem->size) +
+ (pa - memory_map[n].pa);
+ return;
+ }
+ if (mem->type < memory_map[n].type ||
+ (mem->type == memory_map[n].type && mem->addr < pa))
+ break; /* found the spot where to insert this memory */
+ n++;
+ }
+
+ memmove(memory_map + n + 1, memory_map + n,
+ sizeof(struct tee_mmap_region) * (*last - n));
+ (*last)++;
+ memset(memory_map + n, 0, sizeof(memory_map[0]));
+ memory_map[n].type = mem->type;
+ memory_map[n].pa = mem->addr;
+ memory_map[n].size = mem->size;
+}
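The merge in add_phys_mem() keeps the lowest base address and the largest span when a registered range overlaps an existing entry of the same type. As an illustration only (not part of the diff), a minimal standalone sketch of the same MIN/MAX arithmetic with hypothetical addresses:

    #include <assert.h>
    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            /* Existing entry [0x100000, +0x80000); new range [0x100000, +0x100000) */
            uint64_t entry_pa = 0x100000, entry_size = 0x80000;
            uint64_t mem_addr = 0x100000, mem_size = 0x100000;
            /* Same formulas as add_phys_mem() above */
            uint64_t merged_pa = MIN(entry_pa, mem_addr);
            uint64_t merged_size = MAX(entry_size, mem_size) + (entry_pa - merged_pa);

            assert(merged_pa == 0x100000 && merged_size == 0x100000);
            return 0;
    }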
+
+static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
+ unsigned int type, size_t size, size_t *last) {
+ size_t n = 0;
+
+ DMSG("type %d size 0x%08zx", type, size);
+ while (true) {
+ if (n >= (num_elems - 1)) {
+ EMSG("Out of entries (%zu) in memory_map", num_elems);
+ panic();
+ }
+ if (n == *last)
+ break;
+ if (type < memory_map[n].type)
+ break;
+ n++;
+ }
+
+ memmove(memory_map + n + 1, memory_map + n,
+ sizeof(struct tee_mmap_region) * (*last - n));
+ (*last)++;
+ memset(memory_map + n, 0, sizeof(memory_map[0]));
+ memory_map[n].type = type;
+ memory_map[n].size = size;
+}
+
+uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
+{
+ const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+ TEE_MATTR_GLOBAL;
+ const uint32_t cached = TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT;
+ const uint32_t noncache = TEE_MATTR_CACHE_NONCACHE <<
+ TEE_MATTR_CACHE_SHIFT;
+
+ switch (t) {
+ case MEM_AREA_TEE_RAM:
+ return attr | TEE_MATTR_SECURE | TEE_MATTR_PX | cached;
+ case MEM_AREA_TA_RAM:
+ return attr | TEE_MATTR_SECURE | cached;
+ case MEM_AREA_NSEC_SHM:
+ return attr | cached;
+ case MEM_AREA_IO_NSEC:
+ return attr | noncache;
+ case MEM_AREA_IO_SEC:
+ return attr | TEE_MATTR_SECURE | noncache;
+ case MEM_AREA_RAM_NSEC:
+ return attr | cached;
+ case MEM_AREA_RAM_SEC:
+ return attr | TEE_MATTR_SECURE | cached;
+ case MEM_AREA_RES_VASPACE:
+ return 0;
+ default:
+ panic("invalid type");
+ }
+}
+
+static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
+{
+ const struct core_mmu_phys_mem *mem;
+ struct tee_mmap_region *map;
+ size_t last = 0;
+ vaddr_t va;
+ size_t n;
+
+ for (mem = &__start_phys_mem_map_section;
+ mem < &__end_phys_mem_map_section; mem++) {
+ struct core_mmu_phys_mem m = *mem;
+
+ if (m.type == MEM_AREA_IO_NSEC || m.type == MEM_AREA_IO_SEC) {
+ m.addr = ROUNDDOWN(m.addr, CORE_MMU_PGDIR_SIZE);
+ m.size = ROUNDUP(m.size + (mem->addr - m.addr),
+ CORE_MMU_PGDIR_SIZE);
+ }
+ add_phys_mem(memory_map, num_elems, &m, &last);
+ }
+
+ add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
+ RES_VASPACE_SIZE, &last);
+
+ memory_map[last].type = MEM_AREA_NOTYPE;
+
+ /*
+ * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
+ * SMALL_PAGE_SIZE if paging is enabled.
+ */
+ for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ paddr_t mask = map->pa | map->size;
+
+ if (!(mask & CORE_MMU_PGDIR_MASK))
+ map->region_size = CORE_MMU_PGDIR_SIZE;
+ else if (!(mask & SMALL_PAGE_MASK))
+ map->region_size = SMALL_PAGE_SIZE;
+ else
+ panic("Impossible memory alignment");
+ }
+
+ /*
+ * The memory map is sorted first by type and then by address. This
+ * puts TEE_RAM first and TA_RAM second.
+ */
+ map = memory_map;
+ assert(map->type == MEM_AREA_TEE_RAM);
+ map->va = map->pa;
+#ifdef CFG_WITH_PAGER
+ map->region_size = SMALL_PAGE_SIZE;
+#endif
+ map->attr = core_mmu_type_to_attr(map->type);
+
+
+ if (core_mmu_place_tee_ram_at_top(map->pa)) {
+ va = map->va;
+ map++;
+ while (map->type != MEM_AREA_NOTYPE) {
+ map->attr = core_mmu_type_to_attr(map->type);
+ va -= map->size;
+ map->va = va;
+ map++;
+ }
+ /*
+ * The memory map should be sorted by virtual address
+ * when this function returns. As we're assigning va in
+ * the opposite direction we need to reverse the list.
+ */
+ for (n = 0; n < last / 2; n++) {
+ struct tee_mmap_region r;
+
+ r = memory_map[last - n - 1];
+ memory_map[last - n - 1] = memory_map[n];
+ memory_map[n] = r;
+ }
+ } else {
+ va = ROUNDUP(map->va + map->size, CORE_MMU_PGDIR_SIZE);
+ map++;
+ while (map->type != MEM_AREA_NOTYPE) {
+ map->attr = core_mmu_type_to_attr(map->type);
+ map->va = va;
+ va += map->size;
+ map++;
+ }
+ }
+
+ for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ vaddr_t __maybe_unused vstart;
+
+ vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
+ DMSG("type va %d 0x%08" PRIxVA "..0x%08" PRIxVA
+ " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size %#zx",
+ map->type, vstart, vstart + map->size - 1,
+ (paddr_t)map->pa, (paddr_t)map->pa + map->size - 1,
+ map->size);
+ }
+}
+
+/*
+ * core_init_mmu_map - init tee core default memory mapping
+ *
+ * this routine sets the static default tee core mapping.
+ *
+ * If an error happens, core_init_mmu_map is expected to reset (panic).
+ */
+void core_init_mmu_map(void)
+{
+ struct tee_mmap_region *map;
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
+ if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
+ secure_only[n].size))
+ panic("Invalid memory access config: sec/nsec");
+ }
+
+ if (!mem_map_inited)
+ init_mem_map(static_memory_map, ARRAY_SIZE(static_memory_map));
+
+ map = static_memory_map;
+ while (map->type != MEM_AREA_NOTYPE) {
+ switch (map->type) {
+ case MEM_AREA_TEE_RAM:
+ if (!pbuf_is_inside(secure_only, map->pa, map->size))
+ panic("TEE_RAM can't fit in secure_only");
+
+ map_tee_ram = map;
+ break;
+ case MEM_AREA_TA_RAM:
+ if (!pbuf_is_inside(secure_only, map->pa, map->size))
+ panic("TA_RAM can't fit in secure_only");
+ map_ta_ram = map;
+ break;
+ case MEM_AREA_NSEC_SHM:
+ if (!pbuf_is_inside(nsec_shared, map->pa, map->size))
+ panic("NS_SHM can't fit in nsec_shared");
+ map_nsec_shm = map;
+ break;
+ case MEM_AREA_IO_SEC:
+ case MEM_AREA_IO_NSEC:
+ case MEM_AREA_RAM_SEC:
+ case MEM_AREA_RAM_NSEC:
+ case MEM_AREA_RES_VASPACE:
+ break;
+ default:
+ EMSG("Uhandled memtype %d", map->type);
+ panic();
+ }
+ map++;
+ }
+
+ /* Check that we have the mandatory memory areas defined */
+ if (!map_tee_ram || !map_ta_ram || !map_nsec_shm)
+ panic("mandatory area(s) not found");
+
+ core_init_mmu_tables(static_memory_map);
+}
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void)
+{
+ if (!map_nsec_shm)
+ return false;
+ return map_nsec_shm->attr >> TEE_MATTR_CACHE_SHIFT ==
+ TEE_MATTR_CACHE_CACHED;
+}
+
+bool core_mmu_mattr_is_ok(uint32_t mattr)
+{
+ /*
+ * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
+ * core_mmu_v7.c:mattr_to_texcb
+ */
+
+ switch ((mattr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
+ case TEE_MATTR_CACHE_NONCACHE:
+ case TEE_MATTR_CACHE_CACHED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * test attributes of target physical buffer
+ *
+ * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
+ *
+ */
+bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
+{
+ struct tee_mmap_region *map;
+
+ /* Empty buffers comply with anything */
+ if (len == 0)
+ return true;
+
+ switch (attr) {
+ case CORE_MEM_SEC:
+ return pbuf_is_inside(secure_only, pbuf, len);
+ case CORE_MEM_NON_SEC:
+ return pbuf_is_inside(nsec_shared, pbuf, len);
+ case CORE_MEM_TEE_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_tee_ram);
+ case CORE_MEM_TA_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_ta_ram);
+ case CORE_MEM_NSEC_SHM:
+ return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
+ case CORE_MEM_EXTRAM:
+ return pbuf_is_inside(ddr, pbuf, len);
+ case CORE_MEM_CACHED:
+ map = find_map_by_pa(pbuf);
+ if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
+ return false;
+ return map->attr >> TEE_MATTR_CACHE_SHIFT ==
+ TEE_MATTR_CACHE_CACHED;
+ default:
+ return false;
+ }
+}
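A hedged usage sketch (hypothetical caller, assuming the headers already pulled in by this file and the standard GlobalPlatform error codes) showing how a buffer handed in from normal world might be validated with the flags handled in the switch above:

    /* Hypothetical helper, not part of this commit. */
    static TEE_Result check_nsec_buffer(paddr_t pa, size_t len)
    {
            /* Reject anything not entirely inside non-secure shared memory */
            if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len))
                    return TEE_ERROR_SECURITY;
            return TEE_SUCCESS;
    }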
+
+/* test attributes of target virtual buffer (in core mapping) */
+bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
+{
+ paddr_t p;
+
+ /* Empty buffers comply with anything */
+ if (len == 0)
+ return true;
+
+ p = virt_to_phys((void *)vbuf);
+ if (!p)
+ return false;
+
+ return core_pbuf_is(attr, p, len);
+}
+
+
+/* core_va2pa - teecore exported service */
+int core_va2pa_helper(void *va, paddr_t *pa)
+{
+ struct tee_mmap_region *map;
+
+ map = find_map_by_va(va);
+ if (!va_is_in_map(map, (vaddr_t)va))
+ return -1;
+
+ *pa = ((uintptr_t)va & (map->region_size - 1)) |
+ ((map->pa + (uintptr_t)va - map->va) & ~(map->region_size - 1));
+ return 0;
+}
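The two masks in core_va2pa_helper() keep the offset inside a region_size granule and translate only the granule base. A standalone sketch of the same arithmetic with hypothetical addresses (illustration only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical region: va 0x40000000 -> pa 0x80000000, 1 MiB granule */
            uint64_t region_va = 0x40000000, region_pa = 0x80000000;
            uint64_t region_size = 0x100000;
            uint64_t va = 0x40123456;
            uint64_t pa = (va & (region_size - 1)) |
                          ((region_pa + va - region_va) & ~(region_size - 1));

            assert(pa == 0x80123456);
            return 0;
    }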
+
+static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa)
+{
+ if (!pa_is_in_map(map, pa))
+ return NULL;
+ return (void *)((pa & (map->region_size - 1)) |
+ ((map->va + pa - map->pa) & ~((vaddr_t)map->region_size - 1)));
+}
+
+/*
+ * Return the virtual address range registered for a given memory area type.
+ */
+void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
+{
+ struct tee_mmap_region *map = find_map_by_type(type);
+
+ if (map) {
+ *s = map->va;
+ *e = map->va + map->size;
+ } else {
+ *s = 0;
+ *e = 0;
+ }
+}
+
+enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
+{
+ struct tee_mmap_region *map = find_map_by_pa(pa);
+
+ if (!map)
+ return MEM_AREA_NOTYPE;
+ return map->type;
+}
+
+int core_tlb_maintenance(int op, unsigned int a)
+{
+ /*
+ * We're doing TLB invalidation because we've changed mapping.
+ * The dsb() makes sure that written data is visible.
+ */
+ dsb();
+
+ switch (op) {
+ case TLBINV_UNIFIEDTLB:
+ secure_mmu_unifiedtlbinvall();
+ break;
+ case TLBINV_CURRENT_ASID:
+ secure_mmu_unifiedtlbinv_curasid();
+ break;
+ case TLBINV_BY_ASID:
+ secure_mmu_unifiedtlbinv_byasid(a);
+ break;
+ case TLBINV_BY_MVA:
+ EMSG("TLB_INV_SECURE_MVA is not yet supported!");
+ while (1)
+ ;
+ secure_mmu_unifiedtlbinvbymva(a);
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+unsigned int cache_maintenance_l1(int op, void *va, size_t len)
+{
+ switch (op) {
+ case DCACHE_CLEAN:
+ arm_cl1_d_cleanbysetway();
+ break;
+ case DCACHE_AREA_CLEAN:
+ if (len)
+ arm_cl1_d_cleanbyva(va, (char *)va + len - 1);
+ break;
+ case DCACHE_INVALIDATE:
+ arm_cl1_d_invbysetway();
+ break;
+ case DCACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl1_d_invbyva(va, (char *)va + len - 1);
+ break;
+ case ICACHE_INVALIDATE:
+ arm_cl1_i_inv_all();
+ break;
+ case ICACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl1_i_inv(va, (char *)va + len - 1);
+ break;
+ case WRITE_BUFFER_DRAIN:
+ DMSG("unsupported operation 0x%X (WRITE_BUFFER_DRAIN)",
+ (unsigned int)op);
+ return -1;
+ case DCACHE_CLEAN_INV:
+ arm_cl1_d_cleaninvbysetway();
+ break;
+ case DCACHE_AREA_CLEAN_INV:
+ if (len)
+ arm_cl1_d_cleaninvbyva(va, (char *)va + len - 1);
+ break;
+ default:
+ return TEE_ERROR_NOT_IMPLEMENTED;
+ }
+ return TEE_SUCCESS;
+}
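A hedged usage sketch (hypothetical caller) of the area operations handled above, for example cleaning a buffer out of the L1 data cache before a non-coherent master reads it:

    /* Hypothetical helper, not part of this commit. */
    static void clean_buf_for_dma(void *va, size_t len)
    {
            if (cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len) != TEE_SUCCESS)
                    panic("L1 clean failed");
    }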
+
+#ifdef CFG_PL310
+unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
+{
+ unsigned int ret = TEE_SUCCESS;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ tee_l2cc_mutex_lock();
+ switch (op) {
+ case L2CACHE_INVALIDATE:
+ arm_cl2_invbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ case L2CACHE_CLEAN:
+ arm_cl2_cleanbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_CLEAN:
+ if (len)
+ arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ case L2CACHE_CLEAN_INV:
+ arm_cl2_cleaninvbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_CLEAN_INV:
+ if (len)
+ arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ default:
+ ret = TEE_ERROR_NOT_IMPLEMENTED;
+ }
+
+ tee_l2cc_mutex_unlock();
+ thread_set_exceptions(exceptions);
+ return ret;
+}
+#endif /*CFG_PL310*/
+
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr)
+{
+ assert(idx < tbl_info->num_entries);
+ core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
+ idx, pa, attr);
+}
+
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ assert(idx < tbl_info->num_entries);
+ core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
+ idx, pa, attr);
+}
+
+static void set_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned end;
+ unsigned idx;
+ paddr_t pa;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+ pa = region->pa;
+
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, pa, region->attr);
+ idx++;
+ pa += 1 << tbl_info->shift;
+ }
+}
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+static void set_pg_region(struct core_mmu_table_info *dir_info,
+ struct tee_ta_region *region, struct pgt **pgt,
+ struct core_mmu_table_info *pg_info)
+{
+ struct tee_mmap_region r = {
+ .va = region->va,
+ .size = region->size,
+ .attr = region->attr,
+ };
+ vaddr_t end = r.va + r.size;
+ uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
+
+ while (r.va < end) {
+ if (!pg_info->table ||
+ r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
+ /*
+ * We're assigning a new translation table.
+ */
+ unsigned int idx;
+
+ assert(*pgt); /* We should have allocated enough */
+
+ /* Virtual addresses must grow */
+ assert(r.va > pg_info->va_base);
+
+ idx = core_mmu_va2idx(dir_info, r.va);
+ pg_info->table = (*pgt)->tbl;
+ pg_info->va_base = core_mmu_idx2va(dir_info, idx);
+#ifdef CFG_PAGED_USER_TA
+ assert((*pgt)->vabase == pg_info->va_base);
+#endif
+ *pgt = SLIST_NEXT(*pgt, link);
+
+ core_mmu_set_entry(dir_info, idx,
+ virt_to_phys(pg_info->table),
+ pgt_attr);
+ }
+
+ r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
+ end - r.va);
+ if (!mobj_is_paged(region->mobj)) {
+ size_t granule = BIT(pg_info->shift);
+ size_t offset = r.va - region->va + region->offset;
+
+ if (mobj_get_pa(region->mobj, offset, granule,
+ &r.pa) != TEE_SUCCESS)
+ panic("Failed to get PA of unpaged mobj");
+ set_region(pg_info, &r);
+ }
+ r.va += r.size;
+ }
+}
+
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc)
+{
+ struct core_mmu_table_info pg_info;
+ struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
+ struct pgt *pgt;
+ size_t n;
+
+ /* Find the last valid entry */
+ n = ARRAY_SIZE(utc->mmu->regions);
+ while (true) {
+ n--;
+ if (utc->mmu->regions[n].size)
+ break;
+ if (!n)
+ return; /* Nothing to map */
+ }
+
+ /*
+ * Allocate all page tables in advance.
+ */
+ pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->regions[0].va,
+ utc->mmu->regions[n].va + utc->mmu->regions[n].size - 1);
+ pgt = SLIST_FIRST(pgt_cache);
+
+ core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++)
+ mobj_update_mapping(utc->mmu->regions[n].mobj, utc,
+ utc->mmu->regions[n].va);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ set_pg_region(dir_info, utc->mmu->regions + n, &pgt, &pg_info);
+ }
+}
+
+#else
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc)
+{
+ unsigned n;
+ struct tee_mmap_region r;
+ size_t offset;
+ size_t granule = BIT(dir_info->shift);
+
+ memset(&r, 0, sizeof(r));
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+
+ offset = utc->mmu->regions[n].offset;
+ r.va = utc->mmu->regions[n].va;
+ r.size = utc->mmu->regions[n].size;
+ r.attr = utc->mmu->regions[n].attr;
+
+ if (mobj_get_pa(utc->mmu->regions[n].mobj, offset, granule,
+ &r.pa) != TEE_SUCCESS)
+ panic("Failed to get PA of unpaged mobj");
+
+ set_region(dir_info, &r);
+ }
+}
+#endif
+
+bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
+{
+ struct core_mmu_table_info tbl_info;
+ struct tee_mmap_region *map;
+ size_t n;
+ size_t granule;
+ paddr_t p;
+ size_t l;
+
+ if (!len)
+ return true;
+
+ /* Check if the memory is already mapped */
+ map = find_map_by_type_and_pa(type, addr);
+ if (map && pbuf_inside_map_area(addr, len, map))
+ return true;
+
+ /* Find the reserved va space used for late mappings */
+ map = find_map_by_type(MEM_AREA_RES_VASPACE);
+ if (!map)
+ return false;
+
+ if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
+ return false;
+
+ granule = 1 << tbl_info.shift;
+ p = ROUNDDOWN(addr, granule);
+ l = ROUNDUP(len + addr - p, granule);
+ /*
+ * Something is wrong, we can't fit the va range into the selected
+ * table. The reserved va range is possibly misaligned with the
+ * granule.
+ */
+ if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
+ return false;
+
+ /* Find end of the memory map */
+ n = 0;
+ while (static_memory_map[n].type != MEM_AREA_NOTYPE)
+ n++;
+
+ if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
+ /* There's room for another entry */
+ static_memory_map[n].va = map->va;
+ static_memory_map[n].size = l;
+ static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
+ map->va += l;
+ map->size -= l;
+ map = static_memory_map + n;
+ } else {
+ /*
+ * There isn't room for another entry, steal the reserved
+ * entry as it's not useful for anything else any longer.
+ */
+ map->size = l;
+ }
+ map->type = type;
+ map->region_size = granule;
+ map->attr = core_mmu_type_to_attr(type);
+ map->pa = p;
+
+ set_region(&tbl_info, map);
+ return true;
+}
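A hedged sketch of late-mapping a device with core_mmu_add_mapping() and then resolving its virtual address; dev_pa and dev_size stand in for a platform-specific range, they are not values from this tree:

    /* Hypothetical runtime mapping of a secure peripheral. */
    static vaddr_t map_secure_device(paddr_t dev_pa, size_t dev_size)
    {
            if (!core_mmu_add_mapping(MEM_AREA_IO_SEC, dev_pa, dev_size))
                    panic("cannot map device");
            return (vaddr_t)phys_to_virt(dev_pa, MEM_AREA_IO_SEC);
    }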
+
+static bool arm_va2pa_helper(void *va, paddr_t *pa)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+ paddr_t par;
+ paddr_t par_pa_mask;
+ bool ret = false;
+
+#ifdef ARM32
+ write_ats1cpr((vaddr_t)va);
+ isb();
+#ifdef CFG_WITH_LPAE
+ par = read_par64();
+ par_pa_mask = PAR64_PA_MASK;
+#else
+ par = read_par32();
+ par_pa_mask = PAR32_PA_MASK;
+#endif
+#endif /*ARM32*/
+
+#ifdef ARM64
+ write_at_s1e1r((vaddr_t)va);
+ isb();
+ par = read_par_el1();
+ par_pa_mask = PAR_PA_MASK;
+#endif
+ if (par & PAR_F)
+ goto out;
+ *pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
+ ((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));
+
+ ret = true;
+out:
+ thread_unmask_exceptions(exceptions);
+ return ret;
+}
+
+#ifdef CFG_WITH_PAGER
+static vaddr_t get_linear_map_end(void)
+{
+ /* this is synced with the generic linker file kern.ld.S */
+ return (vaddr_t)__heap2_end;
+}
+#endif
+
+#if defined(CFG_TEE_CORE_DEBUG)
+static void check_pa_matches_va(void *va, paddr_t pa)
+{
+ TEE_Result res;
+ vaddr_t v = (vaddr_t)va;
+ paddr_t p = 0;
+
+ if (core_mmu_user_va_range_is_defined()) {
+ vaddr_t user_va_base;
+ size_t user_va_size;
+
+ core_mmu_get_user_va_range(&user_va_base, &user_va_size);
+ if (v >= user_va_base &&
+ v <= (user_va_base - 1 + user_va_size)) {
+ if (!core_mmu_user_mapping_is_active()) {
+ if (pa)
+ panic("issue in linear address space");
+ return;
+ }
+
+ res = tee_mmu_user_va2pa_helper(
+ to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
+ if (res == TEE_SUCCESS && pa != p)
+ panic("bad pa");
+ if (res != TEE_SUCCESS && pa)
+ panic("false pa");
+ return;
+ }
+ }
+#ifdef CFG_WITH_PAGER
+ if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
+ if (v != pa)
+ panic("issue in linear address space");
+ return;
+ }
+ if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
+ v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ uint32_t a;
+
+ /*
+ * Lookups in the page table managed by the pager are
+ * dangerous for addresses in the paged area as those pages
+ * change all the time. But some ranges are safe, for
+ * instance rw-locked areas while the page is populated.
+ */
+ core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
+ if (a & TEE_MATTR_VALID_BLOCK) {
+ paddr_t mask = ((1 << ti->shift) - 1);
+
+ p |= v & mask;
+ if (pa != p)
+ panic();
+ } else
+ if (pa)
+ panic();
+ return;
+ }
+#endif
+ if (!core_va2pa_helper(va, &p)) {
+ if (pa != p)
+ panic();
+ } else {
+ if (pa)
+ panic();
+ }
+}
+#else
+static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
+{
+}
+#endif
+
+paddr_t virt_to_phys(void *va)
+{
+ paddr_t pa;
+
+ if (!arm_va2pa_helper(va, &pa))
+ pa = 0;
+ check_pa_matches_va(va, pa);
+ return pa;
+}
+
+#if defined(CFG_TEE_CORE_DEBUG)
+static void check_va_matches_pa(paddr_t pa, void *va)
+{
+ if (va && virt_to_phys(va) != pa)
+ panic();
+}
+#else
+static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
+{
+}
+#endif
+
+static void *phys_to_virt_ta_vaspace(paddr_t pa)
+{
+ TEE_Result res;
+ void *va = NULL;
+
+ if (!core_mmu_user_mapping_is_active())
+ return NULL;
+
+ res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
+ pa, &va);
+ if (res != TEE_SUCCESS)
+ return NULL;
+ return va;
+}
+
+#ifdef CFG_WITH_PAGER
+static void *phys_to_virt_tee_ram(paddr_t pa)
+{
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ unsigned idx;
+ unsigned end_idx;
+ uint32_t a;
+ paddr_t p;
+
+ if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
+ return (void *)(vaddr_t)pa;
+
+ end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
+ CFG_TEE_RAM_VA_SIZE);
+ /* Most addresses are mapped linearly, try that first if possible. */
+ idx = core_mmu_va2idx(ti, pa);
+ if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
+ idx < end_idx) {
+ core_mmu_get_entry(ti, idx, &p, &a);
+ if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
+ return (void *)core_mmu_idx2va(ti, idx);
+ }
+
+ for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
+ idx < end_idx; idx++) {
+ core_mmu_get_entry(ti, idx, &p, &a);
+ if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
+ return (void *)core_mmu_idx2va(ti, idx);
+ }
+
+ return NULL;
+}
+#else
+static void *phys_to_virt_tee_ram(paddr_t pa)
+{
+ return map_pa2va(find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa), pa);
+}
+#endif
+
+void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
+{
+ void *va;
+
+ switch (m) {
+ case MEM_AREA_TA_VASPACE:
+ va = phys_to_virt_ta_vaspace(pa);
+ break;
+ case MEM_AREA_TEE_RAM:
+ va = phys_to_virt_tee_ram(pa);
+ break;
+ default:
+ va = map_pa2va(find_map_by_type_and_pa(m, pa), pa);
+ }
+ check_va_matches_pa(pa, va);
+ return va;
+}
+
+bool cpu_mmu_enabled(void)
+{
+ uint32_t sctlr;
+
+#ifdef ARM32
+ sctlr = read_sctlr();
+#else
+ sctlr = read_sctlr_el1();
+#endif
+
+ return sctlr & SCTLR_M ? true : false;
+}
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
new file mode 100644
index 0000000..eb96c70
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -0,0 +1,890 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+
+#include <arm.h>
+#include <assert.h>
+#include <compiler.h>
+#include <inttypes.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <mm/core_memprot.h>
+#include <mm/pgt_cache.h>
+#include <string.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <util.h>
+
+#include "core_mmu_private.h"
+
+#ifndef DEBUG_XLAT_TABLE
+#define DEBUG_XLAT_TABLE 0
+#endif
+
+#if DEBUG_XLAT_TABLE
+#define debug_print(...) DMSG_RAW(__VA_ARGS__)
+#else
+#define debug_print(...) ((void)0)
+#endif
+
+
+/*
+ * Miscellaneous MMU related constants
+ */
+
+#define INVALID_DESC 0x0
+#define BLOCK_DESC 0x1
+#define L3_BLOCK_DESC 0x3
+#define TABLE_DESC 0x3
+#define DESC_ENTRY_TYPE_MASK 0x3
+
+#define HIDDEN_DESC 0x4
+#define HIDDEN_DIRTY_DESC 0x8
+
+#define XN (1ull << 2)
+#define PXN (1ull << 1)
+#define CONT_HINT (1ull << 0)
+
+#define UPPER_ATTRS(x) (((x) & 0x7) << 52)
+#define NON_GLOBAL (1ull << 9)
+#define ACCESS_FLAG (1ull << 8)
+#define NSH (0x0 << 6)
+#define OSH (0x2 << 6)
+#define ISH (0x3 << 6)
+
+#define AP_RO (0x1 << 5)
+#define AP_RW (0x0 << 5)
+#define AP_UNPRIV (0x1 << 4)
+
+#define NS (0x1 << 3)
+#define LOWER_ATTRS_SHIFT 2
+#define LOWER_ATTRS(x) (((x) & 0xfff) << LOWER_ATTRS_SHIFT)
+
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_NTR_INDEX 0x1
+#define ATTR_INDEX_MASK 0x7
+
+#define ATTR_DEVICE (0x4)
+#define ATTR_IWBWA_OWBWA_NTR (0xff)
+
+#define MAIR_ATTR_SET(attr, index) (((uint64_t)attr) << ((index) << 3))
+
+#define OUTPUT_ADDRESS_MASK (0x0000FFFFFFFFF000ULL)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB (0x0)
+#define TCR_PS_BITS_64GB (0x1)
+#define TCR_PS_BITS_1TB (0x2)
+#define TCR_PS_BITS_4TB (0x3)
+#define TCR_PS_BITS_16TB (0x4)
+#define TCR_PS_BITS_256TB (0x5)
+
+#define ADDR_MASK_48_TO_63 0xFFFF000000000000ULL
+#define ADDR_MASK_44_TO_47 0x0000F00000000000ULL
+#define ADDR_MASK_42_TO_43 0x00000C0000000000ULL
+#define ADDR_MASK_40_TO_41 0x0000030000000000ULL
+#define ADDR_MASK_36_TO_39 0x000000F000000000ULL
+#define ADDR_MASK_32_TO_35 0x0000000F00000000ULL
+
+#define UNSET_DESC ((uint64_t)-1)
+
+#define FOUR_KB_SHIFT 12
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
+
+#define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
+#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
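With the 4 KiB granule defined above, the shifts resolve to 12, 21 and 30, i.e. each translation level consumes 9 bits of the virtual address. A standalone sketch with a hypothetical address (illustration only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t va = 0x40201000;       /* hypothetical virtual address */

            /* Level shifts from above: L1 = 30, L2 = 21, L3 = 12; 9-bit indices */
            assert(((va >> 30) & 0x1ff) == 1);      /* L1 index */
            assert(((va >> 21) & 0x1ff) == 1);      /* L2 index */
            assert(((va >> 12) & 0x1ff) == 1);      /* L3 index */
            return 0;
    }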
+
+#define MAX_MMAP_REGIONS 16
+#define NUM_L1_ENTRIES \
+ (CFG_LPAE_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES 5
+#endif
+
+/* MMU L1 table, one for each core */
+static uint64_t l1_xlation_table[CFG_TEE_CORE_NB_CORE][NUM_L1_ENTRIES]
+ __aligned(NUM_L1_ENTRIES * XLAT_ENTRY_SIZE) __section(".nozi.mmu.l1");
+
+static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+/* MMU L2 table for TAs, one for each thread */
+static uint64_t xlat_tables_ul1[CFG_NUM_THREADS][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+
+static unsigned next_xlat __early_bss;
+static uint64_t tcr_ps_bits __early_bss;
+static int user_va_idx = -1;
+
+static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
+{
+ uint32_t a;
+
+ if (!(desc & 1)) {
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & HIDDEN_DIRTY_DESC)
+ return TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+ return 0;
+ }
+
+ if (level == 3) {
+ if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
+ return 0;
+ } else {
+ if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
+ return TEE_MATTR_TABLE;
+ }
+
+ a = TEE_MATTR_VALID_BLOCK;
+
+ if (desc & LOWER_ATTRS(ACCESS_FLAG))
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & LOWER_ATTRS(AP_RO)))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
+ a &= ~TEE_MATTR_URWX;
+
+ if (desc & UPPER_ATTRS(XN))
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & UPPER_ATTRS(PXN))
+ a &= ~TEE_MATTR_PX;
+
+ COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
+ COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
+ TEE_MATTR_CACHE_CACHED);
+
+ a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
+ TEE_MATTR_CACHE_SHIFT;
+
+ if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & LOWER_ATTRS(NS)))
+ a |= TEE_MATTR_SECURE;
+
+ return a;
+}
+
+static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint64_t desc;
+ uint32_t a = attr;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_HIDDEN_DIRTY_BLOCK)
+ return INVALID_DESC | HIDDEN_DIRTY_DESC;
+
+ if (a & TEE_MATTR_TABLE)
+ return TABLE_DESC;
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+ if (level == 3)
+ desc = L3_BLOCK_DESC;
+ else
+ desc = BLOCK_DESC;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= UPPER_ATTRS(XN);
+ if (!(a & TEE_MATTR_PX))
+ desc |= UPPER_ATTRS(PXN);
+
+ if (a & TEE_MATTR_UR)
+ desc |= LOWER_ATTRS(AP_UNPRIV);
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= LOWER_ATTRS(AP_RO);
+
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ switch ((a >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
+ case TEE_MATTR_CACHE_NONCACHE:
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ break;
+ case TEE_MATTR_CACHE_CACHED:
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ break;
+ default:
+ /*
+ * "Can't happen" the attribute is supposed to be checked
+ * with core_mmu_mattr_is_ok() before.
+ */
+ panic();
+ }
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= LOWER_ATTRS(NON_GLOBAL);
+
+ desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
+
+ return desc;
+}
+
+static uint64_t mmap_desc(uint32_t attr, uint64_t addr_pa,
+ unsigned level)
+{
+ return mattr_to_desc(level, attr) | addr_pa;
+}
+
+static int mmap_region_attr(struct tee_mmap_region *mm, uint64_t base_va,
+ uint64_t size)
+{
+ uint32_t attr = mm->attr;
+
+ for (;;) {
+ mm++;
+
+ if (!mm->size)
+ return attr; /* Reached end of list */
+
+ if (mm->va >= base_va + size)
+ return attr; /* Next region is after area so end */
+
+ if (mm->va + mm->size <= base_va)
+ continue; /* Next region has already been overtaken */
+
+ if (mm->attr == attr)
+ continue; /* Region doesn't override attribs so skip */
+
+ if (mm->va > base_va ||
+ mm->va + mm->size < base_va + size)
+ return -1; /* Region doesn't fully cover our area */
+ }
+}
+
+static struct tee_mmap_region *init_xlation_table(struct tee_mmap_region *mm,
+ uint64_t base_va, uint64_t *table, unsigned level)
+{
+ unsigned int level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned int level_size = BIT32(level_size_shift);
+ uint64_t level_index_mask = SHIFT_U64(XLAT_TABLE_ENTRIES_MASK,
+ level_size_shift);
+
+ assert(level <= 3);
+
+ debug_print("New xlat table (level %u):", level);
+
+ do {
+ uint64_t desc = UNSET_DESC;
+
+ if (mm->va + mm->size <= base_va) {
+ /* Area now after the region so skip it */
+ mm++;
+ continue;
+ }
+
+
+ if (mm->va >= base_va + level_size) {
+ /* Next region is after area so nothing to map yet */
+ desc = INVALID_DESC;
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ } else if (mm->va <= base_va &&
+ mm->va + mm->size >= base_va + level_size &&
+ !(mm->pa & (level_size - 1))) {
+ /* Next region covers all of area */
+ int attr = mmap_region_attr(mm, base_va, level_size);
+
+ if (attr >= 0) {
+ desc = mmap_desc(attr,
+ base_va - mm->va + mm->pa,
+ level);
+ debug_print("%*s%010" PRIx64 " %8x %s-%s-%s-%s",
+ level * 2, "", base_va, level_size,
+ attr & (TEE_MATTR_CACHE_CACHED <<
+ TEE_MATTR_CACHE_SHIFT) ?
+ "MEM" : "DEV",
+ attr & TEE_MATTR_PW ? "RW" : "RO",
+ attr & TEE_MATTR_PX ? "X" : "XN",
+ attr & TEE_MATTR_SECURE ? "S" : "NS");
+ } else {
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ }
+ }
+ /* else: the next region only partially covers the area, so a finer table is needed */
+
+ if (desc == UNSET_DESC) {
+ /* Area not covered by a region so need finer table */
+ uint64_t *new_table = xlat_tables[next_xlat++];
+ /* Clear table before use */
+ if (next_xlat > MAX_XLAT_TABLES)
+ panic("running out of xlat tables");
+ memset(new_table, 0, XLAT_TABLE_SIZE);
+
+ desc = TABLE_DESC | virt_to_phys(new_table);
+
+ /* Recurse to fill in new table */
+ mm = init_xlation_table(mm, base_va, new_table,
+ level + 1);
+ }
+
+ *table++ = desc;
+ base_va += level_size;
+ } while (mm->size && (base_va & level_index_mask));
+
+ return mm;
+}
+
+static unsigned int calc_physical_addr_size_bits(uint64_t max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert(!(max_addr & ADDR_MASK_48_TO_63));
+
+ /* 48 bits address */
+ if (max_addr & ADDR_MASK_44_TO_47)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if (max_addr & ADDR_MASK_42_TO_43)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if (max_addr & ADDR_MASK_40_TO_41)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if (max_addr & ADDR_MASK_36_TO_39)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if (max_addr & ADDR_MASK_32_TO_35)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
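As a worked example (hypothetical platform): a highest physical address of 0x880000000 sets a bit covered by ADDR_MASK_32_TO_35 and nothing above it, so calc_physical_addr_size_bits() returns TCR_PS_BITS_64GB, i.e. a 36-bit physical range. A standalone check of the mask arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t max_pa = 0x880000000ULL;       /* hypothetical top of DRAM */

            assert(max_pa & 0x0000000F00000000ULL);         /* bits 32..35 used */
            assert(!(max_pa & 0xFFFFFFF000000000ULL));      /* nothing above bit 35 */
            return 0;
    }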
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ paddr_t max_pa = 0;
+ uint64_t max_va = 0;
+ size_t n;
+
+ for (n = 0; mm[n].size; n++) {
+ paddr_t pa_end;
+ vaddr_t va_end;
+
+ debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
+ mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);
+
+ if (!IS_PAGE_ALIGNED(mm[n].pa) || !IS_PAGE_ALIGNED(mm[n].size))
+ panic("unaligned region");
+
+ pa_end = mm[n].pa + mm[n].size - 1;
+ va_end = mm[n].va + mm[n].size - 1;
+ if (pa_end > max_pa)
+ max_pa = pa_end;
+ if (va_end > max_va)
+ max_va = va_end;
+ }
+
+ /* Clear table before use */
+ memset(l1_xlation_table[0], 0, NUM_L1_ENTRIES * XLAT_ENTRY_SIZE);
+ init_xlation_table(mm, 0, l1_xlation_table[0], 1);
+ for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
+ memcpy(l1_xlation_table[n], l1_xlation_table[0],
+ XLAT_ENTRY_SIZE * NUM_L1_ENTRIES);
+
+ for (n = 1; n < NUM_L1_ENTRIES; n++) {
+ if (!l1_xlation_table[0][n]) {
+ user_va_idx = n;
+ break;
+ }
+ }
+ assert(user_va_idx != -1);
+
+ tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+ COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_SIZE > 0);
+ assert(max_va < CFG_LPAE_ADDR_SPACE_SIZE);
+}
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
+{
+ size_t l1size = (1 << L1_XLAT_ADDRESS_SHIFT);
+ paddr_t l1mask = l1size - 1;
+
+ return (paddr & l1mask) > (l1size / 2);
+}
+
+#ifdef ARM32
+void core_init_mmu_regs(void)
+{
+ uint32_t ttbcr = TTBCR_EAE;
+ uint32_t mair;
+ paddr_t ttbr0;
+
+ ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);
+
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ write_mair0(mair);
+
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
+ ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
+
+ /* Disable the use of TTBR1 */
+ ttbcr |= TTBCR_EPD1;
+
+ /* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
+
+ write_ttbcr(ttbcr);
+ write_ttbr0_64bit(ttbr0);
+ write_ttbr1_64bit(0);
+}
+#endif /*ARM32*/
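With the two attribute encodings installed above (ATTR_DEVICE = 0x4 at index 0, ATTR_IWBWA_OWBWA_NTR = 0xff at index 1), MAIR0 works out to 0x0000ff04. A standalone check of the macro arithmetic (illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* Same shift-by-8-per-index scheme as MAIR_ATTR_SET() above */
    #define MAIR_ATTR_SET(attr, index) (((uint64_t)(attr)) << ((index) << 3))

    int main(void)
    {
            uint64_t mair = MAIR_ATTR_SET(0x4, 0) | MAIR_ATTR_SET(0xff, 1);

            assert(mair == 0xff04);
            return 0;
    }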
+
+#ifdef ARM64
+void core_init_mmu_regs(void)
+{
+ uint64_t mair;
+ uint64_t tcr;
+ paddr_t ttbr0;
+
+ ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);
+
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ write_mair_el1(mair);
+
+ tcr = TCR_RES1;
+ tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
+ tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
+ tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
+ tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
+ tcr |= 64 - __builtin_ctzl(CFG_LPAE_ADDR_SPACE_SIZE);
+
+ /* Disable the use of TTBR1 */
+ tcr |= TCR_EPD1;
+
+ /*
+ * TCR.A1 = 0 => ASID is stored in TTBR0
+ * TCR.AS = 0 => Same ASID size as in Aarch32/ARMv7
+ */
+
+ write_tcr_el1(tcr);
+ write_ttbr0_el1(ttbr0);
+ write_ttbr1_el1(0);
+}
+#endif /*ARM64*/
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table)
+{
+ tbl_info->level = level;
+ tbl_info->table = table;
+ tbl_info->va_base = va_base;
+ tbl_info->shift = L1_XLAT_ADDRESS_SHIFT -
+ (level - 1) * XLAT_TABLE_ENTRIES_SHIFT;
+ assert(level <= 3);
+ if (level == 1)
+ tbl_info->num_entries = NUM_L1_ENTRIES;
+ else
+ tbl_info->num_entries = XLAT_TABLE_ENTRIES;
+}
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
+{
+ vaddr_t va_range_base;
+ void *tbl = xlat_tables_ul1[thread_get_id()];
+
+ core_mmu_get_user_va_range(&va_range_base, NULL);
+ core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
+}
+
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map)
+{
+ struct core_mmu_table_info dir_info;
+
+ COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
+
+ core_mmu_get_user_pgdir(&dir_info);
+ memset(dir_info.table, 0, PGT_SIZE);
+ core_mmu_populate_user_map(&dir_info, utc);
+ map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
+ map->asid = utc->context & TTBR_ASID_MASK;
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint64_t *tbl = l1_xlation_table[get_core_pos()];
+ uintptr_t ntbl;
+ unsigned level = 1;
+ vaddr_t va_base = 0;
+ unsigned num_entries = NUM_L1_ENTRIES;
+
+ while (true) {
+ unsigned level_size_shift =
+ L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned n = (va - va_base) >> level_size_shift;
+
+ if (n >= num_entries)
+ return false;
+
+ if (level == max_level || level == 3 ||
+ (tbl[n] & TABLE_DESC) != TABLE_DESC) {
+ /*
+ * We've either reached max_level, level 3, a block
+ * mapping entry or an "invalid" mapping entry.
+ */
+ tbl_info->table = tbl;
+ tbl_info->va_base = va_base;
+ tbl_info->level = level;
+ tbl_info->shift = level_size_shift;
+ tbl_info->num_entries = num_entries;
+ return true;
+ }
+
+ /* Copy bits 39:12 from tbl[n] to ntbl */
+ ntbl = (tbl[n] & ((1ULL << 40) - 1)) & ~((1 << 12) - 1);
+
+ tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM);
+ if (!tbl)
+ return false;
+
+ va_base += n << level_size_shift;
+ level++;
+ num_entries = XLAT_TABLE_ENTRIES;
+ }
+}
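A hedged sketch (hypothetical debug helper, assuming the headers used elsewhere in this tree) of walking to the deepest table covering a core virtual address and reading back one entry via the functions above:

    /* Hypothetical helper, not part of this commit. */
    static void dump_core_mapping(vaddr_t va)
    {
            struct core_mmu_table_info ti;
            paddr_t pa = 0;
            uint32_t attr = 0;

            if (!core_mmu_find_table(va, UINT_MAX, &ti))
                    panic("no table covers va");

            core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, va), &pa, &attr);
            DMSG("va %#" PRIxVA " -> pa %#" PRIxPA " attr %#" PRIx32, va, pa, attr);
    }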
+
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx)
+{
+ uint64_t *new_table;
+ uint64_t *entry;
+ uint64_t new_table_desc;
+ size_t new_entry_size;
+ paddr_t paddr;
+ uint32_t attr;
+ int i;
+
+ if (tbl_info->level >= 3)
+ return false;
+
+ if (next_xlat >= MAX_XLAT_TABLES)
+ return false;
+
+ if (tbl_info->level == 1 && idx >= NUM_L1_ENTRIES)
+ return false;
+
+ if (tbl_info->level > 1 && idx >= XLAT_TABLE_ENTRIES)
+ return false;
+
+ entry = (uint64_t *)tbl_info->table + idx;
+ assert((*entry & DESC_ENTRY_TYPE_MASK) == BLOCK_DESC);
+
+ new_table = xlat_tables[next_xlat++];
+ new_table_desc = TABLE_DESC | (uint64_t)(uintptr_t)new_table;
+
+ /* store attributes of original block */
+ attr = desc_to_mattr(tbl_info->level, *entry);
+ paddr = *entry & OUTPUT_ADDRESS_MASK;
+ new_entry_size = 1 << (tbl_info->shift - XLAT_TABLE_ENTRIES_SHIFT);
+
+ /* Fill new xlat table with entries pointing to the same memory */
+ for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
+ *new_table = paddr | mattr_to_desc(tbl_info->level + 1, attr);
+ paddr += new_entry_size;
+ new_table++;
+ }
+
+ /* Update descriptor at current level */
+ *entry = new_table_desc;
+ return true;
+}
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint64_t *tbl = table;
+ uint64_t desc = mattr_to_desc(level, attr);
+
+ tbl[idx] = desc | pa;
+}
+
+void core_mmu_get_entry_primitive(const void *table, size_t level,
+ size_t idx, paddr_t *pa, uint32_t *attr)
+{
+ const uint64_t *tbl = table;
+
+ if (pa)
+ *pa = (tbl[idx] & ((1ull << 40) - 1)) & ~((1 << 12) - 1);
+
+ if (attr)
+ *attr = desc_to_mattr(level, tbl[idx]);
+}
+
+bool core_mmu_user_va_range_is_defined(void)
+{
+ return user_va_idx != -1;
+}
+
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
+{
+ assert(user_va_idx != -1);
+
+ if (base)
+ *base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
+ if (size)
+ *size = 1 << L1_XLAT_ADDRESS_SHIFT;
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ assert(user_va_idx != -1);
+ return !!l1_xlation_table[get_core_pos()][user_va_idx];
+}
+
+#ifdef ARM32
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ assert(user_va_idx != -1);
+
+ map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
+ if (map->user_map) {
+ map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
+ TTBR_ASID_MASK;
+ } else {
+ map->asid = 0;
+ }
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint64_t ttbr;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ assert(user_va_idx != -1);
+
+ ttbr = read_ttbr0_64bit();
+ /* Clear ASID */
+ ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
+ write_ttbr0_64bit(ttbr);
+ isb();
+
+ /* Set the new map */
+ if (map && map->user_map) {
+ l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
+ dsb(); /* Make sure the write above is visible */
+ ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
+ write_ttbr0_64bit(ttbr);
+ isb();
+ } else {
+ l1_xlation_table[get_core_pos()][user_va_idx] = 0;
+ dsb(); /* Make sure the write above is visible */
+ }
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ thread_unmask_exceptions(exceptions);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
+{
+ assert(fault_descr & FSR_LPAE);
+
+ switch (fault_descr & FSR_STATUS_MASK) {
+ case 0x21: /* b100001 Alignment fault */
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
+ return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+ case 0x22: /* b100010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ default:
+ break;
+ }
+
+ switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
+ case 0x1: /* b0001LL Translation fault */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0x2: /* b0010LL Access flag fault */
+ case 0x3: /* b0011LL Permission fault */
+ if (fault_descr & FSR_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ assert(user_va_idx != -1);
+
+ map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
+ if (map->user_map) {
+ map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
+ TTBR_ASID_MASK;
+ } else {
+ map->asid = 0;
+ }
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint64_t ttbr;
+ uint32_t daif = read_daif();
+
+ write_daif(daif | DAIF_AIF);
+
+ ttbr = read_ttbr0_el1();
+ /* Clear ASID */
+ ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
+ write_ttbr0_el1(ttbr);
+ isb();
+
+ /* Set the new map */
+ if (map && map->user_map) {
+ l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
+ dsb(); /* Make sure the write above is visible */
+ ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
+ write_ttbr0_el1(ttbr);
+ isb();
+ } else {
+ l1_xlation_table[get_core_pos()][user_va_idx] = 0;
+ dsb(); /* Make sure the write above is visible */
+ }
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ write_daif(daif);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
+{
+ switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_SP_ALIGN:
+ case ESR_EC_PC_ALIGN:
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case ESR_EC_IABT_EL0:
+ case ESR_EC_DABT_EL0:
+ case ESR_EC_IABT_EL1:
+ case ESR_EC_DABT_EL1:
+ switch (fault_descr & ESR_FSC_MASK) {
+ case ESR_FSC_TRANS_L0:
+ case ESR_FSC_TRANS_L1:
+ case ESR_FSC_TRANS_L2:
+ case ESR_FSC_TRANS_L3:
+ return CORE_MMU_FAULT_TRANSLATION;
+ case ESR_FSC_ACCF_L1:
+ case ESR_FSC_ACCF_L2:
+ case ESR_FSC_ACCF_L3:
+ case ESR_FSC_PERMF_L1:
+ case ESR_FSC_PERMF_L2:
+ case ESR_FSC_PERMF_L3:
+ if (fault_descr & ESR_ABT_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ case ESR_FSC_ALIGN:
+ return CORE_MMU_FAULT_ALIGNMENT;
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
+#endif /*ARM64*/
diff --git a/core/arch/arm/mm/core_mmu_private.h b/core/arch/arm/mm/core_mmu_private.h
new file mode 100644
index 0000000..5bcb9ea
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_private.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MMU_PRIVATE_H
+#define CORE_MMU_PRIVATE_H
+
+#include <mm/core_mmu.h>
+#include <mm/tee_mmu_types.h>
+
+
+void core_init_mmu_tables(struct tee_mmap_region *mm);
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table);
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc);
+
+
+#endif /*CORE_MMU_PRIVATE_H*/
+
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
new file mode 100644
index 0000000..54f6caa
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/panic.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <platform_config.h>
+#include <stdlib.h>
+#include <string.h>
+#include <trace.h>
+#include <util.h>
+#include "core_mmu_private.h"
+
+#ifdef CFG_WITH_LPAE
+#error This file is not to be used with LPAE
+#endif
+
+/*
+ * MMU related values
+ */
+
+/* Shareable */
+#define TEE_MMU_TTB_S (1 << 1)
+
+/* Not Outer Shareable */
+#define TEE_MMU_TTB_NOS (1 << 5)
+
+/* Normal memory, Inner Non-cacheable */
+#define TEE_MMU_TTB_IRGN_NC 0
+
+/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
+
+/* Normal memory, Inner Write-Through Cacheable */
+#define TEE_MMU_TTB_IRGN_WT 1
+
+/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
+
+/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_RNG_WBWA (1 << 3)
+
+#define TEE_MMU_DEFAULT_ATTRS \
+ (TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
+ TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
+
+
+#define INVALID_DESC 0x0
+#define HIDDEN_DESC 0x4
+#define HIDDEN_DIRTY_DESC 0x8
+
+#define SECTION_SHIFT 20
+#define SECTION_MASK 0x000fffff
+#define SECTION_SIZE 0x00100000
+
+/* armv7 memory mapping attributes: section mapping */
+#define SECTION_SECURE (0 << 19)
+#define SECTION_NOTSECURE (1 << 19)
+#define SECTION_SHARED (1 << 16)
+#define SECTION_NOTGLOBAL (1 << 17)
+#define SECTION_ACCESS_FLAG (1 << 10)
+#define SECTION_UNPRIV (1 << 11)
+#define SECTION_RO (1 << 15)
+#define SECTION_TEXCB(texcb) ((((texcb) >> 2) << 12) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SECTION_DEVICE SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL_CACHED SECTION_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+
+#define SECTION_XN (1 << 4)
+#define SECTION_PXN (1 << 0)
+#define SECTION_SECTION (2 << 0)
+
+#define SECTION_PT_NOTSECURE (1 << 3)
+#define SECTION_PT_PT (1 << 0)
+
+#define SMALL_PAGE_SMALL_PAGE (1 << 1)
+#define SMALL_PAGE_SHARED (1 << 10)
+#define SMALL_PAGE_NOTGLOBAL (1 << 11)
+#define SMALL_PAGE_TEXCB(texcb) ((((texcb) >> 2) << 6) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SMALL_PAGE_DEVICE SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL_CACHED SMALL_PAGE_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+#define SMALL_PAGE_ACCESS_FLAG (1 << 4)
+#define SMALL_PAGE_UNPRIV (1 << 5)
+#define SMALL_PAGE_RO (1 << 9)
+#define SMALL_PAGE_XN (1 << 0)
+
+
+/* The TEX, C and B bits concatenated */
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_INDEX 0x1
+
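The TEXCB macros above scatter a 3-bit TEX[0]:C:B attribute index into the bit positions the short-descriptor format expects, which differ between section and small-page descriptors. A standalone sketch, illustration only and not part of the patch, with the macros copied from the defines above:

#include <stdio.h>

/* Copied from the defines above */
#define SECTION_TEXCB(texcb)	((((texcb) >> 2) << 12) | \
				 ((((texcb) >> 1) & 0x1) << 3) | \
				 (((texcb) & 0x1) << 2))
#define SMALL_PAGE_TEXCB(texcb)	((((texcb) >> 2) << 6) | \
				 ((((texcb) >> 1) & 0x1) << 3) | \
				 (((texcb) & 0x1) << 2))
#define ATTR_IWBWA_OWBWA_INDEX	0x1

int main(void)
{
	unsigned idx = ATTR_IWBWA_OWBWA_INDEX;	/* TEX[0] = 0, C = 0, B = 1 */

	/* Section descriptor: TEX[0] lands in bit 12, C in bit 3, B in bit 2 */
	printf("section attr bits:    0x%05x\n", SECTION_TEXCB(idx));
	/* Small page descriptor: TEX[0] lands in bit 6, C and B as above */
	printf("small page attr bits: 0x%05x\n", SMALL_PAGE_TEXCB(idx));
	return 0;	/* both print 0x00004: only B is set for index 1 */
}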
+#define PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \
+ ((uint32_t)(nos) << ((idx) + 24)))
+#define NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \
+ ((uint32_t)(or) << (2 * (idx) + 16)))
+#define PRRR_DS0 (1 << 16)
+#define PRRR_DS1 (1 << 17)
+#define PRRR_NS0 (1 << 18)
+#define PRRR_NS1 (1 << 19)
+
+#define ATTR_DEVICE_PRRR PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
+#define ATTR_DEVICE_NMRR NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)
+
+#define ATTR_IWBWA_OWBWA_PRRR PRRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 2, 1)
+#define ATTR_IWBWA_OWBWA_NMRR NMRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 1, 1)
+
+#define NUM_L1_ENTRIES 4096
+#define NUM_L2_ENTRIES 256
+
+#define L1_TBL_SIZE (NUM_L1_ENTRIES * 4)
+#define L2_TBL_SIZE (NUM_L2_ENTRIES * 4)
+#define L1_ALIGNMENT L1_TBL_SIZE
+#define L2_ALIGNMENT L2_TBL_SIZE
+
+/* Defined to the smallest possible secondary L1 MMU table */
+#define TTBCR_N_VALUE 7
+
+/* Number of sections in ttbr0 when user mapping activated */
+#define NUM_UL1_ENTRIES (1 << (12 - TTBCR_N_VALUE))
+#define UL1_ALIGNMENT (NUM_UL1_ENTRIES * 4)
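With TTBCR.N = 7 the table pointed to by TTBR0 becomes very small, since TTBR0 then only translates the bottom 2^(32 - 7) bytes of the address space. A standalone sketch, illustration only and not part of the patch, of the arithmetic behind NUM_UL1_ENTRIES and UL1_ALIGNMENT:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned ttbcr_n = 7;				/* TTBCR_N_VALUE above */
	uint32_t covered = 1u << (32 - ttbcr_n);	/* VA range translated via TTBR0 */
	unsigned entries = 1u << (12 - ttbcr_n);	/* 1 MByte sections in that range */
	unsigned tbl_bytes = entries * 4;		/* short descriptors are 4 bytes */

	printf("TTBR0 covers %u MByte in %u entries, table is %u bytes\n",
	       covered >> 20, entries, tbl_bytes);
	return 0;	/* prints: TTBR0 covers 32 MByte in 32 entries, table is 128 bytes */
}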
+/* TTB attributes */
+
+/* TTB0 of TTBR0 (depends on TTBCR_N_VALUE) */
+#define TTB_UL1_MASK (~(UL1_ALIGNMENT - 1))
+/* TTB1 of TTBR1 */
+#define TTB_L1_MASK (~(L1_ALIGNMENT - 1))
+
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES 4
+#endif
+
+enum desc_type {
+ DESC_TYPE_PAGE_TABLE,
+ DESC_TYPE_SECTION,
+ DESC_TYPE_SUPER_SECTION,
+ DESC_TYPE_LARGE_PAGE,
+ DESC_TYPE_SMALL_PAGE,
+ DESC_TYPE_INVALID,
+};
+
+/* Main MMU L1 table for teecore */
+static uint32_t main_mmu_l1_ttb[NUM_L1_ENTRIES]
+ __aligned(L1_ALIGNMENT) __section(".nozi.mmu.l1");
+
+/* L2 MMU tables */
+static uint32_t main_mmu_l2_ttb[MAX_XLAT_TABLES][NUM_L2_ENTRIES]
+ __aligned(L2_ALIGNMENT) __section(".nozi.mmu.l2");
+
+/* MMU L1 table for TAs, one for each thread */
+static uint32_t main_mmu_ul1_ttb[CFG_NUM_THREADS][NUM_UL1_ENTRIES]
+ __aligned(UL1_ALIGNMENT) __section(".nozi.mmu.ul1");
+
+static vaddr_t core_mmu_get_main_ttb_va(void)
+{
+ return (vaddr_t)main_mmu_l1_ttb;
+}
+
+static paddr_t core_mmu_get_main_ttb_pa(void)
+{
+ paddr_t pa = virt_to_phys((void *)core_mmu_get_main_ttb_va());
+
+ if (pa & ~TTB_L1_MASK)
+ panic("invalid core l1 table");
+ return pa;
+}
+
+static vaddr_t core_mmu_get_ul1_ttb_va(void)
+{
+ return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
+}
+
+static paddr_t core_mmu_get_ul1_ttb_pa(void)
+{
+ paddr_t pa = virt_to_phys((void *)core_mmu_get_ul1_ttb_va());
+
+ if (pa & ~TTB_UL1_MASK)
+ panic("invalid user l1 table");
+ return pa;
+}
+
+static void *core_mmu_alloc_l2(size_t size)
+{
+ /* Can't have this in .bss since it's not initialized yet */
+ static uint32_t tables_used __early_bss;
+ uint32_t to_alloc = ROUNDUP(size, NUM_L2_ENTRIES * SMALL_PAGE_SIZE) /
+ (NUM_L2_ENTRIES * SMALL_PAGE_SIZE);
+
+ if (tables_used + to_alloc > MAX_XLAT_TABLES)
+ return NULL;
+
+ tables_used += to_alloc;
+ return main_mmu_l2_ttb[tables_used - to_alloc];
+}
+
+static enum desc_type get_desc_type(unsigned level, uint32_t desc)
+{
+ assert(level >= 1 && level <= 2);
+
+ if (level == 1) {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_PAGE_TABLE;
+
+ if ((desc & 0x2) == 0x2) {
+ if (desc & (1 << 18))
+ return DESC_TYPE_SUPER_SECTION;
+ return DESC_TYPE_SECTION;
+ }
+ } else {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_LARGE_PAGE;
+
+ if ((desc & 0x2) == 0x2)
+ return DESC_TYPE_SMALL_PAGE;
+ }
+
+ return DESC_TYPE_INVALID;
+}
+
+static uint32_t texcb_to_mattr(uint32_t texcb)
+{
+ COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
+ COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_INDEX == TEE_MATTR_CACHE_CACHED);
+
+ return texcb << TEE_MATTR_CACHE_SHIFT;
+}
+
+static uint32_t mattr_to_texcb(uint32_t attr)
+{
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ return (attr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK;
+}
+
+
+static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
+{
+ uint32_t a;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_PAGE_TABLE:
+ a = TEE_MATTR_TABLE;
+ if (!(desc & SECTION_PT_NOTSECURE))
+ a |= TEE_MATTR_SECURE;
+ break;
+ case DESC_TYPE_SECTION:
+ a = TEE_MATTR_VALID_BLOCK;
+ if (desc & SECTION_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SECTION_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SECTION_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & SECTION_PXN)
+ a &= ~TEE_MATTR_PX;
+
+ a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SECTION_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & SECTION_NOTSECURE))
+ a |= TEE_MATTR_SECURE;
+
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ a = TEE_MATTR_VALID_BLOCK;
+ if (desc & SMALL_PAGE_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SMALL_PAGE_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SMALL_PAGE_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SMALL_PAGE_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+ break;
+ case DESC_TYPE_INVALID:
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & HIDDEN_DIRTY_DESC)
+ return TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+ return 0;
+ default:
+ return 0;
+ }
+
+ return a;
+}
+
+static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint32_t desc;
+ uint32_t a = attr;
+ unsigned texcb;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_HIDDEN_DIRTY_BLOCK)
+ return INVALID_DESC | HIDDEN_DIRTY_DESC;
+
+ if (level == 1 && (a & TEE_MATTR_TABLE)) {
+ desc = SECTION_PT_PT;
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_PT_NOTSECURE;
+ return desc;
+ }
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+
+ texcb = mattr_to_texcb(a);
+
+ if (level == 1) { /* Section */
+ desc = SECTION_SECTION | SECTION_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SECTION_XN;
+
+#ifdef CFG_HWSUPP_MEM_PERM_PXN
+ if (!(a & TEE_MATTR_PX))
+ desc |= SECTION_PXN;
+#endif
+
+ if (a & TEE_MATTR_UR)
+ desc |= SECTION_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SECTION_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SECTION_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SECTION_NOTGLOBAL;
+
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_NOTSECURE;
+
+ desc |= SECTION_TEXCB(texcb);
+ } else {
+ desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SMALL_PAGE_XN;
+
+ if (a & TEE_MATTR_UR)
+ desc |= SMALL_PAGE_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SMALL_PAGE_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SMALL_PAGE_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SMALL_PAGE_NOTGLOBAL;
+
+ desc |= SMALL_PAGE_TEXCB(texcb);
+ }
+
+ return desc;
+}
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table)
+{
+ tbl_info->level = level;
+ tbl_info->table = table;
+ tbl_info->va_base = va_base;
+ assert(level <= 2);
+ if (level == 1) {
+ tbl_info->shift = SECTION_SHIFT;
+ tbl_info->num_entries = NUM_L1_ENTRIES;
+ } else {
+ tbl_info->shift = SMALL_PAGE_SHIFT;
+ tbl_info->num_entries = NUM_L2_ENTRIES;
+ }
+}
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
+{
+ void *tbl = (void *)core_mmu_get_ul1_ttb_va();
+
+ core_mmu_set_info_table(pgd_info, 1, 0, tbl);
+ pgd_info->num_entries = NUM_UL1_ENTRIES;
+}
+
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map)
+{
+ struct core_mmu_table_info dir_info;
+
+ COMPILE_TIME_ASSERT(L2_TBL_SIZE == PGT_SIZE);
+
+ core_mmu_get_user_pgdir(&dir_info);
+ memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
+ core_mmu_populate_user_map(&dir_info, utc);
+ map->ttbr0 = core_mmu_get_ul1_ttb_pa() | TEE_MMU_DEFAULT_ATTRS;
+ map->ctxid = utc->context & 0xff;
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint32_t *tbl = (uint32_t *)core_mmu_get_main_ttb_va();
+ unsigned n = va >> SECTION_SHIFT;
+
+ if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
+ core_mmu_set_info_table(tbl_info, 1, 0, tbl);
+ } else {
+ paddr_t ntbl = tbl[n] & ~((1 << 10) - 1);
+ void *l2tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM);
+
+ if (!l2tbl)
+ return false;
+
+ core_mmu_set_info_table(tbl_info, 2, n << SECTION_SHIFT, l2tbl);
+ }
+ return true;
+}
+
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx)
+{
+ uint32_t *new_table;
+ uint32_t *entry;
+ uint32_t new_table_desc;
+ paddr_t paddr;
+ uint32_t attr;
+ int i;
+
+ if (tbl_info->level != 1)
+ return false;
+
+ if (idx >= NUM_L1_ENTRIES)
+ return false;
+
+ new_table = core_mmu_alloc_l2(NUM_L2_ENTRIES * SMALL_PAGE_SIZE);
+ if (!new_table)
+ return false;
+
+ entry = (uint32_t *)tbl_info->table + idx;
+ assert(get_desc_type(1, *entry) == DESC_TYPE_SECTION);
+
+ new_table_desc = SECTION_PT_PT | (uint32_t)new_table;
+ if (*entry & SECTION_NOTSECURE)
+ new_table_desc |= SECTION_PT_NOTSECURE;
+
+ /* store attributes of original block */
+ attr = desc_to_mattr(1, *entry);
+ paddr = *entry & ~SECTION_MASK;
+
+ /* Fill new xlat table with entries pointing to the same memory */
+ for (i = 0; i < NUM_L2_ENTRIES; i++) {
+ *new_table = paddr | mattr_to_desc(tbl_info->level + 1, attr);
+ paddr += SMALL_PAGE_SIZE;
+ new_table++;
+ }
+
+ /* Update descriptor at current level */
+ *entry = new_table_desc;
+ return true;
+}
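For concreteness: dividing one level-1 entry this way replaces a single 1 MByte section with an L2 table of 256 four-byte descriptors (1 kByte), whose small pages initially map the same physical 1 MByte range with the attributes inherited from the original section.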
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint32_t *tbl = table;
+ uint32_t desc = mattr_to_desc(level, attr);
+
+ tbl[idx] = desc | pa;
+}
+
+static paddr_t desc_to_pa(unsigned level, uint32_t desc)
+{
+ unsigned shift_mask;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_PAGE_TABLE:
+ shift_mask = 10;
+ break;
+ case DESC_TYPE_SECTION:
+ shift_mask = 20;
+ break;
+ case DESC_TYPE_SUPER_SECTION:
+ shift_mask = 24; /* We're ignoring bits 32 and above. */
+ break;
+ case DESC_TYPE_LARGE_PAGE:
+ shift_mask = 16;
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ shift_mask = 12;
+ break;
+ default:
+ /* Invalid section, HIDDEN_DESC, HIDDEN_DIRTY_DESC */
+ shift_mask = 4;
+ }
+
+ return desc & ~((1 << shift_mask) - 1);
+}
+
+void core_mmu_get_entry_primitive(const void *table, size_t level,
+ size_t idx, paddr_t *pa, uint32_t *attr)
+{
+ const uint32_t *tbl = table;
+
+ if (pa)
+ *pa = desc_to_pa(level, tbl[idx]);
+
+ if (attr)
+ *attr = desc_to_mattr(level, tbl[idx]);
+}
+
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
+{
+ if (base) {
+ /* Leaving the first entry unmapped to make NULL unmapped */
+ *base = 1 << SECTION_SHIFT;
+ }
+
+ if (size)
+ *size = (NUM_UL1_ENTRIES - 1) << SECTION_SHIFT;
+}
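With TTBCR_N_VALUE 7 this evaluates to a user VA window of [1 MByte, 32 MByte): 31 of the 32 available sections, the first one deliberately left unmapped so that NULL dereferences fault.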
+
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ map->ttbr0 = read_ttbr0();
+ map->ctxid = read_contextidr();
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ /*
+ * Update the reserved Context ID and TTBR0
+ */
+
+ dsb(); /* ARM erratum 754322 */
+ write_contextidr(0);
+ isb();
+
+ if (map) {
+ write_ttbr0(map->ttbr0);
+ isb();
+ write_contextidr(map->ctxid);
+ } else {
+ write_ttbr0(read_ttbr1());
+ }
+ isb();
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ /* Restore interrupts */
+ thread_unmask_exceptions(exceptions);
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ return read_ttbr0() != read_ttbr1();
+}
+
+static paddr_t map_page_memarea(struct tee_mmap_region *mm)
+{
+ uint32_t *l2 = core_mmu_alloc_l2(mm->size);
+ size_t pg_idx;
+ uint32_t attr;
+
+ if (!l2)
+ panic("no l2 table");
+
+ attr = mattr_to_desc(2, mm->attr);
+
+ /* Zero fill initial entries */
+ pg_idx = 0;
+ while ((pg_idx * SMALL_PAGE_SIZE) < (mm->va & SECTION_MASK)) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ /* Fill in the entries */
+ while ((pg_idx * SMALL_PAGE_SIZE) <
+ (mm->size + (mm->va & SECTION_MASK))) {
+ l2[pg_idx] = ((mm->pa & ~SECTION_MASK) +
+ pg_idx * SMALL_PAGE_SIZE) | attr;
+ pg_idx++;
+ }
+
+ /* Zero fill the rest */
+ while (pg_idx < ROUNDUP(mm->size, SECTION_SIZE) / SMALL_PAGE_SIZE) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ return virt_to_phys(l2);
+}
+
+/*
+ * map_memarea - load a mapping into the target L1 table.
+ * 1 MByte sections are used when the area is section aligned, otherwise
+ * the area is mapped with 4k small pages through an L2 table.
+ */
+static void map_memarea(struct tee_mmap_region *mm, uint32_t *ttb)
+{
+ size_t m, n;
+ uint32_t attr;
+ paddr_t pa;
+ uint32_t region_size;
+
+ assert(mm && ttb);
+
+ /*
+ * If mm->va is smaller than 32M, then mm->va will conflict with
+ * user TA address space. This mapping will be overridden/hidden
+ * later when a user TA is loaded since these low addresses are
+ * used as TA virtual address space.
+ *
+ * Some SoCs have devices at low addresses, so we need to map at
+ * least those devices at a virtual address which isn't the same
+ * as the physical.
+ *
+ * TODO: support mapping devices at a virtual address which isn't
+ * the same as the physical address.
+ */
+ if (mm->va < (NUM_UL1_ENTRIES * SECTION_SIZE))
+ panic("va conflicts with user ta address");
+
+ if ((mm->va | mm->pa | mm->size) & SECTION_MASK) {
+ region_size = SMALL_PAGE_SIZE;
+
+ /*
+ * Need finer grained mapping, if small pages aren't
+ * good enough, panic.
+ */
+ if ((mm->va | mm->pa | mm->size) & SMALL_PAGE_MASK)
+ panic("memarea can't be mapped");
+
+ attr = mattr_to_desc(1, mm->attr | TEE_MATTR_TABLE);
+ pa = map_page_memarea(mm);
+ } else {
+ region_size = SECTION_SIZE;
+
+ attr = mattr_to_desc(1, mm->attr);
+ pa = mm->pa;
+ }
+
+ m = (mm->va >> SECTION_SHIFT);
+ n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
+ while (n--) {
+ ttb[m] = pa | attr;
+ m++;
+ if (region_size == SECTION_SIZE)
+ pa += SECTION_SIZE;
+ else
+ pa += L2_TBL_SIZE;
+ }
+}
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ void *ttb1 = (void *)core_mmu_get_main_ttb_va();
+ size_t n;
+
+ /* reset L1 table */
+ memset(ttb1, 0, L1_TBL_SIZE);
+
+ for (n = 0; mm[n].size; n++)
+ map_memarea(mm + n, ttb1);
+}
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
+{
+ return paddr > 0x80000000;
+}
+
+void core_init_mmu_regs(void)
+{
+ uint32_t prrr;
+ uint32_t nmrr;
+ paddr_t ttb_pa = core_mmu_get_main_ttb_pa();
+
+ /* Enable Access flag (simplified access permissions) and TEX remap */
+ write_sctlr(read_sctlr() | SCTLR_AFE | SCTLR_TRE);
+
+ prrr = ATTR_DEVICE_PRRR | ATTR_IWBWA_OWBWA_PRRR;
+ nmrr = ATTR_DEVICE_NMRR | ATTR_IWBWA_OWBWA_NMRR;
+
+ prrr |= PRRR_NS1 | PRRR_DS1;
+
+ write_prrr(prrr);
+ write_nmrr(nmrr);
+
+
+ /*
+ * Program Domain access control register with two domains:
+ * domain 0: teecore
+ * domain 1: TA
+ */
+ write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
+ DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT));
+
+ /*
+ * Enable lookups using TTBR0 and TTBR1 with the split of addresses
+ * defined by TEE_MMU_TTBCR_N_VALUE.
+ */
+ write_ttbcr(TTBCR_N_VALUE);
+
+ write_ttbr0(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+ write_ttbr1(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+}
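For reference, the two register writes above end up programming fixed values that follow directly from the attribute macros defined earlier in this file: index 0 is remapped to Device memory and index 1 to Normal, inner/outer write-back write-allocate memory. A standalone sketch, illustration only and not part of the patch, with the macros copied from the defines above:

#include <stdint.h>
#include <stdio.h>

/* Copied from the defines above */
#define PRRR_IDX(idx, tr, nos)	(((tr) << (2 * (idx))) | \
				 ((uint32_t)(nos) << ((idx) + 24)))
#define NMRR_IDX(idx, ir, or)	(((ir) << (2 * (idx))) | \
				 ((uint32_t)(or) << (2 * (idx) + 16)))
#define PRRR_DS1		(1 << 17)
#define PRRR_NS1		(1 << 19)

int main(void)
{
	/* Index 0: Device, index 1: Normal write-back write-allocate */
	uint32_t prrr = PRRR_IDX(0, 1, 0) | PRRR_IDX(1, 2, 1) |
			PRRR_NS1 | PRRR_DS1;
	uint32_t nmrr = NMRR_IDX(0, 0, 0) | NMRR_IDX(1, 1, 1);

	printf("PRRR=0x%08x NMRR=0x%08x\n", prrr, nmrr);
	return 0;	/* prints: PRRR=0x020a0009 NMRR=0x00040004 */
}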
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
+{
+ assert(!(fsr & FSR_LPAE));
+
+ switch (fsr & FSR_FS_MASK) {
+ case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ case 0x4: /* DFSR[10,3:0] b00100 Fault on instr cache maintenance */
+ case 0x5: /* DFSR[10,3:0] b00101 Translation fault first level */
+ case 0x7: /* DFSR[10,3:0] b00111 Translation fault second level */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0xd: /* DFSR[10,3:0] b01101 Permission fault first level */
+ case 0xf: /* DFSR[10,3:0] b01111 Permission fault second level */
+ if (fsr & FSR_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ case 0x3: /* DFSR[10,3:0] b00011 access bit fault on section */
+ case 0x6: /* DFSR[10,3:0] b00110 access bit fault on page */
+ return CORE_MMU_FAULT_ACCESS_BIT;
+ case (1 << 10) | 0x6:
+ /* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
+ return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
diff --git a/core/arch/arm/mm/mobj.c b/core/arch/arm/mm/mobj.c
new file mode 100644
index 0000000..5458638
--- /dev/null
+++ b/core/arch/arm/mm/mobj.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <keep.h>
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <stdlib.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+struct mobj *mobj_sec_ddr;
+
+/*
+ * mobj_phys implementation
+ */
+
+struct mobj_phys {
+ struct mobj mobj;
+ enum buf_is_attr battr;
+ uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
+ vaddr_t va;
+ paddr_t pa;
+};
+
+static struct mobj_phys *to_mobj_phys(struct mobj *mobj);
+
+static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ if (!moph->va)
+ return NULL;
+
+ return (void *)(moph->va + offset);
+}
+
+static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+ paddr_t p;
+
+ if (!pa)
+ return TEE_ERROR_GENERIC;
+
+ p = moph->pa + offs;
+
+ if (granule) {
+ if (granule != SMALL_PAGE_SIZE &&
+ granule != CORE_MMU_PGDIR_SIZE)
+ return TEE_ERROR_GENERIC;
+ p &= ~(granule - 1);
+ }
+
+ *pa = p;
+ return TEE_SUCCESS;
+}
+/* ifndef due to an asserting AArch64 linker */
+#ifndef ARM64
+KEEP_PAGER(mobj_phys_get_pa);
+#endif
+
+static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ if (!cattr)
+ return TEE_ERROR_GENERIC;
+
+ *cattr = moph->cattr;
+ return TEE_SUCCESS;
+}
+
+static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+ enum buf_is_attr a;
+
+ a = moph->battr;
+
+ switch (attr) {
+ case CORE_MEM_SEC:
+ return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
+ a == CORE_MEM_TA_RAM;
+ case CORE_MEM_NON_SEC:
+ return a == CORE_MEM_NSEC_SHM;
+ case CORE_MEM_TEE_RAM:
+ case CORE_MEM_TA_RAM:
+ case CORE_MEM_NSEC_SHM:
+ return attr == a;
+ default:
+ return false;
+ }
+}
+
+static void mobj_phys_free(struct mobj *mobj)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ free(moph);
+}
+
+static const struct mobj_ops mobj_phys_ops __rodata_unpaged = {
+ .get_va = mobj_phys_get_va,
+ .get_pa = mobj_phys_get_pa,
+ .get_cattr = mobj_phys_get_cattr,
+ .matches = mobj_phys_matches,
+ .free = mobj_phys_free,
+};
+
+static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_phys_ops);
+ return container_of(mobj, struct mobj_phys, mobj);
+}
+
+struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
+ enum buf_is_attr battr)
+{
+ struct mobj_phys *moph;
+ void *va;
+
+ if ((pa & CORE_MMU_USER_PARAM_MASK) ||
+ (size & CORE_MMU_USER_PARAM_MASK)) {
+ DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
+ return NULL;
+ }
+
+ va = phys_to_virt(pa, battr);
+ if (!va)
+ return NULL;
+
+ moph = calloc(1, sizeof(*moph));
+ if (!moph)
+ return NULL;
+
+ moph->battr = battr;
+ moph->cattr = cattr;
+ moph->mobj.size = size;
+ moph->mobj.ops = &mobj_phys_ops;
+ moph->pa = pa;
+ moph->va = (vaddr_t)va;
+
+ return &moph->mobj;
+}
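The ops table above only pays off through the generic mobj accessors. A hedged usage sketch, illustration only and not part of the patch, of wrapping a physically contiguous secure buffer; it assumes the mobj_get_va(), mobj_get_pa() and mobj_free() dispatch helpers declared in mm/mobj.h:

static TEE_Result example_wrap_ta_ram(paddr_t pa, size_t size)
{
	struct mobj *mobj;
	paddr_t page_pa;
	void *va;

	mobj = mobj_phys_alloc(pa, size, TEE_MATTR_CACHE_CACHED,
			       CORE_MEM_TA_RAM);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	va = mobj_get_va(mobj, 0);	/* core VA backing the buffer */
	/* PA at offset 0, rounded down to its small page */
	if (!va || mobj_get_pa(mobj, 0, SMALL_PAGE_SIZE, &page_pa)) {
		mobj_free(mobj);
		return TEE_ERROR_GENERIC;
	}

	/* ... use va and page_pa ... */

	mobj_free(mobj);
	return TEE_SUCCESS;
}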
+
+/*
+ * mobj_virt implementation
+ */
+
+static void mobj_virt_assert_type(struct mobj *mobj);
+
+static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
+{
+ mobj_virt_assert_type(mobj);
+
+ return (void *)(vaddr_t)offset;
+}
+
+static const struct mobj_ops mobj_virt_ops __rodata_unpaged = {
+ .get_va = mobj_virt_get_va,
+};
+
+static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
+{
+ assert(mobj->ops == &mobj_virt_ops);
+}
+
+struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };
+
+/*
+ * mobj_mm implementation
+ */
+
+struct mobj_mm {
+ tee_mm_entry_t *mm;
+ struct mobj *parent_mobj;
+ struct mobj mobj;
+};
+
+static struct mobj_mm *to_mobj_mm(struct mobj *mobj);
+
+static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
+{
+ tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;
+
+ return (mm->offset << mm->pool->shift) + offs;
+}
+
+static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
+{
+ return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
+ mobj_mm_offs(mobj, offs));
+}
+
+
+static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
+ mobj_mm_offs(mobj, offs), granule, pa);
+}
+/* ifndef due to an asserting AArch64 linker */
+#ifndef ARM64
+KEEP_PAGER(mobj_mm_get_pa);
+#endif
+
+static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
+}
+
+static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
+}
+
+static void mobj_mm_free(struct mobj *mobj)
+{
+ struct mobj_mm *m = to_mobj_mm(mobj);
+
+ tee_mm_free(m->mm);
+ free(m);
+}
+
+static const struct mobj_ops mobj_mm_ops __rodata_unpaged = {
+ .get_va = mobj_mm_get_va,
+ .get_pa = mobj_mm_get_pa,
+ .get_cattr = mobj_mm_get_cattr,
+ .matches = mobj_mm_matches,
+ .free = mobj_mm_free,
+};
+
+static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_mm_ops);
+ return container_of(mobj, struct mobj_mm, mobj);
+}
+
+struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
+ tee_mm_pool_t *pool)
+{
+ struct mobj_mm *m = calloc(1, sizeof(*m));
+
+ if (!m)
+ return NULL;
+
+ m->mm = tee_mm_alloc(pool, size);
+ if (!m->mm) {
+ free(m);
+ return NULL;
+ }
+
+ m->parent_mobj = mobj_parent;
+ m->mobj.size = size;
+ m->mobj.ops = &mobj_mm_ops;
+
+ return &m->mobj;
+}
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * mobj_paged implementation
+ */
+
+static void mobj_paged_free(struct mobj *mobj);
+
+static const struct mobj_ops mobj_paged_ops __rodata_unpaged = {
+ .free = mobj_paged_free,
+};
+
+static void mobj_paged_free(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_paged_ops);
+ free(mobj);
+}
+
+struct mobj *mobj_paged_alloc(size_t size)
+{
+ struct mobj *mobj = calloc(1, sizeof(*mobj));
+
+ if (mobj) {
+ mobj->size = size;
+ mobj->ops = &mobj_paged_ops;
+ }
+ return mobj;
+}
+
+/*
+ * mobj_seccpy_shm implementation
+ */
+
+struct mobj_seccpy_shm {
+ struct user_ta_ctx *utc;
+ vaddr_t va;
+ size_t pgdir_offset;
+ struct mobj mobj;
+};
+
+static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);
+
+static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
+{
+ assert(mobj_is_seccpy_shm(mobj));
+ return container_of(mobj, struct mobj_seccpy_shm, mobj);
+}
+
+static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
+{
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+
+ if (&m->utc->ctx != thread_get_tsd()->ctx)
+ return NULL;
+
+ if (offs >= mobj->size)
+ return NULL;
+ return (void *)(m->va + offs);
+}
+
+static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
+ enum buf_is_attr attr)
+{
+ assert(mobj_is_seccpy_shm(mobj));
+
+ return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
+}
+
+static void mobj_seccpy_shm_free(struct mobj *mobj)
+{
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+
+ tee_pager_rem_uta_region(m->utc, m->va, mobj->size);
+ tee_mmu_rem_rwmem(m->utc, mobj, m->va);
+ free(m);
+}
+
+static void mobj_seccpy_shm_update_mapping(struct mobj *mobj,
+ struct user_ta_ctx *utc, vaddr_t va)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+ size_t s;
+
+ if (utc == m->utc && va == m->va)
+ return;
+
+ s = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
+ pgt_transfer(&tsd->pgt_cache, &m->utc->ctx, m->va, &utc->ctx, va, s);
+
+ m->va = va;
+ m->utc = utc;
+}
+
+static const struct mobj_ops mobj_seccpy_shm_ops __rodata_unpaged = {
+ .get_va = mobj_seccpy_shm_get_va,
+ .matches = mobj_seccpy_shm_matches,
+ .free = mobj_seccpy_shm_free,
+ .update_mapping = mobj_seccpy_shm_update_mapping,
+};
+
+static bool mobj_is_seccpy_shm(struct mobj *mobj)
+{
+ return mobj && mobj->ops == &mobj_seccpy_shm_ops;
+}
+
+struct mobj *mobj_seccpy_shm_alloc(size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct mobj_seccpy_shm *m;
+ struct user_ta_ctx *utc;
+ vaddr_t va = 0;
+
+ if (!is_user_ta_ctx(tsd->ctx))
+ return NULL;
+ utc = to_user_ta_ctx(tsd->ctx);
+
+ m = calloc(1, sizeof(*m));
+ if (!m)
+ return NULL;
+
+ m->mobj.size = size;
+ m->mobj.ops = &mobj_seccpy_shm_ops;
+
+ if (tee_mmu_add_rwmem(utc, &m->mobj, -1, &va) != TEE_SUCCESS)
+ goto bad;
+
+ if (!tee_pager_add_uta_area(utc, va, size))
+ goto bad;
+
+ m->va = va;
+ m->pgdir_offset = va & CORE_MMU_PGDIR_MASK;
+ m->utc = to_user_ta_ctx(tsd->ctx);
+ return &m->mobj;
+bad:
+ if (va)
+ tee_mmu_rem_rwmem(utc, &m->mobj, va);
+ free(m);
+ return NULL;
+}
+
+bool mobj_is_paged(struct mobj *mobj)
+{
+ return mobj->ops == &mobj_paged_ops ||
+ mobj->ops == &mobj_seccpy_shm_ops;
+}
+#endif /*CFG_PAGED_USER_TA*/
diff --git a/core/arch/arm/mm/pager_aes_gcm.c b/core/arch/arm/mm/pager_aes_gcm.c
new file mode 100644
index 0000000..e0ff286
--- /dev/null
+++ b/core/arch/arm/mm/pager_aes_gcm.c
@@ -0,0 +1,348 @@
+/*
+ * Galois/Counter Mode (GCM) and GMAC with AES
+ *
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2012, Jouni Malinen <j@w1.fi>
+ *
+ * This software may be distributed under the terms of the BSD license.
+ * See README for more details.
+ *
+ * The license part of what was the "README" above:
+ * License
+ * -------
+ *
+ * This software may be distributed, used, and modified under the terms of
+ * BSD license:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name(s) of the above-listed copyright holder(s) nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include "pager_private.h"
+#include <tomcrypt.h>
+#include <trace.h>
+#include <utee_defines.h>
+#include <util.h>
+
+/*
+ * Source copied from git://w1.fi/srv/git/hostap.git files
+ * src/utils/common.h and src/crypto/aes-gcm.c
+ *
+ * The source has been modified for the pager use case.
+ */
+
+#define BLOCK_ALIGNMENT sizeof(uint64_t)
+
+static uint32_t get_be32(const void *a)
+{
+ return TEE_U32_FROM_BIG_ENDIAN(*(const uint32_t *)a);
+}
+
+static void put_be32(void *a, uint32_t val)
+{
+ *(uint32_t *)a = TEE_U32_TO_BIG_ENDIAN(val);
+}
+
+static void put_be64(void *a, uint64_t val)
+{
+ *(uint64_t *)a = TEE_U64_TO_BIG_ENDIAN(val);
+}
+
+static void aes_encrypt(symmetric_key *skey, const uint8_t *plain,
+ uint8_t *crypt)
+{
+ aes_ecb_encrypt(plain, crypt, skey);
+}
+
+static void inc32(uint8_t *block)
+{
+ uint32_t val;
+
+ val = get_be32(block + TEE_AES_BLOCK_SIZE - 4);
+ val++;
+ put_be32(block + TEE_AES_BLOCK_SIZE - 4, val);
+}
+
+static void xor_block(void *dst, const void *src)
+{
+ uint64_t *d = dst;
+ const uint64_t *s = src;
+
+ *d++ ^= *s++;
+ *d++ ^= *s++;
+}
+
+static void shift_right_block(uint8_t *v)
+{
+ uint32_t next_val;
+ uint32_t val;
+
+ val = get_be32(v + 12);
+ next_val = get_be32(v + 8);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 12, val);
+
+ val = next_val;
+ next_val = get_be32(v + 4);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 8, val);
+
+ val = next_val;
+ next_val = get_be32(v);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 4, val);
+
+ val = next_val;
+ val >>= 1;
+ put_be32(v, val);
+}
+
+/* Multiplication in GF(2^128) */
+static void gf_mult(const uint8_t *x, const uint8_t *y, uint8_t *z)
+{
+ uint8_t v[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ unsigned i;
+ unsigned j;
+
+ memset(z, 0, TEE_AES_BLOCK_SIZE); /* Z_0 = 0^128 */
+ memcpy(v, y, TEE_AES_BLOCK_SIZE); /* V_0 = Y */
+
+ for (i = 0; i < TEE_AES_BLOCK_SIZE; i++) {
+ for (j = 0; j < 8; j++) {
+ if (x[i] & BIT(7 - j)) {
+ /* Z_(i + 1) = Z_i XOR V_i */
+ xor_block(z, v);
+ } else {
+ /* Z_(i + 1) = Z_i */
+ }
+
+ if (v[15] & 0x01) {
+ /* V_(i + 1) = (V_i >> 1) XOR R */
+ shift_right_block(v);
+ /* R = 11100001 || 0^120 */
+ v[0] ^= 0xe1;
+ } else {
+ /* V_(i + 1) = V_i >> 1 */
+ shift_right_block(v);
+ }
+ }
+ }
+}
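The constant 0xe1 in the reduction step is the field polynomial in disguise: GF(2^128) is defined modulo x^128 + x^7 + x^2 + x + 1, and with GCM's reflected bit ordering the low-order terms 1 + x + x^2 + x^7 form the pattern 11100001 in the first byte of the block (v[0] above), i.e. R = 0xe1 || 0^120 exactly as the comment says.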
+
+static void ghash_start(uint8_t *y)
+{
+ /* Y_0 = 0^128 */
+ memset(y, 0, TEE_AES_BLOCK_SIZE);
+}
+
+
+static void ghash(const uint8_t *h, const uint8_t *in, size_t len, uint8_t *out)
+{
+ size_t n;
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ /* We're only dealing with complete blocks */
+ assert(!(len % TEE_AES_BLOCK_SIZE));
+
+ for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
+ /* Y_i = (Y^(i-1) XOR X_i) dot H */
+ xor_block(out, in + n);
+
+ /* dot operation:
+ * multiplication operation for binary Galois (finite) field of
+ * 2^128 elements */
+ gf_mult(out, h, tmp);
+ memcpy(out, tmp, TEE_AES_BLOCK_SIZE);
+ }
+ /* Return Y_m */
+}
+
+static bool aes_gcm_init_hash_subkey(symmetric_key *skey, const uint8_t *key,
+ size_t key_len, uint8_t *H)
+{
+ if (aes_setup(key, key_len, 0, skey) != CRYPT_OK)
+ return false;
+
+ /* Generate hash subkey H = AES_K(0^128) */
+ memset(H, 0, TEE_AES_BLOCK_SIZE);
+ aes_encrypt(skey, H, H);
+ return true;
+}
+
+
+static void aes_gcm_prepare_j0(const struct pager_aes_gcm_iv *iv, uint8_t *J0)
+{
+ /* Prepare block J_0 = IV || 0^31 || 1 [len(IV) = 96] */
+ memcpy(J0, iv, sizeof(*iv));
+ memset(J0 + sizeof(*iv), 0, TEE_AES_BLOCK_SIZE - sizeof(*iv));
+ J0[TEE_AES_BLOCK_SIZE - 1] = 0x01;
+}
+
+static void aes_gcm_core(symmetric_key *skey, bool enc, const uint8_t *J0,
+ const uint8_t *H, const uint8_t *in, size_t len,
+ uint8_t *out, uint8_t *tmp, uint8_t *S)
+{
+ uint8_t J0inc[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ size_t n;
+
+ /* We're only dealing with complete blocks */
+ assert(len && !(len % TEE_AES_BLOCK_SIZE));
+
+ /*
+	 * In the loop below the encryption and hashing of each block are
+	 * interleaved, since the encrypted data is stored in less secure
+	 * memory.
+ */
+
+ /*
+ * u = 128 * ceil[len(C)/128] - len(C)
+ * v = 128 * ceil[len(A)/128] - len(A)
+ * S = GHASH_H(A || 0^v || C || 0^u || [len(A)]64 || [len(C)]64)
+ * (i.e., zero padded to block size A || C and lengths of each in bits)
+ */
+ ghash_start(S);
+
+
+ memcpy(J0inc, J0, TEE_AES_BLOCK_SIZE);
+ inc32(J0inc);
+
+ /* Full blocks */
+ for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
+ aes_encrypt(skey, J0inc, tmp);
+ xor_block(tmp, in + n);
+ memcpy(out + n, tmp, TEE_AES_BLOCK_SIZE);
+ inc32(J0inc);
+
+ /* Hash */
+ if (enc)
+ xor_block(S, tmp);
+ else
+ xor_block(S, in + n);
+ gf_mult(S, H, tmp);
+ memcpy(S, tmp, TEE_AES_BLOCK_SIZE);
+ }
+
+ put_be64(tmp, 0); /* no aad */
+ put_be64(tmp + 8, len * 8);
+ ghash(H, tmp, TEE_AES_BLOCK_SIZE, S);
+}
+
+/**
+ * aes_gcm_ae - GCM-AE_K(IV, P, A)
+ */
+static bool aes_gcm_ae(const uint8_t *key, size_t key_len,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t *plain, size_t plain_len,
+ uint8_t *crypt, uint8_t *tag)
+{
+ symmetric_key skey;
+ uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
+ return false;
+
+ aes_gcm_prepare_j0(iv, J0);
+
+ /* C = GCTR_K(inc_32(J_0), P) */
+ aes_gcm_core(&skey, true, J0, H, plain, plain_len, crypt, tmp, S);
+
+ /* T = MSB_t(GCTR_K(J_0, S)) */
+ aes_encrypt(&skey, J0, tag);
+ xor_block(tag, S);
+
+ /* Return (C, T) */
+
+ aes_done(&skey);
+
+ return true;
+}
+
+/**
+ * aes_gcm_ad - GCM-AD_K(IV, C, A, T)
+ */
+static bool aes_gcm_ad(const uint8_t *key, size_t key_len,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t *crypt, size_t crypt_len,
+ const uint8_t *tag, uint8_t *plain)
+{
+ symmetric_key skey;
+ uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
+ return false;
+
+ aes_gcm_prepare_j0(iv, J0);
+
+ /* P = GCTR_K(inc_32(J_0), C) */
+ aes_gcm_core(&skey, false, J0, H, crypt, crypt_len, plain, tmp, S);
+
+ /* T' = MSB_t(GCTR_K(J_0, S)) */
+ aes_encrypt(&skey, J0, tmp);
+ xor_block(tmp, S);
+
+ aes_done(&skey);
+
+ return !buf_compare_ct(tag, tmp, TEE_AES_BLOCK_SIZE);
+}
+
+static bool check_block_alignment(const void *p)
+{
+ return !((vaddr_t)p % BLOCK_ALIGNMENT);
+}
+
+bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen)
+{
+ if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
+ !check_block_alignment(src) || !check_block_alignment(dst))
+ return false;
+ return aes_gcm_ad(key, keylen, iv, src, datalen, tag, dst);
+}
+
+bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen)
+{
+ if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
+ !check_block_alignment(src) || !check_block_alignment(dst))
+ return false;
+ return aes_gcm_ae(key, keylen, iv, src, datalen, dst, tag);
+}
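A hedged usage sketch, illustration only and not part of the patch, of the exported pair: encrypt one page and verify it authenticates and decrypts back. A 256-bit key is assumed here; both calls reject data that isn't 8-byte aligned or isn't a non-zero multiple of the AES block size.

static bool example_page_roundtrip(const uint8_t key[32],
				   const struct pager_aes_gcm_iv *iv,
				   uint8_t *page)
{
	static uint8_t crypt[SMALL_PAGE_SIZE] __aligned(BLOCK_ALIGNMENT);
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];

	if (!pager_aes_gcm_encrypt(key, 32, iv, tag, page, crypt,
				   SMALL_PAGE_SIZE))
		return false;

	/* Returns false if the tag doesn't match the ciphertext */
	return pager_aes_gcm_decrypt(key, 32, iv, tag, crypt, page,
				     SMALL_PAGE_SIZE);
}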
diff --git a/core/arch/arm/mm/pager_private.h b/core/arch/arm/mm/pager_private.h
new file mode 100644
index 0000000..e7acf95
--- /dev/null
+++ b/core/arch/arm/mm/pager_private.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <types_ext.h>
+
+struct pager_aes_gcm_iv {
+ uint32_t iv[3];
+};
+
+#define PAGER_AES_GCM_TAG_LEN 16
+
+bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen);
+
+bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen);
+
diff --git a/core/arch/arm/mm/pgt_cache.c b/core/arch/arm/mm/pgt_cache.c
new file mode 100644
index 0000000..76c9e6e
--- /dev/null
+++ b/core/arch/arm/mm/pgt_cache.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/mutex.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_pager.h>
+#include <stdlib.h>
+#include <trace.h>
+#include <util.h>
+
+/*
+ * With pager enabled we allocate page tables from the pager.
+ *
+ * For LPAE each page table is a complete page which is allocated and freed
+ * using the interface provided by the pager.
+ *
+ * For compat v7 page tables there's room for four page tables in one page
+ * so we need to keep track of how much of an allocated page is used. When
+ * a page is completely unused it's returned to the pager.
+ *
+ * With pager disabled we have a static allocation of page tables instead.
+ *
+ * In all cases we limit the number of active page tables to
+ * PGT_CACHE_SIZE. This pool of page tables is shared between all
+ * threads. In case a thread can't allocate the needed number of page
+ * tables it will release all its current tables and wait for some more to
+ * be freed. A thread's allocated tables are freed each time a TA is
+ * unmapped so each thread should be able to allocate the needed tables in
+ * turn if needed.
+ */
+
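A hedged usage sketch, illustration only and not part of the patch, of the pattern the comment above describes, as seen from a thread about to run a TA; pgt_cache is the per-thread list kept in struct thread_specific_data:

static void example_borrow_tables(struct pgt_cache *pgt_cache,
				  struct tee_ta_ctx *ctx,
				  vaddr_t begin, vaddr_t last)
{
	/* Blocks until enough tables are free, possibly recycling cached ones */
	pgt_alloc(pgt_cache, ctx, begin, last);

	/* ... populate the tables and execute the TA ... */

	/*
	 * With CFG_PAGED_USER_TA, save_ctx == true keeps tables that still
	 * hold used entries on the cache list so a later run of the same
	 * ctx can reuse them.
	 */
	pgt_free(pgt_cache, true);
}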
+#if defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
+struct pgt_parent {
+ size_t num_used;
+ struct pgt_cache pgt_cache;
+};
+
+static struct pgt_parent pgt_parents[PGT_CACHE_SIZE / PGT_NUM_PGT_PER_PAGE];
+#else
+
+static struct pgt_cache pgt_free_list = SLIST_HEAD_INITIALIZER(pgt_free_list);
+#endif
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * When a user TA context is temporarily unmapped the used struct pgt's of
+ * the context (page tables holding valid physical pages) are saved in this
+ * cache in the hope that some of the valid physical pages may still be
+ * valid when the context is mapped again.
+ */
+static struct pgt_cache pgt_cache_list = SLIST_HEAD_INITIALIZER(pgt_cache_list);
+#endif
+
+static struct pgt pgt_entries[PGT_CACHE_SIZE];
+
+static struct mutex pgt_mu = MUTEX_INITIALIZER;
+static struct condvar pgt_cv = CONDVAR_INITIALIZER;
+
+#if defined(CFG_WITH_PAGER) && defined(CFG_WITH_LPAE)
+void pgt_init(void)
+{
+ size_t n;
+
+ for (n = 0; n < PGT_CACHE_SIZE; n++) {
+ struct pgt *p = pgt_entries + n;
+
+ p->tbl = tee_pager_alloc(PGT_SIZE, TEE_MATTR_LOCKED);
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+ }
+}
+#elif defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
+void pgt_init(void)
+{
+ size_t n;
+ size_t m;
+
+ COMPILE_TIME_ASSERT(PGT_CACHE_SIZE % PGT_NUM_PGT_PER_PAGE == 0);
+ COMPILE_TIME_ASSERT(PGT_SIZE * PGT_NUM_PGT_PER_PAGE == SMALL_PAGE_SIZE);
+
+ for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
+ uint8_t *tbl = tee_pager_alloc(SMALL_PAGE_SIZE,
+ TEE_MATTR_LOCKED);
+
+ SLIST_INIT(&pgt_parents[n].pgt_cache);
+ for (m = 0; m < PGT_NUM_PGT_PER_PAGE; m++) {
+ struct pgt *p = pgt_entries +
+ n * PGT_NUM_PGT_PER_PAGE + m;
+
+ p->tbl = tbl + m * PGT_SIZE;
+ p->parent = &pgt_parents[n];
+ SLIST_INSERT_HEAD(&pgt_parents[n].pgt_cache, p, link);
+ }
+ }
+}
+#else
+void pgt_init(void)
+{
+ /*
+ * We're putting this in .nozi.* instead of .bss because .nozi.* already
+ * has a large alignment, while .bss has a small alignment. The current
+	 * link script is optimized for small alignment in .bss.
+ */
+ static uint8_t pgt_tables[PGT_CACHE_SIZE][PGT_SIZE]
+ __aligned(PGT_SIZE) __section(".nozi.pgt_cache");
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(pgt_tables); n++) {
+ struct pgt *p = pgt_entries + n;
+
+ p->tbl = pgt_tables[n];
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+ }
+}
+#endif
+
+#if defined(CFG_WITH_LPAE) || !defined(CFG_WITH_PAGER)
+static struct pgt *pop_from_free_list(void)
+{
+ struct pgt *p = SLIST_FIRST(&pgt_free_list);
+
+ if (p) {
+ SLIST_REMOVE_HEAD(&pgt_free_list, link);
+ memset(p->tbl, 0, PGT_SIZE);
+ }
+ return p;
+}
+
+static void push_to_free_list(struct pgt *p)
+{
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+#if defined(CFG_WITH_PAGER)
+ tee_pager_release_phys(p->tbl, PGT_SIZE);
+#endif
+}
+#else
+static struct pgt *pop_from_free_list(void)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
+ struct pgt *p = SLIST_FIRST(&pgt_parents[n].pgt_cache);
+
+ if (p) {
+ SLIST_REMOVE_HEAD(&pgt_parents[n].pgt_cache, link);
+ pgt_parents[n].num_used++;
+ memset(p->tbl, 0, PGT_SIZE);
+ return p;
+ }
+ }
+ return NULL;
+}
+
+static void push_to_free_list(struct pgt *p)
+{
+ SLIST_INSERT_HEAD(&p->parent->pgt_cache, p, link);
+ assert(p->parent->num_used > 0);
+ p->parent->num_used--;
+ if (!p->parent->num_used) {
+ vaddr_t va = (vaddr_t)p->tbl & ~SMALL_PAGE_MASK;
+
+ tee_pager_release_phys((void *)va, SMALL_PAGE_SIZE);
+ }
+}
+#endif
+
+#ifdef CFG_PAGED_USER_TA
+static void push_to_cache_list(struct pgt *pgt)
+{
+ SLIST_INSERT_HEAD(&pgt_cache_list, pgt, link);
+}
+
+static bool match_pgt(struct pgt *pgt, vaddr_t vabase, void *ctx)
+{
+ return pgt->ctx == ctx && pgt->vabase == vabase;
+}
+
+static struct pgt *pop_from_cache_list(vaddr_t vabase, void *ctx)
+{
+ struct pgt *pgt;
+ struct pgt *p;
+
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ if (!pgt)
+ return NULL;
+ if (match_pgt(pgt, vabase, ctx)) {
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ return pgt;
+ }
+
+ while (true) {
+ p = SLIST_NEXT(pgt, link);
+ if (!p)
+ break;
+ if (match_pgt(p, vabase, ctx)) {
+ SLIST_REMOVE_AFTER(pgt, link);
+ break;
+ }
+ pgt = p;
+ }
+ return p;
+}
+
+static struct pgt *pop_least_used_from_cache_list(void)
+{
+ struct pgt *pgt;
+ struct pgt *p_prev = NULL;
+ size_t least_used;
+
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ if (!pgt)
+ return NULL;
+ if (!pgt->num_used_entries)
+ goto out;
+ least_used = pgt->num_used_entries;
+
+ while (true) {
+ if (!SLIST_NEXT(pgt, link))
+ break;
+ if (SLIST_NEXT(pgt, link)->num_used_entries <= least_used) {
+ p_prev = pgt;
+ least_used = SLIST_NEXT(pgt, link)->num_used_entries;
+ }
+ pgt = SLIST_NEXT(pgt, link);
+ }
+
+out:
+ if (p_prev) {
+ pgt = SLIST_NEXT(p_prev, link);
+ SLIST_REMOVE_AFTER(p_prev, link);
+ } else {
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ }
+ return pgt;
+}
+
+static void pgt_free_unlocked(struct pgt_cache *pgt_cache, bool save_ctx)
+{
+ while (!SLIST_EMPTY(pgt_cache)) {
+ struct pgt *p = SLIST_FIRST(pgt_cache);
+
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ if (save_ctx && p->num_used_entries) {
+ push_to_cache_list(p);
+ } else {
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+
+ push_to_free_list(p);
+ }
+ }
+}
+
+static struct pgt *pop_from_some_list(vaddr_t vabase, void *ctx)
+{
+ struct pgt *p = pop_from_cache_list(vabase, ctx);
+
+ if (p)
+ return p;
+ p = pop_from_free_list();
+ if (!p) {
+ p = pop_least_used_from_cache_list();
+ if (!p)
+ return NULL;
+ tee_pager_pgt_save_and_release_entries(p);
+ }
+ assert(!p->num_used_entries);
+ p->ctx = ctx;
+ p->vabase = vabase;
+ return p;
+}
+
+void pgt_flush_ctx(struct tee_ta_ctx *ctx)
+{
+ struct pgt *p;
+ struct pgt *pp = NULL;
+
+ mutex_lock(&pgt_mu);
+
+ while (true) {
+ p = SLIST_FIRST(&pgt_cache_list);
+ if (!p)
+ goto out;
+ if (p->ctx != ctx)
+ break;
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+ push_to_free_list(p);
+ }
+
+ pp = p;
+ while (true) {
+ p = SLIST_NEXT(pp, link);
+ if (!p)
+ break;
+ if (p->ctx == ctx) {
+ SLIST_REMOVE_AFTER(pp, link);
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+ push_to_free_list(p);
+ } else {
+ pp = p;
+ }
+ }
+
+out:
+ mutex_unlock(&pgt_mu);
+}
+
+static void flush_pgt_entry(struct pgt *p)
+{
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+}
+
+static bool pgt_entry_matches(struct pgt *p, void *ctx, vaddr_t begin,
+ vaddr_t last)
+{
+ if (!p)
+ return false;
+ if (p->ctx != ctx)
+ return false;
+ if (last <= begin)
+ return false;
+ if (!core_is_buffer_inside(p->vabase, SMALL_PAGE_SIZE, begin,
+ last - begin))
+ return false;
+
+ return true;
+}
+
+static void flush_ctx_range_from_list(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ struct pgt *p;
+ struct pgt *next_p;
+
+ /*
+ * Do the special case where the first element in the list is
+ * removed first.
+ */
+ p = SLIST_FIRST(pgt_cache);
+ while (pgt_entry_matches(p, ctx, begin, last)) {
+ flush_pgt_entry(p);
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ push_to_free_list(p);
+ p = SLIST_FIRST(pgt_cache);
+ }
+
+ /*
+	 * p either points to the first element in the list or it's NULL;
+	 * if NULL, the list is empty and we're done.
+ */
+ if (!p)
+ return;
+
+ /*
+ * Do the common case where the next element in the list is
+ * removed.
+ */
+ while (true) {
+ next_p = SLIST_NEXT(p, link);
+ if (!next_p)
+ break;
+ if (pgt_entry_matches(next_p, ctx, begin, last)) {
+ flush_pgt_entry(next_p);
+ SLIST_REMOVE_AFTER(p, link);
+ push_to_free_list(next_p);
+ continue;
+ }
+
+ p = SLIST_NEXT(p, link);
+ }
+}
+
+void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ mutex_lock(&pgt_mu);
+
+ flush_ctx_range_from_list(pgt_cache, ctx, begin, last);
+ flush_ctx_range_from_list(&pgt_cache_list, ctx, begin, last);
+
+ condvar_broadcast(&pgt_cv);
+ mutex_unlock(&pgt_mu);
+}
+
+static void transfer_tables(struct pgt_cache *pgt_cache, void *old_ctx,
+ vaddr_t old_va, void *new_ctx, vaddr_t new_va,
+ size_t size)
+{
+ const size_t pgtsize = CORE_MMU_PGDIR_SIZE;
+ const vaddr_t new_base = ROUNDDOWN(new_va, pgtsize);
+ const vaddr_t old_base = ROUNDDOWN(old_va, pgtsize);
+ const size_t num_new_pgt = (size - 1 + new_va - new_base) / pgtsize + 1;
+ const size_t num_old_pgt = (size - 1 + old_va - old_base) / pgtsize + 1;
+ struct pgt *new_pgt[num_new_pgt];
+ struct pgt *old_pgt[num_old_pgt];
+ struct pgt *pgt;
+ size_t n;
+
+ /*
+ * Fill in new_pgt based on pgt_cache. Note that the pages should
+ * already have been allocated.
+ */
+ SLIST_FOREACH(pgt, pgt_cache, link) {
+ if (pgt->vabase < new_base)
+ continue;
+ n = (pgt->vabase - new_base) / pgtsize;
+ if (n < num_new_pgt)
+ new_pgt[n] = pgt;
+ }
+ for (n = 0; n < num_new_pgt; n++) {
+ assert(new_pgt[n]);
+ assert(new_pgt[n]->ctx == new_ctx);
+ }
+
+ mutex_lock(&pgt_mu);
+
+ /* Extract the array of pgts that need their content transferred */
+ for (n = 0; n < num_old_pgt; n++) {
+ /*
+ * If the pgt isn't in the cache list there's nothing to
+ * transfer, so NULL here is OK.
+ */
+ old_pgt[n] = pop_from_cache_list(old_base + n * pgtsize,
+ old_ctx);
+ }
+
+ tee_pager_transfer_uta_region(to_user_ta_ctx(old_ctx), old_va,
+ to_user_ta_ctx(new_ctx), new_va, new_pgt,
+ size);
+
+ for (n = 0; n < num_old_pgt; n++) {
+ if (!old_pgt[n])
+ continue;
+
+ if (old_pgt[n]->num_used_entries)
+ push_to_cache_list(old_pgt[n]);
+ else
+ push_to_free_list(old_pgt[n]);
+ }
+
+ mutex_unlock(&pgt_mu);
+}
+
+void pgt_transfer(struct pgt_cache *pgt_cache, void *old_ctx, vaddr_t old_va,
+ void *new_ctx, vaddr_t new_va, size_t size)
+{
+ if (size)
+ transfer_tables(pgt_cache, old_ctx, old_va, new_ctx,
+ new_va, size);
+}
+
+#else /*!CFG_PAGED_USER_TA*/
+
+static void pgt_free_unlocked(struct pgt_cache *pgt_cache,
+ bool save_ctx __unused)
+{
+ while (!SLIST_EMPTY(pgt_cache)) {
+ struct pgt *p = SLIST_FIRST(pgt_cache);
+
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ push_to_free_list(p);
+ }
+}
+
+static struct pgt *pop_from_some_list(vaddr_t vabase __unused,
+ void *ctx __unused)
+{
+ return pop_from_free_list();
+}
+#endif /*!CFG_PAGED_USER_TA*/
+
+static bool pgt_alloc_unlocked(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ const vaddr_t base = ROUNDDOWN(begin, CORE_MMU_PGDIR_SIZE);
+ const size_t num_tbls = ((last - base) >> CORE_MMU_PGDIR_SHIFT) + 1;
+ size_t n = 0;
+ struct pgt *p;
+ struct pgt *pp = NULL;
+
+ while (n < num_tbls) {
+ p = pop_from_some_list(base + n * CORE_MMU_PGDIR_SIZE, ctx);
+ if (!p) {
+ pgt_free_unlocked(pgt_cache, ctx);
+ return false;
+ }
+
+ if (pp)
+ SLIST_INSERT_AFTER(pp, p, link);
+ else
+ SLIST_INSERT_HEAD(pgt_cache, p, link);
+ pp = p;
+ n++;
+ }
+
+ return true;
+}
+
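+/*
+ * Illustrative call (hypothetical addresses): pgt_alloc() links one
+ * translation table into @pgt_cache for each CORE_MMU_PGDIR_SIZE chunk
+ * touched by [begin, last] and blocks until enough tables are free.
+ * E.g. with a 1 MiB pgdir size,
+ *
+ *   pgt_alloc(&tsd->pgt_cache, &utc->ctx, 0x00100000, 0x002fffff);
+ *
+ * ends up reserving two tables.
+ */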
+void pgt_alloc(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ if (last <= begin)
+ return;
+
+ mutex_lock(&pgt_mu);
+
+ pgt_free_unlocked(pgt_cache, ctx);
+ while (!pgt_alloc_unlocked(pgt_cache, ctx, begin, last)) {
+ DMSG("Waiting for page tables");
+ condvar_broadcast(&pgt_cv);
+ condvar_wait(&pgt_cv, &pgt_mu);
+ }
+
+ mutex_unlock(&pgt_mu);
+}
+
+void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx)
+{
+ if (SLIST_EMPTY(pgt_cache))
+ return;
+
+ mutex_lock(&pgt_mu);
+
+ pgt_free_unlocked(pgt_cache, save_ctx);
+
+ condvar_broadcast(&pgt_cv);
+ mutex_unlock(&pgt_mu);
+}
diff --git a/core/arch/arm/mm/sub.mk b/core/arch/arm/mm/sub.mk
new file mode 100644
index 0000000..71f70f3
--- /dev/null
+++ b/core/arch/arm/mm/sub.mk
@@ -0,0 +1,12 @@
+srcs-y += core_mmu.c
+srcs-$(CFG_WITH_PAGER) += tee_pager.c
+srcs-$(CFG_WITH_PAGER) += pager_aes_gcm.c
+srcs-y += tee_mmu.c
+ifeq ($(CFG_WITH_LPAE),y)
+srcs-y += core_mmu_lpae.c
+else
+srcs-y += core_mmu_v7.c
+endif
+srcs-y += tee_mm.c
+srcs-$(CFG_SMALL_PAGE_USER_TA) += pgt_cache.c
+srcs-y += mobj.c
diff --git a/core/arch/arm/mm/tee_mm.c b/core/arch/arm/mm/tee_mm.c
new file mode 100644
index 0000000..22a39df
--- /dev/null
+++ b/core/arch/arm/mm/tee_mm.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/panic.h>
+#include <kernel/tee_common.h>
+#include <util.h>
+#include <trace.h>
+
+#include <mm/tee_mm.h>
+#include <mm/tee_pager.h>
+
+bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_t hi, uint8_t shift,
+ uint32_t flags)
+{
+ if (pool == NULL)
+ return false;
+
+ lo = ROUNDUP(lo, 1 << shift);
+ hi = ROUNDDOWN(hi, 1 << shift);
+
+ assert(((uint64_t)(hi - lo) >> shift) < (uint64_t)UINT32_MAX);
+
+ pool->lo = lo;
+ pool->hi = hi;
+ pool->shift = shift;
+ pool->flags = flags;
+ pool->entry = calloc(1, sizeof(tee_mm_entry_t));
+
+ if (pool->entry == NULL)
+ return false;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ pool->entry->offset = ((hi - lo - 1) >> shift) + 1;
+ pool->entry->pool = pool;
+
+ return true;
+}
+
+void tee_mm_final(tee_mm_pool_t *pool)
+{
+ if (pool == NULL || pool->entry == NULL)
+ return;
+
+ while (pool->entry->next != NULL)
+ tee_mm_free(pool->entry->next);
+ free(pool->entry);
+ pool->entry = NULL;
+}
+
+static tee_mm_entry_t *tee_mm_add(tee_mm_entry_t *p)
+{
+ /* add to list */
+ if (p->next == NULL) {
+ p->next = malloc(sizeof(tee_mm_entry_t));
+ if (p->next == NULL)
+ return NULL;
+ p->next->next = NULL;
+ } else {
+ tee_mm_entry_t *nn = malloc(sizeof(tee_mm_entry_t));
+
+ if (nn == NULL)
+ return NULL;
+ nn->next = p->next;
+ p->next = nn;
+ }
+ return p->next;
+}
+
+#ifdef CFG_WITH_STATS
+static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
+{
+ tee_mm_entry_t *entry;
+ uint32_t sz = 0;
+
+ if (!pool)
+ return 0;
+
+ entry = pool->entry;
+ while (entry) {
+ sz += entry->size;
+ entry = entry->next;
+ }
+
+ return sz << pool->shift;
+}
+
+void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct malloc_stats *stats,
+ bool reset)
+{
+ memset(stats, 0, sizeof(*stats));
+
+ stats->size = pool->hi - pool->lo;
+ stats->max_allocated = pool->max_allocated;
+ stats->allocated = tee_mm_stats_allocated(pool);
+
+ if (reset)
+ pool->max_allocated = 0;
+}
+
+static void update_max_allocated(tee_mm_pool_t *pool)
+{
+ size_t sz = tee_mm_stats_allocated(pool);
+
+ if (sz > pool->max_allocated)
+ pool->max_allocated = sz;
+}
+#else /* CFG_WITH_STATS */
+static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
+{
+}
+#endif /* CFG_WITH_STATS */
+
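+/*
+ * Illustrative pool usage (hypothetical addresses):
+ *
+ *   tee_mm_pool_t pool;
+ *   tee_mm_entry_t *mm;
+ *
+ *   tee_mm_init(&pool, 0x80000000, 0x81000000, 12, TEE_MM_POOL_NO_FLAGS);
+ *   mm = tee_mm_alloc(&pool, 0x2000);
+ *   ...
+ *   tee_mm_free(mm);
+ *
+ * With shift 12 the pool is managed in 4 KiB blocks, so the allocation
+ * above reserves two blocks. Offsets and sizes in tee_mm_entry_t are
+ * counted in such (1 << shift) sized blocks relative to pool->lo.
+ */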
+tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
+{
+ size_t psize;
+ tee_mm_entry_t *entry;
+ tee_mm_entry_t *nn;
+ size_t remaining;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ entry = pool->entry;
+ if (size == 0)
+ psize = 0;
+ else
+ psize = ((size - 1) >> pool->shift) + 1;
+ /* Protect with mutex (multi thread) */
+
+ /* find free slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL && psize >
+ (entry->offset - entry->next->offset -
+ entry->next->size))
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && psize >
+ (entry->next->offset - entry->size - entry->offset))
+ entry = entry->next;
+ }
+
+ /* check if we have enough memory */
+ if (entry->next == NULL) {
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ /*
+ * entry->offset is a "block count" offset from
+ * pool->lo. The byte offset is
+ * (entry->offset << pool->shift).
+ * In the HI_ALLOC allocation scheme the memory is
+			 * allocated from the end of the segment, so to
+			 * check that there is sufficient memory, verify that
+			 * (entry->offset << pool->shift) >= size.
+ */
+ if ((entry->offset << pool->shift) < size)
+ /* out of memory */
+ return NULL;
+ } else {
+ if (pool->hi <= pool->lo)
+ panic("invalid pool");
+
+ remaining = (pool->hi - pool->lo);
+ remaining -= ((entry->offset + entry->size) <<
+ pool->shift);
+
+ if (remaining < size)
+ /* out of memory */
+ return NULL;
+ }
+ }
+
+ nn = tee_mm_add(entry);
+ if (nn == NULL)
+ return NULL;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ nn->offset = entry->offset - psize;
+ else
+ nn->offset = entry->offset + entry->size;
+ nn->size = psize;
+ nn->pool = pool;
+
+ update_max_allocated(pool);
+
+ /* Protect with mutex end (multi thread) */
+
+ return nn;
+}
+
+static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
+ paddr_t offslo, paddr_t offshi)
+{
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ if (offshi > e->offset ||
+ (e->next != NULL &&
+ (offslo < e->next->offset + e->next->size)) ||
+ (offshi << pool->shift) - 1 > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ } else {
+ if (offslo < (e->offset + e->size) ||
+ (e->next != NULL && (offshi > e->next->offset)) ||
+ (offshi << pool->shift) > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ }
+
+ return true;
+}
+
+tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
+{
+ tee_mm_entry_t *entry;
+ paddr_t offslo;
+ paddr_t offshi;
+ tee_mm_entry_t *mm;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ /* Wrapping and sanity check */
+ if ((base + size) < base || base < pool->lo)
+ return NULL;
+
+ entry = pool->entry;
+ offslo = (base - pool->lo) >> pool->shift;
+ offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;
+
+ /* find slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL &&
+ offshi < entry->next->offset + entry->next->size)
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && offslo > entry->next->offset)
+ entry = entry->next;
+ }
+
+ /* Check that memory is available */
+ if (!fit_in_gap(pool, entry, offslo, offshi))
+ return NULL;
+
+ mm = tee_mm_add(entry);
+ if (mm == NULL)
+ return NULL;
+
+ mm->offset = offslo;
+ mm->size = offshi - offslo;
+ mm->pool = pool;
+
+ update_max_allocated(pool);
+
+ return mm;
+}
+
+void tee_mm_free(tee_mm_entry_t *p)
+{
+ tee_mm_entry_t *entry;
+
+ if (!p || !p->pool)
+ return;
+
+ entry = p->pool->entry;
+
+ /* Protect with mutex (multi thread) */
+
+ /* remove entry from list */
+ while (entry->next != NULL && entry->next != p)
+ entry = entry->next;
+
+ if (!entry->next)
+ panic("invalid mm_entry");
+
+ entry->next = entry->next->next;
+
+ free(p);
+
+ /* Protect with mutex end (multi thread) */
+}
+
+size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
+{
+ if (!mm || !mm->pool)
+ return 0;
+ else
+ return mm->size << mm->pool->shift;
+}
+
+bool tee_mm_addr_is_within_range(tee_mm_pool_t *pool, paddr_t addr)
+{
+ return (pool && ((addr >= pool->lo) && (addr <= pool->hi)));
+}
+
+bool tee_mm_is_empty(tee_mm_pool_t *pool)
+{
+ return pool == NULL || pool->entry == NULL || pool->entry->next == NULL;
+}
+
+/* Physical Secure DDR pool */
+tee_mm_pool_t tee_mm_sec_ddr __early_bss;
+
+/* Virtual eSRAM pool */
+tee_mm_pool_t tee_mm_vcore __early_bss;
+
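+/*
+ * Looks up the entry covering a physical address, returning NULL if the
+ * address isn't covered by any allocation. The pager, for instance,
+ * releases the backing store of an area with
+ *
+ *   tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(area->store)));
+ *
+ * (see free_area() in tee_pager.c).
+ */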
+tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
+{
+ tee_mm_entry_t *entry = pool->entry;
+ uint16_t offset = (addr - pool->lo) >> pool->shift;
+
+ if (addr > pool->hi || addr < pool->lo)
+ return NULL;
+
+ while (entry->next != NULL) {
+ entry = entry->next;
+
+ if ((offset >= entry->offset) &&
+ (offset < (entry->offset + entry->size))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
+{
+ return (mm->offset << mm->pool->shift) + mm->pool->lo;
+}
diff --git a/core/arch/arm/mm/tee_mmu.c b/core/arch/arm/mm/tee_mmu.c
new file mode 100644
index 0000000..f5c6dde
--- /dev/null
+++ b/core/arch/arm/mm/tee_mmu.c
@@ -0,0 +1,896 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/panic.h>
+#include <kernel/tee_common.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tz_ssvce.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_mmu_types.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mm.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/tee_pager.h>
+#include <sm/optee_smc.h>
+#include <stdlib.h>
+#include <tee_api_defines_extensions.h>
+#include <tee_api_types.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <user_ta_header.h>
+#include <util.h>
+
+#ifdef CFG_PL310
+#include <kernel/tee_l2cc_mutex.h>
+#endif
+
+#define TEE_MMU_UDATA_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URW | \
+ TEE_MATTR_SECURE)
+#define TEE_MMU_UCODE_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URWX | \
+ TEE_MATTR_SECURE)
+
+#define TEE_MMU_UCACHE_DEFAULT_ATTR (TEE_MATTR_CACHE_CACHED << \
+ TEE_MATTR_CACHE_SHIFT)
+
+/* Support for 31 concurrent sessions */
+static uint32_t g_asid = 0xffffffff;
+
+static TEE_Result tee_mmu_umap_add_param(struct tee_mmu_info *mmu,
+ struct param_mem *mem)
+{
+ TEE_Result res;
+ struct tee_ta_region *last_entry = NULL;
+ size_t n;
+ uint32_t attr = TEE_MMU_UDATA_ATTR;
+ size_t nsz;
+ size_t noffs;
+
+ if (!mobj_is_paged(mem->mobj)) {
+ uint32_t cattr;
+
+ res = mobj_get_cattr(mem->mobj, &cattr);
+ if (res != TEE_SUCCESS)
+ return res;
+ attr |= cattr << TEE_MATTR_CACHE_SHIFT;
+ }
+
+ if (!mobj_is_secure(mem->mobj))
+ attr &= ~TEE_MATTR_SECURE;
+
+ /* Check that we can map memory using this attribute */
+ if (!core_mmu_mattr_is_ok(attr))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ /* Find empty entry */
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++)
+ if (!mmu->regions[n].size)
+ break;
+
+ if (n == TEE_MMU_UMAP_MAX_ENTRIES) {
+ /* No entries left "can't happen" */
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ mmu->regions[n].mobj = mem->mobj;
+ mmu->regions[n].offset = ROUNDDOWN(mem->offs, CORE_MMU_USER_PARAM_SIZE);
+ mmu->regions[n].size = ROUNDUP(mem->offs - mmu->regions[n].offset +
+ mem->size,
+ CORE_MMU_USER_PARAM_SIZE);
+ mmu->regions[n].attr = attr;
+
+ /* Try to coalesce some entries */
+ while (true) {
+ /* Find last param */
+ n = TEE_MMU_UMAP_MAX_ENTRIES - 1;
+
+ while (!mmu->regions[n].size) {
+ n--;
+ if (n < TEE_MMU_UMAP_PARAM_IDX) {
+ /* No param entries found, "can't happen" */
+ return TEE_ERROR_BAD_STATE;
+ }
+ }
+
+ if (last_entry == mmu->regions + n)
+			return TEE_SUCCESS; /* Can't coalesce more */
+ last_entry = mmu->regions + n;
+
+ n--;
+ while (n >= TEE_MMU_UMAP_PARAM_IDX) {
+ struct tee_ta_region *entry = mmu->regions + n;
+
+ n--;
+ if (last_entry->mobj != entry->mobj)
+ continue;
+
+ if ((last_entry->offset + last_entry->size) ==
+ entry->offset ||
+ (entry->offset + entry->size) ==
+ last_entry->offset ||
+ core_is_buffer_intersect(last_entry->offset,
+ last_entry->size,
+ entry->offset,
+ entry->size)) {
+ noffs = MIN(last_entry->offset, entry->offset);
+ nsz = MAX(last_entry->offset + last_entry->size,
+ entry->offset + entry->size) - noffs;
+ entry->offset = noffs;
+ entry->size = nsz;
+ last_entry->mobj = NULL;
+ last_entry->size = 0;
+ last_entry->attr = 0;
+ break;
+ }
+ }
+ }
+}
+
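+/*
+ * Sketch of the resulting user VA layout, from low to high addresses:
+ *
+ *   [stack][code/data]       TA private memory, assigned earlier
+ *   [secure params]...       granule aligned, one granule of spacing
+ *   [nonsecure params]...    starts at a fresh page directory
+ *
+ * Everything must fit inside the range reported by
+ * core_mmu_get_user_va_range(), otherwise TEE_ERROR_EXCESS_DATA is
+ * returned.
+ */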
+static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
+{
+ const size_t granule = CORE_MMU_USER_PARAM_SIZE;
+ vaddr_t va_range_base;
+ vaddr_t va;
+ size_t va_range_size;
+ size_t n;
+
+ /* Find last table entry used to map code and data */
+ n = TEE_MMU_UMAP_PARAM_IDX - 1;
+ while (n && !mmu->regions[n].size)
+ n--;
+ va = mmu->regions[n].va + mmu->regions[n].size;
+ assert(va);
+
+ core_mmu_get_user_va_range(&va_range_base, &va_range_size);
+ assert(va_range_base == mmu->ta_private_vmem_start);
+
+ /*
+ * Assign parameters in secure memory.
+ */
+ va = ROUNDUP(va, granule);
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ if (!mmu->regions[n].size ||
+ !(mmu->regions[n].attr & TEE_MATTR_SECURE))
+ continue;
+ mmu->regions[n].va = va;
+ va += mmu->regions[n].size;
+ /* Put some empty space between each area */
+ va += granule;
+ if ((va - va_range_base) >= va_range_size)
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ /*
+ * Assign parameters in nonsecure shared memory.
+ * Note that we're making sure that they will reside in a new page
+ * directory as they are to be mapped nonsecure.
+ */
+ va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ if (!mmu->regions[n].size ||
+ (mmu->regions[n].attr & TEE_MATTR_SECURE))
+ continue;
+ mmu->regions[n].va = va;
+ va += mmu->regions[n].size;
+ /* Put some empty space between each area */
+ va += granule;
+ if ((va - va_range_base) >= va_range_size)
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ return TEE_SUCCESS;
+}
+
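+/*
+ * g_asid is a bitfield of free ASIDs (bit set means free). The loop
+ * below claims the lowest free bit and records its 1-based position in
+ * utc->context; tee_mmu_final() sets the bit again to release the ASID.
+ */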
+TEE_Result tee_mmu_init(struct user_ta_ctx *utc)
+{
+ uint32_t asid = 1;
+
+ if (!utc->context) {
+ utc->context = 1;
+
+ /* Find available ASID */
+ while (!(asid & g_asid) && (asid != 0)) {
+ utc->context++;
+ asid = asid << 1;
+ }
+
+ if (asid == 0) {
+ DMSG("Failed to allocate ASID");
+ return TEE_ERROR_GENERIC;
+ }
+ g_asid &= ~asid;
+ }
+
+ utc->mmu = calloc(1, sizeof(struct tee_mmu_info));
+ if (!utc->mmu)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ core_mmu_get_user_va_range(&utc->mmu->ta_private_vmem_start, NULL);
+ return TEE_SUCCESS;
+}
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+static TEE_Result alloc_pgt(struct user_ta_ctx *utc __maybe_unused,
+ vaddr_t base, vaddr_t end)
+{
+ struct thread_specific_data *tsd __maybe_unused;
+ vaddr_t b = ROUNDDOWN(base, CORE_MMU_PGDIR_SIZE);
+ vaddr_t e = ROUNDUP(end, CORE_MMU_PGDIR_SIZE);
+ size_t ntbl = (e - b) >> CORE_MMU_PGDIR_SHIFT;
+
+ if (!pgt_check_avail(ntbl)) {
+ EMSG("%zu page tables not available", ntbl);
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+
+#ifdef CFG_PAGED_USER_TA
+ tsd = thread_get_tsd();
+ if (&utc->ctx == tsd->ctx) {
+ /*
+ * The supplied utc is the current active utc, allocate the
+ * page tables too as the pager needs to use them soon.
+ */
+ pgt_alloc(&tsd->pgt_cache, &utc->ctx, b, e - 1);
+ }
+#endif
+
+ return TEE_SUCCESS;
+}
+
+static void free_pgt(struct user_ta_ctx *utc, vaddr_t base, size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct pgt_cache *pgt_cache = NULL;
+
+ if (&utc->ctx == tsd->ctx)
+ pgt_cache = &tsd->pgt_cache;
+
+ pgt_flush_ctx_range(pgt_cache, &utc->ctx, base, base + size);
+}
+
+#else
+static TEE_Result alloc_pgt(struct user_ta_ctx *utc __unused,
+ vaddr_t base __unused, vaddr_t end __unused)
+{
+ return TEE_SUCCESS;
+}
+
+static void free_pgt(struct user_ta_ctx *utc __unused, vaddr_t base __unused,
+ size_t size __unused)
+{
+}
+#endif
+
+void tee_mmu_map_stack(struct user_ta_ctx *utc, struct mobj *mobj)
+{
+ const size_t granule = CORE_MMU_USER_CODE_SIZE;
+ struct tee_ta_region *region = utc->mmu->regions +
+ TEE_MMU_UMAP_STACK_IDX;
+
+ region->mobj = mobj;
+ region->offset = 0;
+ region->va = utc->mmu->ta_private_vmem_start;
+ region->size = ROUNDUP(utc->mobj_stack->size, granule);
+ region->attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ TEE_MATTR_URW | TEE_MATTR_PRW |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
+}
+
+TEE_Result tee_mmu_map_add_segment(struct user_ta_ctx *utc, struct mobj *mobj,
+ size_t offs, size_t size, uint32_t prot)
+{
+ const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
+ const size_t granule = CORE_MMU_USER_CODE_SIZE;
+ struct tee_ta_region *tbl = utc->mmu->regions;
+ vaddr_t va;
+ vaddr_t end_va;
+ size_t n = TEE_MMU_UMAP_CODE_IDX;
+ size_t o;
+
+ if (!tbl[n].size) {
+ /* We're continuing the va space from previous entry. */
+ assert(tbl[n - 1].size);
+
+ /* This is the first segment */
+ va = tbl[n - 1].va + tbl[n - 1].size;
+ end_va = ROUNDUP((offs & (granule - 1)) + size, granule) + va;
+ o = ROUNDDOWN(offs, granule);
+ goto set_entry;
+ }
+
+ /*
+ * mobj of code segments must not change once the first is
+ * assigned.
+ */
+ if (mobj != tbl[n].mobj)
+ return TEE_ERROR_SECURITY;
+
+ /*
+ * Let's find an entry we overlap with or if we need to add a new
+ * entry.
+ */
+ o = offs - tbl[n].offset;
+ va = ROUNDDOWN(o, granule) + tbl[n].va;
+ end_va = ROUNDUP(o + size, granule) + tbl[n].va;
+ o = ROUNDDOWN(offs, granule);
+ while (true) {
+ if (va >= (tbl[n].va + tbl[n].size)) {
+ n++;
+ if (n >= TEE_MMU_UMAP_PARAM_IDX)
+ return TEE_ERROR_SECURITY;
+ if (!tbl[n].size)
+ goto set_entry;
+ continue;
+ }
+
+ /*
+ * There's at least partial overlap with this entry
+ *
+ * Since we're overlapping there should be at least one
+ * free entry after this.
+ */
+ if (((n + 1) >= TEE_MMU_UMAP_PARAM_IDX) || tbl[n + 1].size)
+ return TEE_ERROR_SECURITY;
+
+ /* offset must match or the segments aren't added in order */
+ if (o != (va - tbl[n].va + tbl[n].offset))
+ return TEE_ERROR_SECURITY;
+ /* We should only overlap in the last granule of the entry. */
+ if ((va + granule) < (tbl[n].va + tbl[n].size))
+ return TEE_ERROR_SECURITY;
+
+ /* Merge protection attribute for this entry */
+ tbl[n].attr |= prot;
+
+ va += granule;
+ /* If the segment was completely overlapped, we're done. */
+ if (va == end_va)
+ return TEE_SUCCESS;
+ o += granule;
+ n++;
+ goto set_entry;
+ }
+
+set_entry:
+ tbl[n].mobj = mobj;
+ tbl[n].va = va;
+ tbl[n].offset = o;
+ tbl[n].size = end_va - va;
+ tbl[n].attr = prot | attr;
+
+ utc->mmu->ta_private_vmem_end = tbl[n].va + tbl[n].size;
+ /*
+ * Check that we have enough translation tables available to map
+ * this TA.
+ */
+ return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end);
+}
+
+void tee_mmu_map_clear(struct user_ta_ctx *utc)
+{
+ utc->mmu->ta_private_vmem_end = 0;
+ memset(utc->mmu->regions, 0, sizeof(utc->mmu->regions));
+}
+
+static void clear_param_map(struct user_ta_ctx *utc)
+{
+ const size_t n = TEE_MMU_UMAP_PARAM_IDX;
+ const size_t array_size = ARRAY_SIZE(utc->mmu->regions);
+
+ memset(utc->mmu->regions + n, 0,
+ (array_size - n) * sizeof(utc->mmu->regions[0]));
+}
+
+static TEE_Result param_mem_to_user_va(struct user_ta_ctx *utc,
+ struct param_mem *mem, void **user_va)
+{
+ size_t n;
+
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ struct tee_ta_region *region = utc->mmu->regions + n;
+ vaddr_t va;
+
+ if (mem->mobj != region->mobj)
+ continue;
+ if (mem->offs < region->offset)
+ continue;
+ if (mem->offs >= (region->offset + region->size))
+ continue;
+ va = region->va + mem->offs - region->offset;
+ *user_va = (void *)va;
+ return TEE_SUCCESS;
+ }
+ return TEE_ERROR_GENERIC;
+}
+
+TEE_Result tee_mmu_map_param(struct user_ta_ctx *utc,
+ struct tee_ta_param *param, void *param_va[TEE_NUM_PARAMS])
+{
+ TEE_Result res = TEE_SUCCESS;
+ size_t n;
+
+ /* Clear all the param entries as they can hold old information */
+ clear_param_map(utc);
+
+ /* Map secure memory params first then nonsecure memory params */
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (!mem->size)
+ continue;
+ if (mobj_is_nonsec(mem->mobj))
+ continue;
+
+ res = tee_mmu_umap_add_param(utc->mmu, mem);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (!mem->size)
+ continue;
+ if (!mobj_is_nonsec(mem->mobj))
+ continue;
+
+ res = tee_mmu_umap_add_param(utc->mmu, mem);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ res = tee_mmu_umap_set_vas(utc->mmu);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (mem->size == 0)
+ continue;
+
+ res = param_mem_to_user_va(utc, mem, param_va + n);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ utc->mmu->ta_private_vmem_start = utc->mmu->regions[0].va;
+
+ n = ARRAY_SIZE(utc->mmu->regions);
+ do {
+ n--;
+ } while (n && !utc->mmu->regions[n].size);
+
+ return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
+ utc->mmu->regions[n].va + utc->mmu->regions[n].size);
+}
+
+TEE_Result tee_mmu_add_rwmem(struct user_ta_ctx *utc, struct mobj *mobj,
+ int pgdir_offset, vaddr_t *va)
+{
+ struct tee_ta_region *reg = NULL;
+ struct tee_ta_region *last_reg;
+ vaddr_t v;
+ vaddr_t end_v;
+ size_t n;
+
+ assert(pgdir_offset < CORE_MMU_PGDIR_SIZE);
+
+ /*
+ * Avoid the corner case when no regions are assigned, currently
+ * stack and code areas are always assigned before we end up here.
+ */
+ if (!utc->mmu->regions[0].size)
+ return TEE_ERROR_GENERIC;
+
+ for (n = 1; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!reg && utc->mmu->regions[n].size)
+ continue;
+ last_reg = utc->mmu->regions + n;
+
+ if (!reg) {
+ reg = last_reg;
+ v = ROUNDUP((reg - 1)->va + (reg - 1)->size,
+ SMALL_PAGE_SIZE);
+#ifndef CFG_WITH_LPAE
+ /*
+ * Non-LPAE mappings can't mix secure and
+ * non-secure in a single pgdir.
+ */
+ if (mobj_is_secure((reg - 1)->mobj) !=
+ mobj_is_secure(mobj))
+ v = ROUNDUP(v, CORE_MMU_PGDIR_SIZE);
+#endif
+
+ /*
+ * If mobj needs to span several page directories
+			 * the offset into the first pgdir needs to match
+ * the supplied offset or some area used by the
+ * pager may not fit into a single pgdir.
+ */
+ if (pgdir_offset >= 0 &&
+ mobj->size > CORE_MMU_PGDIR_SIZE) {
+ if ((v & CORE_MMU_PGDIR_MASK) <
+ (size_t)pgdir_offset)
+ v = ROUNDDOWN(v, CORE_MMU_PGDIR_SIZE);
+ else
+ v = ROUNDUP(v, CORE_MMU_PGDIR_SIZE);
+ v += pgdir_offset;
+ }
+ end_v = ROUNDUP(v + mobj->size, SMALL_PAGE_SIZE);
+ continue;
+ }
+
+ if (!last_reg->size)
+ continue;
+ /*
+ * There's one registered region after our selected spot,
+ * check if we can still fit or if we need a later spot.
+ */
+ if (end_v > last_reg->va) {
+ reg = NULL;
+ continue;
+ }
+#ifndef CFG_WITH_LPAE
+ if (mobj_is_secure(mobj) != mobj_is_secure(last_reg->mobj) &&
+ end_v > ROUNDDOWN(last_reg->va, CORE_MMU_PGDIR_SIZE))
+ reg = NULL;
+#endif
+ }
+
+ if (reg) {
+ TEE_Result res;
+
+ end_v = MAX(end_v, last_reg->va + last_reg->size);
+ res = alloc_pgt(utc, utc->mmu->ta_private_vmem_start, end_v);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ *va = v;
+ reg->va = v;
+ reg->mobj = mobj;
+ reg->offset = 0;
+ reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
+ if (mobj_is_secure(mobj))
+ reg->attr = TEE_MATTR_SECURE;
+ else
+ reg->attr = 0;
+ return TEE_SUCCESS;
+ }
+
+ return TEE_ERROR_OUT_OF_MEMORY;
+}
+
+void tee_mmu_rem_rwmem(struct user_ta_ctx *utc, struct mobj *mobj, vaddr_t va)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ struct tee_ta_region *reg = utc->mmu->regions + n;
+
+ if (reg->mobj == mobj && reg->va == va) {
+ free_pgt(utc, reg->va, reg->size);
+ memset(reg, 0, sizeof(*reg));
+ return;
+ }
+ }
+}
+
+/*
+ * tee_mmu_final - finalise and free ctx mmu
+ */
+void tee_mmu_final(struct user_ta_ctx *utc)
+{
+ uint32_t asid = 1 << ((utc->context - 1) & 0xff);
+
+ /* return ASID */
+ g_asid |= asid;
+
+ /* clear MMU entries to avoid clash when asid is reused */
+ secure_mmu_unifiedtlbinv_byasid(utc->context & 0xff);
+ utc->context = 0;
+
+ free(utc->mmu);
+ utc->mmu = NULL;
+}
+
+/* return true only if buffer fits inside TA private memory */
+bool tee_mmu_is_vbuf_inside_ta_private(const struct user_ta_ctx *utc,
+ const void *va, size_t size)
+{
+ return core_is_buffer_inside(va, size,
+ utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+}
+
+/* return true only if buffer intersects TA private memory */
+bool tee_mmu_is_vbuf_intersect_ta_private(const struct user_ta_ctx *utc,
+ const void *va, size_t size)
+{
+ return core_is_buffer_intersect(va, size,
+ utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+}
+
+TEE_Result tee_mmu_vbuf_to_mobj_offs(const struct user_ta_ctx *utc,
+ const void *va, size_t size,
+ struct mobj **mobj, size_t *offs)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].mobj)
+ continue;
+ if (core_is_buffer_inside(va, size, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size)) {
+ *mobj = utc->mmu->regions[n].mobj;
+ *offs = (vaddr_t)va - utc->mmu->regions[n].va +
+ utc->mmu->regions[n].offset;
+ return TEE_SUCCESS;
+ }
+ }
+
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+static TEE_Result tee_mmu_user_va2pa_attr(const struct user_ta_ctx *utc,
+ void *ua, paddr_t *pa, uint32_t *attr)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (core_is_buffer_inside(ua, 1, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size)) {
+ if (pa) {
+ TEE_Result res;
+ paddr_t p;
+
+ res = mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset,
+ 0, &p);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ *pa = (paddr_t)ua - utc->mmu->regions[n].va + p;
+ }
+ if (attr)
+ *attr = utc->mmu->regions[n].attr;
+ return TEE_SUCCESS;
+ }
+ }
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
+TEE_Result tee_mmu_user_va2pa_helper(const struct user_ta_ctx *utc, void *ua,
+ paddr_t *pa)
+{
+ return tee_mmu_user_va2pa_attr(utc, ua, pa, NULL);
+}
+
+/* */
+TEE_Result tee_mmu_user_pa2va_helper(const struct user_ta_ctx *utc,
+ paddr_t pa, void **va)
+{
+ TEE_Result res;
+ paddr_t p;
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].mobj)
+ continue;
+
+ res = mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset, 0, &p);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (core_is_buffer_inside(pa, 1, p,
+ utc->mmu->regions[n].size)) {
+ *va = (void *)(pa - p + utc->mmu->regions[n].va);
+ return TEE_SUCCESS;
+ }
+ }
+
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
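+/*
+ * Typical use (sketch with illustrative arguments): before the core
+ * dereferences a TA supplied buffer it can verify that the TA itself is
+ * allowed to read and write it, e.g.
+ *
+ *   res = tee_mmu_check_access_rights(utc,
+ *					TEE_MEMORY_ACCESS_READ |
+ *					TEE_MEMORY_ACCESS_WRITE |
+ *					TEE_MEMORY_ACCESS_ANY_OWNER,
+ *					(uaddr_t)buf, len);
+ *
+ * The loop below walks the range one granule at a time and checks the
+ * mapping attributes of each granule.
+ */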
+TEE_Result tee_mmu_check_access_rights(const struct user_ta_ctx *utc,
+ uint32_t flags, uaddr_t uaddr,
+ size_t len)
+{
+ uaddr_t a;
+ size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
+ CORE_MMU_USER_PARAM_SIZE);
+
+ /* Address wrap */
+ if ((uaddr + len) < uaddr)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
+ (flags & TEE_MEMORY_ACCESS_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ /*
+ * Rely on TA private memory test to check if address range is private
+ * to TA or not.
+ */
+ if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
+ !tee_mmu_is_vbuf_inside_ta_private(utc, (void *)uaddr, len))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ for (a = uaddr; a < (uaddr + len); a += addr_incr) {
+ uint32_t attr;
+ TEE_Result res;
+
+ res = tee_mmu_user_va2pa_attr(utc, (void *)a, NULL, &attr);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
+ (attr & TEE_MATTR_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
+ !(attr & TEE_MATTR_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
+ return TEE_ERROR_ACCESS_DENIED;
+ if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
+ return TEE_ERROR_ACCESS_DENIED;
+ }
+
+ return TEE_SUCCESS;
+}
+
+void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+
+ core_mmu_set_user_map(NULL);
+#ifdef CFG_SMALL_PAGE_USER_TA
+ /*
+ * No matter what happens below, the current user TA will not be
+ * current any longer. Make sure pager is in sync with that.
+ * This function has to be called before there's a chance that
+ * pgt_free_unlocked() is called.
+ *
+ * Save translation tables in a cache if it's a user TA.
+ */
+ pgt_free(&tsd->pgt_cache, tsd->ctx && is_user_ta_ctx(tsd->ctx));
+#endif
+
+ if (ctx && is_user_ta_ctx(ctx)) {
+ struct core_mmu_user_map map;
+ struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
+
+ core_mmu_create_user_map(utc, &map);
+ core_mmu_set_user_map(&map);
+ tee_pager_assign_uta_tables(utc);
+ }
+ tsd->ctx = ctx;
+}
+
+struct tee_ta_ctx *tee_mmu_get_ctx(void)
+{
+ return thread_get_tsd()->ctx;
+}
+
+uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
+{
+ const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);
+
+ assert(utc->mmu);
+ return utc->mmu->regions[TEE_MMU_UMAP_CODE_IDX].va;
+}
+
+void teecore_init_ta_ram(void)
+{
+ vaddr_t s;
+ vaddr_t e;
+ paddr_t ps;
+ paddr_t pe;
+
+	/* get virtual addr/size of RAM where TAs are loaded/executed */
+ core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
+ ps = virt_to_phys((void *)s);
+ pe = virt_to_phys((void *)(e - 1)) + 1;
+
+ if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
+ !pe || (pe & CORE_MMU_USER_CODE_MASK))
+ panic("invalid TA RAM");
+
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ if (!tee_pbuf_is_sec(ps, pe - ps))
+ panic("TA RAM is not secure");
+
+ if (!tee_mm_is_empty(&tee_mm_sec_ddr))
+ panic("TA RAM pool is not empty");
+
+ /* remove previous config and init TA ddr memory pool */
+ tee_mm_final(&tee_mm_sec_ddr);
+ tee_mm_init(&tee_mm_sec_ddr, ps, pe, CORE_MMU_USER_CODE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS);
+}
+
+void teecore_init_pub_ram(void)
+{
+ vaddr_t s;
+ vaddr_t e;
+
+	/* get virtual addr/size of NSec shared mem allocated from teecore */
+ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
+
+ if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
+ panic("invalid PUB RAM");
+
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ if (!tee_vbuf_is_non_sec(s, e - s))
+ panic("PUB RAM is not non-secure");
+
+#ifdef CFG_PL310
+ /* Allocate statically the l2cc mutex */
+ tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
+ s += sizeof(uint32_t); /* size of a pl310 mutex */
+ s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */
+#endif
+
+ default_nsec_shm_paddr = virt_to_phys((void *)s);
+ default_nsec_shm_size = e - s;
+}
+
+uint32_t tee_mmu_user_get_cache_attr(struct user_ta_ctx *utc, void *va)
+{
+ uint32_t attr;
+
+ if (tee_mmu_user_va2pa_attr(utc, va, NULL, &attr) != TEE_SUCCESS)
+ panic("cannot get attr");
+
+ return (attr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK;
+}
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
new file mode 100644
index 0000000..c7238fe
--- /dev/null
+++ b/core/arch/arm/mm/tee_pager.c
@@ -0,0 +1,1473 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <keep.h>
+#include <sys/queue.h>
+#include <kernel/abort.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_pager.h>
+#include <types_ext.h>
+#include <stdlib.h>
+#include <tee_api_defines.h>
+#include <tee/tee_cryp_provider.h>
+#include <trace.h>
+#include <utee_defines.h>
+#include <util.h>
+
+#include "pager_private.h"
+
+#define PAGER_AE_KEY_BITS 256
+
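+/*
+ * Per read/write page paging state: @iv is a counter increased each time
+ * the page is encrypted back to its backing store (an iv of 0 means the
+ * page has never been saved and is zero initialized on load), and @tag
+ * holds the AES-GCM authentication tag used to verify the page when it
+ * is paged back in.
+ */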
+struct pager_rw_pstate {
+ uint64_t iv;
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN];
+};
+
+enum area_type {
+ AREA_TYPE_RO,
+ AREA_TYPE_RW,
+ AREA_TYPE_LOCK,
+};
+
+struct tee_pager_area {
+ union {
+ const uint8_t *hashes;
+ struct pager_rw_pstate *rwp;
+ } u;
+ uint8_t *store;
+ enum area_type type;
+ uint32_t flags;
+ vaddr_t base;
+ size_t size;
+ struct pgt *pgt;
+ TAILQ_ENTRY(tee_pager_area) link;
+};
+
+TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
+
+static struct tee_pager_area_head tee_pager_area_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
+
+#define INVALID_PGIDX UINT_MAX
+
+/*
+ * struct tee_pager_pmem - Represents a physical page used for paging.
+ *
+ * @pgidx	index of the entry in the area's page table (area->pgt->tbl)
+ * @va_alias	Virtual address where the physical page is always aliased.
+ *		Used during remapping of the page when the content needs to
+ *		be updated before it's available at the new location.
+ * @area a pointer to the pager area
+ */
+struct tee_pager_pmem {
+ unsigned pgidx;
+ void *va_alias;
+ struct tee_pager_area *area;
+ TAILQ_ENTRY(tee_pager_pmem) link;
+};
+
+/* The list of physical pages. The first page in the list is the oldest */
+TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
+
+static struct tee_pager_pmem_head tee_pager_pmem_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
+
+static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
+
+static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
+
+/* number of pages hidden */
+#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
+
+/* Number of registered physical pages, used when hiding pages. */
+static size_t tee_pager_npages;
+
+#ifdef CFG_WITH_STATS
+static struct tee_pager_stats pager_stats;
+
+static inline void incr_ro_hits(void)
+{
+ pager_stats.ro_hits++;
+}
+
+static inline void incr_rw_hits(void)
+{
+ pager_stats.rw_hits++;
+}
+
+static inline void incr_hidden_hits(void)
+{
+ pager_stats.hidden_hits++;
+}
+
+static inline void incr_zi_released(void)
+{
+ pager_stats.zi_released++;
+}
+
+static inline void incr_npages_all(void)
+{
+ pager_stats.npages_all++;
+}
+
+static inline void set_npages(void)
+{
+ pager_stats.npages = tee_pager_npages;
+}
+
+void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ *stats = pager_stats;
+
+ pager_stats.hidden_hits = 0;
+ pager_stats.ro_hits = 0;
+ pager_stats.rw_hits = 0;
+ pager_stats.zi_released = 0;
+}
+
+#else /* CFG_WITH_STATS */
+static inline void incr_ro_hits(void) { }
+static inline void incr_rw_hits(void) { }
+static inline void incr_hidden_hits(void) { }
+static inline void incr_zi_released(void) { }
+static inline void incr_npages_all(void) { }
+static inline void set_npages(void) { }
+
+void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ memset(stats, 0, sizeof(struct tee_pager_stats));
+}
+#endif /* CFG_WITH_STATS */
+
+static struct pgt pager_core_pgt;
+struct core_mmu_table_info tee_pager_tbl_info;
+static struct core_mmu_table_info pager_alias_tbl_info;
+
+static unsigned pager_spinlock = SPINLOCK_UNLOCK;
+
+/* Defines the range of the alias area */
+static tee_mm_entry_t *pager_alias_area;
+/*
+ * Physical pages are added to the alias area in a stack-like fashion.
+ * @pager_alias_next_free gives the address of the next free entry if
+ * it is != 0, otherwise the alias area is full.
+ */
+static uintptr_t pager_alias_next_free;
+
+static uint32_t pager_lock(void)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ cpu_spin_lock(&pager_spinlock);
+ return exceptions;
+}
+
+static void pager_unlock(uint32_t exceptions)
+{
+ cpu_spin_unlock(&pager_spinlock);
+ thread_set_exceptions(exceptions);
+}
+
+static void set_alias_area(tee_mm_entry_t *mm)
+{
+ struct core_mmu_table_info *ti = &pager_alias_tbl_info;
+ size_t tbl_va_size;
+ unsigned idx;
+ unsigned last_idx;
+ vaddr_t smem = tee_mm_get_smem(mm);
+ size_t nbytes = tee_mm_get_bytes(mm);
+
+ DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
+
+ if (pager_alias_area)
+		panic("pager_alias_area already set");
+
+ if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
+ panic("Can't find translation table");
+
+ if ((1 << ti->shift) != SMALL_PAGE_SIZE)
+ panic("Unsupported page size in translation table");
+
+ tbl_va_size = (1 << ti->shift) * ti->num_entries;
+ if (!core_is_buffer_inside(smem, nbytes,
+ ti->va_base, tbl_va_size)) {
+		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
+ smem, nbytes, ti->va_base, tbl_va_size);
+ panic();
+ }
+
+ if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
+ panic("invalid area alignment");
+
+ pager_alias_area = mm;
+ pager_alias_next_free = smem;
+
+ /* Clear all mapping in the alias area */
+ idx = core_mmu_va2idx(ti, smem);
+ last_idx = core_mmu_va2idx(ti, smem + nbytes);
+ for (; idx < last_idx; idx++)
+ core_mmu_set_entry(ti, idx, 0, 0);
+
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+static void generate_ae_key(void)
+{
+ if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
+ panic("failed to generate random");
+}
+
+void tee_pager_init(tee_mm_entry_t *mm_alias)
+{
+ set_alias_area(mm_alias);
+ generate_ae_key();
+}
+
+static void *pager_add_alias_page(paddr_t pa)
+{
+ unsigned idx;
+ struct core_mmu_table_info *ti = &pager_alias_tbl_info;
+ uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
+ TEE_MATTR_SECURE | TEE_MATTR_PRW;
+
+ DMSG("0x%" PRIxPA, pa);
+
+ if (!pager_alias_next_free || !ti->num_entries)
+ panic("invalid alias entry");
+
+ idx = core_mmu_va2idx(ti, pager_alias_next_free);
+ core_mmu_set_entry(ti, idx, pa, attr);
+ pgt_inc_used_entries(&pager_core_pgt);
+ pager_alias_next_free += SMALL_PAGE_SIZE;
+ if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
+ tee_mm_get_bytes(pager_alias_area)))
+ pager_alias_next_free = 0;
+ return (void *)core_mmu_idx2va(ti, idx);
+}
+
+static struct tee_pager_area *alloc_area(struct pgt *pgt,
+ vaddr_t base, size_t size,
+ uint32_t flags, const void *store,
+ const void *hashes)
+{
+ struct tee_pager_area *area = calloc(1, sizeof(*area));
+ enum area_type at;
+ tee_mm_entry_t *mm_store = NULL;
+
+ if (!area)
+ return NULL;
+
+ if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
+ if (flags & TEE_MATTR_LOCKED) {
+ at = AREA_TYPE_LOCK;
+ goto out;
+ }
+ mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
+ if (!mm_store)
+ goto bad;
+ area->store = phys_to_virt(tee_mm_get_smem(mm_store),
+ MEM_AREA_TA_RAM);
+ if (!area->store)
+ goto bad;
+ area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
+ sizeof(struct pager_rw_pstate));
+ if (!area->u.rwp)
+ goto bad;
+ at = AREA_TYPE_RW;
+ } else {
+ area->store = (void *)store;
+ area->u.hashes = hashes;
+ at = AREA_TYPE_RO;
+ }
+out:
+ area->pgt = pgt;
+ area->base = base;
+ area->size = size;
+ area->flags = flags;
+ area->type = at;
+ return area;
+bad:
+ tee_mm_free(mm_store);
+ free(area->u.rwp);
+ free(area);
+ return NULL;
+}
+
+static void area_insert_tail(struct tee_pager_area *area)
+{
+ uint32_t exceptions = pager_lock();
+
+ TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(area_insert_tail);
+
+static size_t tbl_usage_count(struct pgt *pgt)
+{
+ size_t n;
+ paddr_t pa;
+ size_t usage = 0;
+
+ for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
+ core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
+ n, &pa, NULL);
+ if (pa)
+ usage++;
+ }
+ return usage;
+}
+
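+/*
+ * Registers a pageable area owned by the core itself. Read-only areas
+ * must supply both a backing @store and per page SHA-256 @hashes, while
+ * writable areas must supply neither (for non-locked areas a store is
+ * allocated from tee_mm_sec_ddr and pages are encrypted on save).
+ * Illustrative call with made-up names:
+ *
+ *   tee_pager_add_core_area(paged_va, paged_sz,
+ *			     TEE_MATTR_PR | TEE_MATTR_PX,
+ *			     paged_store, paged_hashes);
+ */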
+bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
+ const void *store, const void *hashes)
+{
+ struct tee_pager_area *area;
+ size_t tbl_va_size;
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+
+ DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
+ base, base + size, flags, store, hashes);
+
+ if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
+ EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
+ panic();
+ }
+
+	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
+		panic("non-write pages must provide store and hashes");
+
+	if ((flags & TEE_MATTR_PW) && (store || hashes))
+		panic("write pages cannot provide store or hashes");
+
+ if (!pager_core_pgt.tbl) {
+ pager_core_pgt.tbl = ti->table;
+ pgt_set_used_entries(&pager_core_pgt,
+ tbl_usage_count(&pager_core_pgt));
+ }
+
+ tbl_va_size = (1 << ti->shift) * ti->num_entries;
+ if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
+		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
+ base, size, ti->va_base, tbl_va_size);
+ return false;
+ }
+
+ area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
+ if (!area)
+ return false;
+
+ area_insert_tail(area);
+ return true;
+}
+
+static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
+ vaddr_t va)
+{
+ struct tee_pager_area *area;
+
+ if (!areas)
+ return NULL;
+
+ TAILQ_FOREACH(area, areas, link) {
+ if (core_is_buffer_inside(va, 1, area->base, area->size))
+ return area;
+ }
+ return NULL;
+}
+
+#ifdef CFG_PAGED_USER_TA
+static struct tee_pager_area *find_uta_area(vaddr_t va)
+{
+ struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
+
+ if (!ctx || !is_user_ta_ctx(ctx))
+ return NULL;
+ return find_area(to_user_ta_ctx(ctx)->areas, va);
+}
+#else
+static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
+{
+ return NULL;
+}
+#endif /*CFG_PAGED_USER_TA*/
+
+
+static uint32_t get_area_mattr(uint32_t area_flags)
+{
+ uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
+ (area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
+
+ if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
+ attr |= TEE_MATTR_GLOBAL;
+
+ return attr;
+}
+
+static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
+{
+ paddr_t pa;
+ unsigned idx;
+
+ idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
+ core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
+ return pa;
+}
+
+static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
+ void *dst)
+{
+ struct pager_aes_gcm_iv iv = {
+ { (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
+ };
+
+ return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
+ &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
+}
+
+static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
+{
+ struct pager_aes_gcm_iv iv;
+
+ assert((rwp->iv + 1) > rwp->iv);
+ rwp->iv++;
+ /*
+ * IV is constructed as recommended in section "8.2.1 Deterministic
+ * Construction" of "Recommendation for Block Cipher Modes of
+ * Operation: Galois/Counter Mode (GCM) and GMAC",
+ * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
+ */
+ iv.iv[0] = (vaddr_t)rwp;
+ iv.iv[1] = rwp->iv >> 32;
+ iv.iv[2] = rwp->iv;
+
+ if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
+ &iv, rwp->tag,
+ src, dst, SMALL_PAGE_SIZE))
+ panic("gcm failed");
+}
+
+static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
+ void *va_alias)
+{
+ size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
+ const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
+
+ switch (area->type) {
+ case AREA_TYPE_RO:
+ {
+ const void *hash = area->u.hashes +
+ idx * TEE_SHA256_HASH_SIZE;
+
+ memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
+ incr_ro_hits();
+
+ if (hash_sha256_check(hash, va_alias,
+ SMALL_PAGE_SIZE) != TEE_SUCCESS) {
+ EMSG("PH 0x%" PRIxVA " failed", page_va);
+ panic();
+ }
+ }
+ break;
+ case AREA_TYPE_RW:
+ FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
+ va_alias, page_va, area->u.rwp[idx].iv);
+ if (!area->u.rwp[idx].iv)
+ memset(va_alias, 0, SMALL_PAGE_SIZE);
+ else if (!decrypt_page(&area->u.rwp[idx], stored_page,
+ va_alias)) {
+ EMSG("PH 0x%" PRIxVA " failed", page_va);
+ panic();
+ }
+ incr_rw_hits();
+ break;
+ case AREA_TYPE_LOCK:
+ FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
+ memset(va_alias, 0, SMALL_PAGE_SIZE);
+ break;
+ default:
+ panic();
+ }
+}
+
+static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
+{
+ const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
+ TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+
+ if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
+ size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
+ size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
+ void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
+
+ assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
+ encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
+ stored_page);
+ FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
+ pmem->area->base + idx * SMALL_PAGE_SIZE,
+ pmem->area->u.rwp[idx].iv);
+ }
+}
+
+static void area_get_entry(struct tee_pager_area *area, size_t idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ assert(area->pgt);
+ assert(idx < tee_pager_tbl_info.num_entries);
+ core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
+ idx, pa, attr);
+}
+
+static void area_set_entry(struct tee_pager_area *area, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ assert(area->pgt);
+ assert(idx < tee_pager_tbl_info.num_entries);
+ core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
+ idx, pa, attr);
+}
+
+static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
+{
+ return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
+}
+
+static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
+ size_t idx)
+{
+ return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
+}
+
+#ifdef CFG_PAGED_USER_TA
+static void free_area(struct tee_pager_area *area)
+{
+ tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
+ virt_to_phys(area->store)));
+ if (area->type == AREA_TYPE_RW)
+ free(area->u.rwp);
+ free(area);
+}
+
+static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ uint32_t flags;
+ vaddr_t b = base;
+ size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
+
+ if (!utc->areas) {
+ utc->areas = malloc(sizeof(*utc->areas));
+ if (!utc->areas)
+ return false;
+ TAILQ_INIT(utc->areas);
+ }
+
+ flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
+
+ while (s) {
+ size_t s2;
+
+ if (find_area(utc->areas, b))
+ return false;
+
+ s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
+
+ /* Table info will be set when the context is activated. */
+ area = alloc_area(NULL, b, s2, flags, NULL, NULL);
+ if (!area)
+ return false;
+ TAILQ_INSERT_TAIL(utc->areas, area, link);
+ b += s2;
+ s -= s2;
+ }
+
+ return true;
+}
+
+bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct tee_pager_area *area;
+ struct core_mmu_table_info dir_info = { NULL };
+
+ if (&utc->ctx != tsd->ctx) {
+ /*
+		 * Changes are to a utc that isn't active. Just add the
+		 * areas; page tables will be dealt with later.
+ */
+ return pager_add_uta_area(utc, base, size);
+ }
+
+ /*
+ * Assign page tables before adding areas to be able to tell which
+ * are newly added and should be removed in case of failure.
+ */
+ tee_pager_assign_uta_tables(utc);
+ if (!pager_add_uta_area(utc, base, size)) {
+ struct tee_pager_area *next_a;
+
+ /* Remove all added areas */
+ TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
+ if (!area->pgt) {
+ TAILQ_REMOVE(utc->areas, area, link);
+ free_area(area);
+ }
+ }
+ return false;
+ }
+
+ /*
+ * Assign page tables to the new areas and make sure that the page
+ * tables are registered in the upper table.
+ */
+ tee_pager_assign_uta_tables(utc);
+ core_mmu_get_user_pgdir(&dir_info);
+ TAILQ_FOREACH(area, utc->areas, link) {
+ paddr_t pa;
+ size_t idx;
+ uint32_t attr;
+
+ idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
+ core_mmu_get_entry(&dir_info, idx, &pa, &attr);
+
+ /*
+		 * Check if the page table is already used; if it is, it's
+		 * already registered.
+ */
+ if (area->pgt->num_used_entries) {
+ assert(attr & TEE_MATTR_TABLE);
+ assert(pa == virt_to_phys(area->pgt->tbl));
+ continue;
+ }
+
+ attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
+ pa = virt_to_phys(area->pgt->tbl);
+ assert(pa);
+ /*
+ * Note that the update of the table entry is guaranteed to
+ * be atomic.
+ */
+ core_mmu_set_entry(&dir_info, idx, pa, attr);
+ }
+
+ return true;
+}
+
+static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
+ struct pgt *pgt)
+{
+ assert(pgt);
+ ti->table = pgt->tbl;
+ ti->va_base = pgt->vabase;
+ ti->level = tee_pager_tbl_info.level;
+ ti->shift = tee_pager_tbl_info.shift;
+ ti->num_entries = tee_pager_tbl_info.num_entries;
+}
+
+static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
+ vaddr_t new_base)
+{
+ uint32_t exceptions = pager_lock();
+
+ /*
+	 * If there's no pgt assigned to the old area there are no pages to
+ * deal with either, just update with a new pgt and base.
+ */
+ if (area->pgt) {
+ struct core_mmu_table_info old_ti;
+ struct core_mmu_table_info new_ti;
+ struct tee_pager_pmem *pmem;
+
+ init_tbl_info_from_pgt(&old_ti, area->pgt);
+ init_tbl_info_from_pgt(&new_ti, new_pgt);
+
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ vaddr_t va;
+ paddr_t pa;
+ uint32_t attr;
+
+ if (pmem->area != area)
+ continue;
+ core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
+ core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
+
+ assert(pa == get_pmem_pa(pmem));
+ assert(attr);
+ assert(area->pgt->num_used_entries);
+ area->pgt->num_used_entries--;
+
+ va = core_mmu_idx2va(&old_ti, pmem->pgidx);
+ va = va - area->base + new_base;
+ pmem->pgidx = core_mmu_va2idx(&new_ti, va);
+ core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
+ new_pgt->num_used_entries++;
+ }
+ }
+
+ area->pgt = new_pgt;
+ area->base = new_base;
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(transpose_area);
+
+void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
+ vaddr_t src_base,
+ struct user_ta_ctx *dst_utc,
+ vaddr_t dst_base, struct pgt **dst_pgt,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ struct tee_pager_area *next_a;
+
+ TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
+ vaddr_t new_area_base;
+ size_t new_idx;
+
+ if (!core_is_buffer_inside(area->base, area->size,
+ src_base, size))
+ continue;
+
+ TAILQ_REMOVE(src_utc->areas, area, link);
+
+		new_area_base = dst_base + (area->base - src_base);
+ new_idx = (new_area_base - dst_pgt[0]->vabase) /
+ CORE_MMU_PGDIR_SIZE;
+ assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
+ dst_pgt[new_idx]->vabase);
+ transpose_area(area, dst_pgt[new_idx], new_area_base);
+
+ /*
+ * Assert that this will not cause any conflicts in the new
+ * utc. This should already be guaranteed, but a bug here
+ * could be tricky to find.
+ */
+ assert(!find_area(dst_utc->areas, area->base));
+ TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
+ }
+}
+
+static void rem_area(struct tee_pager_area_head *area_head,
+ struct tee_pager_area *area)
+{
+ struct tee_pager_pmem *pmem;
+ uint32_t exceptions;
+
+ exceptions = pager_lock();
+
+ TAILQ_REMOVE(area_head, area, link);
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->area == area) {
+ area_set_entry(area, pmem->pgidx, 0, 0);
+ pgt_dec_used_entries(area->pgt);
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ }
+ }
+
+ pager_unlock(exceptions);
+ free_area(area);
+}
+KEEP_PAGER(rem_area);
+
+void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ struct tee_pager_area *next_a;
+ size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
+
+ TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
+ if (core_is_buffer_inside(area->base, area->size, base, s))
+ rem_area(utc->areas, area);
+ }
+}
+
+void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
+{
+ struct tee_pager_area *area;
+
+ if (!utc->areas)
+ return;
+
+ while (true) {
+ area = TAILQ_FIRST(utc->areas);
+ if (!area)
+ break;
+ TAILQ_REMOVE(utc->areas, area, link);
+ free_area(area);
+ }
+
+ free(utc->areas);
+}
+
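+/*
+ * Update the mapping attributes of the areas covering [base, base + size).
+ * The region must match a contiguous sequence of existing areas exactly,
+ * otherwise false is returned. Resident pages are saved back to the
+ * backing store when the new attributes don't include write access, and
+ * get the required d-cache clean and i-cache invalidation when execute
+ * access is requested.
+ */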
+bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size, uint32_t flags)
+{
+ bool ret;
+ vaddr_t b = base;
+ size_t s = size;
+ size_t s2;
+ struct tee_pager_area *area = find_area(utc->areas, b);
+ uint32_t exceptions;
+ struct tee_pager_pmem *pmem;
+ paddr_t pa;
+ uint32_t a;
+ uint32_t f;
+
+ f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
+ if (f & TEE_MATTR_UW)
+ f |= TEE_MATTR_PW;
+ f = get_area_mattr(f);
+
+ exceptions = pager_lock();
+
+ while (s) {
+ s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
+ if (!area || area->base != b || area->size != s2) {
+ ret = false;
+ goto out;
+ }
+ b += s2;
+ s -= s2;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->area != area)
+ continue;
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
+ if (a & TEE_MATTR_VALID_BLOCK)
+ assert(pa == get_pmem_pa(pmem));
+ else
+ pa = get_pmem_pa(pmem);
+ if (a == f)
+ continue;
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ if (!(flags & TEE_MATTR_UW))
+ tee_pager_save_page(pmem, a);
+
+ area_set_entry(pmem->area, pmem->pgidx, pa, f);
+
+ if (flags & TEE_MATTR_UX) {
+ void *va = (void *)area_idx2va(pmem->area,
+ pmem->pgidx);
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ SMALL_PAGE_SIZE);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va,
+ SMALL_PAGE_SIZE);
+ }
+ }
+
+ area->flags = f;
+ area = TAILQ_NEXT(area, link);
+ }
+
+ ret = true;
+out:
+ pager_unlock(exceptions);
+ return ret;
+}
+KEEP_PAGER(tee_pager_set_uta_area_attr);
+#endif /*CFG_PAGED_USER_TA*/
+
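+/*
+ * If the page at page_va is resident but hidden, restore its mapping
+ * (read-only unless it was hidden dirty), move the pmem entry to the back
+ * of the list and return true. Return false if no hidden page covers the
+ * address.
+ */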
+static bool tee_pager_unhide_page(vaddr_t page_va)
+{
+ struct tee_pager_pmem *pmem;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ paddr_t pa;
+ uint32_t attr;
+
+ if (pmem->pgidx == INVALID_PGIDX)
+ continue;
+
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
+
+ if (!(attr &
+ (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
+ continue;
+
+ if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
+ uint32_t a = get_area_mattr(pmem->area->flags);
+
+ /* page is hidden, show and move to back */
+ if (pa != get_pmem_pa(pmem))
+ panic("unexpected pa");
+
+ /*
+ * If it's not a dirty block, then it should be
+ * read only.
+ */
+ if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
+ a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
+ else
+ FMSG("Unhide %#" PRIxVA, page_va);
+
+ if (page_va == 0x8000a000)
+ FMSG("unhide %#" PRIxVA " a %#" PRIX32,
+ page_va, a);
+ area_set_entry(pmem->area, pmem->pgidx, pa, a);
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+
+ /* TODO only invalidate entry touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ incr_hidden_hits();
+ return true;
+ }
+ }
+
+ return false;
+}
+
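+/*
+ * Hide up to TEE_PAGER_NHIDE of the oldest resident pages by turning
+ * their valid mappings into hidden (or hidden dirty, if writable) block
+ * entries. The next access to a hidden page faults and is resolved by
+ * tee_pager_unhide_page() without reloading the page.
+ */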
+static void tee_pager_hide_pages(void)
+{
+ struct tee_pager_pmem *pmem;
+ size_t n = 0;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ paddr_t pa;
+ uint32_t attr;
+ uint32_t a;
+
+ if (n >= TEE_PAGER_NHIDE)
+ break;
+ n++;
+
+ /* we cannot hide pages when pmem->area is not defined. */
+ if (!pmem->area)
+ continue;
+
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+
+ assert(pa == get_pmem_pa(pmem));
+		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
+			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+			FMSG("Hide %#" PRIxVA,
+			     area_idx2va(pmem->area, pmem->pgidx));
+		} else {
+			a = TEE_MATTR_HIDDEN_BLOCK;
+		}
+ area_set_entry(pmem->area, pmem->pgidx, pa, a);
+ }
+
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+/*
+ * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
+ * Return false if the page was not mapped, true if it was.
+ */
+static bool tee_pager_release_one_phys(struct tee_pager_area *area,
+ vaddr_t page_va)
+{
+ struct tee_pager_pmem *pmem;
+ unsigned pgidx;
+ paddr_t pa;
+ uint32_t attr;
+
+ pgidx = area_va2idx(area, page_va);
+ area_get_entry(area, pgidx, &pa, &attr);
+
+ FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
+
+ TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
+ if (pmem->area != area || pmem->pgidx != pgidx)
+ continue;
+
+ assert(pa == get_pmem_pa(pmem));
+ area_set_entry(area, pgidx, 0, 0);
+ pgt_dec_used_entries(area->pgt);
+ TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ tee_pager_npages++;
+ set_npages();
+ TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
+ incr_zi_released();
+ return true;
+ }
+
+ return false;
+}
+
+/* Finds the oldest page and unmaps it from its old virtual address */
+static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
+{
+ struct tee_pager_pmem *pmem;
+
+ pmem = TAILQ_FIRST(&tee_pager_pmem_head);
+ if (!pmem) {
+ EMSG("No pmem entries");
+ return NULL;
+ }
+ if (pmem->pgidx != INVALID_PGIDX) {
+ uint32_t a;
+
+ assert(pmem->area && pmem->area->pgt);
+ area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ pgt_dec_used_entries(pmem->area->pgt);
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ tee_pager_save_page(pmem, a);
+ }
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ pmem->pgidx = INVALID_PGIDX;
+ pmem->area = NULL;
+ if (area->type == AREA_TYPE_LOCK) {
+ /* Move page to lock list */
+ if (tee_pager_npages <= 0)
+			panic("running out of pages");
+ tee_pager_npages--;
+ set_npages();
+ TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
+ } else {
+ /* move page to back */
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+ }
+
+ return pmem;
+}
+
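+/*
+ * Handle an abort on a page that is already mapped. Returns false if the
+ * entry isn't valid (the caller has to page the address in), true when no
+ * further paging work is needed: either the access was illegal (*handled
+ * stays false, so a user TA gets panicked by the caller) or the entry has
+ * been upgraded to the full area attributes, for instance making a
+ * read-only page writable (dirty) on a permitted write.
+ */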
+static bool pager_update_permissions(struct tee_pager_area *area,
+ struct abort_info *ai, bool *handled)
+{
+ unsigned int pgidx = area_va2idx(area, ai->va);
+ uint32_t attr;
+ paddr_t pa;
+
+ *handled = false;
+
+ area_get_entry(area, pgidx, &pa, &attr);
+
+ /* Not mapped */
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ return false;
+
+ /* Not readable, should not happen */
+ if (abort_is_user_exception(ai)) {
+ if (!(attr & TEE_MATTR_UR))
+ return true;
+ } else {
+ if (!(attr & TEE_MATTR_PR)) {
+ abort_print_error(ai);
+ panic();
+ }
+ }
+
+ switch (core_mmu_get_fault_type(ai->fault_descr)) {
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ if (ai->abort_type == ABORT_TYPE_PREFETCH) {
+			/* Check if attempting to execute from a NOX page */
+ if (abort_is_user_exception(ai)) {
+ if (!(attr & TEE_MATTR_UX))
+ return true;
+ } else {
+ if (!(attr & TEE_MATTR_PX)) {
+ abort_print_error(ai);
+ panic();
+ }
+ }
+ }
+		/* Since the page is mapped now, it's OK */
+ break;
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+		/* Check if attempting to write to an RO page */
+ if (abort_is_user_exception(ai)) {
+ if (!(area->flags & TEE_MATTR_UW))
+ return true;
+ if (!(attr & TEE_MATTR_UW)) {
+ FMSG("Dirty %p",
+ (void *)(ai->va & ~SMALL_PAGE_MASK));
+ area_set_entry(area, pgidx, pa,
+ get_area_mattr(area->flags));
+ /* TODO only invalidate entry above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
+
+ } else {
+ if (!(area->flags & TEE_MATTR_PW)) {
+ abort_print_error(ai);
+ panic();
+ }
+ if (!(attr & TEE_MATTR_PW)) {
+ FMSG("Dirty %p",
+ (void *)(ai->va & ~SMALL_PAGE_MASK));
+ area_set_entry(area, pgidx, pa,
+ get_area_mattr(area->flags));
+ /* TODO only invalidate entry above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
+ }
+		/* Since the permissions have been updated it's OK now */
+ break;
+ default:
+ /* Some fault we can't deal with */
+ if (abort_is_user_exception(ai))
+ return true;
+ abort_print_error(ai);
+ panic();
+ }
+ *handled = true;
+ return true;
+}
+
+#ifdef CFG_TEE_CORE_DEBUG
+static void stat_handle_fault(void)
+{
+ static size_t num_faults;
+ static size_t min_npages = SIZE_MAX;
+ static size_t total_min_npages = SIZE_MAX;
+
+ num_faults++;
+ if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
+ DMSG("nfaults %zu npages %zu (min %zu)",
+ num_faults, tee_pager_npages, min_npages);
+ min_npages = tee_pager_npages; /* reset */
+ }
+ if (tee_pager_npages < min_npages)
+ min_npages = tee_pager_npages;
+ if (tee_pager_npages < total_min_npages)
+ total_min_npages = tee_pager_npages;
+}
+#else
+static void stat_handle_fault(void)
+{
+}
+#endif
+
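+/*
+ * Main pager fault handler. Look up the area covering the faulting
+ * address, unhide the page if it was merely hidden, otherwise either fix
+ * up the permissions or evict the oldest page, load the new contents via
+ * the aliased mapping and map the page without write permissions. Finally
+ * hide a few of the oldest pages so future accesses to them fault again.
+ */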
+bool tee_pager_handle_fault(struct abort_info *ai)
+{
+ struct tee_pager_area *area;
+ vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
+ uint32_t exceptions;
+ bool ret;
+
+#ifdef TEE_PAGER_DEBUG_PRINT
+ abort_print(ai);
+#endif
+
+ /*
+ * We're updating pages that can affect several active CPUs at a
+ * time below. We end up here because a thread tries to access some
+ * memory that isn't available. We have to be careful when making
+ * that memory available as other threads may succeed in accessing
+ * that address the moment after we've made it available.
+ *
+ * That means that we can't just map the memory and populate the
+ * page, instead we use the aliased mapping to populate the page
+ * and once everything is ready we map it.
+ */
+ exceptions = pager_lock();
+
+ stat_handle_fault();
+
+ /* check if the access is valid */
+ if (abort_is_user_exception(ai)) {
+ area = find_uta_area(ai->va);
+
+ } else {
+ area = find_area(&tee_pager_area_head, ai->va);
+ if (!area)
+ area = find_uta_area(ai->va);
+ }
+ if (!area || !area->pgt) {
+ ret = false;
+ goto out;
+ }
+
+ if (!tee_pager_unhide_page(page_va)) {
+ struct tee_pager_pmem *pmem = NULL;
+ uint32_t attr;
+
+ /*
+ * The page wasn't hidden, but some other core may have
+ * updated the table entry before we got here or we need
+ * to make a read-only page read-write (dirty).
+ */
+ if (pager_update_permissions(area, ai, &ret)) {
+ /*
+ * Nothing more to do with the abort. The problem
+			 * could already have been dealt with by another
+			 * core, or if ret is false the TA will be panicked.
+ */
+ goto out;
+ }
+
+ pmem = tee_pager_get_page(area);
+ if (!pmem) {
+ abort_print(ai);
+ panic();
+ }
+
+ /* load page code & data */
+ tee_pager_load_page(area, page_va, pmem->va_alias);
+
+ /*
+ * We've updated the page using the aliased mapping and
+		 * some cache maintenance is now needed if it's an
+ * executable page.
+ *
+ * Since the d-cache is a Physically-indexed,
+ * physically-tagged (PIPT) cache we can clean the aliased
+ * address instead of the real virtual address.
+ *
+		 * The i-cache can also be PIPT, but may be something else
+		 * too; to keep it simple we invalidate the entire i-cache.
+		 * As a future optimization we may invalidate only the
+		 * aliased area if it is a PIPT cache, else the entire cache.
+ */
+ if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
+ /*
+ * Doing these operations to LoUIS (Level of
+ * unification, Inner Shareable) would be enough
+ */
+ cache_maintenance_l1(DCACHE_AREA_CLEAN,
+ pmem->va_alias, SMALL_PAGE_SIZE);
+
+ cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
+ }
+
+ pmem->area = area;
+ pmem->pgidx = area_va2idx(area, ai->va);
+ attr = get_area_mattr(area->flags) &
+ ~(TEE_MATTR_PW | TEE_MATTR_UW);
+ area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
+ pgt_inc_used_entries(area->pgt);
+
+ FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
+ area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
+
+ }
+
+ tee_pager_hide_pages();
+ ret = true;
+out:
+ pager_unlock(exceptions);
+ return ret;
+}
+
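+/*
+ * Add the physical pages currently mapping npages pages from vaddr to the
+ * pager's pool. With unmap set the pages are unmapped and immediately
+ * available for paging; otherwise they remain mapped and are assigned to
+ * the core area covering them.
+ */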
+void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
+{
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ size_t n;
+
+ DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
+ vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
+
+ /* setup memory */
+ for (n = 0; n < npages; n++) {
+ struct tee_pager_pmem *pmem;
+ vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
+ unsigned pgidx = core_mmu_va2idx(ti, va);
+ paddr_t pa;
+ uint32_t attr;
+
+ /*
+ * Note that we can only support adding pages in the
+ * valid range of this table info, currently not a problem.
+ */
+ core_mmu_get_entry(ti, pgidx, &pa, &attr);
+
+ /* Ignore unmapped pages/blocks */
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+
+ pmem = malloc(sizeof(struct tee_pager_pmem));
+ if (!pmem)
+ panic("out of mem");
+
+ pmem->va_alias = pager_add_alias_page(pa);
+
+ if (unmap) {
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ core_mmu_set_entry(ti, pgidx, 0, 0);
+ pgt_dec_used_entries(&pager_core_pgt);
+ } else {
+ /*
+ * The page is still mapped, let's assign the area
+ * and update the protection bits accordingly.
+ */
+ pmem->area = find_area(&tee_pager_area_head, va);
+ assert(pmem->area->pgt == &pager_core_pgt);
+ pmem->pgidx = pgidx;
+ assert(pa == get_pmem_pa(pmem));
+ area_set_entry(pmem->area, pgidx, pa,
+ get_area_mattr(pmem->area->flags));
+ }
+
+ tee_pager_npages++;
+ incr_npages_all();
+ set_npages();
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+ }
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+#ifdef CFG_PAGED_USER_TA
+static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
+{
+ struct pgt *p = pgt;
+
+ while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
+ p = SLIST_NEXT(p, link);
+ return p;
+}
+
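+/*
+ * Assign a page table from the current thread's pgt cache to each area of
+ * the user TA context that doesn't have one yet, matching on the page
+ * directory base address of the area.
+ */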
+void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
+{
+ struct tee_pager_area *area;
+ struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
+
+ TAILQ_FOREACH(area, utc->areas, link) {
+ if (!area->pgt)
+ area->pgt = find_pgt(pgt, area->base);
+ else
+ assert(area->pgt == find_pgt(pgt, area->base));
+ if (!area->pgt)
+ panic();
+ }
+}
+
+static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
+{
+ uint32_t attr;
+
+ assert(pmem->area && pmem->area->pgt);
+
+ area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ tee_pager_save_page(pmem, attr);
+ assert(pmem->area->pgt->num_used_entries);
+ pmem->area->pgt->num_used_entries--;
+ pmem->pgidx = INVALID_PGIDX;
+ pmem->area = NULL;
+}
+
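+/*
+ * Save and unmap every resident page referenced by this page table and
+ * detach the table from any user TA areas still pointing at it, leaving
+ * the table without used entries.
+ */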
+void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
+{
+ struct tee_pager_pmem *pmem;
+ struct tee_pager_area *area;
+ uint32_t exceptions = pager_lock();
+
+ if (!pgt->num_used_entries)
+ goto out;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
+ continue;
+ if (pmem->area->pgt == pgt)
+ pager_save_and_release_entry(pmem);
+ }
+ assert(!pgt->num_used_entries);
+
+out:
+ if (is_user_ta_ctx(pgt->ctx)) {
+ TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
+ if (area->pgt == pgt)
+ area->pgt = NULL;
+ }
+ }
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
+#endif /*CFG_PAGED_USER_TA*/
+
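+/*
+ * Release the locked physical pages backing the page-aligned part of
+ * [addr, addr + size) and return them to the set of pageable pages. The
+ * whole range must be covered by a single area.
+ */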
+void tee_pager_release_phys(void *addr, size_t size)
+{
+	bool unmapped = false;
+ vaddr_t va = (vaddr_t)addr;
+ vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
+ vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
+ struct tee_pager_area *area;
+ uint32_t exceptions;
+
+ if (!size)
+ return;
+
+ area = find_area(&tee_pager_area_head, begin);
+ if (!area ||
+ area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
+ panic();
+
+ exceptions = pager_lock();
+
+ for (va = begin; va < end; va += SMALL_PAGE_SIZE)
+		unmapped |= tee_pager_release_one_phys(area, va);
+
+ /* Invalidate secure TLB */
+	if (unmapped)
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(tee_pager_release_phys);
+
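+/*
+ * Allocate paged read/write core memory from the tee_mm_vcore virtual
+ * address space, rounded up to whole small pages. TEE_MATTR_LOCKED in
+ * flags is passed on to the created pager area. Returns NULL if size is
+ * zero or no virtual space is left.
+ */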
+void *tee_pager_alloc(size_t size, uint32_t flags)
+{
+ tee_mm_entry_t *mm;
+ uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
+
+ if (!size)
+ return NULL;
+
+ mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
+ if (!mm)
+ return NULL;
+
+ tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ f, NULL, NULL);
+
+ return (void *)tee_mm_get_smem(mm);
+}