path: root/core/arch/arm
author     r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:42:10 +0200
committer  r.tyminski <r.tyminski@partner.samsung.com>  2017-05-29 11:49:50 +0200
commit     f9a43781767007462965b21f3f518c4cfc0744c7 (patch)
tree       201509439b1d9798256227794dae6774345adf43 /core/arch/arm
parent     1fed20f5471aa0dad5e4b4f79d1f2843ac88734f (diff)
download   tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.gz
           tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.bz2
           tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.zip
Initial commit with upstream sources
Change-Id: Ie9460111f21fc955102fd8732a0173b2d0499a4a
Diffstat (limited to 'core/arch/arm')
-rw-r--r--core/arch/arm/arm.mk157
-rw-r--r--core/arch/arm/include/arm.h65
-rw-r--r--core/arch/arm/include/arm32.h606
-rw-r--r--core/arch/arm/include/arm32_macros.S215
-rw-r--r--core/arch/arm/include/arm32_macros_cortex_a9.S44
-rw-r--r--core/arch/arm/include/arm64.h310
-rw-r--r--core/arch/arm/include/arm64_macros.S125
-rw-r--r--core/arch/arm/include/kernel/abort.h57
-rw-r--r--core/arch/arm/include/kernel/generic_boot.h101
-rw-r--r--core/arch/arm/include/kernel/misc.h53
-rw-r--r--core/arch/arm/include/kernel/mutex.h98
-rw-r--r--core/arch/arm/include/kernel/pm_stubs.h37
-rw-r--r--core/arch/arm/include/kernel/pseudo_ta.h84
-rw-r--r--core/arch/arm/include/kernel/spinlock.h86
-rw-r--r--core/arch/arm/include/kernel/tee_l2cc_mutex.h72
-rw-r--r--core/arch/arm/include/kernel/thread.h559
-rw-r--r--core/arch/arm/include/kernel/thread_defs.h35
-rw-r--r--core/arch/arm/include/kernel/time_source.h44
-rw-r--r--core/arch/arm/include/kernel/tz_proc_def.h110
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce.h73
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_def.h141
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_pl310.h46
-rw-r--r--core/arch/arm/include/kernel/unwind.h77
-rw-r--r--core/arch/arm/include/kernel/user_ta.h93
-rw-r--r--core/arch/arm/include/kernel/vfp.h127
-rw-r--r--core/arch/arm/include/kernel/wait_queue.h85
-rw-r--r--core/arch/arm/include/mm/core_memprot.h103
-rw-r--r--core/arch/arm/include/mm/core_mmu.h399
-rw-r--r--core/arch/arm/include/mm/mobj.h130
-rw-r--r--core/arch/arm/include/mm/pgt_cache.h144
-rw-r--r--core/arch/arm/include/mm/tee_pager.h226
-rw-r--r--core/arch/arm/include/sm/optee_smc.h533
-rw-r--r--core/arch/arm/include/sm/psci.h60
-rw-r--r--core/arch/arm/include/sm/sm.h123
-rw-r--r--core/arch/arm/include/sm/std_smc.h22
-rw-r--r--core/arch/arm/include/sm/tee_mon.h38
-rw-r--r--core/arch/arm/include/sm/teesmc_opteed.h142
-rw-r--r--core/arch/arm/include/sm/teesmc_opteed_macros.h35
-rw-r--r--core/arch/arm/include/tee/arch_svc.h43
-rw-r--r--core/arch/arm/include/tee/entry_fast.h52
-rw-r--r--core/arch/arm/include/tee/entry_std.h38
-rw-r--r--core/arch/arm/kernel/abort.c582
-rw-r--r--core/arch/arm/kernel/asm-defines.c107
-rw-r--r--core/arch/arm/kernel/cache_helpers_a64.S207
-rw-r--r--core/arch/arm/kernel/elf32.h245
-rw-r--r--core/arch/arm/kernel/elf64.h248
-rw-r--r--core/arch/arm/kernel/elf_common.h1006
-rw-r--r--core/arch/arm/kernel/elf_load.c646
-rw-r--r--core/arch/arm/kernel/elf_load.h44
-rw-r--r--core/arch/arm/kernel/generic_boot.c710
-rw-r--r--core/arch/arm/kernel/generic_entry_a32.S503
-rw-r--r--core/arch/arm/kernel/generic_entry_a64.S315
-rw-r--r--core/arch/arm/kernel/kern.ld.S340
-rw-r--r--core/arch/arm/kernel/link.mk241
-rw-r--r--core/arch/arm/kernel/misc_a32.S90
-rw-r--r--core/arch/arm/kernel/misc_a64.S41
-rw-r--r--core/arch/arm/kernel/mutex.c279
-rw-r--r--core/arch/arm/kernel/pm_stubs.c41
-rw-r--r--core/arch/arm/kernel/proc_a32.S96
-rw-r--r--core/arch/arm/kernel/proc_a64.S71
-rw-r--r--core/arch/arm/kernel/pseudo_ta.c256
-rw-r--r--core/arch/arm/kernel/spin_lock_a32.S85
-rw-r--r--core/arch/arm/kernel/spin_lock_a64.S89
-rw-r--r--core/arch/arm/kernel/spin_lock_debug.c63
-rw-r--r--core/arch/arm/kernel/ssvce_a32.S334
-rw-r--r--core/arch/arm/kernel/ssvce_a64.S115
-rw-r--r--core/arch/arm/kernel/sub.mk45
-rw-r--r--core/arch/arm/kernel/tee_l2cc_mutex.c160
-rw-r--r--core/arch/arm/kernel/tee_time.c83
-rw-r--r--core/arch/arm/kernel/tee_time_arm_cntpct.c100
-rw-r--r--core/arch/arm/kernel/tee_time_ree.c62
-rw-r--r--core/arch/arm/kernel/thread.c1365
-rw-r--r--core/arch/arm/kernel/thread_a32.S645
-rw-r--r--core/arch/arm/kernel/thread_a64.S816
-rw-r--r--core/arch/arm/kernel/thread_private.h251
-rw-r--r--core/arch/arm/kernel/trace_ext.c50
-rw-r--r--core/arch/arm/kernel/tz_ssvce_pl310_a32.S258
-rw-r--r--core/arch/arm/kernel/unwind_arm32.c417
-rw-r--r--core/arch/arm/kernel/unwind_arm64.c84
-rw-r--r--core/arch/arm/kernel/user_ta.c826
-rw-r--r--core/arch/arm/kernel/vfp.c149
-rw-r--r--core/arch/arm/kernel/vfp_a32.S81
-rw-r--r--core/arch/arm/kernel/vfp_a64.S72
-rw-r--r--core/arch/arm/kernel/vfp_private.h53
-rw-r--r--core/arch/arm/kernel/wait_queue.c225
-rw-r--r--core/arch/arm/mm/core_mmu.c1177
-rw-r--r--core/arch/arm/mm/core_mmu_lpae.c890
-rw-r--r--core/arch/arm/mm/core_mmu_private.h43
-rw-r--r--core/arch/arm/mm/core_mmu_v7.c790
-rw-r--r--core/arch/arm/mm/mobj.c439
-rw-r--r--core/arch/arm/mm/pager_aes_gcm.c348
-rw-r--r--core/arch/arm/mm/pager_private.h45
-rw-r--r--core/arch/arm/mm/pgt_cache.c567
-rw-r--r--core/arch/arm/mm/sub.mk12
-rw-r--r--core/arch/arm/mm/tee_mm.c354
-rw-r--r--core/arch/arm/mm/tee_mmu.c896
-rw-r--r--core/arch/arm/mm/tee_pager.c1473
-rw-r--r--core/arch/arm/plat-d02/conf.mk34
-rw-r--r--core/arch/arm/plat-d02/kern.ld.S1
-rw-r--r--core/arch/arm/plat-d02/link.mk1
-rw-r--r--core/arch/arm/plat-d02/main.c96
-rw-r--r--core/arch/arm/plat-d02/platform_config.h137
-rw-r--r--core/arch/arm/plat-d02/sub.mk2
-rw-r--r--core/arch/arm/plat-hikey/conf.mk42
-rw-r--r--core/arch/arm/plat-hikey/hikey_peripherals.h97
-rw-r--r--core/arch/arm/plat-hikey/kern.ld.S1
-rw-r--r--core/arch/arm/plat-hikey/link.mk1
-rw-r--r--core/arch/arm/plat-hikey/main.c207
-rw-r--r--core/arch/arm/plat-hikey/platform_config.h130
-rw-r--r--core/arch/arm/plat-hikey/spi_test.c292
-rw-r--r--core/arch/arm/plat-hikey/sub.mk3
-rw-r--r--core/arch/arm/plat-imx/a9_plat_init.S109
-rw-r--r--core/arch/arm/plat-imx/conf.mk34
-rw-r--r--core/arch/arm/plat-imx/imx6ul.c57
-rw-r--r--core/arch/arm/plat-imx/imx_pl310.c63
-rw-r--r--core/arch/arm/plat-imx/kern.ld.S1
-rw-r--r--core/arch/arm/plat-imx/link.mk1
-rw-r--r--core/arch/arm/plat-imx/main.c202
-rw-r--r--core/arch/arm/plat-imx/platform_config.h410
-rw-r--r--core/arch/arm/plat-imx/psci.c78
-rw-r--r--core/arch/arm/plat-imx/sub.mk9
-rw-r--r--core/arch/arm/plat-ls/conf.mk20
-rw-r--r--core/arch/arm/plat-ls/kern.ld.S1
-rw-r--r--core/arch/arm/plat-ls/link.mk1
-rw-r--r--core/arch/arm/plat-ls/ls_core_pos.S41
-rw-r--r--core/arch/arm/plat-ls/main.c178
-rw-r--r--core/arch/arm/plat-ls/plat_init.S93
-rw-r--r--core/arch/arm/plat-ls/platform_config.h147
-rw-r--r--core/arch/arm/plat-ls/sub.mk4
-rw-r--r--core/arch/arm/plat-mediatek/conf.mk25
-rw-r--r--core/arch/arm/plat-mediatek/kern.ld.S1
-rw-r--r--core/arch/arm/plat-mediatek/link.mk1
-rw-r--r--core/arch/arm/plat-mediatek/main.c93
-rw-r--r--core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S51
-rw-r--r--core/arch/arm/plat-mediatek/mt8173_core_pos_a64.S47
-rw-r--r--core/arch/arm/plat-mediatek/platform_config.h108
-rw-r--r--core/arch/arm/plat-mediatek/sub.mk6
-rw-r--r--core/arch/arm/plat-rcar/conf.mk27
-rw-r--r--core/arch/arm/plat-rcar/kern.ld.S1
-rw-r--r--core/arch/arm/plat-rcar/link.mk7
-rw-r--r--core/arch/arm/plat-rcar/main.c95
-rw-r--r--core/arch/arm/plat-rcar/platform_config.h81
-rw-r--r--core/arch/arm/plat-rcar/sub.mk2
-rw-r--r--core/arch/arm/plat-rpi3/conf.mk39
-rw-r--r--core/arch/arm/plat-rpi3/kern.ld.S1
-rw-r--r--core/arch/arm/plat-rpi3/link.mk1
-rw-r--r--core/arch/arm/plat-rpi3/main.c94
-rw-r--r--core/arch/arm/plat-rpi3/platform_config.h95
-rw-r--r--core/arch/arm/plat-rpi3/sub.mk2
-rw-r--r--core/arch/arm/plat-sprd/conf.mk28
-rw-r--r--core/arch/arm/plat-sprd/console.c58
-rw-r--r--core/arch/arm/plat-sprd/kern.ld.S1
-rw-r--r--core/arch/arm/plat-sprd/link.mk1
-rw-r--r--core/arch/arm/plat-sprd/main.c79
-rw-r--r--core/arch/arm/plat-sprd/platform_config.h105
-rw-r--r--core/arch/arm/plat-sprd/sub.mk3
-rw-r--r--core/arch/arm/plat-stm/.gitignore1
-rw-r--r--core/arch/arm/plat-stm/asc.S108
-rw-r--r--core/arch/arm/plat-stm/asc.h35
-rw-r--r--core/arch/arm/plat-stm/conf.mk30
-rw-r--r--core/arch/arm/plat-stm/kern.ld.S1
-rw-r--r--core/arch/arm/plat-stm/link.mk1
-rw-r--r--core/arch/arm/plat-stm/main.c201
-rw-r--r--core/arch/arm/plat-stm/platform_config.h340
-rw-r--r--core/arch/arm/plat-stm/rng_support.c147
-rw-r--r--core/arch/arm/plat-stm/sub.mk6
-rw-r--r--core/arch/arm/plat-stm/tz_a9init.S101
-rw-r--r--core/arch/arm/plat-sunxi/conf.mk17
-rw-r--r--core/arch/arm/plat-sunxi/console.c59
-rw-r--r--core/arch/arm/plat-sunxi/entry.S107
-rw-r--r--core/arch/arm/plat-sunxi/head.c60
-rw-r--r--core/arch/arm/plat-sunxi/kern.ld.S198
-rw-r--r--core/arch/arm/plat-sunxi/link.mk54
-rw-r--r--core/arch/arm/plat-sunxi/main.c177
-rw-r--r--core/arch/arm/plat-sunxi/platform.c125
-rw-r--r--core/arch/arm/plat-sunxi/platform.h85
-rw-r--r--core/arch/arm/plat-sunxi/platform_config.h152
-rw-r--r--core/arch/arm/plat-sunxi/rng_support.c43
-rw-r--r--core/arch/arm/plat-sunxi/smp_boot.S104
-rw-r--r--core/arch/arm/plat-sunxi/smp_fixup.S116
-rw-r--r--core/arch/arm/plat-sunxi/sub.mk9
-rw-r--r--core/arch/arm/plat-ti/conf.mk24
-rw-r--r--core/arch/arm/plat-ti/console.c67
-rw-r--r--core/arch/arm/plat-ti/kern.ld.S1
-rw-r--r--core/arch/arm/plat-ti/link.mk1
-rw-r--r--core/arch/arm/plat-ti/main.c151
-rw-r--r--core/arch/arm/plat-ti/platform_config.h109
-rw-r--r--core/arch/arm/plat-ti/sub.mk3
-rw-r--r--core/arch/arm/plat-vexpress/conf.mk71
-rw-r--r--core/arch/arm/plat-vexpress/juno_core_pos_a32.S45
-rw-r--r--core/arch/arm/plat-vexpress/juno_core_pos_a64.S42
-rw-r--r--core/arch/arm/plat-vexpress/kern.ld.S1
-rw-r--r--core/arch/arm/plat-vexpress/link.mk1
-rw-r--r--core/arch/arm/plat-vexpress/main.c201
-rw-r--r--core/arch/arm/plat-vexpress/platform_config.h298
-rw-r--r--core/arch/arm/plat-vexpress/sub.mk7
-rw-r--r--core/arch/arm/plat-vexpress/vendor_props.c91
-rw-r--r--core/arch/arm/plat-zynq7k/conf.mk24
-rw-r--r--core/arch/arm/plat-zynq7k/kern.ld.S1
-rw-r--r--core/arch/arm/plat-zynq7k/link.mk1
-rw-r--r--core/arch/arm/plat-zynq7k/main.c276
-rw-r--r--core/arch/arm/plat-zynq7k/plat_init.S111
-rw-r--r--core/arch/arm/plat-zynq7k/platform_config.h276
-rw-r--r--core/arch/arm/plat-zynq7k/platform_smc.h81
-rw-r--r--core/arch/arm/plat-zynq7k/sub.mk3
-rw-r--r--core/arch/arm/plat-zynqmp/conf.mk29
-rw-r--r--core/arch/arm/plat-zynqmp/kern.ld.S1
-rw-r--r--core/arch/arm/plat-zynqmp/link.mk1
-rw-r--r--core/arch/arm/plat-zynqmp/main.c122
-rw-r--r--core/arch/arm/plat-zynqmp/platform_config.h126
-rw-r--r--core/arch/arm/plat-zynqmp/sub.mk2
-rw-r--r--core/arch/arm/pta/core_self_tests.c252
-rw-r--r--core/arch/arm/pta/core_self_tests.h37
-rw-r--r--core/arch/arm/pta/gprof.c221
-rw-r--r--core/arch/arm/pta/interrupt_tests.c239
-rw-r--r--core/arch/arm/pta/pta_self_tests.c251
-rw-r--r--core/arch/arm/pta/se_api_self_tests.c498
-rw-r--r--core/arch/arm/pta/stats.c161
-rw-r--r--core/arch/arm/pta/sub.mk14
-rw-r--r--core/arch/arm/pta/tee_fs_key_manager_tests.c375
-rw-r--r--core/arch/arm/sm/psci.c166
-rw-r--r--core/arch/arm/sm/sm.c58
-rw-r--r--core/arch/arm/sm/sm_a32.S291
-rw-r--r--core/arch/arm/sm/sm_private.h38
-rw-r--r--core/arch/arm/sm/std_smc.c77
-rw-r--r--core/arch/arm/sm/sub.mk3
-rw-r--r--core/arch/arm/tee/arch_svc.c269
-rw-r--r--core/arch/arm/tee/arch_svc_a32.S122
-rw-r--r--core/arch/arm/tee/arch_svc_a64.S205
-rw-r--r--core/arch/arm/tee/arch_svc_private.h38
-rw-r--r--core/arch/arm/tee/entry_fast.c231
-rw-r--r--core/arch/arm/tee/entry_std.c386
-rw-r--r--core/arch/arm/tee/init.c84
-rw-r--r--core/arch/arm/tee/pta_socket.c320
-rw-r--r--core/arch/arm/tee/sub.mk12
-rw-r--r--core/arch/arm/tee/svc_cache.c114
-rw-r--r--core/arch/arm/tee/svc_cache.h39
-rw-r--r--core/arch/arm/tee/svc_dummy.c35
238 files changed, 37643 insertions, 0 deletions
diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
new file mode 100644
index 0000000..4d8c6f0
--- /dev/null
+++ b/core/arch/arm/arm.mk
@@ -0,0 +1,157 @@
+CFG_LTC_OPTEE_THREAD ?= y
+# Size of emulated TrustZone protected SRAM, 360 kB.
+# Only applicable when paging is enabled.
+CFG_CORE_TZSRAM_EMUL_SIZE ?= 368640
+CFG_LPAE_ADDR_SPACE_SIZE ?= (1ull << 32)
+
+ifeq ($(CFG_ARM64_core),y)
+CFG_KERN_LINKER_FORMAT ?= elf64-littleaarch64
+CFG_KERN_LINKER_ARCH ?= aarch64
+endif
+ifeq ($(CFG_ARM32_core),y)
+CFG_KERN_LINKER_FORMAT ?= elf32-littlearm
+CFG_KERN_LINKER_ARCH ?= arm
+endif
+
+ifeq ($(CFG_TA_FLOAT_SUPPORT),y)
+# Use hard-float for floating point support in user TAs instead of
+# soft-float
+CFG_WITH_VFP ?= y
+ifeq ($(CFG_ARM64_core),y)
+# AArch64 has no fallback to soft-float
+$(call force,CFG_WITH_VFP,y)
+endif
+ifeq ($(CFG_WITH_VFP),y)
+platform-hard-float-enabled := y
+endif
+endif
+
+ifeq ($(CFG_WITH_PAGER),y)
+ifeq ($(CFG_CORE_SANITIZE_KADDRESS),y)
+$(error Error: CFG_CORE_SANITIZE_KADDRESS not compatible with CFG_WITH_PAGER)
+endif
+endif
+
+ifeq ($(CFG_ARM32_core),y)
+# Configuration directives related to ARMv7 OP-TEE boot arguments.
+# CFG_PAGEABLE_ADDR: if defined, forces pageable data physical address.
+# CFG_NS_ENTRY_ADDR: if defined, forces NS World physical entry address.
+# CFG_DT_ADDR: if defined, forces Device Tree data physical address.
+endif
+
+core-platform-cppflags += -I$(arch-dir)/include
+core-platform-subdirs += \
+ $(addprefix $(arch-dir)/, kernel mm tee pta) $(platform-dir)
+
+ifneq ($(CFG_WITH_ARM_TRUSTED_FW),y)
+core-platform-subdirs += $(arch-dir)/sm
+endif
+
+arm64-platform-cppflags += -DARM64=1 -D__LP64__=1
+arm32-platform-cppflags += -DARM32=1 -D__ILP32__=1
+
+platform-cflags-generic ?= -g -ffunction-sections -fdata-sections -pipe
+platform-aflags-generic ?= -g -pipe
+
+arm32-platform-cflags-no-hard-float ?= -mno-apcs-float -mfloat-abi=soft
+arm32-platform-cflags-hard-float ?= -mfloat-abi=hard -funsafe-math-optimizations
+arm32-platform-cflags-generic ?= -mthumb -mthumb-interwork \
+ -fno-short-enums -fno-common -mno-unaligned-access
+arm32-platform-aflags-no-hard-float ?=
+
+arm64-platform-cflags-no-hard-float ?= -mgeneral-regs-only
+arm64-platform-cflags-hard-float ?=
+arm64-platform-cflags-generic ?= -mstrict-align
+
+ifeq ($(DEBUG),1)
+platform-cflags-optimization ?= -O0
+else
+platform-cflags-optimization ?= -Os
+endif
+
+platform-cflags-debug-info ?= -g3
+platform-aflags-debug-info ?=
+
+core-platform-cflags += $(platform-cflags-optimization)
+core-platform-cflags += $(platform-cflags-generic)
+core-platform-cflags += $(platform-cflags-debug-info)
+
+core-platform-aflags += $(platform-aflags-generic)
+core-platform-aflags += $(platform-aflags-debug-info)
+
+ifeq ($(CFG_ARM64_core),y)
+arch-bits-core := 64
+core-platform-cppflags += $(arm64-platform-cppflags)
+core-platform-cflags += $(arm64-platform-cflags)
+core-platform-cflags += $(arm64-platform-cflags-generic)
+core-platform-cflags += $(arm64-platform-cflags-no-hard-float)
+core-platform-aflags += $(arm64-platform-aflags)
+else
+arch-bits-core := 32
+core-platform-cppflags += $(arm32-platform-cppflags)
+core-platform-cflags += $(arm32-platform-cflags)
+core-platform-cflags += $(arm32-platform-cflags-no-hard-float)
+ifeq ($(CFG_CORE_UNWIND),y)
+core-platform-cflags += -funwind-tables
+endif
+core-platform-cflags += $(arm32-platform-cflags-generic)
+core-platform-aflags += $(core_arm32-platform-aflags)
+core-platform-aflags += $(arm32-platform-aflags)
+endif
+
+ifneq ($(filter ta_arm32,$(ta-targets)),)
+# Variables for ta-target/sm "ta_arm32"
+CFG_ARM32_ta_arm32 := y
+arch-bits-ta_arm32 := 32
+ta_arm32-platform-cppflags += $(arm32-platform-cppflags)
+ta_arm32-platform-cflags += $(arm32-platform-cflags)
+ta_arm32-platform-cflags += $(platform-cflags-optimization)
+ta_arm32-platform-cflags += $(platform-cflags-debug-info)
+ta_arm32-platform-cflags += -fpie
+ta_arm32-platform-cflags += $(arm32-platform-cflags-generic)
+ifeq ($(platform-hard-float-enabled),y)
+ta_arm32-platform-cflags += $(arm32-platform-cflags-hard-float)
+else
+ta_arm32-platform-cflags += $(arm32-platform-cflags-no-hard-float)
+endif
+ta_arm32-platform-aflags += $(platform-aflags-debug-info)
+ta_arm32-platform-aflags += $(arm32-platform-aflags)
+
+ta-mk-file-export-vars-ta_arm32 += CFG_ARM32_ta_arm32
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-cppflags
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-cflags
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-aflags
+
+ta-mk-file-export-add-ta_arm32 += CROSS_COMPILE32 ?= $$(CROSS_COMPILE)_nl_
+ta-mk-file-export-add-ta_arm32 += CROSS_COMPILE_ta_arm32 ?= $$(CROSS_COMPILE32)_nl_
+endif
+
+ifneq ($(filter ta_arm64,$(ta-targets)),)
+# Variables for ta-target/sm "ta_arm64"
+CFG_ARM64_ta_arm64 := y
+arch-bits-ta_arm64 := 64
+ta_arm64-platform-cppflags += $(arm64-platform-cppflags)
+ta_arm64-platform-cflags += $(arm64-platform-cflags)
+ta_arm64-platform-cflags += $(platform-cflags-optimization)
+ta_arm64-platform-cflags += $(platform-cflags-debug-info)
+ta_arm64-platform-cflags += -fpie
+ta_arm64-platform-cflags += $(arm64-platform-cflags-generic)
+ifeq ($(platform-hard-float-enabled),y)
+ta_arm64-platform-cflags += $(arm64-platform-cflags-hard-float)
+else
+ta_arm64-platform-cflags += $(arm64-platform-cflags-no-hard-float)
+endif
+ta_arm64-platform-aflags += $(platform-aflags-debug-info)
+ta_arm64-platform-aflags += $(arm64-platform-aflags)
+
+ta-mk-file-export-vars-ta_arm64 += CFG_ARM64_ta_arm64
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-cppflags
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-cflags
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-aflags
+
+ta-mk-file-export-add-ta_arm64 += CROSS_COMPILE64 ?= $$(CROSS_COMPILE)_nl_
+ta-mk-file-export-add-ta_arm64 += CROSS_COMPILE_ta_arm64 ?= $$(CROSS_COMPILE64)_nl_
+endif
+
+# Set cross compiler prefix for each submodule
+$(foreach sm, core $(ta-targets), $(eval CROSS_COMPILE_$(sm) ?= $(CROSS_COMPILE$(arch-bits-$(sm)))))
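The foreach on the last line resolves CROSS_COMPILE_<sm> for the core and each TA target from that submodule's bit width. A minimal sketch of a build invocation that exercises it; the platform flavour and toolchain prefixes below are assumptions for illustration, not mandated by arm.mk:

    # Hypothetical invocation with a 64-bit core and both TA targets
    make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y \
         CROSS_COMPILE32=arm-linux-gnueabihf- \
         CROSS_COMPILE64=aarch64-linux-gnu-
    # With CFG_ARM64_core=y, arch-bits-core is 64, so the foreach effectively
    # sets CROSS_COMPILE_core ?= $(CROSS_COMPILE64), and likewise
    # CROSS_COMPILE_ta_arm32 ?= $(CROSS_COMPILE32),
    # CROSS_COMPILE_ta_arm64 ?= $(CROSS_COMPILE64).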
diff --git a/core/arch/arm/include/arm.h b/core/arch/arm/include/arm.h
new file mode 100644
index 0000000..a644dd4
--- /dev/null
+++ b/core/arch/arm/include/arm.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ARM_H
+#define ARM_H
+
+
+#define MPIDR_CPU_MASK 0xff
+#define MPIDR_CLUSTER_SHIFT 8
+#define MPIDR_CLUSTER_MASK (0xff << MPIDR_CLUSTER_SHIFT)
+
+#define ARM32_CPSR_MODE_MASK 0x1f
+#define ARM32_CPSR_MODE_USR 0x10
+#define ARM32_CPSR_MODE_FIQ 0x11
+#define ARM32_CPSR_MODE_IRQ 0x12
+#define ARM32_CPSR_MODE_SVC 0x13
+#define ARM32_CPSR_MODE_MON 0x16
+#define ARM32_CPSR_MODE_ABT 0x17
+#define ARM32_CPSR_MODE_UND 0x1b
+#define ARM32_CPSR_MODE_SYS 0x1f
+
+#define ARM32_CPSR_T (1 << 5)
+#define ARM32_CPSR_F_SHIFT 6
+#define ARM32_CPSR_F (1 << 6)
+#define ARM32_CPSR_I (1 << 7)
+#define ARM32_CPSR_A (1 << 8)
+#define ARM32_CPSR_E (1 << 9)
+#define ARM32_CPSR_FIA (ARM32_CPSR_F | ARM32_CPSR_I | ARM32_CPSR_A)
+#define ARM32_CPSR_IT_MASK (ARM32_CPSR_IT_MASK1 | ARM32_CPSR_IT_MASK2)
+#define ARM32_CPSR_IT_MASK1 0x06000000
+#define ARM32_CPSR_IT_MASK2 0x0000fc00
+
+
+#ifdef ARM32
+#include <arm32.h>
+#endif
+
+#ifdef ARM64
+#include <arm64.h>
+#endif
+
+#endif /*ARM_H*/
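The MPIDR and CPSR constants above are used throughout the core. A minimal sketch of decoding the affinity fields with these masks, assuming at most four CPUs per cluster (an illustration only; arm.h does not mandate that packing):

    /* Illustrative only, not part of this commit */
    #include <stdint.h>
    #include <arm.h>

    static inline uint32_t example_core_pos(uint32_t mpidr)
    {
        uint32_t cpu = mpidr & MPIDR_CPU_MASK;
        uint32_t cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_CLUSTER_SHIFT;

        return cluster * 4 + cpu;   /* assumes at most 4 CPUs per cluster */
    }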
diff --git a/core/arch/arm/include/arm32.h b/core/arch/arm/include/arm32.h
new file mode 100644
index 0000000..822ff95
--- /dev/null
+++ b/core/arch/arm/include/arm32.h
@@ -0,0 +1,606 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM32_H
+#define ARM32_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+#include <util.h>
+
+#define CPSR_MODE_MASK ARM32_CPSR_MODE_MASK
+#define CPSR_MODE_USR ARM32_CPSR_MODE_USR
+#define CPSR_MODE_FIQ ARM32_CPSR_MODE_FIQ
+#define CPSR_MODE_IRQ ARM32_CPSR_MODE_IRQ
+#define CPSR_MODE_SVC ARM32_CPSR_MODE_SVC
+#define CPSR_MODE_MON ARM32_CPSR_MODE_MON
+#define CPSR_MODE_ABT ARM32_CPSR_MODE_ABT
+#define CPSR_MODE_UND ARM32_CPSR_MODE_UND
+#define CPSR_MODE_SYS ARM32_CPSR_MODE_SYS
+
+#define CPSR_T ARM32_CPSR_T
+#define CPSR_F_SHIFT ARM32_CPSR_F_SHIFT
+#define CPSR_F ARM32_CPSR_F
+#define CPSR_I ARM32_CPSR_I
+#define CPSR_A ARM32_CPSR_A
+#define CPSR_FIA ARM32_CPSR_FIA
+#define CPSR_IT_MASK ARM32_CPSR_IT_MASK
+#define CPSR_IT_MASK1 ARM32_CPSR_IT_MASK1
+#define CPSR_IT_MASK2 ARM32_CPSR_IT_MASK2
+
+#define SCR_NS BIT32(0)
+#define SCR_IRQ BIT32(1)
+#define SCR_FIQ BIT32(2)
+#define SCR_EA BIT32(3)
+#define SCR_FW BIT32(4)
+#define SCR_AW BIT32(5)
+#define SCR_NET BIT32(6)
+#define SCR_SCD BIT32(7)
+#define SCR_HCE BIT32(8)
+#define SCR_SIF BIT32(9)
+
+#define SCTLR_M BIT32(0)
+#define SCTLR_A BIT32(1)
+#define SCTLR_C BIT32(2)
+#define SCTLR_CP15BEN BIT32(5)
+#define SCTLR_SW BIT32(10)
+#define SCTLR_Z BIT32(11)
+#define SCTLR_I BIT32(12)
+#define SCTLR_V BIT32(13)
+#define SCTLR_RR BIT32(14)
+#define SCTLR_HA BIT32(17)
+#define SCTLR_WXN BIT32(19)
+#define SCTLR_UWXN BIT32(20)
+#define SCTLR_FI BIT32(21)
+#define SCTLR_VE BIT32(24)
+#define SCTLR_EE BIT32(25)
+#define SCTLR_NMFI BIT32(26)
+#define SCTLR_TRE BIT32(28)
+#define SCTLR_AFE BIT32(29)
+#define SCTLR_TE BIT32(30)
+
+#define ACTLR_SMP BIT32(6)
+#define ACTLR_DODMBS BIT32(10)
+#define ACTLR_L2RADIS BIT32(11)
+#define ACTLR_L1RADIS BIT32(12)
+#define ACTLR_L1PCTL BIT32(13)
+#define ACTLR_DDVM BIT32(15)
+#define ACTLR_DDI BIT32(28)
+
+#define NSACR_CP10 BIT32(10)
+#define NSACR_CP11 BIT32(11)
+#define NSACR_NSD32DIS BIT32(14)
+#define NSACR_NSASEDIS BIT32(15)
+#define NSACR_NS_L2ERR BIT32(17)
+#define NSACR_NS_SMP BIT32(18)
+
+#define CPACR_ASEDIS BIT32(31)
+#define CPACR_D32DIS BIT32(30)
+#define CPACR_CP(co_proc, access) SHIFT_U32((access), ((co_proc) * 2))
+#define CPACR_CP_ACCESS_DENIED 0x0
+#define CPACR_CP_ACCESS_PL1_ONLY 0x1
+#define CPACR_CP_ACCESS_FULL 0x3
+
+
+#define DACR_DOMAIN(num, perm) SHIFT_U32((perm), ((num) * 2))
+#define DACR_DOMAIN_PERM_NO_ACCESS 0x0
+#define DACR_DOMAIN_PERM_CLIENT 0x1
+#define DACR_DOMAIN_PERM_MANAGER 0x3
+
+#define PAR_F BIT32(0)
+#define PAR_SS BIT32(1)
+#define PAR_LPAE BIT32(11)
+#define PAR_PA_SHIFT 12
+#define PAR32_PA_MASK (BIT32(20) - 1)
+#define PAR64_PA_MASK (BIT64(28) - 1)
+
+/*
+ * TTBCR has different register layout if LPAE is enabled or not.
+ * TTBCR.EAE == 0 => LPAE is not enabled
+ * TTBCR.EAE == 1 => LPAE is enabled
+ */
+#define TTBCR_EAE BIT32(31)
+
+/* When TTBCR.EAE == 0 */
+#define TTBCR_PD0 BIT32(4)
+#define TTBCR_PD1 BIT32(5)
+
+/* When TTBCR.EAE == 1 */
+#define TTBCR_T0SZ_SHIFT 0
+#define TTBCR_EPD0 BIT32(7)
+#define TTBCR_IRGN0_SHIFT 8
+#define TTBCR_ORGN0_SHIFT 10
+#define TTBCR_SH0_SHIFT 12
+#define TTBCR_T1SZ_SHIFT 16
+#define TTBCR_A1 BIT32(22)
+#define TTBCR_EPD1 BIT32(23)
+#define TTBCR_IRGN1_SHIFT 24
+#define TTBCR_ORGN1_SHIFT 26
+#define TTBCR_SH1_SHIFT 28
+
+/* Normal memory, Inner/Outer Non-cacheable */
+#define TTBCR_XRGNX_NC 0x0
+/* Normal memory, Inner/Outer Write-Back Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WB 0x1
+/* Normal memory, Inner/Outer Write-Through Cacheable */
+#define TTBCR_XRGNX_WT 0x2
+/* Normal memory, Inner/Outer Write-Back no Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WBWA 0x3
+
+/* Non-shareable */
+#define TTBCR_SHX_NSH 0x0
+/* Outer Shareable */
+#define TTBCR_SHX_OSH 0x2
+/* Inner Shareable */
+#define TTBCR_SHX_ISH 0x3
+
+#define TTBR_ASID_MASK 0xff
+#define TTBR_ASID_SHIFT 48
+
+
+#define FSR_LPAE BIT32(9)
+#define FSR_WNR BIT32(11)
+
+/* Valid if FSR.LPAE is 1 */
+#define FSR_STATUS_MASK (BIT32(6) - 1)
+
+/* Valid if FSR.LPAE is 0 */
+#define FSR_FS_MASK (BIT32(10) | (BIT32(4) - 1))
+
+#ifndef ASM
+static inline uint32_t read_mpidr(void)
+{
+ uint32_t mpidr;
+
+ asm volatile ("mrc p15, 0, %[mpidr], c0, c0, 5"
+ : [mpidr] "=r" (mpidr)
+ );
+
+ return mpidr;
+}
+
+static inline uint32_t read_sctlr(void)
+{
+ uint32_t sctlr;
+
+ asm volatile ("mrc p15, 0, %[sctlr], c1, c0, 0"
+ : [sctlr] "=r" (sctlr)
+ );
+
+ return sctlr;
+}
+
+static inline void write_sctlr(uint32_t sctlr)
+{
+ asm volatile ("mcr p15, 0, %[sctlr], c1, c0, 0"
+ : : [sctlr] "r" (sctlr)
+ );
+}
+
+static inline uint32_t read_cpacr(void)
+{
+ uint32_t cpacr;
+
+ asm volatile ("mrc p15, 0, %[cpacr], c1, c0, 2"
+ : [cpacr] "=r" (cpacr)
+ );
+
+ return cpacr;
+}
+
+static inline void write_cpacr(uint32_t cpacr)
+{
+ asm volatile ("mcr p15, 0, %[cpacr], c1, c0, 2"
+ : : [cpacr] "r" (cpacr)
+ );
+}
+
+static inline void write_ttbr0(uint32_t ttbr0)
+{
+ asm volatile ("mcr p15, 0, %[ttbr0], c2, c0, 0"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
+static inline void write_ttbr0_64bit(uint64_t ttbr0)
+{
+ asm volatile ("mcrr p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
+static inline uint32_t read_ttbr0(void)
+{
+ uint32_t ttbr0;
+
+ asm volatile ("mrc p15, 0, %[ttbr0], c2, c0, 0"
+ : [ttbr0] "=r" (ttbr0)
+ );
+
+ return ttbr0;
+}
+
+static inline uint64_t read_ttbr0_64bit(void)
+{
+ uint64_t ttbr0;
+
+ asm volatile ("mrrc p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : [ttbr0] "=r" (ttbr0)
+ );
+
+ return ttbr0;
+}
+
+static inline void write_ttbr1(uint32_t ttbr1)
+{
+ asm volatile ("mcr p15, 0, %[ttbr1], c2, c0, 1"
+ : : [ttbr1] "r" (ttbr1)
+ );
+}
+
+static inline void write_ttbr1_64bit(uint64_t ttbr1)
+{
+ asm volatile ("mcrr p15, 1, %Q[ttbr1], %R[ttbr1], c2"
+ : : [ttbr1] "r" (ttbr1)
+ );
+}
+
+static inline uint32_t read_ttbr1(void)
+{
+ uint32_t ttbr1;
+
+ asm volatile ("mrc p15, 0, %[ttbr1], c2, c0, 1"
+ : [ttbr1] "=r" (ttbr1)
+ );
+
+ return ttbr1;
+}
+
+
+static inline void write_ttbcr(uint32_t ttbcr)
+{
+ asm volatile ("mcr p15, 0, %[ttbcr], c2, c0, 2"
+ : : [ttbcr] "r" (ttbcr)
+ );
+}
+
+static inline uint32_t read_ttbcr(void)
+{
+ uint32_t ttbcr;
+
+ asm volatile ("mrc p15, 0, %[ttbcr], c2, c0, 2"
+ : [ttbcr] "=r" (ttbcr)
+ );
+
+ return ttbcr;
+}
+
+static inline void write_dacr(uint32_t dacr)
+{
+ asm volatile ("mcr p15, 0, %[dacr], c3, c0, 0"
+ : : [dacr] "r" (dacr)
+ );
+}
+
+static inline uint32_t read_ifar(void)
+{
+ uint32_t ifar;
+
+ asm volatile ("mrc p15, 0, %[ifar], c6, c0, 2"
+ : [ifar] "=r" (ifar)
+ );
+
+ return ifar;
+}
+
+static inline uint32_t read_dfar(void)
+{
+ uint32_t dfar;
+
+ asm volatile ("mrc p15, 0, %[dfar], c6, c0, 0"
+ : [dfar] "=r" (dfar)
+ );
+
+ return dfar;
+}
+
+static inline uint32_t read_dfsr(void)
+{
+ uint32_t dfsr;
+
+ asm volatile ("mrc p15, 0, %[dfsr], c5, c0, 0"
+ : [dfsr] "=r" (dfsr)
+ );
+
+ return dfsr;
+}
+
+static inline uint32_t read_ifsr(void)
+{
+ uint32_t ifsr;
+
+ asm volatile ("mrc p15, 0, %[ifsr], c5, c0, 1"
+ : [ifsr] "=r" (ifsr)
+ );
+
+ return ifsr;
+}
+
+static inline void write_scr(uint32_t scr)
+{
+ asm volatile ("mcr p15, 0, %[scr], c1, c1, 0"
+ : : [scr] "r" (scr)
+ );
+}
+
+static inline void isb(void)
+{
+ asm volatile ("isb");
+}
+
+static inline void dsb(void)
+{
+ asm volatile ("dsb");
+}
+
+static inline void dmb(void)
+{
+ asm volatile ("dmb");
+}
+
+static inline void sev(void)
+{
+ asm volatile ("sev");
+}
+
+static inline void wfe(void)
+{
+ asm volatile ("wfe");
+}
+
+/* Address translate privileged write translation (current state secure PL1) */
+static inline void write_ats1cpw(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 1" : : "r" (va));
+}
+
+static inline void write_ats1cpr(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+}
+
+static inline void write_ats1cpuw(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 3" : : "r" (va));
+}
+
+static inline void write_ats1cpur(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 2" : : "r" (va));
+}
+
+static inline uint32_t read_par32(void)
+{
+ uint32_t val;
+
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (val));
+ return val;
+}
+
+#ifdef CFG_WITH_LPAE
+static inline uint64_t read_par64(void)
+{
+ uint64_t val;
+
+ asm volatile ("mrrc p15, 0, %Q0, %R0, c7" : "=r" (val));
+ return val;
+}
+#endif
+
+static inline void write_mair0(uint32_t mair0)
+{
+ asm volatile ("mcr p15, 0, %[mair0], c10, c2, 0"
+ : : [mair0] "r" (mair0)
+ );
+}
+
+static inline void write_prrr(uint32_t prrr)
+{
+ /*
+ * Same physical register as MAIR0.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR0 replaces the PRRR
+ */
+ write_mair0(prrr);
+}
+
+static inline void write_mair1(uint32_t mair1)
+{
+ asm volatile ("mcr p15, 0, %[mair1], c10, c2, 1"
+ : : [mair1] "r" (mair1)
+ );
+}
+
+static inline void write_nmrr(uint32_t nmrr)
+{
+ /*
+ * Same physical register as MAIR1.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR1 replaces the NMRR
+ */
+ write_mair1(nmrr);
+}
+
+static inline uint32_t read_contextidr(void)
+{
+ uint32_t contextidr;
+
+ asm volatile ("mrc p15, 0, %[contextidr], c13, c0, 1"
+ : [contextidr] "=r" (contextidr)
+ );
+
+ return contextidr;
+}
+
+static inline void write_contextidr(uint32_t contextidr)
+{
+ asm volatile ("mcr p15, 0, %[contextidr], c13, c0, 1"
+ : : [contextidr] "r" (contextidr)
+ );
+}
+
+static inline uint32_t read_cpsr(void)
+{
+ uint32_t cpsr;
+
+ asm volatile ("mrs %[cpsr], cpsr"
+ : [cpsr] "=r" (cpsr)
+ );
+ return cpsr;
+}
+
+static inline void write_cpsr(uint32_t cpsr)
+{
+ asm volatile ("msr cpsr_fsxc, %[cpsr]"
+ : : [cpsr] "r" (cpsr)
+ );
+}
+
+static inline uint32_t read_spsr(void)
+{
+ uint32_t spsr;
+
+ asm volatile ("mrs %[spsr], spsr"
+ : [spsr] "=r" (spsr)
+ );
+ return spsr;
+}
+
+static inline uint32_t read_actlr(void)
+{
+ uint32_t actlr;
+
+ asm volatile ("mrc p15, 0, %[actlr], c1, c0, 1"
+ : [actlr] "=r" (actlr)
+ );
+
+ return actlr;
+}
+
+static inline void write_actlr(uint32_t actlr)
+{
+ asm volatile ("mcr p15, 0, %[actlr], c1, c0, 1"
+ : : [actlr] "r" (actlr)
+ );
+}
+
+static inline uint32_t read_nsacr(void)
+{
+ uint32_t nsacr;
+
+ asm volatile ("mrc p15, 0, %[nsacr], c1, c1, 2"
+ : [nsacr] "=r" (nsacr)
+ );
+
+ return nsacr;
+}
+
+static inline void write_nsacr(uint32_t nsacr)
+{
+ asm volatile ("mcr p15, 0, %[nsacr], c1, c1, 2"
+ : : [nsacr] "r" (nsacr)
+ );
+}
+
+static inline uint64_t read_cntpct(void)
+{
+ uint64_t val;
+
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (val));
+ return val;
+}
+
+static inline uint32_t read_cntfrq(void)
+{
+ uint32_t frq;
+
+ asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (frq));
+ return frq;
+}
+
+static inline void write_cntfrq(uint32_t frq)
+{
+ asm volatile("mcr p15, 0, %0, c14, c0, 0" : : "r" (frq));
+}
+
+static __always_inline uint32_t read_pc(void)
+{
+ uint32_t val;
+
+ asm volatile ("adr %0, ." : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_sp(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, sp" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_lr(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, lr" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_fp(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, fp" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_r7(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, r7" : "=r" (val));
+ return val;
+}
+#endif /*ASM*/
+
+#endif /*ARM32_H*/
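The ATS1CP* accessors and the PAR bit definitions above combine into virtual-to-physical translation. A hedged sketch for the short-descriptor case (PAR.LPAE == 0), roughly what the MMU code elsewhere in this commit does:

    /* Illustrative only, not part of this commit */
    #include <stdbool.h>
    #include <arm32.h>

    static bool va2pa_pl1_read(uint32_t va, uint32_t *pa)
    {
        uint32_t par;

        write_ats1cpr(va);          /* stage 1 PL1 read translation */
        isb();
        par = read_par32();
        if (par & PAR_F)
            return false;           /* translation faulted */
        *pa = (((par >> PAR_PA_SHIFT) & PAR32_PA_MASK) << PAR_PA_SHIFT) |
              (va & (BIT32(PAR_PA_SHIFT) - 1));
        return true;
    }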
diff --git a/core/arch/arm/include/arm32_macros.S b/core/arch/arm/include/arm32_macros.S
new file mode 100644
index 0000000..0a4ca28
--- /dev/null
+++ b/core/arch/arm/include/arm32_macros.S
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /* Please keep them sorted based on the CRn register */
+ .macro read_mpidr reg
+ mrc p15, 0, \reg, c0, c0, 5
+ .endm
+
+ .macro read_sctlr reg
+ mrc p15, 0, \reg, c1, c0, 0
+ .endm
+
+ .macro write_sctlr reg
+ mcr p15, 0, \reg, c1, c0, 0
+ .endm
+
+ .macro write_actlr reg
+ mcr p15, 0, \reg, c1, c0, 1
+ .endm
+
+ .macro read_actlr reg
+ mrc p15, 0, \reg, c1, c0, 1
+ .endm
+
+ .macro write_cpacr reg
+ mcr p15, 0, \reg, c1, c0, 2
+ .endm
+
+ .macro read_cpacr reg
+ mrc p15, 0, \reg, c1, c0, 2
+ .endm
+
+ .macro read_scr reg
+ mrc p15, 0, \reg, c1, c1, 0
+ .endm
+
+ .macro write_scr reg
+ mcr p15, 0, \reg, c1, c1, 0
+ .endm
+
+ .macro write_nsacr reg
+ mcr p15, 0, \reg, c1, c1, 2
+ .endm
+
+ .macro read_nsacr reg
+ mrc p15, 0, \reg, c1, c1, 2
+ .endm
+
+ .macro write_ttbr0 reg
+ mcr p15, 0, \reg, c2, c0, 0
+ .endm
+
+ .macro read_ttbr0 reg
+ mrc p15, 0, \reg, c2, c0, 0
+ .endm
+
+ .macro write_ttbr1 reg
+ mcr p15, 0, \reg, c2, c0, 1
+ .endm
+
+ .macro read_ttbr1 reg
+ mrc p15, 0, \reg, c2, c0, 1
+ .endm
+
+ .macro write_ttbcr reg
+ mcr p15, 0, \reg, c2, c0, 2
+ .endm
+
+ .macro read_ttbcr reg
+ mrc p15, 0, \reg, c2, c0, 2
+ .endm
+
+
+ .macro write_dacr reg
+ mcr p15, 0, \reg, c3, c0, 0
+ .endm
+
+ .macro read_dacr reg
+ mrc p15, 0, \reg, c3, c0, 0
+ .endm
+
+ .macro read_dfsr reg
+ mrc p15, 0, \reg, c5, c0, 0
+ .endm
+
+ .macro write_iciallu
+ /* Invalidate all instruction caches to PoU (register ignored) */
+ mcr p15, 0, r0, c7, c5, 0
+ .endm
+
+ .macro write_icialluis
+ /*
+ * Invalidate all instruction caches to PoU, Inner Shareable
+ * (register ignored)
+ */
+ mcr p15, 0, r0, c7, c1, 0
+ .endm
+
+ .macro write_bpiall
+ /* Invalidate entire branch predictor array (register ignored) */
+ mcr p15, 0, r0, c7, c5, 6
+ .endm
+
+ .macro write_bpiallis
+ /*
+ * Invalidate entire branch predictor array, Inner Shareable
+ * (register ignored)
+ */
+ mcr p15, 0, r0, c7, c1, 6
+ .endm
+
+ .macro write_tlbiall
+ /* Invalidate entire unified TLB (register ignored) */
+ mcr p15, 0, r0, c8, c7, 0
+ .endm
+
+ .macro write_tlbiallis
+ /* Invalidate entire unified TLB Inner Shareable (register ignored) */
+ mcr p15, 0, r0, c8, c3, 0
+ .endm
+
+ .macro write_tlbiasidis reg
+ /* Invalidate unified TLB by ASID Inner Shareable */
+ mcr p15, 0, \reg, c8, c3, 2
+ .endm
+
+ .macro write_prrr reg
+ mcr p15, 0, \reg, c10, c2, 0
+ .endm
+
+ .macro read_prrr reg
+ mrc p15, 0, \reg, c10, c2, 0
+ .endm
+
+ .macro write_nmrr reg
+ mcr p15, 0, \reg, c10, c2, 1
+ .endm
+
+ .macro read_nmrr reg
+ mrc p15, 0, \reg, c10, c2, 1
+ .endm
+
+ .macro read_vbar reg
+ mrc p15, 0, \reg, c12, c0, 0
+ .endm
+
+ .macro write_vbar reg
+ mcr p15, 0, \reg, c12, c0, 0
+ .endm
+
+ .macro write_mvbar reg
+ mcr p15, 0, \reg, c12, c0, 1
+ .endm
+
+ .macro read_mvbar reg
+ mrc p15, 0, \reg, c12, c0, 1
+ .endm
+
+ .macro write_fcseidr reg
+ mcr p15, 0, \reg, c13, c0, 0
+ .endm
+
+ .macro read_fcseidr reg
+ mrc p15, 0, \reg, c13, c0, 0
+ .endm
+
+ .macro write_contextidr reg
+ mcr p15, 0, \reg, c13, c0, 1
+ .endm
+
+ .macro read_contextidr reg
+ mrc p15, 0, \reg, c13, c0, 1
+ .endm
+
+ .macro write_tpidruro reg
+ mcr p15, 0, \reg, c13, c0, 3
+ .endm
+
+ .macro read_tpidruro reg
+ mrc p15, 0, \reg, c13, c0, 3
+ .endm
+
+ .macro mov_imm reg, val
+ .if ((\val) & 0xffff0000) == 0
+ mov \reg, #(\val)
+ .else
+ movw \reg, #((\val) & 0xffff)
+ movt \reg, #((\val) >> 16)
+ .endif
+ .endm
+
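As a usage sketch, these macros compose directly in boot assembly. The vector base value below is a placeholder, not an address taken from this commit:

    @ Illustrative only, not part of this commit
        mov_imm r0, 0x4000e000      @ placeholder MVBAR value
        write_mvbar r0              @ install monitor vector base address
        isb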
diff --git a/core/arch/arm/include/arm32_macros_cortex_a9.S b/core/arch/arm/include/arm32_macros_cortex_a9.S
new file mode 100644
index 0000000..57a2a10
--- /dev/null
+++ b/core/arch/arm/include/arm32_macros_cortex_a9.S
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ .macro write_pcr reg
+ mcr p15, 0, \reg, c15, c0, 0
+ .endm
+
+ .macro read_pcr reg
+ mrc p15, 0, \reg, c15, c0, 0
+ .endm
+
+ .macro write_diag reg
+ mcr p15, 0, \reg, c15, c0, 1
+ .endm
+
+ .macro read_diag reg
+ mrc p15, 0, \reg, c15, c0, 1
+ .endm
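Usage follows the same pattern as the generic macros. A hedged example of setting an erratum workaround bit in the Cortex-A9 diagnostic register; the bit position is a placeholder, since the applicable erratum determines the real one:

    @ Illustrative only, not part of this commit
        read_diag r0
        orr     r0, r0, #(1 << 6)   @ placeholder bit, depends on the erratum
        write_diag r0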
diff --git a/core/arch/arm/include/arm64.h b/core/arch/arm/include/arm64.h
new file mode 100644
index 0000000..148b761
--- /dev/null
+++ b/core/arch/arm/include/arm64.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ARM64_H
+#define ARM64_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+#include <util.h>
+
+#define SCTLR_M BIT32(0)
+#define SCTLR_A BIT32(1)
+#define SCTLR_C BIT32(2)
+#define SCTLR_SA BIT32(3)
+#define SCTLR_I BIT32(12)
+
+#define TTBR_ASID_MASK 0xff
+#define TTBR_ASID_SHIFT 48
+
+#define CLIDR_LOUIS_SHIFT 21
+#define CLIDR_LOC_SHIFT 24
+#define CLIDR_FIELD_WIDTH 3
+
+#define CSSELR_LEVEL_SHIFT 1
+
+#define DAIFBIT_FIQ BIT32(0)
+#define DAIFBIT_IRQ BIT32(1)
+#define DAIFBIT_ABT BIT32(2)
+#define DAIFBIT_DBG BIT32(3)
+#define DAIFBIT_ALL (DAIFBIT_FIQ | DAIFBIT_IRQ | \
+ DAIFBIT_ABT | DAIFBIT_DBG)
+
+#define DAIF_F_SHIFT 6
+#define DAIF_F BIT32(6)
+#define DAIF_I BIT32(7)
+#define DAIF_A BIT32(8)
+#define DAIF_D BIT32(9)
+#define DAIF_AIF (DAIF_A | DAIF_I | DAIF_F)
+
+#define SPSR_MODE_RW_SHIFT 4
+#define SPSR_MODE_RW_MASK 0x1
+#define SPSR_MODE_RW_64 0x0
+#define SPSR_MODE_RW_32 0x1
+
+#define SPSR_64_MODE_SP_SHIFT 0
+#define SPSR_64_MODE_SP_MASK 0x1
+#define SPSR_64_MODE_SP_EL0 0x0
+#define SPSR_64_MODE_SP_ELX 0x1
+
+#define SPSR_64_MODE_EL_SHIFT 2
+#define SPSR_64_MODE_EL_MASK 0x3
+#define SPSR_64_MODE_EL1 0x1
+#define SPSR_64_MODE_EL0 0x0
+
+#define SPSR_64_DAIF_SHIFT 6
+#define SPSR_64_DAIF_MASK 0xf
+
+#define SPSR_32_AIF_SHIFT 6
+#define SPSR_32_AIF_MASK 0x7
+
+#define SPSR_32_E_SHIFT 9
+#define SPSR_32_E_MASK 0x1
+#define SPSR_32_E_LITTLE 0x0
+#define SPSR_32_E_BIG 0x1
+
+#define SPSR_32_T_SHIFT 5
+#define SPSR_32_T_MASK 0x1
+#define SPSR_32_T_ARM 0x0
+#define SPSR_32_T_THUMB 0x1
+
+#define SPSR_32_MODE_SHIFT 0
+#define SPSR_32_MODE_MASK 0xf
+#define SPSR_32_MODE_USR 0x0
+
+
+#define SPSR_64(el, sp, daif) \
+ (SPSR_MODE_RW_64 << SPSR_MODE_RW_SHIFT | \
+ ((el) & SPSR_64_MODE_EL_MASK) << SPSR_64_MODE_EL_SHIFT | \
+ ((sp) & SPSR_64_MODE_SP_MASK) << SPSR_64_MODE_SP_SHIFT | \
+ ((daif) & SPSR_64_DAIF_MASK) << SPSR_64_DAIF_SHIFT)
+
+#define SPSR_32(mode, isa, aif) \
+ (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT | \
+ SPSR_32_E_LITTLE << SPSR_32_E_SHIFT | \
+ ((mode) & SPSR_32_MODE_MASK) << SPSR_32_MODE_SHIFT | \
+ ((isa) & SPSR_32_T_MASK) << SPSR_32_T_SHIFT | \
+ ((aif) & SPSR_32_AIF_MASK) << SPSR_32_AIF_SHIFT)
+
+
+#define TCR_T0SZ_SHIFT 0
+#define TCR_EPD0 BIT32(7)
+#define TCR_IRGN0_SHIFT 8
+#define TCR_ORGN0_SHIFT 10
+#define TCR_SH0_SHIFT 12
+#define TCR_T1SZ_SHIFT 16
+#define TCR_A1 BIT32(22)
+#define TCR_EPD1 BIT32(23)
+#define TCR_IRGN1_SHIFT 24
+#define TCR_ORGN1_SHIFT 26
+#define TCR_SH1_SHIFT 28
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_TG1_4KB SHIFT_U32(2, 30)
+#define TCR_RES1 BIT32(31)
+
+
+/* Normal memory, Inner/Outer Non-cacheable */
+#define TCR_XRGNX_NC 0x0
+/* Normal memory, Inner/Outer Write-Back Write-Allocate Cacheable */
+#define TCR_XRGNX_WB 0x1
+/* Normal memory, Inner/Outer Write-Through Cacheable */
+#define TCR_XRGNX_WT 0x2
+/* Normal memory, Inner/Outer Write-Back no Write-Allocate Cacheable */
+#define TCR_XRGNX_WBWA 0x3
+
+/* Non-shareable */
+#define TCR_SHX_NSH 0x0
+/* Outer Shareable */
+#define TCR_SHX_OSH 0x2
+/* Inner Shareable */
+#define TCR_SHX_ISH 0x3
+
+#define ESR_EC_SHIFT 26
+#define ESR_EC_MASK 0x3f
+
+#define ESR_EC_UNKNOWN 0x00
+#define ESR_EC_WFI 0x01
+#define ESR_EC_AARCH32_CP15_32 0x03
+#define ESR_EC_AARCH32_CP15_64 0x04
+#define ESR_EC_AARCH32_CP14_MR 0x05
+#define ESR_EC_AARCH32_CP14_LS 0x06
+#define ESR_EC_FP_ASIMD 0x07
+#define ESR_EC_AARCH32_CP10_ID 0x08
+#define ESR_EC_AARCH32_CP14_64 0x0c
+#define ESR_EC_ILLEGAL 0x0e
+#define ESR_EC_AARCH32_SVC 0x11
+#define ESR_EC_AARCH64_SVC 0x15
+#define ESR_EC_AARCH64_SYS 0x18
+#define ESR_EC_IABT_EL0 0x20
+#define ESR_EC_IABT_EL1 0x21
+#define ESR_EC_PC_ALIGN 0x22
+#define ESR_EC_DABT_EL0 0x24
+#define ESR_EC_DABT_EL1 0x25
+#define ESR_EC_SP_ALIGN 0x26
+#define ESR_EC_AARCH32_FP 0x28
+#define ESR_EC_AARCH64_FP 0x2c
+#define ESR_EC_SERROR 0x2f
+#define ESR_EC_BREAKPT_EL0 0x30
+#define ESR_EC_BREAKPT_EL1 0x31
+#define ESR_EC_SOFTSTP_EL0 0x32
+#define ESR_EC_SOFTSTP_EL1 0x33
+#define ESR_EC_WATCHPT_EL0 0x34
+#define ESR_EC_WATCHPT_EL1 0x35
+#define ESR_EC_AARCH32_BKPT 0x38
+#define ESR_EC_AARCH64_BRK 0x3c
+
+/* Combined defines for DFSC and IFSC */
+#define ESR_FSC_MASK 0x3f
+#define ESR_FSC_TRANS_L0 0x04
+#define ESR_FSC_TRANS_L1 0x05
+#define ESR_FSC_TRANS_L2 0x06
+#define ESR_FSC_TRANS_L3 0x07
+#define ESR_FSC_ACCF_L1 0x09
+#define ESR_FSC_ACCF_L2 0x0a
+#define ESR_FSC_ACCF_L3 0x0b
+#define ESR_FSC_PERMF_L1 0x0d
+#define ESR_FSC_PERMF_L2 0x0e
+#define ESR_FSC_PERMF_L3 0x0f
+#define ESR_FSC_ALIGN 0x21
+
+/* WnR for DABT and RES0 for IABT */
+#define ESR_ABT_WNR BIT32(6)
+
+#define CPACR_EL1_FPEN_SHIFT 20
+#define CPACR_EL1_FPEN_MASK 0x3
+#define CPACR_EL1_FPEN_NONE 0x0
+#define CPACR_EL1_FPEN_EL1 0x1
+#define CPACR_EL1_FPEN_EL0EL1 0x3
+#define CPACR_EL1_FPEN(x) ((x) >> CPACR_EL1_FPEN_SHIFT \
+ & CPACR_EL1_FPEN_MASK)
+
+
+#define PAR_F BIT32(0)
+#define PAR_PA_SHIFT 12
+#define PAR_PA_MASK (BIT64(36) - 1)
+
+#ifndef ASM
+static inline void isb(void)
+{
+ asm volatile ("isb");
+}
+
+static inline void dsb(void)
+{
+ asm volatile ("dsb sy");
+}
+
+static inline void write_at_s1e1r(uint64_t va)
+{
+ asm volatile ("at S1E1R, %0" : : "r" (va));
+}
+
+static __always_inline uint64_t read_pc(void)
+{
+ uint64_t val;
+
+ asm volatile ("adr %0, ." : "=r" (val));
+ return val;
+}
+
+static __always_inline uint64_t read_fp(void)
+{
+ uint64_t val;
+
+ asm volatile ("mov %0, x29" : "=r" (val));
+ return val;
+}
+
+/*
+ * Templates for register read/write functions based on mrs/msr
+ */
+
+#define DEFINE_REG_READ_FUNC_(reg, type, asmreg) \
+static inline type read_##reg(void) \
+{ \
+ type val; \
+ \
+ asm volatile("mrs %0, " #asmreg : "=r" (val)); \
+ return val; \
+}
+
+#define DEFINE_REG_WRITE_FUNC_(reg, type, asmreg) \
+static inline void write_##reg(type val) \
+{ \
+ asm volatile("msr " #asmreg ", %0" : : "r" (val)); \
+}
+
+#define DEFINE_U32_REG_READ_FUNC(reg) \
+ DEFINE_REG_READ_FUNC_(reg, uint32_t, reg)
+
+#define DEFINE_U32_REG_WRITE_FUNC(reg) \
+ DEFINE_REG_WRITE_FUNC_(reg, uint32_t, reg)
+
+#define DEFINE_U32_REG_READWRITE_FUNCS(reg) \
+ DEFINE_U32_REG_READ_FUNC(reg) \
+ DEFINE_U32_REG_WRITE_FUNC(reg)
+
+#define DEFINE_U64_REG_READ_FUNC(reg) \
+ DEFINE_REG_READ_FUNC_(reg, uint64_t, reg)
+
+#define DEFINE_U64_REG_WRITE_FUNC(reg) \
+ DEFINE_REG_WRITE_FUNC_(reg, uint64_t, reg)
+
+#define DEFINE_U64_REG_READWRITE_FUNCS(reg) \
+ DEFINE_U64_REG_READ_FUNC(reg) \
+ DEFINE_U64_REG_WRITE_FUNC(reg)
+
+/*
+ * Define register access functions
+ */
+
+DEFINE_U32_REG_READWRITE_FUNCS(cpacr_el1)
+DEFINE_U32_REG_READWRITE_FUNCS(daif)
+DEFINE_U32_REG_READWRITE_FUNCS(fpcr)
+DEFINE_U32_REG_READWRITE_FUNCS(fpsr)
+
+DEFINE_U32_REG_READ_FUNC(contextidr_el1)
+DEFINE_U32_REG_READ_FUNC(sctlr_el1)
+
+DEFINE_REG_READ_FUNC_(cntfrq, uint32_t, cntfrq_el0)
+
+DEFINE_U64_REG_READWRITE_FUNCS(ttbr0_el1)
+DEFINE_U64_REG_READWRITE_FUNCS(ttbr1_el1)
+DEFINE_U64_REG_READWRITE_FUNCS(tcr_el1)
+
+DEFINE_U64_REG_READ_FUNC(esr_el1)
+DEFINE_U64_REG_READ_FUNC(far_el1)
+DEFINE_U64_REG_READ_FUNC(mpidr_el1)
+DEFINE_U64_REG_READ_FUNC(par_el1)
+
+DEFINE_U64_REG_WRITE_FUNC(mair_el1)
+
+DEFINE_REG_READ_FUNC_(cntpct, uint64_t, cntpct_el0)
+
+#endif /*ASM*/
+
+#endif /*ARM64_H*/
+
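The DEFINE_*_REG_* templates above expand to one-line mrs/msr accessors. For instance, DEFINE_REG_READ_FUNC_(cntpct, uint64_t, cntpct_el0) is equivalent to:

    /* Expansion shown for illustration only */
    static inline uint64_t read_cntpct(void)
    {
        uint64_t val;

        asm volatile("mrs %0, cntpct_el0" : "=r" (val));
        return val;
    }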
diff --git a/core/arch/arm/include/arm64_macros.S b/core/arch/arm/include/arm64_macros.S
new file mode 100644
index 0000000..981a150
--- /dev/null
+++ b/core/arch/arm/include/arm64_macros.S
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ .altmacro
+
+ /*
+ * This helper macro concatenates instr_prefix, instr_suffix, to
+ * create a ldp/stp instruction. It also selects register name x/w
+ * based on reg_bytes.
+ */
+ .macro __do_dregs instr_prefix, instr_suffix, reg_bytes, base_reg, \
+ base_offs, reg0, reg1
+ .if \reg_bytes == 8
+ \instr_prefix\instr_suffix \
+ x\reg0, x\reg1, [\base_reg, #\base_offs]
+ .else
+ \instr_prefix\instr_suffix \
+ w\reg0, w\reg1, [\base_reg, #\base_offs]
+ .endif
+ .endm
+
+ /*
+ * This helper macro concatenates instr_prefix, instr_suffix, to
+ * create a ldr/str instruction. It also selects register name x/w
+ * based on reg_bytes.
+ */
+ .macro __do_reg instr_prefix, instr_suffix, reg_bytes, base_reg, \
+ base_offs, reg
+ .if \reg_bytes == 8
+ \instr_prefix\instr_suffix \
+ x\reg, [\base_reg, #\base_offs]
+ .else
+ \instr_prefix\instr_suffix \
+ w\reg, [\base_reg, #\base_offs]
+ .endif
+ .endm
+
+ /*
+ * This helper macro uses recursion to create a loop which will
+ * start with generating instructions for register pairs and if
+ * it's an odd number of registers end with a single load/store.
+ */
+ .macro _do_regs instr_prefix, reg_bytes, base_reg, base_offs, \
+ from_regnum, to_regnum
+ .if (\to_regnum - \from_regnum + 1) >= 2
+ __do_dregs \instr_prefix, p, \reg_bytes, \base_reg, \
+ \base_offs, \from_regnum, %(\from_regnum + 1)
+ .else
+ __do_reg \instr_prefix, r, \reg_bytes, \base_reg, \
+ \base_offs, \from_regnum
+ .endif
+ .if (\to_regnum - \from_regnum + 1) > 2
+ _do_regs \instr_prefix, \reg_bytes, \base_reg, \
+ %(\base_offs + 2 * \reg_bytes), \
+ %(\from_regnum + 2), \to_regnum
+ .endif
+ .endm
+
+ /*
+ * Stores registers x[from_regnum]..x[to_regnum] at
+ * [base_reg, #base_offs]
+ */
+ .macro store_xregs base_reg, base_offs, from_regnum, to_regnum
+ _do_regs st 8 \base_reg, \base_offs, \from_regnum, \to_regnum
+ .endm
+
+ /*
+ * Stores registers w[from_regnum]..w[to_regnum] at
+ * [base_reg, #base_offs]
+ */
+ .macro store_wregs base_reg, base_offs, from_regnum, to_regnum
+ _do_regs st 4 \base_reg, \base_offs, \from_regnum, \to_regnum
+ .endm
+
+ /*
+ * Loads registers x[from_regnum]..x[to_regnum] at
+ * [base_reg, #base_offs]
+ */
+ .macro load_xregs base_reg, base_offs, from_regnum, to_regnum
+ _do_regs ld 8 \base_reg, \base_offs, \from_regnum, \to_regnum
+ .endm
+
+ /*
+ * Loads registers w[from_regnum]..w[to_regnum] at
+ * [base_reg, #base_offs]
+ */
+ .macro load_wregs base_reg, base_offs, from_regnum, to_regnum
+ _do_regs ld 4 \base_reg, \base_offs, \from_regnum, \to_regnum
+ .endm
+
+
+ /* Push register pair on stack */
+ .macro push, r1, r2
+ stp \r1, \r2, [sp, #-16]!
+ .endm
+
+ /* Pop register pair from stack */
+ .macro pop, r1, r2
+ ldp \r1, \r2, [sp], #16
+ .endm
+
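A usage sketch for the load/store helpers; the base register and offsets are placeholders. store_xregs expands into stp instructions (plus a trailing str when the register count is odd):

    /* Illustrative only, not part of this commit */
        store_xregs x9, 0, 0, 3     /* stp x0, x1, [x9]; stp x2, x3, [x9, #16] */
        load_xregs  x9, 0, 0, 3     /* restores x0..x3 from the same area */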
diff --git a/core/arch/arm/include/kernel/abort.h b/core/arch/arm/include/kernel/abort.h
new file mode 100644
index 0000000..0480f43
--- /dev/null
+++ b/core/arch/arm/include/kernel/abort.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_ABORT_H
+#define KERNEL_ABORT_H
+
+#define ABORT_TYPE_UNDEF 0
+#define ABORT_TYPE_PREFETCH 1
+#define ABORT_TYPE_DATA 2
+
+#ifndef ASM
+
+#include <compiler.h>
+#include <types_ext.h>
+
+struct abort_info {
+ uint32_t abort_type;
+ uint32_t fault_descr; /* only valid for data or prefetch abort */
+ vaddr_t va;
+ uint32_t pc;
+ struct thread_abort_regs *regs;
+};
+
+void abort_print(struct abort_info *ai);
+void abort_print_error(struct abort_info *ai);
+
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs);
+
+bool abort_is_user_exception(struct abort_info *ai);
+
+#endif /*ASM*/
+#endif /*KERNEL_ABORT_H*/
+
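abort_handler() is entered from the exception vectors (thread_a32.S and thread_a64.S in this commit). A C-level sketch of the dispatch, for illustration only:

    /* Illustrative only, not part of this commit */
    #include <kernel/abort.h>

    static void dispatch_data_abort(struct thread_abort_regs *regs)
    {
        /* the abort type tells the handler which fault registers to decode */
        abort_handler(ABORT_TYPE_DATA, regs);
    }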
diff --git a/core/arch/arm/include/kernel/generic_boot.h b/core/arch/arm/include/kernel/generic_boot.h
new file mode 100644
index 0000000..622c6ff
--- /dev/null
+++ b/core/arch/arm/include/kernel/generic_boot.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_GENERIC_BOOT_H
+#define KERNEL_GENERIC_BOOT_H
+
+#include <initcall.h>
+#include <types_ext.h>
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+unsigned long cpu_on_handler(unsigned long a0, unsigned long a1);
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long unused,
+ unsigned long fdt);
+unsigned long generic_boot_cpu_on_handler(unsigned long a0, unsigned long a1);
+#else
+void generic_boot_init_primary(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt);
+void generic_boot_init_secondary(unsigned long nsec_entry);
+#endif
+
+void main_init_gic(void);
+void main_secondary_init_gic(void);
+
+void init_sec_mon(unsigned long nsec_entry);
+
+const struct thread_handlers *generic_boot_get_handlers(void);
+
+/* weak routines eventually overridden by platform */
+void plat_cpu_reset_early(void);
+void plat_cpu_reset_late(void);
+void arm_cl2_config(vaddr_t pl310);
+void arm_cl2_enable(vaddr_t pl310);
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+extern paddr_t ns_entry_addrs[] __early_bss;
+int generic_boot_core_release(size_t core_idx, paddr_t entry);
+paddr_t generic_boot_core_hpen(void);
+#endif
+
+extern uint8_t __text_init_start[];
+extern uint8_t __text_start[];
+extern initcall_t __initcall_start;
+extern initcall_t __initcall_end;
+extern uint8_t __data_start[];
+extern uint8_t __data_end[];
+extern uint8_t __rodata_start[];
+extern uint8_t __rodata_end[];
+extern uint8_t __early_bss_start[];
+extern uint8_t __early_bss_end[];
+extern uint8_t __bss_start[];
+extern uint8_t __bss_end[];
+extern uint8_t __nozi_start[];
+extern uint8_t __nozi_end[];
+extern uint8_t __nozi_stack_start[];
+extern uint8_t __nozi_stack_end[];
+extern uint8_t __init_start[];
+extern uint8_t __init_size[];
+extern uint8_t __tmp_hashes_start[];
+extern uint8_t __tmp_hashes_size[];
+extern uint8_t __heap1_start[];
+extern uint8_t __heap1_end[];
+extern uint8_t __heap2_start[];
+extern uint8_t __heap2_end[];
+extern uint8_t __pageable_part_start[];
+extern uint8_t __pageable_part_end[];
+extern uint8_t __pageable_start[];
+extern uint8_t __pageable_end[];
+extern uint8_t __asan_shadow_start[];
+extern uint8_t __asan_shadow_end[];
+extern vaddr_t __ctor_list;
+extern vaddr_t __ctor_end;
+extern uint8_t __end[];
+
+/* Generated by core/arch/arm/kernel/link.mk */
+extern const char core_v_str[];
+
+#endif /* KERNEL_GENERIC_BOOT_H */
diff --git a/core/arch/arm/include/kernel/misc.h b/core/arch/arm/include/kernel/misc.h
new file mode 100644
index 0000000..a9174a8
--- /dev/null
+++ b/core/arch/arm/include/kernel/misc.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_MISC_H
+#define KERNEL_MISC_H
+
+#include <types_ext.h>
+#include <arm.h>
+#include <kernel/thread.h>
+
+size_t get_core_pos(void);
+
+uint32_t read_mode_sp(int cpu_mode);
+uint32_t read_mode_lr(int cpu_mode);
+
+static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
+{
+ return (uint64_t)reg0 << 32 | reg1;
+}
+
+static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
+ uint32_t *reg1)
+{
+ *reg0 = val >> 32;
+ *reg1 = val;
+}
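+
+/*
+ * Illustrative example (not part of the upstream header): packing and
+ * unpacking a 64-bit value carried in two 32-bit registers.
+ */
+#if 0
+static void reg_pair_example(void)
+{
+	uint32_t hi = 0x12345678;
+	uint32_t lo = 0x9abcdef0;
+	uint64_t val = reg_pair_to_64(hi, lo);	/* 0x123456789abcdef0 */
+
+	reg_pair_from_64(val, &hi, &lo);	/* hi and lo are restored */
+}
+#endif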
+
+#endif /*KERNEL_MISC_H*/
+
diff --git a/core/arch/arm/include/kernel/mutex.h b/core/arch/arm/include/kernel/mutex.h
new file mode 100644
index 0000000..1698b35
--- /dev/null
+++ b/core/arch/arm/include/kernel/mutex.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_MUTEX_H
+#define KERNEL_MUTEX_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+#include <kernel/wait_queue.h>
+
+enum mutex_value {
+ MUTEX_VALUE_UNLOCKED,
+ MUTEX_VALUE_LOCKED,
+};
+
+struct mutex {
+ enum mutex_value value;
+ unsigned spin_lock; /* used when operating on this struct */
+ struct wait_queue wq;
+ int owner_id;
+ TAILQ_ENTRY(mutex) link;
+};
+#define MUTEX_INITIALIZER \
+ { .value = MUTEX_VALUE_UNLOCKED, .owner_id = -1, \
+ .wq = WAIT_QUEUE_INITIALIZER, }
+
+TAILQ_HEAD(mutex_head, mutex);
+
+void mutex_init(struct mutex *m);
+void mutex_destroy(struct mutex *m);
+
+#ifdef CFG_MUTEX_DEBUG
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_unlock(m) mutex_unlock_debug((m), __FILE__, __LINE__)
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_lock(m) mutex_lock_debug((m), __FILE__, __LINE__)
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_trylock(m) mutex_trylock_debug((m), __FILE__, __LINE__)
+
+#else
+void mutex_unlock(struct mutex *m);
+void mutex_lock(struct mutex *m);
+bool mutex_trylock(struct mutex *m);
+#endif
+
+
+struct condvar {
+ unsigned spin_lock;
+ struct mutex *m;
+};
+#define CONDVAR_INITIALIZER { .m = NULL }
+
+void condvar_init(struct condvar *cv);
+void condvar_destroy(struct condvar *cv);
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_signal(cv) condvar_signal_debug((cv), __FILE__, __LINE__)
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_broadcast(cv) condvar_broadcast_debug((cv), __FILE__, __LINE__)
+
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno);
+#define condvar_wait(cv, m) condvar_wait_debug((cv), (m), __FILE__, __LINE__)
+#else
+void condvar_signal(struct condvar *cv);
+void condvar_broadcast(struct condvar *cv);
+void condvar_wait(struct condvar *cv, struct mutex *m);
+#endif
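+
+/*
+ * Illustrative usage sketch (not part of the upstream header): a condvar is
+ * always used together with the mutex that protects the shared state.
+ */
+#if 0
+static struct mutex example_mu = MUTEX_INITIALIZER;
+static struct condvar example_cv = CONDVAR_INITIALIZER;
+static bool example_ready;
+
+static void example_wait_ready(void)
+{
+	mutex_lock(&example_mu);
+	while (!example_ready)
+		condvar_wait(&example_cv, &example_mu); /* re-acquires mutex */
+	mutex_unlock(&example_mu);
+}
+
+static void example_set_ready(void)
+{
+	mutex_lock(&example_mu);
+	example_ready = true;
+	condvar_signal(&example_cv);
+	mutex_unlock(&example_mu);
+}
+#endif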
+
+#endif /*KERNEL_MUTEX_H*/
+
diff --git a/core/arch/arm/include/kernel/pm_stubs.h b/core/arch/arm/include/kernel/pm_stubs.h
new file mode 100644
index 0000000..6cbe897
--- /dev/null
+++ b/core/arch/arm/include/kernel/pm_stubs.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PM_STUBS_H
+#define PM_STUBS_H
+
+#include <stdint.h>
+#include <compiler.h>
+
+unsigned long pm_panic(unsigned long a0, unsigned long a1) __noreturn;
+unsigned long pm_do_nothing(unsigned long a0, unsigned long a1);
+
+#endif /* PM_STUBS_H */
diff --git a/core/arch/arm/include/kernel/pseudo_ta.h b/core/arch/arm/include/kernel/pseudo_ta.h
new file mode 100644
index 0000000..98316bd
--- /dev/null
+++ b/core/arch/arm/include/kernel/pseudo_ta.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_PSEUDO_TA_H
+#define KERNEL_PSEUDO_TA_H
+
+#include <assert.h>
+#include <compiler.h>
+#include <kernel/tee_ta_manager.h>
+#include <tee_api_types.h>
+#include <user_ta_header.h>
+#include <util.h>
+
+#define PTA_MANDATORY_FLAGS (TA_FLAG_SINGLE_INSTANCE | \
+ TA_FLAG_MULTI_SESSION | \
+ TA_FLAG_INSTANCE_KEEP_ALIVE)
+
+#define PTA_ALLOWED_FLAGS PTA_MANDATORY_FLAGS
+#define PTA_DEFAULT_FLAGS PTA_MANDATORY_FLAGS
+
+struct pseudo_ta_head {
+ TEE_UUID uuid;
+ const char *name;
+ uint32_t flags;
+
+ TEE_Result (*create_entry_point)(void);
+ void (*destroy_entry_point)(void);
+ TEE_Result (*open_session_entry_point)(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS],
+ void **ppSessionContext);
+ void (*close_session_entry_point)(void *pSessionContext);
+ TEE_Result (*invoke_command_entry_point)(void *pSessionContext,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS]);
+};
+
+#define pseudo_ta_register(...) static const struct pseudo_ta_head __head \
+ __used __section("ta_head_section") = { __VA_ARGS__ }
+
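+/*
+ * Illustrative usage sketch (not part of the upstream header): a minimal
+ * pseudo TA registration. The UUID and name below are made-up placeholders.
+ */
+#if 0
+static TEE_Result example_invoke(void *sess_ctx __unused, uint32_t cmd_id,
+				 uint32_t param_types __unused,
+				 TEE_Param params[TEE_NUM_PARAMS] __unused)
+{
+	if (cmd_id == 0)
+		return TEE_SUCCESS;
+
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+
+pseudo_ta_register(.uuid = { 0x00000000, 0x0000, 0x0000,
+			     { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
+		   .name = "example.pta",
+		   .flags = PTA_DEFAULT_FLAGS,
+		   .invoke_command_entry_point = example_invoke);
+#endif
+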
+
+struct pseudo_ta_ctx {
+ const struct pseudo_ta_head *pseudo_ta;
+ struct tee_ta_ctx ctx;
+};
+
+static inline bool is_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+static inline struct pseudo_ta_ctx *to_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_pseudo_ta_ctx(ctx));
+ return container_of(ctx, struct pseudo_ta_ctx, ctx);
+}
+
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+
+#endif /* KERNEL_PSEUDO_TA_H */
+
diff --git a/core/arch/arm/include/kernel/spinlock.h b/core/arch/arm/include/kernel/spinlock.h
new file mode 100644
index 0000000..c248673
--- /dev/null
+++ b/core/arch/arm/include/kernel/spinlock.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_SPINLOCK_H
+#define KERNEL_SPINLOCK_H
+
+#define SPINLOCK_LOCK 1
+#define SPINLOCK_UNLOCK 0
+
+#ifndef ASM
+#include <assert.h>
+#include <compiler.h>
+#include <stdbool.h>
+#include <kernel/thread.h>
+
+#ifdef CFG_TEE_CORE_DEBUG
+void spinlock_count_incr(void);
+void spinlock_count_decr(void);
+bool have_spinlock(void);
+static inline void assert_have_no_spinlock(void)
+{
+ assert(!have_spinlock());
+}
+#else
+static inline void spinlock_count_incr(void) { }
+static inline void spinlock_count_decr(void) { }
+static inline void assert_have_no_spinlock(void) { }
+#endif
+
+void __cpu_spin_lock(unsigned int *lock);
+void __cpu_spin_unlock(unsigned int *lock);
+/* returns 0 on locking success, non-zero on failure */
+unsigned int __cpu_spin_trylock(unsigned int *lock);
+
+static inline void cpu_spin_lock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_lock(lock);
+ spinlock_count_incr();
+}
+
+static inline bool cpu_spin_trylock(unsigned int *lock)
+{
+ unsigned int rc;
+
+ assert(thread_irq_disabled());
+ rc = __cpu_spin_trylock(lock);
+ if (!rc)
+ spinlock_count_incr();
+ return !rc;
+}
+
+static inline void cpu_spin_unlock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_unlock(lock);
+ spinlock_count_decr();
+}
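+
+/*
+ * Illustrative usage sketch (not part of the upstream header): IRQs must be
+ * masked before taking a spinlock, as checked by the assertions above.
+ */
+#if 0
+static unsigned int example_lock = SPINLOCK_UNLOCK;
+
+static void example_critical_section(void)
+{
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+	cpu_spin_lock(&example_lock);
+	/* ... access data shared between CPUs ... */
+	cpu_spin_unlock(&example_lock);
+
+	thread_unmask_exceptions(exceptions);
+}
+#endif
+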
+#endif /* ASM */
+
+#endif /* KERNEL_SPINLOCK_H */
diff --git a/core/arch/arm/include/kernel/tee_l2cc_mutex.h b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
new file mode 100644
index 0000000..508a510
--- /dev/null
+++ b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_L2CC_MUTEX_H
+#define TEE_L2CC_MUTEX_H
+#include <inttypes.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <compiler.h>
+
+#if defined(CFG_PL310)
+TEE_Result tee_enable_l2cc_mutex(void);
+TEE_Result tee_disable_l2cc_mutex(void);
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+void tee_l2cc_mutex_lock(void);
+void tee_l2cc_mutex_unlock(void);
+
+/*
+ * Stores the physical address of the mutex used for the L2 cache
+ * controller (l2cc). It is allocated at boot.
+ */
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa);
+
+#else
+static TEE_Result tee_enable_l2cc_mutex(void);
+static TEE_Result tee_disable_l2cc_mutex(void);
+static TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+static TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+
+static inline TEE_Result tee_enable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_disable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_get_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_set_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+#endif /* TEE_L2CC_MUTEX_H */
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
new file mode 100644
index 0000000..175ba77
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_H
+#define KERNEL_THREAD_H
+
+#ifndef ASM
+#include <types_ext.h>
+#include <compiler.h>
+#include <optee_msg.h>
+#include <kernel/mutex.h>
+#include <kernel/vfp.h>
+#include <mm/pgt_cache.h>
+#endif
+
+#define THREAD_ID_0 0
+#define THREAD_ID_INVALID -1
+
+#define THREAD_RPC_MAX_NUM_PARAMS 4
+
+#ifndef ASM
+struct thread_vector_table {
+ uint32_t std_smc_entry;
+ uint32_t fast_smc_entry;
+ uint32_t cpu_on_entry;
+ uint32_t cpu_off_entry;
+ uint32_t cpu_resume_entry;
+ uint32_t cpu_suspend_entry;
+ uint32_t fiq_entry;
+ uint32_t system_off_entry;
+ uint32_t system_reset_entry;
+};
+extern struct thread_vector_table thread_vector_table;
+
+struct thread_specific_data {
+ TAILQ_HEAD(, tee_ta_session) sess_stack;
+ struct tee_ta_ctx *ctx;
+#ifdef CFG_SMALL_PAGE_USER_TA
+ struct pgt_cache pgt_cache;
+#endif
+ void *rpc_fs_payload;
+ paddr_t rpc_fs_payload_pa;
+ uint64_t rpc_fs_payload_cookie;
+ size_t rpc_fs_payload_size;
+};
+
+struct thread_user_vfp_state {
+ struct vfp_state vfp;
+ bool lazy_saved;
+ bool saved;
+};
+
+#ifdef ARM32
+struct thread_smc_args {
+ uint32_t a0; /* SMC function ID */
+ uint32_t a1; /* Parameter */
+ uint32_t a2; /* Parameter */
+ uint32_t a3; /* Thread ID when returning from RPC */
+ uint32_t a4; /* Not used */
+ uint32_t a5; /* Not used */
+ uint32_t a6; /* Not used */
+ uint32_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_smc_args {
+ uint64_t a0; /* SMC function ID */
+ uint64_t a1; /* Parameter */
+ uint64_t a2; /* Parameter */
+ uint64_t a3; /* Thread ID when returning from RPC */
+ uint64_t a4; /* Not used */
+ uint64_t a5; /* Not used */
+ uint64_t a6; /* Not used */
+ uint64_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_abort_regs {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t pad;
+ uint32_t spsr;
+ uint32_t elr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t ip;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_abort_regs {
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x15;
+ uint64_t x16;
+ uint64_t x17;
+ uint64_t x18;
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+ uint64_t x22;
+ uint64_t x23;
+ uint64_t x24;
+ uint64_t x25;
+ uint64_t x26;
+ uint64_t x27;
+ uint64_t x28;
+ uint64_t x29;
+ uint64_t x30;
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t sp_el0;
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_svc_regs {
+ uint32_t spsr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t lr;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_svc_regs {
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x30;
+ uint64_t sp_el0;
+ uint64_t pad;
+} __aligned(16);
+#endif /*ARM64*/
+#endif /*ASM*/
+
+#ifndef ASM
+typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
+typedef void (*thread_fiq_handler_t)(void);
+typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
+ unsigned long a1);
+struct thread_handlers {
+ /*
+ * stdcall and fastcall are called as regular functions and
+ * normal ARM Calling Convention applies. Return values are passed
+ * in args->param{1-3} and forwarded into r0-r3 when returned to
+ * non-secure world.
+ *
+ * stdcall handles calls which can be preempted from non-secure
+ * world. This handler is executed with a large stack.
+ *
+ * fastcall handles fast calls which can't be preempted. This
+ * handler is executed with a limited stack. This handler must not
+ * cause any aborts or re-enable FIQs which are temporarily masked
+ * while executing this handler.
+ *
+ * TODO investigate if we should execute fastcalls and FIQs on
+ * different stacks allowing FIQs to be enabled during a fastcall.
+ */
+ thread_smc_handler_t std_smc;
+ thread_smc_handler_t fast_smc;
+
+ /*
+ * fiq is called as a regular function and normal ARM Calling
+ * Convention applies.
+ *
+ * This handler handles FIQs which can't be preempted. This handler
+ * is executed with a limited stack. This handler must not cause
+ * any aborts or re-enable FIQs which are temporarily masked while
+ * executing this handler.
+ */
+ thread_fiq_handler_t fiq;
+
+ /*
+ * Power management handlers triggered from ARM Trusted Firmware.
+ * Not used when using internal monitor.
+ */
+ thread_pm_handler_t cpu_on;
+ thread_pm_handler_t cpu_off;
+ thread_pm_handler_t cpu_suspend;
+ thread_pm_handler_t cpu_resume;
+ thread_pm_handler_t system_off;
+ thread_pm_handler_t system_reset;
+};
+void thread_init_primary(const struct thread_handlers *handlers);
+void thread_init_per_cpu(void);
+
+/*
+ * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
+ * first stack, THREAD_ID_0 + 1 for the next and so on.
+ *
+ * Returns true on success and false on errors.
+ */
+bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
+
+/*
+ * Initializes a thread to be used during boot
+ */
+void thread_init_boot_thread(void);
+
+/*
+ * Clears the current thread id
+ * Only supposed to be used during initialization.
+ */
+void thread_clr_boot_thread(void);
+
+/*
+ * Returns current thread id.
+ */
+int thread_get_id(void);
+
+/*
+ * Returns current thread id, return -1 on failure.
+ */
+int thread_get_id_may_fail(void);
+
+/* Returns Thread Specific Data (TSD) pointer. */
+struct thread_specific_data *thread_get_tsd(void);
+
+/*
+ * Sets IRQ status for current thread, must only be called from an
+ * active thread context.
+ *
+ * enable == true -> enable IRQ
+ * enable == false -> disable IRQ
+ */
+void thread_set_irq(bool enable);
+
+/*
+ * Restores the IRQ status (in CPSR) for current thread, must only be called
+ * from an active thread context.
+ */
+void thread_restore_irq(void);
+
+/*
+ * Defines the bits for the exception mask used by the
+ * thread_*_exceptions() functions below.
+ */
+#define THREAD_EXCP_FIQ (1 << 0)
+#define THREAD_EXCP_IRQ (1 << 1)
+#define THREAD_EXCP_ABT (1 << 2)
+#define THREAD_EXCP_ALL (THREAD_EXCP_FIQ | THREAD_EXCP_IRQ | THREAD_EXCP_ABT)
+
+/*
+ * thread_get_exceptions() - return current exception mask
+ */
+uint32_t thread_get_exceptions(void);
+
+/*
+ * thread_set_exceptions() - set exception mask
+ * @exceptions: exception mask to set
+ *
+ * Any previous exception mask is replaced by this exception mask, that is,
+ * old bits are cleared and replaced by these.
+ */
+void thread_set_exceptions(uint32_t exceptions);
+
+/*
+ * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
+ * @exceptions exceptions to mask
+ * @returns old exception state
+ */
+uint32_t thread_mask_exceptions(uint32_t exceptions);
+
+/*
+ * thread_unmask_exceptions() - Unmasks asynchronous exceptions
+ * @state Old asynchronous exception state to restore (returned by
+ * thread_mask_exceptions())
+ */
+void thread_unmask_exceptions(uint32_t state);
+
+
+static inline bool thread_irq_disabled(void)
+{
+ return !!(thread_get_exceptions() & THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+/*
+ * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
+ *
+ * IRQ is masked while VFP is enabled. User space must not be entered before
+ * thread_kernel_disable_vfp() has been called to disable VFP and restore the
+ * IRQ status.
+ *
+ * This function may only be called from an active thread context and may
+ * not be called again before thread_kernel_disable_vfp() has been called.
+ *
+ * VFP state is saved as needed.
+ *
+ * Returns a state variable that should be passed to
+ * thread_kernel_disable_vfp().
+ */
+uint32_t thread_kernel_enable_vfp(void);
+
+/*
+ * thread_kernel_disable_vfp() - Disables usage of VFP
+ * @state: state variable returned by thread_kernel_enable_vfp()
+ *
+ * Disables usage of VFP and restores IRQ status after a call to
+ * thread_kernel_enable_vfp().
+ *
+ * This function may only be called after a call to
+ * thread_kernel_enable_vfp().
+ */
+void thread_kernel_disable_vfp(uint32_t state);
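+
+/*
+ * Illustrative usage sketch (not part of the upstream header): VFP use in
+ * kernel mode is bracketed by the enable/disable pair.
+ */
+#if 0
+static void example_vfp_work(void)
+{
+	uint32_t vfp_state = thread_kernel_enable_vfp();
+
+	/* ... use VFP/SIMD registers here, IRQ stays masked ... */
+
+	thread_kernel_disable_vfp(vfp_state);
+}
+#endif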
+
+/*
+ * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
+ */
+void thread_kernel_save_vfp(void);
+
+/*
+ * thread_kernel_restore_vfp() - Restores kernel vfp state
+ */
+void thread_kernel_restore_vfp(void);
+
+/*
+ * thread_user_enable_vfp() - Enables vfp for user mode usage
+ * @uvfp: pointer to where to save the vfp state if needed
+ */
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
+#else /*CFG_WITH_VFP*/
+static inline void thread_kernel_save_vfp(void)
+{
+}
+
+static inline void thread_kernel_restore_vfp(void)
+{
+}
+#endif /*CFG_WITH_VFP*/
+
+/*
+ * thread_user_save_vfp() - Saves the user vfp state if enabled
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_save_vfp(void);
+#else
+static inline void thread_user_save_vfp(void)
+{
+}
+#endif
+
+/*
+ * thread_user_clear_vfp() - Clears the vfp state
+ * @uvfp: pointer to saved state to clear
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp);
+#else
+static inline void thread_user_clear_vfp(
+ struct thread_user_vfp_state *uvfp __unused)
+{
+}
+#endif
+
+
+/*
+ * thread_enter_user_mode() - Enters user mode
+ * @a0: Passed in r/x0 for user_func
+ * @a1: Passed in r/x1 for user_func
+ * @a2: Passed in r/x2 for user_func
+ * @a3: Passed in r/x3 for user_func
+ * @user_sp: Assigned sp value in user mode
+ * @user_func: Function to execute in user mode
+ * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
+ * @exit_status0: Pointer to opaque exit staus 0
+ * @exit_status1: Pointer to opaque exit staus 1
+ *
+ * This functions enters user mode with the argument described above,
+ * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
+ * when returning back to the caller of this function through an exception
+ * handler.
+ *
+ * @Returns what's passed in "ret" to thread_unwind_user_mode()
+ */
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long entry_func, bool is_32bit,
+ uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * thread_unwind_user_mode() - Unwinds kernel stack from user entry
+ * @ret: Value to return from thread_enter_user_mode()
+ * @exit_status0: Exit status 0
+ * @exit_status1: Exit status 1
+ *
+ * This is the function that exception handlers can return into
+ * to resume execution in kernel mode instead of user mode.
+ *
+ * This function is closely coupled with thread_enter_user_mode() since it
+ * needs to restore registers saved by thread_enter_user_mode() and, when it
+ * returns, make it look like thread_enter_user_mode() just returned. It is
+ * expected that the stack pointer is where thread_enter_user_mode() left
+ * it. The stack will be unwound and the function will return to where
+ * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
+ * are written to the corresponding pointers supplied to
+ * thread_enter_user_mode().
+ */
+void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ uint32_t exit_status1);
+
+#ifdef ARM64
+/*
+ * thread_get_saved_thread_sp() - Returns the saved sp of current thread
+ *
+ * When switching from the thread stack pointer the value is stored
+ * separately in the current thread context. This function returns this
+ * saved value.
+ *
+ * @returns stack pointer
+ */
+vaddr_t thread_get_saved_thread_sp(void);
+#endif /*ARM64*/
+
+bool thread_addr_is_in_stack(vaddr_t va);
+
+/*
+ * Adds a mutex to the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_add_mutex(struct mutex *m);
+
+/*
+ * Removes a mutex from the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_rem_mutex(struct mutex *m);
+
+/*
+ * Disables and empties the prealloc RPC cache one reference at a time. If
+ * all threads are idle this function returns true and *cookie is set to the
+ * cookie of one shm object removed from the cache, or to 0 once the cache
+ * is empty and has been disabled. If any thread isn't idle this function
+ * returns false.
+ */
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
+
+/*
+ * Enables the prealloc RPC cache. If all threads are idle the cache is
+ * enabled and this function returns true. If any thread isn't idle this
+ * function returns false.
+ */
+bool thread_enable_prealloc_rpc_cache(void);
+
+/**
+ * Allocates data for struct optee_msg_arg.
+ *
+ * @size: size in bytes of struct optee_msg_arg
+ * @arg: returned physical pointer to a struct optee_msg_arg buffer,
+ * 0 if allocation failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_arg()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_arg(uint64_t cookie);
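+
+/*
+ * Illustrative usage sketch (not part of the upstream header): allocating
+ * and releasing a struct optee_msg_arg buffer via RPC.
+ * OPTEE_MSG_GET_ARG_SIZE() comes from <optee_msg.h>.
+ */
+#if 0
+static void example_rpc_arg(void)
+{
+	paddr_t arg = 0;
+	uint64_t cookie = 0;
+
+	thread_rpc_alloc_arg(OPTEE_MSG_GET_ARG_SIZE(2), &arg, &cookie);
+	if (!arg)
+		return;	/* allocation failed */
+
+	/* ... fill in the struct optee_msg_arg at physical address "arg" ... */
+
+	thread_rpc_free_arg(cookie);
+}
+#endif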
+
+/**
+ * Allocates data for payload buffers.
+ *
+ * @size: size in bytes of payload buffer
+ * @payload: returned physical pointer to payload buffer, 0 if allocation
+ * failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_payload()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_payload(uint64_t cookie);
+
+/**
+ * Does an RPC using a preallocated argument buffer
+ * @cmd: RPC cmd
+ * @num_params: number of parameters (max 2)
+ * @params: RPC parameters
+ * @returns RPC return value
+ */
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params);
+
+#endif /*ASM*/
+
+#endif /*KERNEL_THREAD_H*/
diff --git a/core/arch/arm/include/kernel/thread_defs.h b/core/arch/arm/include/kernel/thread_defs.h
new file mode 100644
index 0000000..0f54569
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread_defs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_DEFS_H
+#define KERNEL_THREAD_DEFS_H
+
+#define THREAD_FLAGS_COPY_ARGS_ON_RETURN (1 << 0)
+#define THREAD_FLAGS_IRQ_ENABLE (1 << 1)
+#define THREAD_FLAGS_EXIT_ON_IRQ (1 << 2)
+
+#endif /*KERNEL_THREAD_DEFS_H*/
diff --git a/core/arch/arm/include/kernel/time_source.h b/core/arch/arm/include/kernel/time_source.h
new file mode 100644
index 0000000..ddabfe9
--- /dev/null
+++ b/core/arch/arm/include/kernel/time_source.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+
+struct time_source {
+ const char *name;
+ uint32_t protection_level;
+ TEE_Result (*get_sys_time)(TEE_Time *time);
+};
+void time_source_init(void);
+
+#define REGISTER_TIME_SOURCE(source) \
+ void time_source_init(void) { \
+ _time_source = source; \
+ }
+
+extern struct time_source _time_source;
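+
+/*
+ * Illustrative usage sketch (not part of the upstream header): a platform
+ * clock driver registers itself with REGISTER_TIME_SOURCE(). The values
+ * below are placeholders.
+ */
+#if 0
+static TEE_Result example_get_sys_time(TEE_Time *time)
+{
+	time->seconds = 0;
+	time->millis = 0;
+	return TEE_SUCCESS;
+}
+
+static const struct time_source example_time_source = {
+	.name = "example clock",
+	.protection_level = 100,
+	.get_sys_time = example_get_sys_time,
+};
+
+REGISTER_TIME_SOURCE(example_time_source)
+#endif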
+
+void arm_prng_add_jitter_entropy(void);
diff --git a/core/arch/arm/include/kernel/tz_proc_def.h b/core/arch/arm/include/kernel/tz_proc_def.h
new file mode 100644
index 0000000..abe281b
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_proc_def.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * General constants
+ */
+
+/*
+ * CP15 Multiprocessor Affinity register (MPIDR)
+ */
+#define CP15_CONFIG_CPU_ID_MASK 0x00000003
+#define CPU_ID0 0x00000000
+#define CPU_ID1 0x00000001
+
+/*
+ * CP15 Secure configuration register
+ */
+#define CP15_CONFIG_NS_MASK 0x00000001
+#define CP15_CONFIG_IRQ_MASK 0x00000002
+#define CP15_CONFIG_FIQ_MASK 0x00000004
+#define CP15_CONFIG_EA_MASK 0x00000008
+#define CP15_CONFIG_FW_MASK 0x00000010
+#define CP15_CONFIG_AW_MASK 0x00000020
+#define CP15_CONFIG_nET_MASK 0x00000040
+
+/*
+ * CP15 Control register
+ */
+#define CP15_CONTROL_M_MASK 0x00000001
+#define CP15_CONTROL_C_MASK 0x00000004
+#define CP15_CONTROL_Z_MASK 0x00000800
+#define CP15_CONTROL_I_MASK 0x00001000
+#define CP15_CONTROL_V_MASK 0x00002000
+#define CP15_CONTROL_HA_MASK 0x00020000
+#define CP15_CONTROL_EE_MASK 0x02000000
+#define CP15_CONTROL_NMFI_MASK 0x08000000
+#define CP15_CONTROL_TRE_MASK 0x10000000
+#define CP15_CONTROL_AFE_MASK 0x20000000
+#define CP15_CONTROL_TE_MASK 0x40000000
+
+/*
+ * CP15 Auxiliary Control register
+ */
+#define CP15_CONTROL_SMP_MASK 0x00000040
+#define CP15_CONTROL_EXCL_MASK 0x00000080
+
+/*
+ * CP15 Non secure access control register
+ */
+#define CP15_NSAC_TL_MASK 0x10000
+#define CP15_NSAC_CL_MASK 0x20000
+#define CP15_NSAC_CPN_MASK 0x3FFF
+
+/*
+ * CP15 Cache register
+ */
+#define CP15_CACHE_ADDR_R_BIT 12
+#define CP15_CACHE_ADDR_L_BIT (32-CP15_CACHE_ADDR_R_BIT)
+#define CP15_CACHE_RESULT_MASK 0x00000001
+
+/*
+ * CP15 TCM register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_TCM_ENABLE_MASK 0x00000001
+#define CP15_TCM_INSTR_TCM 0x2010000C
+#define CP15_TCM_DATA_TCM 0x2010100C
+
+/*
+ * CP15 cache lockdown register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_CACHE_LOCK_ALLWAYS_MASK 0x0000000F
+
+/*
+ * CP15 cache cleaning constant definition
+ */
+/* start of line number field offset in way/index format */
+#define LINE_FIELD_OFFSET 5
+/* Warning: this assumes a 256 lines/way cache (32kB cache) */
+#define LINE_FIELD_OVERFLOW 13
+/* start of way number field offset in way/index format */
+#define WAY_FIELD_OFFSET 30
diff --git a/core/arch/arm/include/kernel/tz_ssvce.h b/core/arch/arm/include/kernel/tz_ssvce.h
new file mode 100644
index 0000000..a886f9d
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_H
+#define TZ_SSVCE_H
+
+#ifndef ASM
+
+#include <types_ext.h>
+
+unsigned int secure_get_cpu_id(void);
+
+void arm_cl1_d_cleanbysetway(void);
+void arm_cl1_d_invbysetway(void);
+void arm_cl1_d_cleaninvbysetway(void);
+/* End address is included in the range (last address in range) */
+void arm_cl1_d_cleanbyva(void *start, void *end);
+/* End address is included in the range (last address in range) */
+void arm_cl1_d_invbyva(void *start, void *end);
+/* End address is included in the range (last address in range) */
+void arm_cl1_d_cleaninvbyva(void *start, void *end);
+void arm_cl1_i_inv_all(void);
+/* End address is included in the range (last address in range) */
+void arm_cl1_i_inv(void *start, void *end);
+
+void secure_mmu_datatlbinvall(void);
+void secure_mmu_unifiedtlbinvall(void);
+void secure_mmu_unifiedtlbinvbymva(unsigned long addr);
+void secure_mmu_unifiedtlbinv_curasid(void);
+void secure_mmu_unifiedtlbinv_byasid(unsigned long asid);
+
+void secure_mmu_disable(void);
+#endif /*!ASM*/
+
+#ifdef ARM64
+/* D$ set/way op type defines */
+#define DCISW 0x0
+#define DCCISW 0x1
+#define DCCSW 0x2
+
+#ifndef ASM
+void flush_dcache_range(vaddr_t va, size_t len);
+void inv_dcache_range(vaddr_t va, size_t len);
+void dcsw_op_louis(uint32_t op);
+void dcsw_op_all(uint32_t op);
+#endif /*!ASM*/
+#endif /*ARM64*/
+
+#endif
diff --git a/core/arch/arm/include/kernel/tz_ssvce_def.h b/core/arch/arm/include/kernel/tz_ssvce_def.h
new file mode 100644
index 0000000..3e9f9fc
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_def.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TZ_SSVCE_DEF_H
+#define TZ_SSVCE_DEF_H
+
+#include <util.h>
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+#define CPSR_OFFSET 0x00
+#define STACK_INT_USAGE 0x04
+
+/*
+ * tee service IDs (TODO: align with the service ID list).
+ * Set by NSec in R4 before SMC to request a TEE service.
+ */
+#define SSAPI_RET_FROM_INT_SERV 4
+#define SSAPI_RET_FROM_RPC_SERV 5
+
+/*
+ * TEE monitor: status returned by the routine that checks the entry
+ * reason (valid Service ID / secure context).
+ */
+#define SEC_INVALID_ENTRY 0
+#define SEC_PRE_INIT_ENTRY 1
+#define SEC_RET_FROM_INT_ENTRY 2
+#define SEC_RET_FROM_RPC_ENTRY 3
+#define SEC_NORMAL_ENTRY 4
+
+/*
+ * teecore exit reason.
+ * Set by Secure in R4 before SMC to request a switch to NSec.
+ */
+#define SEC_EXIT_NORMAL 1
+#define SEC_EXIT_START_EXT_CODE 2
+#define SEC_EXIT_INT 3
+#define SEC_EXIT_RPC_CALL 4
+#define SEC_EXIT_FIRST 5
+#define SEC_EXIT_DEEP_SLEEP 6
+
+/* misc */
+
+#define SEC_UNDEF_STACK_OFFSET 4
+#define SEC_ABORT_STACK_OFFSET 12
+
+#define SEC_ENTRY_STATUS_NOK 0
+#define SEC_ENTRY_STATUS_OK 1
+
+/*
+ * Outer cache iomem
+ */
+#define PL310_LINE_SIZE 32
+#define PL310_8_WAYS 8
+
+/* reg1 */
+#define PL310_CTRL 0x100
+#define PL310_AUX_CTRL 0x104
+#define PL310_TAG_RAM_CTRL 0x108
+#define PL310_DATA_RAM_CTRL 0x10C
+/* reg7 */
+#define PL310_SYNC 0x730
+#define PL310_INV_BY_WAY 0x77C
+#define PL310_CLEAN_BY_WAY 0x7BC
+#define PL310_FLUSH_BY_WAY 0x7FC
+#define PL310_INV_BY_PA 0x770
+#define PL310_CLEAN_BY_PA 0x7B0
+#define PL310_FLUSH_BY_PA 0x7F0
+#define PL310_FLUSH_BY_INDEXWAY 0x7F8
+/* reg9 */
+#define PL310_DCACHE_LOCKDOWN_BASE 0x900
+#define PL310_ICACHE_LOCKDOWN_BASE 0x904
+/* reg12 */
+#define PL310_ADDR_FILT_START 0xC00
+#define PL310_ADDR_FILT_END 0xC04
+/* reg15 */
+#define PL310_DEBUG_CTRL 0xF40
+#define PL310_PREFETCH_CTRL 0xF60
+#define PL310_POWER_CTRL 0xF80
+
+#define PL310_CTRL_ENABLE_BIT BIT32(0)
+#define PL310_AUX_16WAY_BIT BIT32(16)
+
+/*
+ * SCU iomem
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_POWER 0x08
+#define SCU_INV_SEC 0x0C
+#define SCU_FILT_SA 0x40
+#define SCU_FILT_EA 0x44
+#define SCU_SAC 0x50
+#define SCU_NSAC 0x54
+#define SCU_ERRATA744369 0x30
+
+#define SCU_ACCESS_CONTROL_CPU0 BIT32(0)
+#define SCU_ACCESS_CONTROL_CPU1 BIT32(1)
+#define SCU_ACCESS_CONTROL_CPU2 BIT32(2)
+#define SCU_ACCESS_CONTROL_CPU3 BIT32(3)
+#define SCU_NSAC_SCU_SHIFT 0
+#define SCU_NSAC_PTIMER_SHIFT 4
+#define SCU_NSAC_GTIMER_SHIFT 8
+
+/*
+ * GIC iomem
+ */
+#define GIC_DIST_ISR0 0x080
+#define GIC_DIST_ISR1 0x084
+
+/*
+ * CPU iomem
+ */
+#define CORE_ICC_ICCPMR 0x0004
+
+#endif /* TZ_SSVCE_DEF_H */
diff --git a/core/arch/arm/include/kernel/tz_ssvce_pl310.h b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
new file mode 100644
index 0000000..88b91e1
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_PL310_H
+#define TZ_SSVCE_PL310_H
+
+#include <util.h>
+#include <kernel/tz_ssvce_def.h>
+#include <types_ext.h>
+
+vaddr_t pl310_base(void);
+/*
+ * End address is included in the range (last address in range)
+ */
+void arm_cl2_cleaninvbyway(vaddr_t pl310_base);
+void arm_cl2_invbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+
+#endif /* TZ_SSVCE_PL310_H */
diff --git a/core/arch/arm/include/kernel/unwind.h b/core/arch/arm/include/kernel/unwind.h
new file mode 100644
index 0000000..cc5ff5a
--- /dev/null
+++ b/core/arch/arm/include/kernel/unwind.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef KERNEL_UNWIND
+#define KERNEL_UNWIND
+
+#ifndef ASM
+#include <types_ext.h>
+
+#ifdef ARM32
+/* The state of the unwind process */
+struct unwind_state {
+ uint32_t registers[16];
+ uint32_t start_pc;
+ uint32_t *insn;
+ unsigned entries;
+ unsigned byte;
+ uint16_t update_mask;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+struct unwind_state {
+ uint64_t fp;
+ uint64_t sp;
+ uint64_t pc;
+};
+#endif /*ARM64*/
+
+bool unwind_stack(struct unwind_state *state);
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+void print_stack(int level);
+#else
+static inline void print_stack(int level __unused)
+{
+}
+#endif
+
+#endif /*ASM*/
+
+#ifdef CFG_CORE_UNWIND
+#define UNWIND(...) __VA_ARGS__
+#else
+#define UNWIND(...)
+#endif
+
+#endif /*KERNEL_UNWIND*/
diff --git a/core/arch/arm/include/kernel/user_ta.h b/core/arch/arm/include/kernel/user_ta.h
new file mode 100644
index 0000000..196c0af
--- /dev/null
+++ b/core/arch/arm/include/kernel/user_ta.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_USER_TA_H
+#define KERNEL_USER_TA_H
+
+#include <assert.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <mm/tee_mm.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+TAILQ_HEAD(tee_cryp_state_head, tee_cryp_state);
+TAILQ_HEAD(tee_obj_head, tee_obj);
+TAILQ_HEAD(tee_storage_enum_head, tee_storage_enum);
+
+struct user_ta_ctx {
+ uaddr_t entry_func;
+ bool is_32bit; /* true if 32-bit ta, false if 64-bit ta */
+ /* list of sessions opened by this TA */
+ struct tee_ta_session_head open_sessions;
+ /* List of cryp states created by this TA */
+ struct tee_cryp_state_head cryp_states;
+ /* List of storage objects opened by this TA */
+ struct tee_obj_head objects;
+ /* List of storage enumerators opened by this TA */
+ struct tee_storage_enum_head storage_enums;
+ struct mobj *mobj_code; /* secure world memory */
+ struct mobj *mobj_stack; /* stack */
+ uint32_t load_addr; /* elf load addr (from TAs address space) */
+ uint32_t context; /* Context ID of the process */
+ struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
+ void *ta_time_offs; /* Time reference used by the TA */
+ struct tee_pager_area_head *areas;
+#if defined(CFG_SE_API)
+ struct tee_se_service *se_service;
+#endif
+#if defined(CFG_WITH_VFP)
+ struct thread_user_vfp_state vfp;
+#endif
+ struct tee_ta_ctx ctx;
+
+};
+
+static inline bool is_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !!(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+static inline struct user_ta_ctx *to_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_user_ta_ctx(ctx));
+ return container_of(ctx, struct user_ta_ctx, ctx);
+}
+
+#ifdef CFG_WITH_USER_TA
+TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+#else
+static inline TEE_Result tee_ta_init_user_ta_session(
+ const TEE_UUID *uuid __unused,
+ struct tee_ta_session *s __unused)
+{
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
+#endif
+
+#endif /*KERNEL_USER_TA_H*/
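
A short sketch of the intended down-cast pattern (illustrative only; the
helper name example_ta_is_32bit() is an assumption, not part of this header):

#include <kernel/user_ta.h>

static bool example_ta_is_32bit(struct tee_ta_ctx *ctx)
{
	/* Pseudo/static TAs have no user mode ELF image, so check first */
	if (!is_user_ta_ctx(ctx))
		return false;

	return to_user_ta_ctx(ctx)->is_32bit;
}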
diff --git a/core/arch/arm/include/kernel/vfp.h b/core/arch/arm/include/kernel/vfp.h
new file mode 100644
index 0000000..267dee2
--- /dev/null
+++ b/core/arch/arm/include/kernel/vfp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_VFP_H
+#define KERNEL_VFP_H
+
+#include <types_ext.h>
+#include <compiler.h>
+
+#ifdef ARM32
+/*
+ * Advanced SIMD/floating point state on ARMv7-A or ARMv8-A AArch32 has:
+ * - 32 64-bit data registers
+ * - FPSCR (32 bits)
+ * - FPEXC (32 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+struct vfp_reg {
+ uint64_t v;
+};
+
+struct vfp_state {
+ uint32_t fpexc;
+ uint32_t fpscr;
+ struct vfp_reg reg[VFP_NUM_REGS];
+};
+#endif
+
+#ifdef ARM64
+/*
+ * Advanced SIMD/floating point state on ARMv8-A AArch64 has:
+ * - 32 128-bit data registers
+ * - FPSR (32 bits)
+ * - FPCR (32 bits)
+ * - CPACR_EL1.FPEN (2 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+struct vfp_reg {
+ uint8_t v[16];
+} __aligned(16);
+
+struct vfp_state {
+ struct vfp_reg reg[VFP_NUM_REGS];
+ uint32_t fpsr;
+ uint32_t fpcr;
+ uint32_t cpacr_el1;
+ bool force_save; /* Save to reg even if VFP was not enabled */
+};
+#endif
+
+#ifdef CFG_WITH_VFP
+/* vfp_is_enabled() - Returns true if VFP is enabled */
+bool vfp_is_enabled(void);
+
+/* vfp_enable() - Enables vfp */
+void vfp_enable(void);
+
+/* vfp_disable() - Disables vfp */
+void vfp_disable(void);
+#else
+static inline bool vfp_is_enabled(void)
+{
+ return false;
+}
+
+static inline void vfp_enable(void)
+{
+}
+
+static inline void vfp_disable(void)
+{
+}
+#endif
+
+/*
+ * vfp_lazy_save_state_init() - Saves VFP enable status and disables VFP
+ * @state: VFP state structure to initialize
+ */
+void vfp_lazy_save_state_init(struct vfp_state *state);
+
+/*
+ * vfp_lazy_save_state_final() - Saves rest of VFP state
+ * @state: VFP state to save to
+ *
+ * If VFP was enabled when vfp_lazy_save_state_init() was called: save rest
+ * of state and disable VFP. Otherwise, do nothing.
+ */
+void vfp_lazy_save_state_final(struct vfp_state *state);
+
+/*
+ * vfp_lazy_restore_state() - Lazy restore VFP state
+ * @state: VFP state to restore
+ *
+ * Restores VFP enable status and also restores rest of VFP state if
+ * vfp_lazy_save_state_final() was called on this state.
+ */
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state);
+
+#endif /*KERNEL_VFP_H*/
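
A sketch of the lazy save/restore protocol described above (illustrative
only; the suspend/resume helpers and the owner of 'state' are assumptions):

#include <kernel/vfp.h>

static void example_suspend(struct vfp_state *state)
{
	/* Cheap: record whether VFP was enabled and disable it */
	vfp_lazy_save_state_init(state);

	/*
	 * ... later, and only if the VFP registers are actually about to be
	 * clobbered, flush them to memory ...
	 */
	vfp_lazy_save_state_final(state);
}

static void example_resume(struct vfp_state *state)
{
	/* Restores the enable status and, if saved above, the register file */
	vfp_lazy_restore_state(state, true /* full_state */);
}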
diff --git a/core/arch/arm/include/kernel/wait_queue.h b/core/arch/arm/include/kernel/wait_queue.h
new file mode 100644
index 0000000..eb8f881
--- /dev/null
+++ b/core/arch/arm/include/kernel/wait_queue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_WAIT_QUEUE_H
+#define KERNEL_WAIT_QUEUE_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+
+struct wait_queue_elem;
+SLIST_HEAD(wait_queue, wait_queue_elem);
+
+#define WAIT_QUEUE_INITIALIZER { .slh_first = NULL }
+
+struct condvar;
+struct wait_queue_elem {
+ short handle;
+ bool done;
+ struct condvar *cv;
+ SLIST_ENTRY(wait_queue_elem) link;
+};
+
+/*
+ * Initializes a wait queue
+ */
+void wq_init(struct wait_queue *wq);
+
+/*
+ * Initializes a wait queue element and adds it to the wait queue. This
+ * function is supposed to be called before the lock that protects the
+ * resource we need to wait for is released.
+ *
+ * One call to this function must be followed by one call to wq_wait_final()
+ * on the same wait queue element.
+ */
+void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ struct condvar *cv);
+
+static inline void wq_wait_init(struct wait_queue *wq,
+ struct wait_queue_elem *wqe)
+{
+ wq_wait_init_condvar(wq, wqe, NULL);
+}
+
+/* Waits for the wait queue element to be awakened. */
+void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ const void *sync_obj, const char *fname, int lineno);
+
+/* Wakes up the first wait queue element in the wait queue, if there is one */
+void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
+ const char *fname, int lineno);
+
+/* Returns true if the wait queue doesn't contain any elements */
+bool wq_is_empty(struct wait_queue *wq);
+
+void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
+ bool only_one, const void *sync_obj, const char *fname,
+ int lineno);
+bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv);
+
+#endif /*KERNEL_WAIT_QUEUE_H*/
+
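
A sketch of the required ordering, queue before unlocking and block after
(illustrative only; the spinlock and the 'busy' flag guarding the resource
are assumptions, as is the helper name):

#include <kernel/spinlock.h>
#include <kernel/wait_queue.h>

static void example_wait_for_resource(struct wait_queue *wq,
				      unsigned int *lock, bool *busy)
{
	while (true) {
		struct wait_queue_elem wqe;

		cpu_spin_lock(lock);
		if (!*busy) {
			*busy = true;
			cpu_spin_unlock(lock);
			return;
		}
		/* Queue ourselves while still holding the lock ... */
		wq_wait_init(wq, &wqe);
		cpu_spin_unlock(lock);
		/* ... then block until wq_wake_one() marks us done */
		wq_wait_final(wq, &wqe, busy, __func__, __LINE__);
	}
}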
diff --git a/core/arch/arm/include/mm/core_memprot.h b/core/arch/arm/include/mm/core_memprot.h
new file mode 100644
index 0000000..b7ccd21
--- /dev/null
+++ b/core/arch/arm/include/mm/core_memprot.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MEMPROT_H
+#define CORE_MEMPROT_H
+
+#include <mm/core_mmu.h>
+#include <types_ext.h>
+
+/*
+ * "pbuf_is" support.
+ *
+ * core_vbuf_is()/core_pbuf_is() can be used to check if a teecore mapped
+ * virtual address or a physical address is "Secure", "Unsecure", "external
+ * RAM" and some other fancy attributes.
+ *
+ * DO NOT use 'buf_is(Secure, buffer)==false' as an assumption that the buffer
+ * is UnSecured! This is NOT a valid assumption! A buffer is certified UnSecured
+ * only if 'buf_is(UnSecure, buffer)==true'.
+ */
+
+/* memory attributes */
+enum buf_is_attr {
+ CORE_MEM_SEC,
+ CORE_MEM_NON_SEC,
+ CORE_MEM_TEE_RAM,
+ CORE_MEM_TA_RAM,
+ CORE_MEM_NSEC_SHM,
+ CORE_MEM_EXTRAM,
+ CORE_MEM_INTRAM,
+ CORE_MEM_CACHED,
+};
+
+/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
+#define tee_pbuf_is core_pbuf_is
+#define tee_vbuf_is core_vbuf_is
+
+/* Convenience macros */
+#define tee_pbuf_is_non_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_NON_SEC, (paddr_t)(buf), (len))
+
+#define tee_pbuf_is_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_SEC, (paddr_t)(buf), (len))
+
+#define tee_vbuf_is_non_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))
+
+#define tee_vbuf_is_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))
+
+/*
+ * This function returns true if the buffer complies with the supplied flags.
+ * If it returns false, the buffer doesn't comply with the supplied flags
+ * or something went wrong.
+ *
+ * Note that returning false doesn't guarantee that buf complies with
+ * the complement of the supplied flags.
+ */
+bool core_pbuf_is(uint32_t flags, paddr_t pbuf, size_t len);
+
+/*
+ * Translates the supplied virtual address to a physical address and uses
+ * tee_phys_buf_is() to check the compliance of the buffer.
+ */
+bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
+
+/*
+ * Translate physical address to virtual address using specified mapping
+ * Returns NULL on failure or a valid virtual address on success.
+ */
+void *phys_to_virt(paddr_t pa, enum teecore_memtypes m);
+
+/*
+ * Translate virtual address to physical address
+ * Returns 0 on failure or a valid physical address on success.
+ */
+paddr_t virt_to_phys(void *va);
+
+#endif /* CORE_MEMPROT_H */
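
A sketch of the intended check-then-map pattern (illustrative only; treating
the buffer as shared memory of type MEM_AREA_NSEC_SHM is an assumption about
where the address lives):

#include <mm/core_memprot.h>

static void *example_map_nsec_buf(paddr_t pa, size_t len)
{
	/* Explicitly require "non-secure"; "not secure" is not good enough */
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, len))
		return NULL;

	return phys_to_virt(pa, MEM_AREA_NSEC_SHM);
}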
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
new file mode 100644
index 0000000..03ad93d
--- /dev/null
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MMU_H
+#define CORE_MMU_H
+
+#include <compiler.h>
+#include <kernel/user_ta.h>
+#include <mm/tee_mmu_types.h>
+#include <types_ext.h>
+
+/* A small page is the smallest unit of memory that can be mapped */
+#define SMALL_PAGE_SHIFT 12
+#define SMALL_PAGE_MASK 0x00000fff
+#define SMALL_PAGE_SIZE 0x00001000
+
+/*
+ * PGDIR is the translation table above the translation table that holds
+ * the pages.
+ */
+#ifdef CFG_WITH_LPAE
+#define CORE_MMU_PGDIR_SHIFT 21
+#else
+#define CORE_MMU_PGDIR_SHIFT 20
+#endif
+#define CORE_MMU_PGDIR_SIZE (1 << CORE_MMU_PGDIR_SHIFT)
+#define CORE_MMU_PGDIR_MASK (CORE_MMU_PGDIR_SIZE - 1)
+
+/* Devices are mapped using this granularity */
+#define CORE_MMU_DEVICE_SHIFT CORE_MMU_PGDIR_SHIFT
+#define CORE_MMU_DEVICE_SIZE (1 << CORE_MMU_DEVICE_SHIFT)
+#define CORE_MMU_DEVICE_MASK (CORE_MMU_DEVICE_SIZE - 1)
+
+/* TA user space code, data, stack and heap are mapped using this granularity */
+#ifdef CFG_SMALL_PAGE_USER_TA
+#define CORE_MMU_USER_CODE_SHIFT SMALL_PAGE_SHIFT
+#else
+#define CORE_MMU_USER_CODE_SHIFT CORE_MMU_PGDIR_SHIFT
+#endif
+#define CORE_MMU_USER_CODE_SIZE (1 << CORE_MMU_USER_CODE_SHIFT)
+#define CORE_MMU_USER_CODE_MASK (CORE_MMU_USER_CODE_SIZE - 1)
+
+/* TA user space parameters are mapped using this granularity */
+#ifdef CFG_SMALL_PAGE_USER_TA
+#define CORE_MMU_USER_PARAM_SHIFT SMALL_PAGE_SHIFT
+#else
+#define CORE_MMU_USER_PARAM_SHIFT CORE_MMU_PGDIR_SHIFT
+#endif
+#define CORE_MMU_USER_PARAM_SIZE (1 << CORE_MMU_USER_PARAM_SHIFT)
+#define CORE_MMU_USER_PARAM_MASK (CORE_MMU_USER_PARAM_SIZE - 1)
+
+/*
+ * Memory area type:
+ * MEM_AREA_NOTYPE: Undefined type. Used as end of table.
+ * MEM_AREA_TEE_RAM: teecore execution RAM (secure, reserved to TEE, unused)
+ * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
+ * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances.
+ * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
+ * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
+ * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
+ * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
+ * MEM_AREA_IO_SEC: Secure HW mapped registers
+ * MEM_AREA_RES_VASPACE: Reserved virtual memory space
+ * MEM_AREA_TA_VASPACE: TA va space, only used with phys_to_virt()
+ * MEM_AREA_MAXTYPE: first invalid 'type' value
+ */
+enum teecore_memtypes {
+ MEM_AREA_NOTYPE = 0,
+ MEM_AREA_TEE_RAM,
+ MEM_AREA_TEE_COHERENT,
+ MEM_AREA_TA_RAM,
+ MEM_AREA_NSEC_SHM,
+ MEM_AREA_RAM_NSEC,
+ MEM_AREA_RAM_SEC,
+ MEM_AREA_IO_NSEC,
+ MEM_AREA_IO_SEC,
+ MEM_AREA_RES_VASPACE,
+ MEM_AREA_TA_VASPACE,
+ MEM_AREA_MAXTYPE
+};
+
+struct core_mmu_phys_mem {
+ const char *name;
+ enum teecore_memtypes type;
+ paddr_t addr;
+ size_t size;
+};
+
+#define register_phys_mem(type, addr, size) \
+ static const struct core_mmu_phys_mem __phys_mem_ ## addr \
+ __used __section("phys_mem_map_section") = \
+ { #addr, (type), (addr), (size) }
+
+
+/* Default NSec shared memory allocated from NSec world */
+extern unsigned long default_nsec_shm_paddr;
+extern unsigned long default_nsec_shm_size;
+
+void core_init_mmu_map(void);
+void core_init_mmu_regs(void);
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr);
+
+#ifdef CFG_WITH_LPAE
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @user_map: physical address of user map translation table
+ * @asid: ASID for the user map
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint64_t user_map;
+ uint32_t asid;
+};
+#else
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @ttbr0: content of ttbr0
+ * @ctxid: content of contextidr
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint32_t ttbr0;
+ uint32_t ctxid;
+};
+#endif
+
+#ifdef CFG_WITH_LPAE
+bool core_mmu_user_va_range_is_defined(void);
+#else
+static inline bool core_mmu_user_va_range_is_defined(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * core_mmu_get_user_va_range() - Return range of user va space
+ * @base: Lowest user virtual address
+ * @size: Size in bytes of user address space
+ */
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
+
+/*
+ * enum core_mmu_fault - different kinds of faults
+ * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
+ * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
+ * @CORE_MMU_FAULT_TRANSLATION: translation fault
+ * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
+ * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
+ * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
+ * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
+ * @CORE_MMU_FAULT_OTHER: Other/unknown fault
+ */
+enum core_mmu_fault {
+ CORE_MMU_FAULT_ALIGNMENT,
+ CORE_MMU_FAULT_DEBUG_EVENT,
+ CORE_MMU_FAULT_TRANSLATION,
+ CORE_MMU_FAULT_WRITE_PERMISSION,
+ CORE_MMU_FAULT_READ_PERMISSION,
+ CORE_MMU_FAULT_ASYNC_EXTERNAL,
+ CORE_MMU_FAULT_ACCESS_BIT,
+ CORE_MMU_FAULT_OTHER,
+};
+
+/*
+ * core_mmu_get_fault_type() - get fault type
+ * @fault_descr: Content of fault status or exception syndrome register
+ * @returns an enum describing the content of fault status register.
+ */
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
+
+/*
+ * core_mmu_type_to_attr() - convert memory type to attribute
+ * @t: memory type
+ * @returns an attribute that can be passed to core_mmu_set_entry() and friends
+ */
+uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);
+
+/*
+ * core_mmu_create_user_map() - Create user space mapping
+ * @utc: Pointer to user TA context
+ * @map: MMU configuration to use when activating this VA space
+ */
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map);
+/*
+ * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
+ * @map: MMU configuration for current user VA space.
+ */
+void core_mmu_get_user_map(struct core_mmu_user_map *map);
+
+/*
+ * core_mmu_set_user_map() - Set new MMU configuration for user VA space
+ * @map: If NULL will disable user VA space, if not NULL the user
+ * VA space to activate.
+ */
+void core_mmu_set_user_map(struct core_mmu_user_map *map);
+
+/*
+ * struct core_mmu_table_info - Properties for a translation table
+ * @table: Pointer to translation table
+ * @va_base: VA base address of the translation table
+ * @level: Translation table level
+ * @shift: The shift of each entry in the table
+ * @num_entries: Number of entries in this table.
+ */
+struct core_mmu_table_info {
+ void *table;
+ vaddr_t va_base;
+ unsigned level;
+ unsigned shift;
+ unsigned num_entries;
+};
+
+/*
+ * core_mmu_find_table() - Locates a translation table
+ * @va: Virtual address for the table to cover
+ * @max_level: Don't traverse beyond this level
+ * @tbl_info: Pointer to where to store properties.
+ * @return true if a translation table was found, false on error
+ */
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info);
+
+/*
+ * core_mmu_divide_block() - divide larger block/section into smaller ones
+ * @tbl_info: table where target record located
+ * @idx: index of record
+ * @return true if function was able to divide block, false on error
+ */
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx);
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr);
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);
+
+/*
+ * core_mmu_set_entry() - Set entry in translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to update
+ * @pa: Physical address to assign entry
+ * @attr: Attributes to assign entry
+ */
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr);
+
+void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
+ paddr_t *pa, uint32_t *attr);
+
+/*
+ * core_mmu_get_entry() - Get entry from translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to read
+ * @pa: Physical address is returned here if pa is not NULL
+ * @attr: Attributes are returned here if attr is not NULL
+ */
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr);
+
+/*
+ * core_mmu_va2idx() - Translate from virtual address to table index
+ * @tbl_info: Translation table properties
+ * @va: Virtual address to translate
+ * @returns index in translation table
+ */
+static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
+ vaddr_t va)
+{
+ return (va - tbl_info->va_base) >> tbl_info->shift;
+}
+
+/*
+ * core_mmu_idx2va() - Translate from table index to virtual address
+ * @tbl_info: Translation table properties
+ * @idx: Index to translate
+ * @returns Virtual address
+ */
+static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
+ unsigned idx)
+{
+ return (idx << tbl_info->shift) + tbl_info->va_base;
+}
+
+/*
+ * core_mmu_get_block_offset() - Get offset inside a block/page
+ * @tbl_info: Translation table properties
+ * @pa: Physical address
+ * @returns offset within one block of the translation table
+ */
+static inline size_t core_mmu_get_block_offset(
+ struct core_mmu_table_info *tbl_info, paddr_t pa)
+{
+ return pa & ((1 << tbl_info->shift) - 1);
+}
+
+/*
+ * core_mmu_user_mapping_is_active() - Report if user mapping is active
+ * @returns true if a user VA space is active, false if user VA space is
+ * inactive.
+ */
+bool core_mmu_user_mapping_is_active(void);
+
+/*
+ * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
+ * @returns true if the attributes can be used, false if not.
+ */
+bool core_mmu_mattr_is_ok(uint32_t mattr);
+
+void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
+ vaddr_t *e);
+
+enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);
+
+/* Function is deprecated, use virt_to_phys() instead */
+int core_va2pa_helper(void *va, paddr_t *pa);
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void);
+
+bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
+
+/* L1/L2 cache maintenance (op: refer to ???) */
+unsigned int cache_maintenance_l1(int op, void *va, size_t len);
+#ifdef CFG_PL310
+unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len);
+#else
+static inline unsigned int cache_maintenance_l2(int op __unused,
+ paddr_t pa __unused,
+ size_t len __unused)
+{
+ /* Nothing to do about L2 Cache Maintenance when no PL310 */
+ return TEE_SUCCESS;
+}
+#endif
+
+/* various invalidate secure TLB */
+enum teecore_tlb_op {
+ TLBINV_UNIFIEDTLB, /* invalidate unified tlb */
+ TLBINV_CURRENT_ASID, /* invalidate unified tlb for current ASID */
+ TLBINV_BY_ASID, /* invalidate unified tlb by ASID */
+ TLBINV_BY_MVA, /* invalidate unified tlb by MVA */
+};
+
+int core_tlb_maintenance(int op, unsigned int a);
+
+/* Cache maintenance operation type */
+typedef enum {
+ DCACHE_CLEAN = 0x1,
+ DCACHE_AREA_CLEAN = 0x2,
+ DCACHE_INVALIDATE = 0x3,
+ DCACHE_AREA_INVALIDATE = 0x4,
+ ICACHE_INVALIDATE = 0x5,
+ ICACHE_AREA_INVALIDATE = 0x6,
+ WRITE_BUFFER_DRAIN = 0x7,
+ DCACHE_CLEAN_INV = 0x8,
+ DCACHE_AREA_CLEAN_INV = 0x9,
+ L2CACHE_INVALIDATE = 0xA,
+ L2CACHE_AREA_INVALIDATE = 0xB,
+ L2CACHE_CLEAN = 0xC,
+ L2CACHE_AREA_CLEAN = 0xD,
+ L2CACHE_CLEAN_INV = 0xE,
+ L2CACHE_AREA_CLEAN_INV = 0xF
+} t_cache_operation_id;
+
+/* Check cpu mmu enabled or not */
+bool cpu_mmu_enabled(void);
+
+#endif /* CORE_MMU_H */
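
A sketch tying the table accessors together (illustrative only; it assumes
the entry covering the address is valid, and that the low bits of VA and PA
are equal within a block so the VA can be used to compute the offset):

#include <limits.h>
#include <mm/core_mmu.h>

static bool example_va_to_pa_attr(vaddr_t va, paddr_t *pa, uint32_t *attr)
{
	struct core_mmu_table_info tbl_info;
	unsigned idx;

	/* UINT_MAX: walk as deep as the translation tables allow */
	if (!core_mmu_find_table(va, UINT_MAX, &tbl_info))
		return false;

	idx = core_mmu_va2idx(&tbl_info, va);
	core_mmu_get_entry(&tbl_info, idx, pa, attr);

	if (pa)
		*pa += core_mmu_get_block_offset(&tbl_info, (paddr_t)va);

	return true;
}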
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
new file mode 100644
index 0000000..d5eeb69
--- /dev/null
+++ b/core/arch/arm/include/mm/mobj.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MM_MOBJ_H
+#define __MM_MOBJ_H
+
+#include <compiler.h>
+#include <mm/core_memprot.h>
+#include <optee_msg.h>
+#include <sys/queue.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+
+
+struct mobj {
+ const struct mobj_ops *ops;
+ size_t size;
+};
+
+struct mobj_ops {
+ void *(*get_va)(struct mobj *mobj, size_t offs);
+ TEE_Result (*get_pa)(struct mobj *mobj, size_t offs, size_t granule,
+ paddr_t *pa);
+ TEE_Result (*get_cattr)(struct mobj *mobj, uint32_t *cattr);
+ bool (*matches)(struct mobj *mobj, enum buf_is_attr attr);
+ void (*free)(struct mobj *mobj);
+ void (*update_mapping)(struct mobj *mobj, struct user_ta_ctx *utc,
+ vaddr_t va);
+};
+
+extern struct mobj mobj_virt;
+extern struct mobj *mobj_sec_ddr;
+
+static inline void *mobj_get_va(struct mobj *mobj, size_t offset)
+{
+ if (mobj && mobj->ops && mobj->ops->get_va)
+ return mobj->ops->get_va(mobj, offset);
+ return NULL;
+}
+
+static inline TEE_Result mobj_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ if (mobj && mobj->ops && mobj->ops->get_pa)
+ return mobj->ops->get_pa(mobj, offs, granule, pa);
+ return TEE_ERROR_GENERIC;
+}
+
+static inline TEE_Result mobj_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ if (mobj && mobj->ops && mobj->ops->get_cattr)
+ return mobj->ops->get_cattr(mobj, cattr);
+ return TEE_ERROR_GENERIC;
+}
+
+static inline bool mobj_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ if (mobj && mobj->ops && mobj->ops->matches)
+ return mobj->ops->matches(mobj, attr);
+ return false;
+}
+
+static inline void mobj_free(struct mobj *mobj)
+{
+ if (mobj && mobj->ops && mobj->ops->free)
+ mobj->ops->free(mobj);
+}
+
+
+static inline void mobj_update_mapping(struct mobj *mobj,
+ struct user_ta_ctx *utc, vaddr_t va)
+{
+ if (mobj && mobj->ops && mobj->ops->update_mapping)
+ mobj->ops->update_mapping(mobj, utc, va);
+}
+
+static inline bool mobj_is_nonsec(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_NON_SEC);
+}
+
+static inline bool mobj_is_secure(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_SEC);
+}
+
+struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
+ tee_mm_pool_t *pool);
+
+struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
+ enum buf_is_attr battr);
+
+struct mobj *mobj_paged_alloc(size_t size);
+
+#ifdef CFG_PAGED_USER_TA
+bool mobj_is_paged(struct mobj *mobj);
+#else
+static inline bool mobj_is_paged(struct mobj *mobj __unused)
+{
+ return false;
+}
+#endif
+
+struct mobj *mobj_seccpy_shm_alloc(size_t size);
+
+#endif /*__MM_MOBJ_H*/
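
A sketch of how the ops are reached through the helpers above (illustrative
only; the cache attribute argument and granule value 0 are assumptions made
for the example):

#include <assert.h>
#include <mm/mobj.h>

static void example_mobj_usage(paddr_t pa, size_t size, uint32_t cattr)
{
	paddr_t check_pa = 0;
	struct mobj *mobj = mobj_phys_alloc(pa, size, cattr, CORE_MEM_SEC);

	if (!mobj)
		return;

	/* Every access goes through mobj->ops behind these inline wrappers */
	if (mobj_get_pa(mobj, 0, 0, &check_pa) == TEE_SUCCESS)
		assert(check_pa == pa);
	assert(mobj_is_secure(mobj));

	mobj_free(mobj);
}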
diff --git a/core/arch/arm/include/mm/pgt_cache.h b/core/arch/arm/include/mm/pgt_cache.h
new file mode 100644
index 0000000..8812758
--- /dev/null
+++ b/core/arch/arm/include/mm/pgt_cache.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MM_PGT_CACHE_H
+#define MM_PGT_CACHE_H
+
+#ifdef CFG_WITH_LPAE
+#define PGT_SIZE (4 * 1024)
+#define PGT_NUM_PGT_PER_PAGE 1
+#else
+#define PGT_SIZE (1 * 1024)
+#define PGT_NUM_PGT_PER_PAGE 4
+#endif
+
+#include <kernel/tee_ta_manager.h>
+#include <sys/queue.h>
+#include <types_ext.h>
+#include <util.h>
+
+struct pgt {
+ void *tbl;
+#if defined(CFG_PAGED_USER_TA)
+ vaddr_t vabase;
+ struct tee_ta_ctx *ctx;
+ size_t num_used_entries;
+#endif
+#if defined(CFG_WITH_PAGER)
+#if !defined(CFG_WITH_LPAE)
+ struct pgt_parent *parent;
+#endif
+#endif
+#ifdef CFG_SMALL_PAGE_USER_TA
+ SLIST_ENTRY(pgt) link;
+#endif
+};
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+/*
+ * Reserve 2 page tables per thread, but at least 4 page tables in total
+ */
+#if CFG_NUM_THREADS < 2
+#define PGT_CACHE_SIZE 4
+#else
+#define PGT_CACHE_SIZE ROUNDUP(CFG_NUM_THREADS * 2, PGT_NUM_PGT_PER_PAGE)
+#endif
+
+SLIST_HEAD(pgt_cache, pgt);
+
+static inline bool pgt_check_avail(size_t num_tbls)
+{
+ return num_tbls <= PGT_CACHE_SIZE;
+}
+
+void pgt_alloc(struct pgt_cache *pgt_cache, void *owning_ctx,
+ vaddr_t begin, vaddr_t last);
+void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx);
+
+#ifdef CFG_PAGED_USER_TA
+void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last);
+#else
+static inline void pgt_flush_ctx_range(struct pgt_cache *pgt_cache __unused,
+ void *ctx __unused,
+ vaddr_t begin __unused,
+ vaddr_t last __unused)
+{
+}
+#endif
+
+void pgt_transfer(struct pgt_cache *pgt_cache, void *old_ctx, vaddr_t old_va,
+ void *new_ctx, vaddr_t new_va, size_t size);
+
+void pgt_init(void);
+
+#else
+
+static inline void pgt_init(void)
+{
+}
+
+#endif
+
+#if defined(CFG_PAGED_USER_TA)
+void pgt_flush_ctx(struct tee_ta_ctx *ctx);
+
+static inline void pgt_inc_used_entries(struct pgt *pgt)
+{
+ pgt->num_used_entries++;
+}
+
+static inline void pgt_dec_used_entries(struct pgt *pgt)
+{
+ pgt->num_used_entries--;
+}
+
+static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
+{
+ pgt->num_used_entries = val;
+}
+
+#else
+static inline void pgt_flush_ctx(struct tee_ta_ctx *ctx __unused)
+{
+}
+
+static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
+{
+}
+
+static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
+{
+}
+
+static inline void pgt_set_used_entries(struct pgt *pgt __unused,
+ size_t val __unused)
+{
+}
+
+#endif
+
+#endif /*MM_PGT_CACHE_H*/
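
A sketch of sizing the page table need of a VA range against the cache
(illustrative only; the helper name is an assumption). As a worked example
of the constants above: with CFG_NUM_THREADS = 8 and the short-descriptor
format (4 tables per page), PGT_CACHE_SIZE is ROUNDUP(16, 4) = 16.

#include <mm/core_mmu.h>
#include <mm/pgt_cache.h>
#include <util.h>

static bool example_enough_pgts(vaddr_t begin, vaddr_t end)
{
	/* One translation table covers CORE_MMU_PGDIR_SIZE of VA space */
	size_t ntbl = (ROUNDUP(end, CORE_MMU_PGDIR_SIZE) -
		       ROUNDDOWN(begin, CORE_MMU_PGDIR_SIZE)) /
		      CORE_MMU_PGDIR_SIZE;

	return pgt_check_avail(ntbl);
}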
diff --git a/core/arch/arm/include/mm/tee_pager.h b/core/arch/arm/include/mm/tee_pager.h
new file mode 100644
index 0000000..d48577a
--- /dev/null
+++ b/core/arch/arm/include/mm/tee_pager.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MM_TEE_PAGER_H
+#define MM_TEE_PAGER_H
+
+#include <kernel/abort.h>
+#include <kernel/panic.h>
+#include <kernel/user_ta.h>
+#include <mm/tee_mm.h>
+#include <string.h>
+#include <trace.h>
+
+/*
+ * Reference to translation table used to map the virtual memory range
+ * covered by the pager.
+ */
+extern struct core_mmu_table_info tee_pager_tbl_info;
+
+struct tee_pager_area_head;
+
+/*
+ * tee_pager_init() - Initializes the pager
+ * @mm_alias: The alias area where all physical pages managed by the
+ * pager are aliased
+ *
+ * Panics if called twice or some other error occurs.
+ */
+void tee_pager_init(tee_mm_entry_t *mm_alias);
+
+/*
+ * tee_pager_add_core_area() - Adds a pageable core area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ * @flags: describes attributes of mapping
+ * @store: backing store for the memory area
+ * @hashes: hashes of the pages in the backing store
+ *
+ * TEE_MATTR_PW - read-write mapping else read-only mapping
+ * TEE_MATTR_PX - executable mapping
+ * TEE_MATTR_LOCKED - on demand locked mapping, requires TEE_MATTR_PW,
+ * will only be unmapped by a call to
+ * tee_pager_release_phys()
+ *
+ * !TEE_MATTR_PW requires store and hashes to be !NULL while
+ * TEE_MATTR_PW requires store and hashes to be NULL.
+ *
+ * Invalid use of flags or non-page aligned base or size or size == 0 will
+ * cause a panic.
+ *
+ * Return true on success or false if area can't be added.
+ */
+bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
+ const void *store, const void *hashes);
+
+/*
+ * tee_pager_add_uta_area() - Adds a pageable user ta area
+ * @utc: user ta context of the area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ *
+ * The mapping is created suitably for initializing the memory content while
+ * loading the TA. Once the TA is properly loaded, the areas should be
+ * finalized with tee_pager_set_uta_area_attr() to get stricter settings.
+ *
+ * Return true on success or false if the area can't be added
+ */
+bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size);
+
+/*
+ * tee_pager_set_uta_area_attr() - Set attributes of an initialized memory area
+ * @utc: user ta context of the area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ * @flags: TEE_MATTR_U* flags describing permissions of the area
+ *
+ * Return true on success or false if the area can't be updated
+ */
+bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size, uint32_t flags);
+
+void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
+ vaddr_t src_base,
+ struct user_ta_ctx *dst_utc,
+ vaddr_t dst_base, struct pgt **dst_pgt,
+ size_t size);
+void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size);
+
+/*
+ * tee_pager_rem_uta_areas() - Remove all user ta areas
+ * @utc: user ta context
+ *
+ * This function is called when a user ta context is torn down.
+ */
+#ifdef CFG_PAGED_USER_TA
+void tee_pager_rem_uta_areas(struct user_ta_ctx *utc);
+#else
+static inline void tee_pager_rem_uta_areas(struct user_ta_ctx *utc __unused)
+{
+}
+#endif
+
+/*
+ * tee_pager_assign_uta_tables() - Assigns translation table to a user ta
+ * @utc: user ta context
+ *
+ * This function is called to assign translation tables for the pageable
+ * areas of a user TA.
+ */
+#ifdef CFG_PAGED_USER_TA
+void tee_pager_assign_uta_tables(struct user_ta_ctx *utc);
+#else
+static inline void tee_pager_assign_uta_tables(struct user_ta_ctx *utc __unused)
+{
+}
+#endif
+
+/*
+ * Adds physical pages for the pager to use. The supplied virtual address range
+ * is searched for mapped physical pages and unmapped pages are ignored.
+ *
+ * vaddr is the first virtual address
+ * npages is the number of pages to add
+ */
+void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap);
+
+/*
+ * tee_pager_alloc() - Allocate read-write virtual memory from pager.
+ * @size: size of memory in bytes
+ * @flags: flags for allocation
+ *
+ * Allocates read-write memory from the pager; all flags except the optional
+ * TEE_MATTR_LOCKED are ignored.
+ *
+ * @return NULL on failure or a pointer to the virtual memory on success.
+ */
+void *tee_pager_alloc(size_t size, uint32_t flags);
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * tee_pager_pgt_save_and_release_entries() - Save dirty pages to backing store
+ * and remove physical page from translation table
+ * @pgt: page table descriptor
+ *
+ * This function is called when a translation table needs to be recycled
+ */
+void tee_pager_pgt_save_and_release_entries(struct pgt *pgt);
+#endif
+
+/*
+ * tee_pager_release_phys() - Release physical pages used for mapping
+ * @addr: virtual address of first page to release
+ * @size: number of bytes to release
+ *
+ * Only pages completely covered by the supplied range are affected. This
+ * function only supplies a hint to the pager that the physical page can be
+ * reused. The caller can't expect a released memory range to hold a
+ * specific bit pattern when used next time.
+ *
+ * Note that the virtual memory allocation is still valid after this
+ * function has returned, it's just the content that may or may not have
+ * changed.
+ */
+#ifdef CFG_WITH_PAGER
+void tee_pager_release_phys(void *addr, size_t size);
+#else
+static inline void tee_pager_release_phys(void *addr __unused,
+ size_t size __unused)
+{
+}
+#endif
+
+/*
+ * Statistics on the pager
+ */
+struct tee_pager_stats {
+ size_t hidden_hits;
+ size_t ro_hits;
+ size_t rw_hits;
+ size_t zi_released;
+ size_t npages; /* number of load pages */
+ size_t npages_all; /* number of pages */
+};
+
+#ifdef CFG_WITH_PAGER
+void tee_pager_get_stats(struct tee_pager_stats *stats);
+bool tee_pager_handle_fault(struct abort_info *ai);
+#else /*CFG_WITH_PAGER*/
+static inline bool tee_pager_handle_fault(struct abort_info *ai __unused)
+{
+ return false;
+}
+
+static inline void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ memset(stats, 0, sizeof(struct tee_pager_stats));
+}
+#endif /*CFG_WITH_PAGER*/
+
+#endif /*MM_TEE_PAGER_H*/
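
A sketch of allocating pager-backed memory (illustrative only; using
TEE_MATTR_LOCKED from <mm/tee_mmu_types.h> and zero-filling the buffer are
choices made for the example, not requirements of the API):

static void *example_pager_buffer(size_t size)
{
	void *va = tee_pager_alloc(size, TEE_MATTR_LOCKED);

	if (!va)
		return NULL;

	/* Touching the memory faults the pages in; the pager supplies them */
	memset(va, 0, size);
	return va;
}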
diff --git a/core/arch/arm/include/sm/optee_smc.h b/core/arch/arm/include/sm/optee_smc.h
new file mode 100644
index 0000000..b6fcd65
--- /dev/null
+++ b/core/arch/arm/include/sm/optee_smc.h
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure
+ * world and normal world kernel driver. We're following ARM SMC Calling
+ * Convention as specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ *
+ * This file depends on optee_msg.h being included to expand the SMC id
+ * macros below.
+ */
+
+#define OPTEE_SMC_32 0
+#define OPTEE_SMC_64 0x40000000
+#define OPTEE_SMC_FAST_CALL 0x80000000
+#define OPTEE_SMC_STD_CALL 0
+
+#define OPTEE_SMC_OWNER_MASK 0x3F
+#define OPTEE_SMC_OWNER_SHIFT 24
+
+#define OPTEE_SMC_FUNC_MASK 0xFFFF
+
+#define OPTEE_SMC_IS_FAST_CALL(smc_val) ((smc_val) & OPTEE_SMC_FAST_CALL)
+#define OPTEE_SMC_IS_64(smc_val) ((smc_val) & OPTEE_SMC_64)
+#define OPTEE_SMC_FUNC_NUM(smc_val) ((smc_val) & OPTEE_SMC_FUNC_MASK)
+#define OPTEE_SMC_OWNER_NUM(smc_val) \
+ (((smc_val) >> OPTEE_SMC_OWNER_SHIFT) & OPTEE_SMC_OWNER_MASK)
+
+#define OPTEE_SMC_CALL_VAL(type, calling_convention, owner, func_num) \
+ ((type) | (calling_convention) | \
+ (((owner) & OPTEE_SMC_OWNER_MASK) << \
+ OPTEE_SMC_OWNER_SHIFT) |\
+ ((func_num) & OPTEE_SMC_FUNC_MASK))
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_STD_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
+
+#define OPTEE_SMC_OWNER_ARCH 0
+#define OPTEE_SMC_OWNER_CPU 1
+#define OPTEE_SMC_OWNER_SIP 2
+#define OPTEE_SMC_OWNER_OEM 3
+#define OPTEE_SMC_OWNER_STANDARD 4
+#define OPTEE_SMC_OWNER_TRUSTED_APP 48
+#define OPTEE_SMC_OWNER_TRUSTED_OS 50
+
+#define OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED 62
+#define OPTEE_SMC_OWNER_TRUSTED_OS_API 63
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
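
/*
 * Worked example (illustrative, using the macros above): expanding
 * OPTEE_SMC_CALLS_COUNT gives
 *
 *   OPTEE_SMC_32 | OPTEE_SMC_FAST_CALL                   0x80000000
 *   (OPTEE_SMC_OWNER_TRUSTED_OS_API & 0x3F) << 24        0x3F000000
 *   OPTEE_SMC_FUNCID_CALLS_COUNT & 0xFFFF                0x0000FF00
 *   --------------------------------------------------------------
 *   OPTEE_SMC_CALLS_COUNT                                0xBF00FF00
 *
 * i.e. the value the normal world passes in r0/w0 when issuing this fast call.
 */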
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * they translate to r0..r7 and on arm64 to w0..w7. In both cases they are
+ * 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return the following UID if using API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * see also OPTEE_MSG_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2	Size of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+/*
+ * Configures L2CC mutex
+ *
+ * Disables or enables usage of the L2CC mutex. Returns or sets the physical
+ * address of the L2CC mutex.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_L2CC_MUTEX
+ * a1 OPTEE_SMC_L2CC_MUTEX_GET_ADDR Get physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_SET_ADDR Set physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_ENABLE Enable usage of mutex
+ * OPTEE_SMC_L2CC_MUTEX_DISABLE Disable usage of mutex
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-6	Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Preserved
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-7	Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL Physical address not available
+ * OPTEE_SMC_RETURN_EBADADDR Bad supplied physical address
+ * OPTEE_SMC_RETURN_EBADCMD Unsupported value in a1
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_L2CC_MUTEX_GET_ADDR 0
+#define OPTEE_SMC_L2CC_MUTEX_SET_ADDR 1
+#define OPTEE_SMC_L2CC_MUTEX_ENABLE 2
+#define OPTEE_SMC_L2CC_MUTEX_DISABLE 3
+#define OPTEE_SMC_FUNCID_L2CC_MUTEX 8
+#define OPTEE_SMC_L2CC_MUTEX \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_L2CC_MUTEX)
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR (1 << 0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM (1 << 0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM (1 << 1)
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+/*
+ * Disable and empties cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Release of secondary cores
+ *
+ * OP-TEE in secure world is in charge of the release process of secondary
+ * cores. The Rich OS issues this request to ask OP-TEE to boot up the
+ * secondary cores, go through the OP-TEE per-core initialization, and then
+ * switch to the Non-Secure world with the Rich OS provided entry address.
+ * The secondary cores enter Non-Secure world in SVC mode, with Thumb, FIQ,
+ * IRQ and Abort bits disabled.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_BOOT_SECONDARY
+ * a1 Index of secondary core to boot
+ * a2 Upper 32 bits of a 64-bit Non-Secure world entry physical address
+ * a3 Lower 32 bits of a 64-bit Non-Secure world entry physical address
+ * a4-7 Not used
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_EBADCMD Core index out of range
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_BOOT_SECONDARY 12
+#define OPTEE_SMC_BOOT_SECONDARY \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ *					updated in the previously supplied
+ *					struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_IRQ
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_IRQ 4
+#define OPTEE_SMC_RETURN_RPC_IRQ \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ * - param_attrs attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) \
+ (((ret) != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) && \
+ ((((ret) & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == \
+ OPTEE_SMC_RETURN_RPC_PREFIX)))
+
+#endif /* OPTEE_SMC_H */
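The RPC return values above are what a normal-world caller switches on after
OPTEE_SMC_CALL_WITH_ARG. Below is a hedged dispatch sketch, not part of this
patch: the handle_rpc_*() helpers are assumed to be caller-provided, while the
classification macros are the ones defined in this header.

#include <stdint.h>

void handle_rpc_alloc(uint32_t size);		/* assumed helpers */
void handle_rpc_free(uint64_t cookie);
void handle_rpc_irq(void);
void handle_rpc_cmd(uint64_t cookie);

static void dispatch_optee_return(uint32_t a0, uint32_t a1, uint32_t a2)
{
	if (!OPTEE_SMC_RETURN_IS_RPC(a0))
		return;	/* a0 is a final status, e.g. OPTEE_SMC_RETURN_OK */

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		handle_rpc_alloc(a1);		/* a1 = requested size */
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		handle_rpc_free(((uint64_t)a1 << 32) | a2);
		break;
	case OPTEE_SMC_RPC_FUNC_IRQ:
		handle_rpc_irq();
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		handle_rpc_cmd(((uint64_t)a1 << 32) | a2);
		break;
	default:
		break;
	}
	/* The caller then resumes with OPTEE_SMC_CALL_RETURN_FROM_RPC. */
}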
diff --git a/core/arch/arm/include/sm/psci.h b/core/arch/arm/include/sm/psci.h
new file mode 100644
index 0000000..57d8f07
--- /dev/null
+++ b/core/arch/arm/include/sm/psci.h
@@ -0,0 +1,60 @@
+#include <kernel/thread.h>
+#include <stdint.h>
+
+#define PSCI_FN_BASE (0x84000000U)
+#define PSCI_FN(n) (PSCI_FN_BASE + (n))
+
+#define PSCI_VERSION_0_2 (0x00000002)
+#define PSCI_VERSION_1_0 (0x00010000)
+#define PSCI_VERSION PSCI_FN(0)
+#define PSCI_CPU_SUSPEND PSCI_FN(1)
+#define PSCI_CPU_OFF PSCI_FN(2)
+#define PSCI_CPU_ON PSCI_FN(3)
+#define PSCI_AFFINITY_INFO PSCI_FN(4)
+#define PSCI_MIGRATE PSCI_FN(5)
+#define PSCI_MIGRATE_INFO_TYPE PSCI_FN(6)
+#define PSCI_MIGRATE_INFO_UP_CPU PSCI_FN(7)
+#define PSCI_SYSTEM_OFF PSCI_FN(8)
+#define PSCI_SYSTEM_RESET PSCI_FN(9)
+#define PSCI_PSCI_FEATURES PSCI_FN(10)
+#define PSCI_CPU_FREEZE PSCI_FN(11)
+#define PSCI_CPU_DEFAULT_SUSPEND PSCI_FN(12)
+#define PSCI_NODE_HW_STATE PSCI_FN(13)
+#define PSCI_SYSTEM_SUSPEND PSCI_FN(14)
+#define PSCI_PSCI_SET_SUSPEND_MODE PSCI_FN(15)
+#define PSCI_FN_STAT_RESIDENCY PSCI_FN(16)
+#define PSCI_FN_STAT_COUNT PSCI_FN(17)
+
+#define PSCI_NUM_CALLS 18
+
+#define PSCI_AFFINITY_LEVEL_ON 0
+#define PSCI_AFFINITY_LEVEL_OFF 1
+#define PSCI_AFFINITY_LEVEL_ON_PENDING 2
+
+#define PSCI_RET_SUCCESS (0)
+#define PSCI_RET_NOT_SUPPORTED (-1)
+#define PSCI_RET_INVALID_PARAMETERS (-2)
+#define PSCI_RET_DENIED (-3)
+#define PSCI_RET_ALREADY_ON (-4)
+#define PSCI_RET_ON_PENDING (-5)
+#define PSCI_RET_INTERNAL_FAILURE (-6)
+#define PSCI_RET_NOT_PRESENT (-7)
+#define PSCI_RET_DISABLED (-8)
+#define PSCI_RET_INVALID_ADDRESS (-9)
+
+uint32_t psci_version(void);
+int psci_cpu_suspend(uint32_t power_state, uintptr_t entry,
+ uint32_t context_id);
+int psci_cpu_off(void);
+int psci_cpu_on(uint32_t cpu_id, uint32_t entry, uint32_t context_id);
+int psci_affinity_info(uint32_t affinity, uint32_t lowest_affinity_level);
+int psci_migrate(uint32_t cpu_id);
+int psci_migrate_info_type(void);
+int psci_migrate_info_up_cpu(void);
+void psci_system_off(void);
+void psci_system_reset(void);
+int psci_features(uint32_t psci_fid);
+int psci_node_hw_state(uint32_t cpu_id, uint32_t power_level);
+int psci_stat_residency(uint32_t cpu_id, uint32_t power_state);
+int psci_stat_count(uint32_t cpu_id, uint32_t power_state);
+void tee_psci_handler(struct thread_smc_args *args);
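A hedged sketch of how the function IDs and return codes above could be
routed. It mirrors the declared tee_psci_handler() but is illustrative only;
it is not the handler committed elsewhere in this patch.

/* Illustrative only; argument mapping follows the PSCI SMC convention. */
static void example_psci_dispatch(struct thread_smc_args *args)
{
	switch (args->a0) {
	case PSCI_VERSION:
		args->a0 = psci_version();	/* e.g. PSCI_VERSION_1_0 */
		break;
	case PSCI_CPU_ON:
		args->a0 = psci_cpu_on(args->a1, args->a2, args->a3);
		break;
	case PSCI_CPU_OFF:
		args->a0 = psci_cpu_off();
		break;
	case PSCI_PSCI_FEATURES:
		args->a0 = psci_features(args->a1);
		break;
	default:
		args->a0 = PSCI_RET_NOT_SUPPORTED;
		break;
	}
}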
diff --git a/core/arch/arm/include/sm/sm.h b/core/arch/arm/include/sm/sm.h
new file mode 100644
index 0000000..6368359
--- /dev/null
+++ b/core/arch/arm/include/sm/sm.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SM_SM_H
+#define SM_SM_H
+
+#include <types_ext.h>
+
+struct sm_mode_regs {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t irq_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+ uint32_t fiq_spsr;
+ uint32_t fiq_sp;
+ uint32_t fiq_lr;
+ /*
+ * Note that fiq_r{8-12} are not saved here. Instead thread_fiq_handler
+ * preserves r{8-12}.
+ */
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t abt_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t und_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+};
+
+struct sm_nsec_ctx {
+ struct sm_mode_regs mode_regs;
+
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+
+ /* return state */
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
+struct sm_sec_ctx {
+ struct sm_mode_regs mode_regs;
+
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+
+ /* return state */
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
+struct sm_ctx {
+ uint32_t pad;
+ struct sm_sec_ctx sec;
+ struct sm_nsec_ctx nsec;
+};
+
+/*
+ * The secure monitor reserves space at the top of stack_tmp to hold a struct
+ * sm_ctx.
+ */
+#define SM_STACK_TMP_RESERVE_SIZE sizeof(struct sm_ctx)
+
+
+
+/* Returns storage location of non-secure context for current CPU */
+struct sm_nsec_ctx *sm_get_nsec_ctx(void);
+
+/* Returns stack pointer to use in monitor mode for current CPU */
+void *sm_get_sp(void);
+
+/*
+ * Initializes secure monitor, must be called by each CPU
+ */
+void sm_init(vaddr_t stack_pointer);
+
+#endif /*SM_SM_H*/
diff --git a/core/arch/arm/include/sm/std_smc.h b/core/arch/arm/include/sm/std_smc.h
new file mode 100644
index 0000000..2b2e54d
--- /dev/null
+++ b/core/arch/arm/include/sm/std_smc.h
@@ -0,0 +1,22 @@
+#ifndef __STD_SMC_H__
+#define __STD_SMC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT 0x8400ff00
+#define ARM_STD_SVC_UID 0x8400ff01
+/* 0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION 0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR 0x0
+#define STD_SVC_VERSION_MINOR 0x1
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK 0xffe0u
+#define PSCI_FID_VALUE 0u
+#define is_psci_fid(_fid) \
+ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+void smc_std_handler(struct thread_smc_args *args);
+#endif
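A hedged routing sketch for the IDs above; it is not the smc_std_handler()
added elsewhere in this patch. It assumes sm/psci.h (which pulls in
kernel/thread.h) is also included, for PSCI_NUM_CALLS and tee_psci_handler().

static void example_std_svc_dispatch(struct thread_smc_args *args)
{
	if (is_psci_fid(args->a0)) {
		tee_psci_handler(args);		/* declared in sm/psci.h */
		return;
	}

	switch (args->a0) {
	case ARM_STD_SVC_CALL_COUNT:
		args->a0 = PSCI_NUM_CALLS;	/* assumes only PSCI is exposed */
		break;
	case ARM_STD_SVC_VERSION:
		args->a0 = STD_SVC_VERSION_MAJOR;
		args->a1 = STD_SVC_VERSION_MINOR;
		break;
	/* ARM_STD_SVC_UID omitted: it would return a 128-bit UID in a0..a3 */
	default:
		args->a0 = 0xFFFFFFFF;	/* SMCCC "unknown function" */
		break;
	}
}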
diff --git a/core/arch/arm/include/sm/tee_mon.h b/core/arch/arm/include/sm/tee_mon.h
new file mode 100644
index 0000000..725afb9
--- /dev/null
+++ b/core/arch/arm/include/sm/tee_mon.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_MON_H
+#define TEE_MON_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include "tee_api_types.h"
+#include "user_ta_header.h"
+
+extern TEE_Result init_teecore(void);
+
+#endif /* TEE_MON_H */
diff --git a/core/arch/arm/include/sm/teesmc_opteed.h b/core/arch/arm/include/sm/teesmc_opteed.h
new file mode 100644
index 0000000..c6e25e2
--- /dev/null
+++ b/core/arch/arm/include/sm/teesmc_opteed.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEESMC_OPTEED_H
+#define TEESMC_OPTEED_H
+
+/*
+ * This file specifies the SMC function IDs used when returning from the
+ * TEE to the secure monitor.
+ *
+ * All SMC function IDs indicate the SMC32 Calling Convention but will carry
+ * full 64-bit values in the argument registers if invoked from AArch64
+ * mode. This violates the SMC Calling Convention, but since this
+ * convention only covers the API towards the Normal World it is something
+ * that only concerns the OP-TEE Dispatcher in ARM Trusted Firmware and the
+ * OP-TEE OS at Secure EL1.
+ */
+
+/*
+ * Issued when returning from initial entry.
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ENTRY_DONE
+ * r1/x1 Pointer to entry vector
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE 0
+#define TEESMC_OPTEED_RETURN_ENTRY_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE)
+
+
+
+/*
+ * Issued when returning from "cpu_on" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ON_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ON_DONE 1
+#define TEESMC_OPTEED_RETURN_ON_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ON_DONE)
+
+/*
+ * Issued when returning from "cpu_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_OFF_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE 2
+#define TEESMC_OPTEED_RETURN_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE)
+
+/*
+ * Issued when returning from "cpu_suspend" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE 3
+#define TEESMC_OPTEED_RETURN_SUSPEND_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE)
+
+/*
+ * Issued when returning from "cpu_resume" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_RESUME_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE 4
+#define TEESMC_OPTEED_RETURN_RESUME_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE)
+
+/*
+ * Issued when returning from "std_smc" or "fast_smc" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_CALL_DONE
+ * r1-4/x1-4	Return value 0-3 which will be passed to normal world in
+ * r0-3/x0-3
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE 5
+#define TEESMC_OPTEED_RETURN_CALL_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE)
+
+/*
+ * Issued when returning from "fiq" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_FIQ_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE 6
+#define TEESMC_OPTEED_RETURN_FIQ_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE)
+
+/*
+ * Issued when returning from "system_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE 7
+#define TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE)
+
+/*
+ * Issued when returning from "system_reset" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE 8
+#define TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE)
+
+#endif /*TEESMC_OPTEED_H*/
diff --git a/core/arch/arm/include/sm/teesmc_opteed_macros.h b/core/arch/arm/include/sm/teesmc_opteed_macros.h
new file mode 100644
index 0000000..00e9eed
--- /dev/null
+++ b/core/arch/arm/include/sm/teesmc_opteed_macros.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEESMC_OPTEED_MACROS_H
+#define TEESMC_OPTEED_MACROS_H
+
+#define TEESMC_OPTEED_RV(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED, (func_num))
+
+#endif /*TEESMC_OPTEED_MACROS_H*/
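For reference, a hedged sketch of how TEESMC_OPTEED_RV() composes a fast-call
SMC32 function ID under the SMC Calling Convention; the encoding macros and
the owner value come from optee_smc.h earlier in this patch.

/*
 * Bit layout of the resulting function ID (SMC Calling Convention):
 *   bit 31       1 = fast call
 *   bit 30       0 = SMC32 calling convention
 *   bits 29..24  owning entity (OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED)
 *   bits 15..0   function number, e.g. TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE
 */
#include <stdint.h>

static inline uint32_t example_opteed_rv(uint32_t func_num)
{
	/* Same composition as TEESMC_OPTEED_RV(func_num) above */
	return OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL,
				  OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED, func_num);
}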
diff --git a/core/arch/arm/include/tee/arch_svc.h b/core/arch/arm/include/tee/arch_svc.h
new file mode 100644
index 0000000..1848865
--- /dev/null
+++ b/core/arch/arm/include/tee/arch_svc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_ARCH_SVC_H
+#define TEE_ARCH_SVC_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+struct thread_svc_regs;
+
+void tee_svc_handler(struct thread_svc_regs *regs);
+
+/*
+ * Called from the assembly functions syscall_sys_return() and
+ * syscall_panic() to update the register values in the struct
+ * thread_svc_regs to return to TEE Core from an earlier call to
+ * thread_enter_user_mode().
+ */
+uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
+ uint32_t panic_code, struct thread_svc_regs *regs);
+
+#endif /*TEE_ARCH_SVC_H*/
diff --git a/core/arch/arm/include/tee/entry_fast.h b/core/arch/arm/include/tee/entry_fast.h
new file mode 100644
index 0000000..a9951f2
--- /dev/null
+++ b/core/arch/arm/include/tee/entry_fast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_ENTRY_FAST_H
+#define TEE_ENTRY_FAST_H
+
+#include <kernel/thread.h>
+
+/* These functions are overridable by the specific target */
+void tee_entry_get_api_call_count(struct thread_smc_args *args);
+void tee_entry_get_api_uuid(struct thread_smc_args *args);
+void tee_entry_get_api_revision(struct thread_smc_args *args);
+void tee_entry_get_os_uuid(struct thread_smc_args *args);
+void tee_entry_get_os_revision(struct thread_smc_args *args);
+
+/*
+ * Returns the number of calls recognized by tee_entry(). Used by the
+ * specific target to calculate the total number of supported calls when
+ * overriding tee_entry_get_api_call_count().
+ */
+size_t tee_entry_generic_get_api_call_count(void);
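/*
 * Hedged example of the override pattern described above (not part of this
 * patch): a platform adding two hypothetical fast calls reports the generic
 * count plus its own.
 */
#define EXAMPLE_NUM_PLAT_FAST_CALLS	2	/* hypothetical */

void tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count() +
		   EXAMPLE_NUM_PLAT_FAST_CALLS;
}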
+
+/* Fast call entry */
+void tee_entry_fast(struct thread_smc_args *args);
+
+#endif /* TEE_ENTRY_FAST_H */
diff --git a/core/arch/arm/include/tee/entry_std.h b/core/arch/arm/include/tee/entry_std.h
new file mode 100644
index 0000000..d545912
--- /dev/null
+++ b/core/arch/arm/include/tee/entry_std.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_ENTRY_STD_H
+#define TEE_ENTRY_STD_H
+
+#include <kernel/thread.h>
+
+/* Standard call entry */
+void tee_entry_std(struct thread_smc_args *args);
+
+#endif /* TEE_ENTRY_STD_H */
diff --git a/core/arch/arm/kernel/abort.c b/core/arch/arm/kernel/abort.c
new file mode 100644
index 0000000..3d29521
--- /dev/null
+++ b/core/arch/arm/kernel/abort.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/abort.h>
+#include <kernel/misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/panic.h>
+#include <kernel/user_ta.h>
+#include <kernel/unwind.h>
+#include <mm/core_mmu.h>
+#include <mm/tee_pager.h>
+#include <tee/tee_svc.h>
+#include <string.h>
+#include <trace.h>
+#include <arm.h>
+
+enum fault_type {
+ FAULT_TYPE_USER_TA_PANIC,
+ FAULT_TYPE_USER_TA_VFP,
+ FAULT_TYPE_PAGEABLE,
+ FAULT_TYPE_IGNORE,
+};
+
+#ifdef CFG_CORE_UNWIND
+#ifdef ARM32
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.registers[0] = ai->regs->r0;
+ state.registers[1] = ai->regs->r1;
+ state.registers[2] = ai->regs->r2;
+ state.registers[3] = ai->regs->r3;
+ state.registers[4] = ai->regs->r4;
+ state.registers[5] = ai->regs->r5;
+ state.registers[6] = ai->regs->r6;
+ state.registers[7] = ai->regs->r7;
+ state.registers[8] = ai->regs->r8;
+ state.registers[9] = ai->regs->r9;
+ state.registers[10] = ai->regs->r10;
+ state.registers[11] = ai->regs->r11;
+ state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[15] = ai->pc;
+
+ do {
+ EMSG_RAW(" pc 0x%08x", state.registers[15]);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = ai->regs->elr;
+ state.fp = ai->regs->x29;
+
+ do {
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM64*/
+
+static void print_stack_unwind(struct abort_info *ai)
+{
+ EMSG_RAW("Call stack:");
+ __print_stack_unwind(ai);
+}
+#else /*CFG_CORE_UNWIND*/
+static void print_stack_unwind(struct abort_info *ai __unused)
+{
+}
+#endif /*CFG_CORE_UNWIND*/
+
+static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
+{
+ if (abort_type == ABORT_TYPE_DATA)
+ return "data";
+ if (abort_type == ABORT_TYPE_PREFETCH)
+ return "prefetch";
+ return "undef";
+}
+
+static __maybe_unused const char *fault_to_str(uint32_t abort_type,
+ uint32_t fault_descr)
+{
+ /* fault_descr is only valid for data or prefetch abort */
+ if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
+ return "";
+
+ switch (core_mmu_get_fault_type(fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ return " (alignment fault)";
+ case CORE_MMU_FAULT_TRANSLATION:
+ return " (translation fault)";
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return " (read permission fault)";
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ return " (write permission fault)";
+ default:
+ return "";
+ }
+}
+
+static __maybe_unused void print_detailed_abort(
+ struct abort_info *ai __maybe_unused,
+ const char *ctx __maybe_unused)
+{
+ EMSG_RAW("\n");
+ EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s\n",
+ ctx, abort_type_to_str(ai->abort_type), ai->va,
+ fault_to_str(ai->abort_type, ai->fault_descr));
+#ifdef ARM32
+ EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0(), read_ttbr1(),
+ read_contextidr());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), ai->regs->spsr);
+ EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
+ ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
+ EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
+ ai->regs->r1, ai->regs->r5, ai->regs->r9,
+ read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
+ ai->regs->r2, ai->regs->r6, ai->regs->r10,
+ read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
+ ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64 " cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
+ read_contextidr_el1());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), (uint32_t)ai->regs->spsr);
+ EMSG_RAW("x0 %016" PRIx64 " x1 %016" PRIx64,
+ ai->regs->x0, ai->regs->x1);
+ EMSG_RAW("x2 %016" PRIx64 " x3 %016" PRIx64,
+ ai->regs->x2, ai->regs->x3);
+ EMSG_RAW("x4 %016" PRIx64 " x5 %016" PRIx64,
+ ai->regs->x4, ai->regs->x5);
+ EMSG_RAW("x6 %016" PRIx64 " x7 %016" PRIx64,
+ ai->regs->x6, ai->regs->x7);
+ EMSG_RAW("x8 %016" PRIx64 " x9 %016" PRIx64,
+ ai->regs->x8, ai->regs->x9);
+ EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
+ ai->regs->x10, ai->regs->x11);
+ EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
+ ai->regs->x12, ai->regs->x13);
+ EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
+ ai->regs->x14, ai->regs->x15);
+ EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
+ ai->regs->x16, ai->regs->x17);
+ EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
+ ai->regs->x18, ai->regs->x19);
+ EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
+ ai->regs->x20, ai->regs->x21);
+ EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
+ ai->regs->x22, ai->regs->x23);
+ EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
+ ai->regs->x24, ai->regs->x25);
+ EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
+ ai->regs->x26, ai->regs->x27);
+ EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
+ ai->regs->x28, ai->regs->x29);
+ EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
+ ai->regs->x30, ai->regs->elr);
+ EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
+#endif /*ARM64*/
+}
+
+static void print_user_abort(struct abort_info *ai __maybe_unused)
+{
+#ifdef CFG_TEE_CORE_TA_TRACE
+ print_detailed_abort(ai, "user TA");
+ tee_ta_dump_current();
+#endif
+}
+
+void abort_print(struct abort_info *ai __maybe_unused)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+ print_detailed_abort(ai, "core");
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+}
+
+void abort_print_error(struct abort_info *ai)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+	/* full verbose log at INFO level */
+ print_detailed_abort(ai, "core");
+#else
+#ifdef ARM32
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
+ "CPUID 0x%x CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
+ read_mpidr(), read_spsr());
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X\n"
+ "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
+ read_contextidr_el1(),
+ read_mpidr_el1(), (uint32_t)ai->regs->spsr);
+#endif /*ARM64*/
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+ print_stack_unwind(ai);
+}
+
+#ifdef ARM32
+static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
+ struct abort_info *ai)
+{
+ switch (abort_type) {
+ case ABORT_TYPE_DATA:
+ ai->fault_descr = read_dfsr();
+ ai->va = read_dfar();
+ break;
+ case ABORT_TYPE_PREFETCH:
+ ai->fault_descr = read_ifsr();
+ ai->va = read_ifar();
+ break;
+ default:
+ ai->fault_descr = 0;
+ ai->va = regs->elr;
+ break;
+ }
+ ai->abort_type = abort_type;
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_abort_info(uint32_t abort_type __unused,
+ struct thread_abort_regs *regs, struct abort_info *ai)
+{
+ ai->fault_descr = read_esr_el1();
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_IABT_EL0:
+ case ESR_EC_IABT_EL1:
+ ai->abort_type = ABORT_TYPE_PREFETCH;
+ ai->va = read_far_el1();
+ break;
+ case ESR_EC_DABT_EL0:
+ case ESR_EC_DABT_EL1:
+ case ESR_EC_SP_ALIGN:
+ ai->abort_type = ABORT_TYPE_DATA;
+ ai->va = read_far_el1();
+ break;
+ default:
+ ai->abort_type = ABORT_TYPE_UNDEF;
+ ai->va = regs->elr;
+ }
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->r1 = true;
+ ai->regs->r2 = 0xdeadbeef;
+ ai->regs->elr = (uint32_t)thread_unwind_user_mode;
+ ai->regs->spsr = read_cpsr();
+ ai->regs->spsr &= ~CPSR_MODE_MASK;
+ ai->regs->spsr |= CPSR_MODE_SVC;
+ ai->regs->spsr &= ~CPSR_FIA;
+ ai->regs->spsr |= read_spsr() & CPSR_FIA;
+ /* Select Thumb or ARM mode */
+ if (ai->regs->elr & 1)
+ ai->regs->spsr |= CPSR_T;
+ else
+ ai->regs->spsr &= ~CPSR_T;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ uint32_t daif;
+
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->x1 = true;
+ ai->regs->x2 = 0xdeadbeef;
+ ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
+ ai->regs->sp_el0 = thread_get_saved_thread_sp();
+
+ daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
+ /* XXX what about DAIF_D? */
+ ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
+}
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+static void handle_user_ta_vfp(void)
+{
+ struct tee_ta_session *s;
+
+ if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
+ panic();
+
+ thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
+}
+#endif /*CFG_WITH_VFP*/
+
+#ifdef CFG_WITH_USER_TA
+#ifdef ARM32
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ uint32_t spsr = ai->regs->spsr;
+
+ if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+ return true;
+ if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+ SPSR_64_MODE_EL0)
+ return true;
+ return false;
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_USER_TA*/
+bool abort_is_user_exception(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_USER_TA*/
+
+#ifdef ARM32
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*ARM64*/
+
+
+#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
+#ifdef ARM32
+
+#define T32_INSTR(w1, w0) \
+ ((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))
+
+#define T32_VTRANS32_MASK T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
+#define T32_VTRANS32_VAL T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)
+
+#define T32_VTRANS64_MASK T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
+#define T32_VTRANS64_VAL T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)
+
+#define T32_VLDST_MASK T32_INSTR((0xff << 8) | (1 << 4), 0)
+#define T32_VLDST_VAL T32_INSTR( 0xf9 << 8 , 0)
+
+#define T32_VXLDST_MASK T32_INSTR(0xfc << 8, 7 << 9)
+#define T32_VXLDST_VAL T32_INSTR(0xec << 8, 5 << 9)
+
+#define T32_VPROC_MASK T32_INSTR(0xef << 8, 0)
+#define T32_VPROC_VAL T32_VPROC_MASK
+
+#define A32_INSTR(x) ((uint32_t)(x))
+
+#define A32_VTRANS32_MASK A32_INSTR(SHIFT_U32(0xf, 24) | \
+ SHIFT_U32(7, 9) | BIT32(4))
+#define A32_VTRANS32_VAL A32_INSTR(SHIFT_U32(0xe, 24) | \
+ SHIFT_U32(5, 9) | BIT32(4))
+
+#define A32_VTRANS64_MASK A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
+#define A32_VTRANS64_VAL A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))
+
+#define A32_VLDST_MASK A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
+#define A32_VLDST_VAL A32_INSTR(SHIFT_U32(0xf4, 24))
+#define A32_VXLDST_MASK A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
+#define A32_VXLDST_VAL A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))
+
+#define A32_VPROC_MASK A32_INSTR(SHIFT_U32(0x7f, 25))
+#define A32_VPROC_VAL A32_INSTR(SHIFT_U32(0x79, 25))
+
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ TEE_Result res;
+ uint32_t instr;
+
+ if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
+ return false;
+
+ res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
+ if (res != TEE_SUCCESS)
+ return false;
+
+ if (ai->regs->spsr & CPSR_T) {
+ /* Thumb mode */
+ return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
+ ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
+ ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
+ ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
+ ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
+ } else {
+ /* ARM mode */
+ return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
+ ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
+ ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
+ ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
+ ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
+ }
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_FP_ASIMD:
+ case ESR_EC_AARCH32_FP:
+ case ESR_EC_AARCH64_FP:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+static bool is_vfp_fault(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+
+static enum fault_type get_fault_type(struct abort_info *ai)
+{
+ if (abort_is_user_exception(ai)) {
+ if (is_vfp_fault(ai))
+ return FAULT_TYPE_USER_TA_VFP;
+#ifndef CFG_WITH_PAGER
+ return FAULT_TYPE_USER_TA_PANIC;
+#endif
+ }
+
+ if (is_abort_in_abort_handler(ai)) {
+ abort_print_error(ai);
+ panic("[abort] abort in abort handler (trap CPU)");
+ }
+
+ if (ai->abort_type == ABORT_TYPE_UNDEF) {
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] undefined abort (trap CPU)");
+ }
+
+ switch (core_mmu_get_fault_type(ai->fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+		panic("[abort] alignment fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_ACCESS_BIT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] access bit fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_DEBUG_EVENT:
+ abort_print(ai);
+ DMSG("[abort] Ignoring debug event!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return FAULT_TYPE_PAGEABLE;
+
+ case CORE_MMU_FAULT_ASYNC_EXTERNAL:
+ abort_print(ai);
+ DMSG("[abort] Ignoring async external abort!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_OTHER:
+ default:
+ abort_print(ai);
+ DMSG("[abort] Unhandled fault!");
+ return FAULT_TYPE_IGNORE;
+ }
+}
+
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
+{
+ struct abort_info ai;
+ bool handled;
+
+ set_abort_info(abort_type, regs, &ai);
+
+ switch (get_fault_type(&ai)) {
+ case FAULT_TYPE_IGNORE:
+ break;
+ case FAULT_TYPE_USER_TA_PANIC:
+ DMSG("[abort] abort in User mode (TA will panic)");
+ print_user_abort(&ai);
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ break;
+#ifdef CFG_WITH_VFP
+ case FAULT_TYPE_USER_TA_VFP:
+ handle_user_ta_vfp();
+ break;
+#endif
+ case FAULT_TYPE_PAGEABLE:
+ default:
+ thread_kernel_save_vfp();
+ handled = tee_pager_handle_fault(&ai);
+ thread_kernel_restore_vfp();
+ if (!handled) {
+ if (!abort_is_user_exception(&ai)) {
+ abort_print_error(&ai);
+ panic("unhandled pageable abort");
+ }
+ print_user_abort(&ai);
+ DMSG("[abort] abort in User mode (TA will panic)");
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ }
+ break;
+ }
+}
diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c
new file mode 100644
index 0000000..99c0a63
--- /dev/null
+++ b/core/arch/arm/kernel/asm-defines.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/thread.h>
+#include <sm/sm.h>
+#include <types_ext.h>
+#include "thread_private.h"
+
+#define DEFINES void __defines(void); void __defines(void)
+
+#define DEFINE(def, val) \
+ asm volatile("\n==>" #def " %0 " #val : : "i" (val))
+
+DEFINES
+{
+#ifdef ARM32
+ DEFINE(SM_NSEC_CTX_R0, offsetof(struct sm_nsec_ctx, r0));
+ DEFINE(SM_NSEC_CTX_R8, offsetof(struct sm_nsec_ctx, r8));
+ DEFINE(SM_SEC_CTX_R0, offsetof(struct sm_sec_ctx, r0));
+ DEFINE(SM_SEC_CTX_MON_LR, offsetof(struct sm_sec_ctx, mon_lr));
+ DEFINE(SM_CTX_SIZE, sizeof(struct sm_ctx));
+ DEFINE(SM_CTX_NSEC, offsetof(struct sm_ctx, nsec));
+ DEFINE(SM_CTX_SEC, offsetof(struct sm_ctx, sec));
+
+ DEFINE(THREAD_VECTOR_TABLE_FIQ_ENTRY,
+ offsetof(struct thread_vector_table, fiq_entry));
+
+ DEFINE(THREAD_SVC_REG_R0, offsetof(struct thread_svc_regs, r0));
+ DEFINE(THREAD_SVC_REG_R5, offsetof(struct thread_svc_regs, r5));
+ DEFINE(THREAD_SVC_REG_R6, offsetof(struct thread_svc_regs, r6));
+#endif /*ARM32*/
+
+#ifdef ARM64
+ DEFINE(THREAD_SMC_ARGS_X0, offsetof(struct thread_smc_args, a0));
+ DEFINE(THREAD_SMC_ARGS_SIZE, sizeof(struct thread_smc_args));
+
+ DEFINE(THREAD_SVC_REG_X0, offsetof(struct thread_svc_regs, x0));
+ DEFINE(THREAD_SVC_REG_X5, offsetof(struct thread_svc_regs, x5));
+ DEFINE(THREAD_SVC_REG_X6, offsetof(struct thread_svc_regs, x6));
+ DEFINE(THREAD_SVC_REG_X30, offsetof(struct thread_svc_regs, x30));
+ DEFINE(THREAD_SVC_REG_ELR, offsetof(struct thread_svc_regs, elr));
+ DEFINE(THREAD_SVC_REG_SPSR, offsetof(struct thread_svc_regs, spsr));
+ DEFINE(THREAD_SVC_REG_SP_EL0, offsetof(struct thread_svc_regs, sp_el0));
+ DEFINE(THREAD_SVC_REG_SIZE, sizeof(struct thread_svc_regs));
+
+ /* struct thread_abort_regs */
+ DEFINE(THREAD_ABT_REG_X0, offsetof(struct thread_abort_regs, x0));
+ DEFINE(THREAD_ABT_REG_X2, offsetof(struct thread_abort_regs, x2));
+ DEFINE(THREAD_ABT_REG_X30, offsetof(struct thread_abort_regs, x30));
+ DEFINE(THREAD_ABT_REG_SPSR, offsetof(struct thread_abort_regs, spsr));
+ DEFINE(THREAD_ABT_REGS_SIZE, sizeof(struct thread_abort_regs));
+
+ /* struct thread_ctx */
+ DEFINE(THREAD_CTX_KERN_SP, offsetof(struct thread_ctx, kern_sp));
+ DEFINE(THREAD_CTX_SIZE, sizeof(struct thread_ctx));
+
+ /* struct thread_ctx_regs */
+ DEFINE(THREAD_CTX_REGS_SP, offsetof(struct thread_ctx_regs, sp));
+ DEFINE(THREAD_CTX_REGS_X0, offsetof(struct thread_ctx_regs, x[0]));
+ DEFINE(THREAD_CTX_REGS_X1, offsetof(struct thread_ctx_regs, x[1]));
+ DEFINE(THREAD_CTX_REGS_X4, offsetof(struct thread_ctx_regs, x[4]));
+ DEFINE(THREAD_CTX_REGS_X19, offsetof(struct thread_ctx_regs, x[19]));
+
+ /* struct thread_user_mode_rec */
+ DEFINE(THREAD_USER_MODE_REC_EXIT_STATUS0_PTR,
+ offsetof(struct thread_user_mode_rec, exit_status0_ptr));
+ DEFINE(THREAD_USER_MODE_REC_X19,
+ offsetof(struct thread_user_mode_rec, x[0]));
+ DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));
+
+ /* struct thread_core_local */
+ DEFINE(THREAD_CORE_LOCAL_TMP_STACK_VA_END,
+ offsetof(struct thread_core_local, tmp_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_CURR_THREAD,
+ offsetof(struct thread_core_local, curr_thread));
+ DEFINE(THREAD_CORE_LOCAL_FLAGS,
+ offsetof(struct thread_core_local, flags));
+ DEFINE(THREAD_CORE_LOCAL_ABT_STACK_VA_END,
+ offsetof(struct thread_core_local, abt_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_X0, offsetof(struct thread_core_local, x[0]));
+ DEFINE(THREAD_CORE_LOCAL_X2, offsetof(struct thread_core_local, x[2]));
+#endif /*ARM64*/
+}
diff --git a/core/arch/arm/kernel/cache_helpers_a64.S b/core/arch/arm/kernel/cache_helpers_a64.S
new file mode 100644
index 0000000..d3a0248
--- /dev/null
+++ b/core/arch/arm/kernel/cache_helpers_a64.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm64.h>
+#include <asm.S>
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ /* ------------------------------------------
+	 * Clean and invalidate the data cache from the
+	 * base address for 'size' bytes. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+FUNC flush_dcache_range , :
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+flush_loop:
+ dc civac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo flush_loop
+ dsb sy
+ ret
+END_FUNC flush_dcache_range
+
+
+ /* ------------------------------------------
+	 * Invalidate the data cache from the base
+	 * address for 'size' bytes. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+FUNC inv_dcache_range , :
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+inv_loop:
+ dc ivac, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo inv_loop
+ dsb sy
+ ret
+END_FUNC inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ b do_dcsw_op
+ .endm
+
+LOCAL_FUNC do_dcsw_op , :
+ cbz x3, exit
+ mov x10, xzr
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache op
+ isb
+exit:
+ ret
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+END_FUNC do_dcsw_op
+
+
+FUNC dcsw_op_louis , :
+ dcsw_op #CLIDR_LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_louis
+
+
+FUNC dcsw_op_all , :
+ dcsw_op #CLIDR_LOC_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_all
diff --git a/core/arch/arm/kernel/elf32.h b/core/arch/arm/kernel/elf32.h
new file mode 100644
index 0000000..d374208
--- /dev/null
+++ b/core/arch/arm/kernel/elf32.h
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF32_H_
+#define _SYS_ELF32_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 32-bit architectures.
+ */
+
+typedef uint32_t Elf32_Addr;
+typedef uint16_t Elf32_Half;
+typedef uint32_t Elf32_Off;
+typedef int32_t Elf32_Sword;
+typedef uint32_t Elf32_Word;
+typedef uint64_t Elf32_Lword;
+
+typedef Elf32_Word Elf32_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf32_Word Elf32_Size;
+typedef Elf32_Sword Elf32_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* File identification. */
+ Elf32_Half e_type; /* File type. */
+ Elf32_Half e_machine; /* Machine architecture. */
+ Elf32_Word e_version; /* ELF format version. */
+ Elf32_Addr e_entry; /* Entry point. */
+ Elf32_Off e_phoff; /* Program header file offset. */
+ Elf32_Off e_shoff; /* Section header file offset. */
+ Elf32_Word e_flags; /* Architecture-specific flags. */
+ Elf32_Half e_ehsize; /* Size of ELF header in bytes. */
+ Elf32_Half e_phentsize; /* Size of program header entry. */
+ Elf32_Half e_phnum; /* Number of program header entries. */
+ Elf32_Half e_shentsize; /* Size of section header entry. */
+ Elf32_Half e_shnum; /* Number of section header entries. */
+ Elf32_Half e_shstrndx; /* Section name strings section. */
+} Elf32_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+ Elf32_Word sh_name; /* Section name (index into the
+ section header string table). */
+ Elf32_Word sh_type; /* Section type. */
+ Elf32_Word sh_flags; /* Section flags. */
+ Elf32_Addr sh_addr; /* Address in memory image. */
+ Elf32_Off sh_offset; /* Offset in file. */
+ Elf32_Word sh_size; /* Size in bytes. */
+ Elf32_Word sh_link; /* Index of a related section. */
+ Elf32_Word sh_info; /* Depends on section type. */
+ Elf32_Word sh_addralign; /* Alignment in bytes. */
+ Elf32_Word sh_entsize; /* Size of each entry in section. */
+} Elf32_Shdr;
+
+/*
+ * Program header.
+ */
+
+typedef struct {
+ Elf32_Word p_type; /* Entry type. */
+ Elf32_Off p_offset; /* File offset of contents. */
+ Elf32_Addr p_vaddr; /* Virtual address in memory image. */
+ Elf32_Addr p_paddr; /* Physical address (not used). */
+ Elf32_Word p_filesz; /* Size of contents in file. */
+ Elf32_Word p_memsz; /* Size of contents in memory. */
+ Elf32_Word p_flags; /* Access permission flags. */
+ Elf32_Word p_align; /* Alignment in memory and file. */
+} Elf32_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+ Elf32_Sword d_tag; /* Entry type. */
+ union {
+ Elf32_Word d_val; /* Integer value. */
+ Elf32_Addr d_ptr; /* Address value. */
+ } d_un;
+} Elf32_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+ Elf32_Addr r_offset; /* Location to be relocated. */
+ Elf32_Word r_info; /* Relocation type and symbol index. */
+} Elf32_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+ Elf32_Addr r_offset; /* Location to be relocated. */
+ Elf32_Word r_info; /* Relocation type and symbol index. */
+ Elf32_Sword r_addend; /* Addend. */
+} Elf32_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF32_R_SYM(info) ((info) >> 8)
+#define ELF32_R_TYPE(info) ((unsigned char)(info))
+
+/* Macro for constructing r_info from field values. */
+#define ELF32_R_INFO(sym, type) (((sym) << 8) + (unsigned char)(type))
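+
+/*
+ * Example: for r_info == 0x00001502, ELF32_R_SYM() yields the symbol table
+ * index 0x15 and ELF32_R_TYPE() yields the relocation type 0x02.
+ */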
+
+/*
+ * Note entry header
+ */
+typedef Elf_Note Elf32_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+ Elf32_Lword m_value; /* symbol value */
+ Elf32_Word m_info; /* size + index */
+ Elf32_Word m_poffset; /* symbol offset */
+ Elf32_Half m_repeat; /* repeat count */
+ Elf32_Half m_stride; /* stride info */
+} Elf32_Move;
+
+/*
+ * The macros compose and decompose values for Move.r_info
+ *
+ * sym = ELF32_M_SYM(M.m_info)
+ * size = ELF32_M_SIZE(M.m_info)
+ * M.m_info = ELF32_M_INFO(sym, size)
+ */
+#define ELF32_M_SYM(info) ((info)>>8)
+#define ELF32_M_SIZE(info) ((unsigned char)(info))
+#define ELF32_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+ Elf32_Word c_tag; /* how to interpret value */
+ union {
+ Elf32_Word c_val;
+ Elf32_Addr c_ptr;
+ } c_un;
+} Elf32_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+ Elf32_Word st_name; /* String table index of name. */
+ Elf32_Addr st_value; /* Symbol value. */
+ Elf32_Word st_size; /* Size of associated object. */
+ unsigned char st_info; /* Type and binding information. */
+ unsigned char st_other; /* Reserved (not used). */
+ Elf32_Half st_shndx; /* Section index of symbol. */
+} Elf32_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF32_ST_BIND(info) ((info) >> 4)
+#define ELF32_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF32_ST_VISIBILITY(oth) ((oth) & 0x3)
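+
+/*
+ * Example: ELF32_ST_INFO(STB_GLOBAL, STT_FUNC) packs to 0x12; applying
+ * ELF32_ST_BIND() and ELF32_ST_TYPE() to that value recovers STB_GLOBAL (1)
+ * and STT_FUNC (2) respectively.
+ */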
+
+/* Structures used by Sun & GNU symbol versioning. */
+typedef struct
+{
+ Elf32_Half vd_version;
+ Elf32_Half vd_flags;
+ Elf32_Half vd_ndx;
+ Elf32_Half vd_cnt;
+ Elf32_Word vd_hash;
+ Elf32_Word vd_aux;
+ Elf32_Word vd_next;
+} Elf32_Verdef;
+
+typedef struct
+{
+ Elf32_Word vda_name;
+ Elf32_Word vda_next;
+} Elf32_Verdaux;
+
+typedef struct
+{
+ Elf32_Half vn_version;
+ Elf32_Half vn_cnt;
+ Elf32_Word vn_file;
+ Elf32_Word vn_aux;
+ Elf32_Word vn_next;
+} Elf32_Verneed;
+
+typedef struct
+{
+ Elf32_Word vna_hash;
+ Elf32_Half vna_flags;
+ Elf32_Half vna_other;
+ Elf32_Word vna_name;
+ Elf32_Word vna_next;
+} Elf32_Vernaux;
+
+typedef Elf32_Half Elf32_Versym;
+
+typedef struct {
+ Elf32_Half si_boundto; /* direct bindings - symbol bound to */
+ Elf32_Half si_flags; /* per symbol flags */
+} Elf32_Syminfo;
+
+#endif /* !_SYS_ELF32_H_ */
diff --git a/core/arch/arm/kernel/elf64.h b/core/arch/arm/kernel/elf64.h
new file mode 100644
index 0000000..c468dcd
--- /dev/null
+++ b/core/arch/arm/kernel/elf64.h
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF64_H_
+#define _SYS_ELF64_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 64-bit architectures.
+ */
+
+typedef uint64_t Elf64_Addr;
+typedef uint16_t Elf64_Half;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Sword;
+typedef int64_t Elf64_Sxword;
+typedef uint32_t Elf64_Word;
+typedef uint64_t Elf64_Lword;
+typedef uint64_t Elf64_Xword;
+
+/*
+ * Types of dynamic symbol hash table bucket and chain elements.
+ *
+ * This is inconsistent among 64 bit architectures, so a machine dependent
+ * typedef is required.
+ */
+
+typedef Elf64_Word Elf64_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf64_Xword Elf64_Size;
+typedef Elf64_Sxword Elf64_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* File identification. */
+ Elf64_Half e_type; /* File type. */
+ Elf64_Half e_machine; /* Machine architecture. */
+ Elf64_Word e_version; /* ELF format version. */
+ Elf64_Addr e_entry; /* Entry point. */
+ Elf64_Off e_phoff; /* Program header file offset. */
+ Elf64_Off e_shoff; /* Section header file offset. */
+ Elf64_Word e_flags; /* Architecture-specific flags. */
+ Elf64_Half e_ehsize; /* Size of ELF header in bytes. */
+ Elf64_Half e_phentsize; /* Size of program header entry. */
+ Elf64_Half e_phnum; /* Number of program header entries. */
+ Elf64_Half e_shentsize; /* Size of section header entry. */
+ Elf64_Half e_shnum; /* Number of section header entries. */
+ Elf64_Half e_shstrndx; /* Section name strings section. */
+} Elf64_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+ Elf64_Word sh_name; /* Section name (index into the
+ section header string table). */
+ Elf64_Word sh_type; /* Section type. */
+ Elf64_Xword sh_flags; /* Section flags. */
+ Elf64_Addr sh_addr; /* Address in memory image. */
+ Elf64_Off sh_offset; /* Offset in file. */
+ Elf64_Xword sh_size; /* Size in bytes. */
+ Elf64_Word sh_link; /* Index of a related section. */
+ Elf64_Word sh_info; /* Depends on section type. */
+ Elf64_Xword sh_addralign; /* Alignment in bytes. */
+ Elf64_Xword sh_entsize; /* Size of each entry in section. */
+} Elf64_Shdr;
+
+/*
+ * Program header.
+ */
+
+typedef struct {
+ Elf64_Word p_type; /* Entry type. */
+ Elf64_Word p_flags; /* Access permission flags. */
+ Elf64_Off p_offset; /* File offset of contents. */
+ Elf64_Addr p_vaddr; /* Virtual address in memory image. */
+ Elf64_Addr p_paddr; /* Physical address (not used). */
+ Elf64_Xword p_filesz; /* Size of contents in file. */
+ Elf64_Xword p_memsz; /* Size of contents in memory. */
+ Elf64_Xword p_align; /* Alignment in memory and file. */
+} Elf64_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+ Elf64_Sxword d_tag; /* Entry type. */
+ union {
+ Elf64_Xword d_val; /* Integer value. */
+ Elf64_Addr d_ptr; /* Address value. */
+ } d_un;
+} Elf64_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+ Elf64_Addr r_offset; /* Location to be relocated. */
+ Elf64_Xword r_info; /* Relocation type and symbol index. */
+} Elf64_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+ Elf64_Addr r_offset; /* Location to be relocated. */
+ Elf64_Xword r_info; /* Relocation type and symbol index. */
+ Elf64_Sxword r_addend; /* Addend. */
+} Elf64_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF64_R_SYM(info) ((info) >> 32)
+#define ELF64_R_TYPE(info) ((info) & 0xffffffffL)
+
+/* Macro for constructing r_info from field values. */
+#define ELF64_R_INFO(sym, type) (((sym) << 32) + ((type) & 0xffffffffL))
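+
+/*
+ * Example: an R_AARCH64_RELATIVE entry with symbol index 0 has
+ * r_info == 0x403, so ELF64_R_SYM() yields 0 and ELF64_R_TYPE() yields
+ * R_AARCH64_RELATIVE (1027).
+ */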
+
+#define ELF64_R_TYPE_DATA(info) (((Elf64_Xword)(info)<<32)>>40)
+#define ELF64_R_TYPE_ID(info) (((Elf64_Xword)(info)<<56)>>56)
+#define ELF64_R_TYPE_INFO(data, type) \
+ (((Elf64_Xword)(data)<<8)+(Elf64_Xword)(type))
+
+/*
+ * Note entry header
+ */
+typedef Elf_Note Elf64_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+ Elf64_Lword m_value; /* symbol value */
+ Elf64_Xword m_info; /* size + index */
+ Elf64_Xword m_poffset; /* symbol offset */
+ Elf64_Half m_repeat; /* repeat count */
+ Elf64_Half m_stride; /* stride info */
+} Elf64_Move;
+
+#define ELF64_M_SYM(info) ((info)>>8)
+#define ELF64_M_SIZE(info) ((unsigned char)(info))
+#define ELF64_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+ Elf64_Xword c_tag; /* how to interpret value */
+ union {
+ Elf64_Xword c_val;
+ Elf64_Addr c_ptr;
+ } c_un;
+} Elf64_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+ Elf64_Word st_name; /* String table index of name. */
+ unsigned char st_info; /* Type and binding information. */
+ unsigned char st_other; /* Reserved (not used). */
+ Elf64_Half st_shndx; /* Section index of symbol. */
+ Elf64_Addr st_value; /* Symbol value. */
+ Elf64_Xword st_size; /* Size of associated object. */
+} Elf64_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF64_ST_BIND(info) ((info) >> 4)
+#define ELF64_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF64_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF64_ST_VISIBILITY(oth) ((oth) & 0x3)
+
+/* Structures used by Sun & GNU-style symbol versioning. */
+typedef struct {
+ Elf64_Half vd_version;
+ Elf64_Half vd_flags;
+ Elf64_Half vd_ndx;
+ Elf64_Half vd_cnt;
+ Elf64_Word vd_hash;
+ Elf64_Word vd_aux;
+ Elf64_Word vd_next;
+} Elf64_Verdef;
+
+typedef struct {
+ Elf64_Word vda_name;
+ Elf64_Word vda_next;
+} Elf64_Verdaux;
+
+typedef struct {
+ Elf64_Half vn_version;
+ Elf64_Half vn_cnt;
+ Elf64_Word vn_file;
+ Elf64_Word vn_aux;
+ Elf64_Word vn_next;
+} Elf64_Verneed;
+
+typedef struct {
+ Elf64_Word vna_hash;
+ Elf64_Half vna_flags;
+ Elf64_Half vna_other;
+ Elf64_Word vna_name;
+ Elf64_Word vna_next;
+} Elf64_Vernaux;
+
+typedef Elf64_Half Elf64_Versym;
+
+typedef struct {
+ Elf64_Half si_boundto; /* direct bindings - symbol bound to */
+ Elf64_Half si_flags; /* per symbol flags */
+} Elf64_Syminfo;
+
+#endif /* !_SYS_ELF64_H_ */
diff --git a/core/arch/arm/kernel/elf_common.h b/core/arch/arm/kernel/elf_common.h
new file mode 100644
index 0000000..dd8cd50
--- /dev/null
+++ b/core/arch/arm/kernel/elf_common.h
@@ -0,0 +1,1006 @@
+/*-
+ * Copyright (c) 2000, 2001, 2008, 2011, David E. O'Brien
+ * Copyright (c) 1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF_COMMON_H_
+#define _SYS_ELF_COMMON_H_ 1
+
+/*
+ * ELF definitions that are independent of architecture or word size.
+ */
+
+/*
+ * Note header. The ".note" section contains an array of notes. Each
+ * begins with this header, aligned to a word boundary. Immediately
+ * following the note header is n_namesz bytes of name, padded to the
+ * next word boundary. Then comes n_descsz bytes of descriptor, again
+ * padded to a word boundary. The values of n_namesz and n_descsz do
+ * not include the padding.
+ */
+
+typedef struct {
+ uint32_t n_namesz; /* Length of name. */
+ uint32_t n_descsz; /* Length of descriptor. */
+ uint32_t n_type; /* Type of this note. */
+} Elf_Note;
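+
+/*
+ * Example: a note with a 4-byte name ("GNU\0") and a 6-byte descriptor is
+ * laid out as the 12-byte header above, 4 bytes of name (already on a word
+ * boundary) and 8 bytes of descriptor (6 bytes of data plus 2 bytes of
+ * padding), with n_namesz == 4 and n_descsz == 6.
+ */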
+
+/*
+ * The header for GNU-style hash sections.
+ */
+
+typedef struct {
+ uint32_t gh_nbuckets; /* Number of hash buckets. */
+ uint32_t gh_symndx; /* First visible symbol in .dynsym. */
+ uint32_t gh_maskwords; /* #maskwords used in bloom filter. */
+ uint32_t gh_shift2; /* Bloom filter shift count. */
+} Elf_GNU_Hash_Header;
+
+/* Indexes into the e_ident array. Keep synced with
+ http://www.sco.com/developers/gabi/latest/ch4.eheader.html */
+#define EI_MAG0 0 /* Magic number, byte 0. */
+#define EI_MAG1 1 /* Magic number, byte 1. */
+#define EI_MAG2 2 /* Magic number, byte 2. */
+#define EI_MAG3 3 /* Magic number, byte 3. */
+#define EI_CLASS 4 /* Class of machine. */
+#define EI_DATA 5 /* Data format. */
+#define EI_VERSION 6 /* ELF format version. */
+#define EI_OSABI 7 /* Operating system / ABI identification */
+#define EI_ABIVERSION 8 /* ABI version */
+#define OLD_EI_BRAND 8 /* Start of architecture identification. */
+#define EI_PAD 9 /* Start of padding (per SVR4 ABI). */
+#define EI_NIDENT 16 /* Size of e_ident array. */
+
+/* Values for the magic number bytes. */
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF" /* magic string */
+#define SELFMAG 4 /* magic string size */
+
+/* Values for e_ident[EI_VERSION] and e_version. */
+#define EV_NONE 0
+#define EV_CURRENT 1
+
+/* Values for e_ident[EI_CLASS]. */
+#define ELFCLASSNONE 0 /* Unknown class. */
+#define ELFCLASS32 1 /* 32-bit architecture. */
+#define ELFCLASS64 2 /* 64-bit architecture. */
+
+/* Values for e_ident[EI_DATA]. */
+#define ELFDATANONE 0 /* Unknown data format. */
+#define ELFDATA2LSB 1 /* 2's complement little-endian. */
+#define ELFDATA2MSB 2 /* 2's complement big-endian. */
+
+/* Values for e_ident[EI_OSABI]. */
+#define ELFOSABI_NONE 0 /* UNIX System V ABI */
+#define ELFOSABI_HPUX 1 /* HP-UX operating system */
+#define ELFOSABI_NETBSD 2 /* NetBSD */
+#define ELFOSABI_LINUX 3 /* GNU/Linux */
+#define ELFOSABI_HURD 4 /* GNU/Hurd */
+#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
+#define ELFOSABI_SOLARIS 6 /* Solaris */
+#define ELFOSABI_AIX 7 /* AIX */
+#define ELFOSABI_IRIX 8 /* IRIX */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD */
+#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD */
+#define ELFOSABI_OPENVMS 13 /* Open VMS */
+#define ELFOSABI_NSK 14 /* HP Non-Stop Kernel */
+#define ELFOSABI_AROS 15 /* Amiga Research OS */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+#define ELFOSABI_SYSV ELFOSABI_NONE /* symbol used in old spec */
+#define ELFOSABI_MONTEREY ELFOSABI_AIX /* Monterey */
+
+/* e_ident */
+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
+ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
+ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
+ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
+
+/* Values for e_type. */
+#define ET_NONE 0 /* Unknown type. */
+#define ET_REL 1 /* Relocatable. */
+#define ET_EXEC 2 /* Executable. */
+#define ET_DYN 3 /* Shared object. */
+#define ET_CORE 4 /* Core file. */
+#define ET_LOOS 0xfe00 /* First operating system specific. */
+#define ET_HIOS 0xfeff /* Last operating system-specific. */
+#define ET_LOPROC 0xff00 /* First processor-specific. */
+#define ET_HIPROC 0xffff /* Last processor-specific. */
+
+/* Values for e_machine. */
+#define EM_NONE 0 /* Unknown machine. */
+#define EM_M32 1 /* AT&T WE32100. */
+#define EM_SPARC 2 /* Sun SPARC. */
+#define EM_386 3 /* Intel i386. */
+#define EM_68K 4 /* Motorola 68000. */
+#define EM_88K 5 /* Motorola 88000. */
+#define EM_860 7 /* Intel i860. */
+#define EM_MIPS 8 /* MIPS R3000 Big-Endian only. */
+#define EM_S370 9 /* IBM System/370. */
+#define EM_MIPS_RS3_LE 10 /* MIPS R3000 Little-Endian. */
+#define EM_PARISC 15 /* HP PA-RISC. */
+#define EM_VPP500 17 /* Fujitsu VPP500. */
+#define EM_SPARC32PLUS 18 /* SPARC v8plus. */
+#define EM_960 19 /* Intel 80960. */
+#define EM_PPC 20 /* PowerPC 32-bit. */
+#define EM_PPC64 21 /* PowerPC 64-bit. */
+#define EM_S390 22 /* IBM System/390. */
+#define EM_V800 36 /* NEC V800. */
+#define EM_FR20 37 /* Fujitsu FR20. */
+#define EM_RH32 38 /* TRW RH-32. */
+#define EM_RCE 39 /* Motorola RCE. */
+#define EM_ARM 40 /* ARM. */
+#define EM_SH 42 /* Hitachi SH. */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit. */
+#define EM_TRICORE 44 /* Siemens TriCore embedded processor. */
+#define EM_ARC 45 /* Argonaut RISC Core. */
+#define EM_H8_300 46 /* Hitachi H8/300. */
+#define EM_H8_300H 47 /* Hitachi H8/300H. */
+#define EM_H8S 48 /* Hitachi H8S. */
+#define EM_H8_500 49 /* Hitachi H8/500. */
+#define EM_IA_64 50 /* Intel IA-64 Processor. */
+#define EM_MIPS_X 51 /* Stanford MIPS-X. */
+#define EM_COLDFIRE 52 /* Motorola ColdFire. */
+#define EM_68HC12 53 /* Motorola M68HC12. */
+#define EM_MMA 54 /* Fujitsu MMA. */
+#define EM_PCP 55 /* Siemens PCP. */
+#define EM_NCPU 56 /* Sony nCPU. */
+#define EM_NDR1 57 /* Denso NDR1 microprocessor. */
+#define EM_STARCORE 58 /* Motorola Star*Core processor. */
+#define EM_ME16 59 /* Toyota ME16 processor. */
+#define EM_ST100 60 /* STMicroelectronics ST100 processor. */
+#define EM_TINYJ 61 /* Advanced Logic Corp. TinyJ processor. */
+#define EM_X86_64 62 /* Advanced Micro Devices x86-64 */
+#define EM_AMD64 EM_X86_64 /* Advanced Micro Devices x86-64 (compat) */
+#define EM_PDSP 63 /* Sony DSP Processor. */
+#define EM_FX66 66 /* Siemens FX66 microcontroller. */
+#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16
+ microcontroller. */
+#define EM_ST7 68 /* STMicroelectronics ST7 8-bit
+ microcontroller. */
+#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller. */
+#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller. */
+#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller. */
+#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller. */
+#define EM_SVX 73 /* Silicon Graphics SVx. */
+#define EM_ST19 74 /* STMicroelectronics ST19 8-bit mc. */
+#define EM_VAX 75 /* Digital VAX. */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded
+ processor. */
+#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded
+ processor. */
+#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor. */
+#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor. */
+#define EM_MMIX 80 /* Donald Knuth's educational 64-bit proc. */
+#define EM_HUANY 81 /* Harvard University machine-independent
+ object files. */
+#define EM_PRISM 82 /* SiTera Prism. */
+#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller. */
+#define EM_FR30 84 /* Fujitsu FR30. */
+#define EM_D10V 85 /* Mitsubishi D10V. */
+#define EM_D30V 86 /* Mitsubishi D30V. */
+#define EM_V850 87 /* NEC v850. */
+#define EM_M32R 88 /* Mitsubishi M32R. */
+#define EM_MN10300 89 /* Matsushita MN10300. */
+#define EM_MN10200 90 /* Matsushita MN10200. */
+#define EM_PJ 91 /* picoJava. */
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor. */
+#define EM_ARC_A5 93 /* ARC Cores Tangent-A5. */
+#define EM_XTENSA 94 /* Tensilica Xtensa Architecture. */
+#define EM_VIDEOCORE 95 /* Alphamosaic VideoCore processor. */
+#define EM_TMM_GPP 96 /* Thompson Multimedia General Purpose
+ Processor. */
+#define EM_NS32K 97 /* National Semiconductor 32000 series. */
+#define EM_TPC 98 /* Tenor Network TPC processor. */
+#define EM_SNP1K 99 /* Trebia SNP 1000 processor. */
+#define EM_ST200 100 /* STMicroelectronics ST200 microcontroller. */
+#define EM_IP2K 101 /* Ubicom IP2xxx microcontroller family. */
+#define EM_MAX 102 /* MAX Processor. */
+#define EM_CR 103 /* National Semiconductor CompactRISC
+ microprocessor. */
+#define EM_F2MC16 104 /* Fujitsu F2MC16. */
+#define EM_MSP430 105 /* Texas Instruments embedded microcontroller
+ msp430. */
+#define EM_BLACKFIN 106 /* Analog Devices Blackfin (DSP) processor. */
+#define EM_SE_C33 107 /* S1C33 Family of Seiko Epson processors. */
+#define EM_SEP 108 /* Sharp embedded microprocessor. */
+#define EM_ARCA 109 /* Arca RISC Microprocessor. */
+#define EM_UNICORE 110 /* Microprocessor series from PKU-Unity Ltd.
+ and MPRC of Peking University */
+#define EM_AARCH64 183 /* AArch64 (64-bit ARM) */
+
+/* Non-standard or deprecated. */
+#define EM_486 6 /* Intel i486. */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
+#define EM_ALPHA_STD 41 /* Digital Alpha (standard value). */
+#define EM_ALPHA 0x9026 /* Alpha (written in the absence of an ABI) */
+
+/* e_flags for EM_ARM */
+#define EF_ARM_ABI_VERSION 0x05000000 /* ABI version 5 */
+#define EF_ARM_ABIMASK 0xFF000000
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_ABI_FLOAT_HARD 0x00000400 /* ABI version 5 and later */
+#define EF_ARM_ABI_FLOAT_SOFT 0x00000200 /* ABI version 5 and later */
+
+/* Special section indexes. */
+#define SHN_UNDEF 0 /* Undefined, missing, irrelevant. */
+#define SHN_LORESERVE 0xff00 /* First of reserved range. */
+#define SHN_LOPROC 0xff00 /* First processor-specific. */
+#define SHN_HIPROC 0xff1f /* Last processor-specific. */
+#define SHN_LOOS 0xff20 /* First operating system-specific. */
+#define SHN_HIOS 0xff3f /* Last operating system-specific. */
+#define SHN_ABS 0xfff1 /* Absolute values. */
+#define SHN_COMMON 0xfff2 /* Common data. */
+#define SHN_XINDEX 0xffff /* Escape -- index stored elsewhere. */
+#define SHN_HIRESERVE 0xffff /* Last of reserved range. */
+
+/* sh_type */
+#define SHT_NULL 0 /* inactive */
+#define SHT_PROGBITS 1 /* program defined information */
+#define SHT_SYMTAB 2 /* symbol table section */
+#define SHT_STRTAB 3 /* string table section */
+#define SHT_RELA 4 /* relocation section with addends */
+#define SHT_HASH 5 /* symbol hash table section */
+#define SHT_DYNAMIC 6 /* dynamic section */
+#define SHT_NOTE 7 /* note section */
+#define SHT_NOBITS 8 /* no space section */
+#define SHT_REL 9 /* relocation section - no addends */
+#define SHT_SHLIB 10 /* reserved - purpose unknown */
+#define SHT_DYNSYM 11 /* dynamic symbol table section */
+#define SHT_INIT_ARRAY 14 /* Initialization function pointers. */
+#define SHT_FINI_ARRAY 15 /* Termination function pointers. */
+#define SHT_PREINIT_ARRAY 16 /* Pre-initialization function ptrs. */
+#define SHT_GROUP 17 /* Section group. */
+#define SHT_SYMTAB_SHNDX 18 /* Section indexes (see SHN_XINDEX). */
+#define SHT_LOOS 0x60000000 /* First of OS specific semantics */
+#define SHT_LOSUNW 0x6ffffff4
+#define SHT_SUNW_dof 0x6ffffff4
+#define SHT_SUNW_cap 0x6ffffff5
+#define SHT_SUNW_SIGNATURE 0x6ffffff6
+#define SHT_GNU_HASH 0x6ffffff6
+#define SHT_GNU_LIBLIST 0x6ffffff7
+#define SHT_SUNW_ANNOTATE 0x6ffffff7
+#define SHT_SUNW_DEBUGSTR 0x6ffffff8
+#define SHT_SUNW_DEBUG 0x6ffffff9
+#define SHT_SUNW_move 0x6ffffffa
+#define SHT_SUNW_COMDAT 0x6ffffffb
+#define SHT_SUNW_syminfo 0x6ffffffc
+#define SHT_SUNW_verdef 0x6ffffffd
+#define SHT_GNU_verdef 0x6ffffffd /* Symbol versions provided */
+#define SHT_SUNW_verneed 0x6ffffffe
+#define SHT_GNU_verneed 0x6ffffffe /* Symbol versions required */
+#define SHT_SUNW_versym 0x6fffffff
+#define SHT_GNU_versym 0x6fffffff /* Symbol version table */
+#define SHT_HISUNW 0x6fffffff
+#define SHT_HIOS 0x6fffffff /* Last of OS specific semantics */
+#define SHT_LOPROC 0x70000000 /* reserved range for processor */
+#define SHT_AMD64_UNWIND 0x70000001 /* unwind information */
+#define SHT_ARM_EXIDX 0x70000001 /* Exception index table. */
+#define SHT_ARM_PREEMPTMAP 0x70000002 /* BPABI DLL dynamic linking
+ pre-emption map. */
+#define SHT_ARM_ATTRIBUTES 0x70000003 /* Object file compatibility
+ attributes. */
+#define SHT_ARM_DEBUGOVERLAY 0x70000004 /* See DBGOVL for details. */
+#define SHT_ARM_OVERLAYSECTION 0x70000005 /* See DBGOVL for details. */
+#define SHT_MIPS_REGINFO 0x70000006
+#define SHT_MIPS_OPTIONS 0x7000000d
+#define SHT_MIPS_DWARF 0x7000001e /* MIPS gcc uses MIPS_DWARF */
+#define SHT_HIPROC 0x7fffffff /* specific section header types */
+#define SHT_LOUSER 0x80000000 /* reserved range for application */
+#define SHT_HIUSER 0xffffffff /* specific indexes */
+
+/* Flags for sh_flags. */
+#define SHF_WRITE 0x1 /* Section contains writable data. */
+#define SHF_ALLOC 0x2 /* Section occupies memory. */
+#define SHF_EXECINSTR 0x4 /* Section contains instructions. */
+#define SHF_MERGE 0x10 /* Section may be merged. */
+#define SHF_STRINGS 0x20 /* Section contains strings. */
+#define SHF_INFO_LINK 0x40 /* sh_info holds section index. */
+#define SHF_LINK_ORDER 0x80 /* Special ordering requirements. */
+#define SHF_OS_NONCONFORMING 0x100 /* OS-specific processing required. */
+#define SHF_GROUP 0x200 /* Member of section group. */
+#define SHF_TLS 0x400 /* Section contains TLS data. */
+#define SHF_MASKOS 0x0ff00000 /* OS-specific semantics. */
+#define SHF_MASKPROC 0xf0000000 /* Processor-specific semantics. */
+
+/* Values for p_type. */
+#define PT_NULL 0 /* Unused entry. */
+#define PT_LOAD 1 /* Loadable segment. */
+#define PT_DYNAMIC 2 /* Dynamic linking information segment. */
+#define PT_INTERP 3 /* Pathname of interpreter. */
+#define PT_NOTE 4 /* Auxiliary information. */
+#define PT_SHLIB 5 /* Reserved (not used). */
+#define PT_PHDR 6 /* Location of program header itself. */
+#define PT_TLS 7 /* Thread local storage segment */
+#define PT_LOOS 0x60000000 /* First OS-specific. */
+#define PT_SUNW_UNWIND 0x6464e550 /* amd64 UNWIND program header */
+#define PT_GNU_EH_FRAME 0x6474e550
+#define PT_GNU_STACK 0x6474e551
+#define PT_GNU_RELRO 0x6474e552
+#define PT_LOSUNW 0x6ffffffa
+#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
+#define PT_SUNWSTACK 0x6ffffffb /* describes the stack segment */
+#define PT_SUNWDTRACE 0x6ffffffc /* private */
+#define PT_SUNWCAP 0x6ffffffd /* hard/soft capabilities segment */
+#define PT_HISUNW 0x6fffffff
+#define PT_HIOS 0x6fffffff /* Last OS-specific. */
+#define PT_LOPROC 0x70000000 /* First processor-specific type. */
+#define PT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for p_flags. */
+#define PF_X 0x1 /* Executable. */
+#define PF_W 0x2 /* Writable. */
+#define PF_R 0x4 /* Readable. */
+#define PF_MASKOS 0x0ff00000 /* Operating system-specific. */
+#define PF_MASKPROC 0xf0000000 /* Processor-specific. */
+
+/* Extended program header index. */
+#define PN_XNUM 0xffff
+
+/* Values for d_tag. */
+#define DT_NULL 0 /* Terminating entry. */
+#define DT_NEEDED 1 /* String table offset of a needed shared
+ library. */
+#define DT_PLTRELSZ 2 /* Total size in bytes of PLT relocations. */
+#define DT_PLTGOT 3 /* Processor-dependent address. */
+#define DT_HASH 4 /* Address of symbol hash table. */
+#define DT_STRTAB 5 /* Address of string table. */
+#define DT_SYMTAB 6 /* Address of symbol table. */
+#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */
+#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */
+#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */
+#define DT_STRSZ 10 /* Size of string table. */
+#define DT_SYMENT 11 /* Size of each symbol table entry. */
+#define DT_INIT 12 /* Address of initialization function. */
+#define DT_FINI 13 /* Address of finalization function. */
+#define DT_SONAME 14 /* String table offset of shared object
+ name. */
+#define DT_RPATH 15 /* String table offset of library path. [sup] */
+#define DT_SYMBOLIC 16 /* Indicates "symbolic" linking. [sup] */
+#define DT_REL 17 /* Address of ElfNN_Rel relocations. */
+#define DT_RELSZ 18 /* Total size of ElfNN_Rel relocations. */
+#define DT_RELENT 19 /* Size of each ElfNN_Rel relocation. */
+#define DT_PLTREL 20 /* Type of relocation used for PLT. */
+#define DT_DEBUG 21 /* Reserved (not used). */
+#define DT_TEXTREL 22 /* Indicates there may be relocations in
+ non-writable segments. [sup] */
+#define DT_JMPREL 23 /* Address of PLT relocations. */
+#define DT_BIND_NOW 24 /* [sup] */
+#define DT_INIT_ARRAY 25 /* Address of the array of pointers to
+ initialization functions */
+#define DT_FINI_ARRAY 26 /* Address of the array of pointers to
+ termination functions */
+#define DT_INIT_ARRAYSZ 27 /* Size in bytes of the array of
+ initialization functions. */
+#define DT_FINI_ARRAYSZ 28 /* Size in bytes of the array of
+ termination functions. */
+#define DT_RUNPATH 29 /* String table offset of a null-terminated
+ library search path string. */
+#define DT_FLAGS 30 /* Object specific flag values. */
+#define DT_ENCODING 32 /* Values greater than or equal to DT_ENCODING
+ and less than DT_LOOS follow the rules for
+ the interpretation of the d_un union
+ as follows: even == 'd_ptr', odd == 'd_val'
+ or none */
+#define DT_PREINIT_ARRAY 32 /* Address of the array of pointers to
+ pre-initialization functions. */
+#define DT_PREINIT_ARRAYSZ 33 /* Size in bytes of the array of
+ pre-initialization functions. */
+#define DT_MAXPOSTAGS 34 /* number of positive tags */
+#define DT_LOOS 0x6000000d /* First OS-specific */
+#define DT_SUNW_AUXILIARY 0x6000000d /* symbol auxiliary name */
+#define DT_SUNW_RTLDINF 0x6000000e /* ld.so.1 info (private) */
+#define DT_SUNW_FILTER 0x6000000f /* symbol filter name */
+#define DT_SUNW_CAP 0x60000010 /* hardware/software */
+#define DT_HIOS 0x6ffff000 /* Last OS-specific */
+
+/*
+ * DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
+ * Dyn.d_un.d_val field of the Elf*_Dyn structure.
+ */
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_CHECKSUM 0x6ffffdf8 /* elf checksum */
+#define DT_PLTPADSZ 0x6ffffdf9 /* pltpadding size */
+#define DT_MOVEENT 0x6ffffdfa /* move table entry size */
+#define DT_MOVESZ 0x6ffffdfb /* move table size */
+#define DT_FEATURE_1 0x6ffffdfc /* feature holder */
+#define DT_POSFLAG_1 0x6ffffdfd /* flags for DT_* entries, affecting */
+ /* the following DT_* entry. */
+ /* See DF_P1_* definitions */
+#define DT_SYMINSZ 0x6ffffdfe /* syminfo table size (in bytes) */
+#define DT_SYMINENT 0x6ffffdff /* syminfo entry size (in bytes) */
+#define DT_VALRNGHI 0x6ffffdff
+
+/*
+ * DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
+ * Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
+ *
+ * If any adjustment is made to the ELF object after it has been
+ * built, these entries will need to be adjusted.
+ */
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table */
+#define DT_CONFIG 0x6ffffefa /* configuration information */
+#define DT_DEPAUDIT 0x6ffffefb /* dependency auditing */
+#define DT_AUDIT 0x6ffffefc /* object auditing */
+#define DT_PLTPAD 0x6ffffefd /* pltpadding (sparcv9) */
+#define DT_MOVETAB 0x6ffffefe /* move table */
+#define DT_SYMINFO 0x6ffffeff /* syminfo table */
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0 /* Address of versym section. */
+#define DT_RELACOUNT 0x6ffffff9 /* number of RELATIVE relocations */
+#define DT_RELCOUNT 0x6ffffffa /* number of RELATIVE relocations */
+#define DT_FLAGS_1 0x6ffffffb /* state flags - see DF_1_* defs */
+#define DT_VERDEF 0x6ffffffc /* Address of verdef section. */
+#define DT_VERDEFNUM 0x6ffffffd /* Number of elems in verdef section */
+#define DT_VERNEED 0x6ffffffe /* Address of verneed section. */
+#define DT_VERNEEDNUM 0x6fffffff /* Number of elems in verneed section */
+
+#define DT_LOPROC 0x70000000 /* First processor-specific type. */
+#define DT_DEPRECATED_SPARC_REGISTER 0x7000001
+#define DT_AUXILIARY 0x7ffffffd /* shared library auxiliary name */
+#define DT_USED 0x7ffffffe /* ignored - same as needed */
+#define DT_FILTER 0x7fffffff /* shared library filter name */
+#define DT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for DT_FLAGS */
+#define DF_ORIGIN 0x0001 /* Indicates that the object being loaded may
+ make reference to the $ORIGIN substitution
+ string */
+#define DF_SYMBOLIC 0x0002 /* Indicates "symbolic" linking. */
+#define DF_TEXTREL 0x0004 /* Indicates there may be relocations in
+ non-writable segments. */
+#define DF_BIND_NOW 0x0008 /* Indicates that the dynamic linker should
+ process all relocations for the object
+ containing this entry before transferring
+ control to the program. */
+#define DF_STATIC_TLS 0x0010 /* Indicates that the shared object or
+ executable contains code using a static
+ thread-local storage scheme. */
+
+/* Values for DT_FLAGS_1 */
+#define DF_1_BIND_NOW 0x00000001 /* Same as DF_BIND_NOW */
+#define DF_1_GLOBAL 0x00000002 /* Set the RTLD_GLOBAL for object */
+#define DF_1_NODELETE 0x00000008 /* Set the RTLD_NODELETE for object */
+#define DF_1_LOADFLTR 0x00000010 /* Immediate loading of filtees */
+#define DF_1_NOOPEN 0x00000040 /* Do not allow loading on dlopen() */
+#define DF_1_ORIGIN 0x00000080 /* Process $ORIGIN */
+#define DF_1_INTERPOSE 0x00000400 /* Interpose all objects but main */
+#define DF_1_NODEFLIB 0x00000800 /* Do not search default paths */
+
+/* Values for n_type. Used in core files. */
+#define NT_PRSTATUS 1 /* Process status. */
+#define NT_FPREGSET 2 /* Floating point registers. */
+#define NT_PRPSINFO 3 /* Process state info. */
+#define NT_THRMISC 7 /* Thread miscellaneous info. */
+#define NT_PROCSTAT_PROC 8 /* Procstat proc data. */
+#define NT_PROCSTAT_FILES 9 /* Procstat files data. */
+#define NT_PROCSTAT_VMMAP 10 /* Procstat vmmap data. */
+#define NT_PROCSTAT_GROUPS 11 /* Procstat groups data. */
+#define NT_PROCSTAT_UMASK 12 /* Procstat umask data. */
+#define NT_PROCSTAT_RLIMIT 13 /* Procstat rlimit data. */
+#define NT_PROCSTAT_OSREL 14 /* Procstat osreldate data. */
+#define NT_PROCSTAT_PSSTRINGS 15 /* Procstat ps_strings data. */
+#define NT_PROCSTAT_AUXV 16 /* Procstat auxv data. */
+
+/* Symbol Binding - ELFNN_ST_BIND - st_info */
+#define STB_LOCAL 0 /* Local symbol */
+#define STB_GLOBAL 1 /* Global symbol */
+#define STB_WEAK 2 /* like global - lower precedence */
+#define STB_LOOS 10 /* Reserved range for operating system */
+#define STB_HIOS 12 /* specific semantics. */
+#define STB_LOPROC 13 /* reserved range for processor */
+#define STB_HIPROC 15 /* specific semantics. */
+
+/* Symbol type - ELFNN_ST_TYPE - st_info */
+#define STT_NOTYPE 0 /* Unspecified type. */
+#define STT_OBJECT 1 /* Data object. */
+#define STT_FUNC 2 /* Function. */
+#define STT_SECTION 3 /* Section. */
+#define STT_FILE 4 /* Source file. */
+#define STT_COMMON 5 /* Uninitialized common block. */
+#define STT_TLS 6 /* TLS object. */
+#define STT_NUM 7
+#define STT_LOOS 10 /* Reserved range for operating system */
+#define STT_GNU_IFUNC 10
+#define STT_HIOS 12 /* specific semantics. */
+#define STT_LOPROC 13 /* reserved range for processor */
+#define STT_HIPROC 15 /* specific semantics. */
+
+/* Symbol visibility - ELFNN_ST_VISIBILITY - st_other */
+#define STV_DEFAULT 0x0 /* Default visibility (see binding). */
+#define STV_INTERNAL 0x1 /* Special meaning in relocatable objects. */
+#define STV_HIDDEN 0x2 /* Not visible. */
+#define STV_PROTECTED 0x3 /* Visible but not preemptible. */
+#define STV_EXPORTED 0x4
+#define STV_SINGLETON 0x5
+#define STV_ELIMINATE 0x6
+
+/* Special symbol table indexes. */
+#define STN_UNDEF 0 /* Undefined symbol index. */
+
+/* Symbol versioning flags. */
+#define VER_DEF_CURRENT 1
+#define VER_DEF_IDX(x) VER_NDX(x)
+
+#define VER_FLG_BASE 0x01
+#define VER_FLG_WEAK 0x02
+
+#define VER_NEED_CURRENT 1
+#define VER_NEED_WEAK (1u << 15)
+#define VER_NEED_HIDDEN VER_NDX_HIDDEN
+#define VER_NEED_IDX(x) VER_NDX(x)
+
+#define VER_NDX_LOCAL 0
+#define VER_NDX_GLOBAL 1
+#define VER_NDX_GIVEN 2
+
+#define VER_NDX_HIDDEN (1u << 15)
+#define VER_NDX(x) ((x) & ~(1u << 15))
+
+#define CA_SUNW_NULL 0
+#define CA_SUNW_HW_1 1 /* first hardware capabilities entry */
+#define CA_SUNW_SF_1 2 /* first software capabilities entry */
+
+/*
+ * Syminfo flag values
+ */
+#define SYMINFO_FLG_DIRECT 0x0001 /* symbol ref has direct association */
+ /* to object containing defn. */
+#define SYMINFO_FLG_PASSTHRU 0x0002 /* ignored - see SYMINFO_FLG_FILTER */
+#define SYMINFO_FLG_COPY 0x0004 /* symbol is a copy-reloc */
+#define SYMINFO_FLG_LAZYLOAD 0x0008 /* object containing defn should be */
+ /* lazily-loaded */
+#define SYMINFO_FLG_DIRECTBIND 0x0010 /* ref should be bound directly to */
+ /* object containing defn. */
+#define SYMINFO_FLG_NOEXTDIRECT 0x0020 /* don't let an external reference */
+ /* directly bind to this symbol */
+#define SYMINFO_FLG_FILTER 0x0002 /* symbol ref is associated to a */
+#define SYMINFO_FLG_AUXILIARY 0x0040 /* standard or auxiliary filter */
+
+/*
+ * Syminfo.si_boundto values.
+ */
+#define SYMINFO_BT_SELF 0xffff /* symbol bound to self */
+#define SYMINFO_BT_PARENT 0xfffe /* symbol bound to parent */
+#define SYMINFO_BT_NONE 0xfffd /* no special symbol binding */
+#define SYMINFO_BT_EXTERN 0xfffc /* symbol defined as external */
+#define SYMINFO_BT_LOWRESERVE 0xff00 /* beginning of reserved entries */
+
+/*
+ * Syminfo version values.
+ */
+#define SYMINFO_NONE 0 /* Syminfo version */
+#define SYMINFO_CURRENT 1
+#define SYMINFO_NUM 2
+
+/*
+ * Relocation types.
+ *
+ * All machine architectures are defined here to allow tools on one to
+ * handle others.
+ */
+
+#define R_386_NONE 0 /* No relocation. */
+#define R_386_32 1 /* Add symbol value. */
+#define R_386_PC32 2 /* Add PC-relative symbol value. */
+#define R_386_GOT32 3 /* Add PC-relative GOT offset. */
+#define R_386_PLT32 4 /* Add PC-relative PLT offset. */
+#define R_386_COPY 5 /* Copy data from shared object. */
+#define R_386_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_386_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_386_RELATIVE 8 /* Add load address of shared object. */
+#define R_386_GOTOFF 9 /* Add GOT-relative symbol address. */
+#define R_386_GOTPC 10 /* Add PC-relative GOT table address. */
+#define R_386_TLS_TPOFF 14 /* Negative offset in static TLS block */
+#define R_386_TLS_IE 15 /* Absolute address of GOT for -ve static TLS */
+#define R_386_TLS_GOTIE 16 /* GOT entry for negative static TLS block */
+#define R_386_TLS_LE 17 /* Negative offset relative to static TLS */
+#define R_386_TLS_GD 18 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_LDM 19 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_GD_32 24 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_GD_PUSH 25 /* pushl instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_CALL 26 /* call instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_POP 27 /* popl instruction for Sun ABI GD sequence */
+#define R_386_TLS_LDM_32 28 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_LDM_PUSH 29 /* pushl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_CALL 30 /* call instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_POP 31 /* popl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDO_32 32 /* 32 bit offset from start of TLS block */
+#define R_386_TLS_IE_32 33 /* 32 bit offset to GOT static TLS offset entry */
+#define R_386_TLS_LE_32 34 /* 32 bit offset within static TLS block */
+#define R_386_TLS_DTPMOD32 35 /* GOT entry containing TLS index */
+#define R_386_TLS_DTPOFF32 36 /* GOT entry containing TLS offset */
+#define R_386_TLS_TPOFF32 37 /* GOT entry of -ve static TLS offset */
+#define R_386_IRELATIVE 42 /* PLT entry resolved indirectly at runtime */
+
+#define R_AARCH64_RELATIVE 1027
+
+#define R_ARM_NONE 0 /* No relocation. */
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5
+#define R_ARM_ABS12 6
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+/* TLS relocations */
+#define R_ARM_TLS_DTPMOD32 17 /* ID of module containing symbol */
+#define R_ARM_TLS_DTPOFF32 18 /* Offset in TLS block */
+#define R_ARM_TLS_TPOFF32 19 /* Offset in static TLS block */
+#define R_ARM_COPY 20 /* Copy data from shared object. */
+#define R_ARM_GLOB_DAT 21 /* Set GOT entry to data address. */
+#define R_ARM_JUMP_SLOT 22 /* Set GOT entry to code address. */
+#define R_ARM_RELATIVE 23 /* Add load address of shared object. */
+#define R_ARM_GOTOFF 24 /* Add GOT-relative symbol address. */
+#define R_ARM_GOTPC 25 /* Add PC-relative GOT table address. */
+#define R_ARM_GOT32 26 /* Add PC-relative GOT offset. */
+#define R_ARM_PLT32 27 /* Add PC-relative PLT offset. */
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS32 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+
+/* Name Value Field Calculation */
+#define R_IA_64_NONE 0 /* None */
+#define R_IA_64_IMM14 0x21 /* immediate14 S + A */
+#define R_IA_64_IMM22 0x22 /* immediate22 S + A */
+#define R_IA_64_IMM64 0x23 /* immediate64 S + A */
+#define R_IA_64_DIR32MSB 0x24 /* word32 MSB S + A */
+#define R_IA_64_DIR32LSB 0x25 /* word32 LSB S + A */
+#define R_IA_64_DIR64MSB 0x26 /* word64 MSB S + A */
+#define R_IA_64_DIR64LSB 0x27 /* word64 LSB S + A */
+#define R_IA_64_GPREL22 0x2a /* immediate22 @gprel(S + A) */
+#define R_IA_64_GPREL64I 0x2b /* immediate64 @gprel(S + A) */
+#define R_IA_64_GPREL32MSB 0x2c /* word32 MSB @gprel(S + A) */
+#define R_IA_64_GPREL32LSB 0x2d /* word32 LSB @gprel(S + A) */
+#define R_IA_64_GPREL64MSB 0x2e /* word64 MSB @gprel(S + A) */
+#define R_IA_64_GPREL64LSB 0x2f /* word64 LSB @gprel(S + A) */
+#define R_IA_64_LTOFF22 0x32 /* immediate22 @ltoff(S + A) */
+#define R_IA_64_LTOFF64I 0x33 /* immediate64 @ltoff(S + A) */
+#define R_IA_64_PLTOFF22 0x3a /* immediate22 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64I 0x3b /* immediate64 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64MSB 0x3e /* word64 MSB @pltoff(S + A) */
+#define R_IA_64_PLTOFF64LSB 0x3f /* word64 LSB @pltoff(S + A) */
+#define R_IA_64_FPTR64I 0x43 /* immediate64 @fptr(S + A) */
+#define R_IA_64_FPTR32MSB 0x44 /* word32 MSB @fptr(S + A) */
+#define R_IA_64_FPTR32LSB 0x45 /* word32 LSB @fptr(S + A) */
+#define R_IA_64_FPTR64MSB 0x46 /* word64 MSB @fptr(S + A) */
+#define R_IA_64_FPTR64LSB 0x47 /* word64 LSB @fptr(S + A) */
+#define R_IA_64_PCREL60B 0x48 /* immediate60 form1 S + A - P */
+#define R_IA_64_PCREL21B 0x49 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL21M 0x4a /* immediate21 form2 S + A - P */
+#define R_IA_64_PCREL21F 0x4b /* immediate21 form3 S + A - P */
+#define R_IA_64_PCREL32MSB 0x4c /* word32 MSB S + A - P */
+#define R_IA_64_PCREL32LSB 0x4d /* word32 LSB S + A - P */
+#define R_IA_64_PCREL64MSB 0x4e /* word64 MSB S + A - P */
+#define R_IA_64_PCREL64LSB 0x4f /* word64 LSB S + A - P */
+#define R_IA_64_LTOFF_FPTR22 0x52 /* immediate22 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64I 0x53 /* immediate64 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32MSB 0x54 /* word32 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32LSB 0x55 /* word32 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64MSB 0x56 /* word64 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64LSB 0x57 /* word64 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_SEGREL32MSB 0x5c /* word32 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL32LSB 0x5d /* word32 LSB @segrel(S + A) */
+#define R_IA_64_SEGREL64MSB 0x5e /* word64 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL64LSB 0x5f /* word64 LSB @segrel(S + A) */
+#define R_IA_64_SECREL32MSB 0x64 /* word32 MSB @secrel(S + A) */
+#define R_IA_64_SECREL32LSB 0x65 /* word32 LSB @secrel(S + A) */
+#define R_IA_64_SECREL64MSB 0x66 /* word64 MSB @secrel(S + A) */
+#define R_IA_64_SECREL64LSB 0x67 /* word64 LSB @secrel(S + A) */
+#define R_IA_64_REL32MSB 0x6c /* word32 MSB BD + A */
+#define R_IA_64_REL32LSB 0x6d /* word32 LSB BD + A */
+#define R_IA_64_REL64MSB 0x6e /* word64 MSB BD + A */
+#define R_IA_64_REL64LSB 0x6f /* word64 LSB BD + A */
+#define R_IA_64_LTV32MSB 0x74 /* word32 MSB S + A */
+#define R_IA_64_LTV32LSB 0x75 /* word32 LSB S + A */
+#define R_IA_64_LTV64MSB 0x76 /* word64 MSB S + A */
+#define R_IA_64_LTV64LSB 0x77 /* word64 LSB S + A */
+#define R_IA_64_PCREL21BI 0x79 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL22 0x7a /* immediate22 S + A - P */
+#define R_IA_64_PCREL64I 0x7b /* immediate64 S + A - P */
+#define R_IA_64_IPLTMSB 0x80 /* function descriptor MSB special */
+#define R_IA_64_IPLTLSB 0x81 /* function descriptor LSB special */
+#define R_IA_64_SUB 0x85 /* immediate64 A - S */
+#define R_IA_64_LTOFF22X 0x86 /* immediate22 special */
+#define R_IA_64_LDXMOV 0x87 /* immediate22 special */
+#define R_IA_64_TPREL14 0x91 /* imm14 @tprel(S + A) */
+#define R_IA_64_TPREL22 0x92 /* imm22 @tprel(S + A) */
+#define R_IA_64_TPREL64I 0x93 /* imm64 @tprel(S + A) */
+#define R_IA_64_TPREL64MSB 0x96 /* word64 MSB @tprel(S + A) */
+#define R_IA_64_TPREL64LSB 0x97 /* word64 LSB @tprel(S + A) */
+#define R_IA_64_LTOFF_TPREL22 0x9a /* imm22 @ltoff(@tprel(S+A)) */
+#define R_IA_64_DTPMOD64MSB 0xa6 /* word64 MSB @dtpmod(S + A) */
+#define R_IA_64_DTPMOD64LSB 0xa7 /* word64 LSB @dtpmod(S + A) */
+#define R_IA_64_LTOFF_DTPMOD22 0xaa /* imm22 @ltoff(@dtpmod(S+A)) */
+#define R_IA_64_DTPREL14 0xb1 /* imm14 @dtprel(S + A) */
+#define R_IA_64_DTPREL22 0xb2 /* imm22 @dtprel(S + A) */
+#define R_IA_64_DTPREL64I 0xb3 /* imm64 @dtprel(S + A) */
+#define R_IA_64_DTPREL32MSB 0xb4 /* word32 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL32LSB 0xb5 /* word32 LSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64MSB 0xb6 /* word64 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64LSB 0xb7 /* word64 LSB @dtprel(S + A) */
+#define R_IA_64_LTOFF_DTPREL22 0xba /* imm22 @ltoff(@dtprel(S+A)) */
+
+#define R_MIPS_NONE 0 /* No reloc */
+#define R_MIPS_16 1 /* Direct 16 bit */
+#define R_MIPS_32 2 /* Direct 32 bit */
+#define R_MIPS_REL32 3 /* PC relative 32 bit */
+#define R_MIPS_26 4 /* Direct 26 bit shifted */
+#define R_MIPS_HI16 5 /* High 16 bit */
+#define R_MIPS_LO16 6 /* Low 16 bit */
+#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
+#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
+#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
+#define R_MIPS_PC16 10 /* PC relative 16 bit */
+#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
+#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
+#define R_MIPS_64 18 /* Direct 64 bit */
+#define R_MIPS_GOTHI16 21 /* GOT HI 16 bit */
+#define R_MIPS_GOTLO16 22 /* GOT LO 16 bit */
+#define R_MIPS_CALLHI16 30 /* upper 16 bit GOT entry for function */
+#define R_MIPS_CALLLO16 31 /* lower 16 bit GOT entry for function */
+
+#define R_PPC_NONE 0 /* No relocation. */
+#define R_PPC_ADDR32 1
+#define R_PPC_ADDR24 2
+#define R_PPC_ADDR16 3
+#define R_PPC_ADDR16_LO 4
+#define R_PPC_ADDR16_HI 5
+#define R_PPC_ADDR16_HA 6
+#define R_PPC_ADDR14 7
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10
+#define R_PPC_REL14 11
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/*
+ * 64-bit relocations
+ */
+#define R_PPC64_ADDR64 38
+#define R_PPC64_ADDR16_HIGHER 39
+#define R_PPC64_ADDR16_HIGHERA 40
+#define R_PPC64_ADDR16_HIGHEST 41
+#define R_PPC64_ADDR16_HIGHESTA 42
+#define R_PPC64_UADDR64 43
+#define R_PPC64_REL64 44
+#define R_PPC64_PLT64 45
+#define R_PPC64_PLTREL64 46
+#define R_PPC64_TOC16 47
+#define R_PPC64_TOC16_LO 48
+#define R_PPC64_TOC16_HI 49
+#define R_PPC64_TOC16_HA 50
+#define R_PPC64_TOC 51
+#define R_PPC64_DTPMOD64 68
+#define R_PPC64_TPREL64 73
+#define R_PPC64_DTPREL64 78
+
+/*
+ * TLS relocations
+ */
+#define R_PPC_TLS 67
+#define R_PPC_DTPMOD32 68
+#define R_PPC_TPREL16 69
+#define R_PPC_TPREL16_LO 70
+#define R_PPC_TPREL16_HI 71
+#define R_PPC_TPREL16_HA 72
+#define R_PPC_TPREL32 73
+#define R_PPC_DTPREL16 74
+#define R_PPC_DTPREL16_LO 75
+#define R_PPC_DTPREL16_HI 76
+#define R_PPC_DTPREL16_HA 77
+#define R_PPC_DTPREL32 78
+#define R_PPC_GOT_TLSGD16 79
+#define R_PPC_GOT_TLSGD16_LO 80
+#define R_PPC_GOT_TLSGD16_HI 81
+#define R_PPC_GOT_TLSGD16_HA 82
+#define R_PPC_GOT_TLSLD16 83
+#define R_PPC_GOT_TLSLD16_LO 84
+#define R_PPC_GOT_TLSLD16_HI 85
+#define R_PPC_GOT_TLSLD16_HA 86
+#define R_PPC_GOT_TPREL16 87
+#define R_PPC_GOT_TPREL16_LO 88
+#define R_PPC_GOT_TPREL16_HI 89
+#define R_PPC_GOT_TPREL16_HA 90
+
+/*
+ * The remaining relocs are from the Embedded ELF ABI, and are not in the
+ * SVR4 ELF ABI.
+ */
+
+#define R_PPC_EMB_NADDR32 101
+#define R_PPC_EMB_NADDR16 102
+#define R_PPC_EMB_NADDR16_LO 103
+#define R_PPC_EMB_NADDR16_HI 104
+#define R_PPC_EMB_NADDR16_HA 105
+#define R_PPC_EMB_SDAI16 106
+#define R_PPC_EMB_SDA2I16 107
+#define R_PPC_EMB_SDA2REL 108
+#define R_PPC_EMB_SDA21 109
+#define R_PPC_EMB_MRKREF 110
+#define R_PPC_EMB_RELSEC16 111
+#define R_PPC_EMB_RELST_LO 112
+#define R_PPC_EMB_RELST_HI 113
+#define R_PPC_EMB_RELST_HA 114
+#define R_PPC_EMB_BIT_FLD 115
+#define R_PPC_EMB_RELSDA 116
+
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_PC_HH22 37
+#define R_SPARC_PC_HM10 38
+#define R_SPARC_PC_LM22 39
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_GLOB_JMP 42
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
+#define R_SPARC_DISP64 46
+#define R_SPARC_PLT64 47
+#define R_SPARC_HIX22 48
+#define R_SPARC_LOX10 49
+#define R_SPARC_H44 50
+#define R_SPARC_M44 51
+#define R_SPARC_L44 52
+#define R_SPARC_REGISTER 53
+#define R_SPARC_UA64 54
+#define R_SPARC_UA16 55
+#define R_SPARC_TLS_GD_HI22 56
+#define R_SPARC_TLS_GD_LO10 57
+#define R_SPARC_TLS_GD_ADD 58
+#define R_SPARC_TLS_GD_CALL 59
+#define R_SPARC_TLS_LDM_HI22 60
+#define R_SPARC_TLS_LDM_LO10 61
+#define R_SPARC_TLS_LDM_ADD 62
+#define R_SPARC_TLS_LDM_CALL 63
+#define R_SPARC_TLS_LDO_HIX22 64
+#define R_SPARC_TLS_LDO_LOX10 65
+#define R_SPARC_TLS_LDO_ADD 66
+#define R_SPARC_TLS_IE_HI22 67
+#define R_SPARC_TLS_IE_LO10 68
+#define R_SPARC_TLS_IE_LD 69
+#define R_SPARC_TLS_IE_LDX 70
+#define R_SPARC_TLS_IE_ADD 71
+#define R_SPARC_TLS_LE_HIX22 72
+#define R_SPARC_TLS_LE_LOX10 73
+#define R_SPARC_TLS_DTPMOD32 74
+#define R_SPARC_TLS_DTPMOD64 75
+#define R_SPARC_TLS_DTPOFF32 76
+#define R_SPARC_TLS_DTPOFF64 77
+#define R_SPARC_TLS_TPOFF32 78
+#define R_SPARC_TLS_TPOFF64 79
+
+#define R_X86_64_NONE 0 /* No relocation. */
+#define R_X86_64_64 1 /* Add 64 bit symbol value. */
+#define R_X86_64_PC32 2 /* PC-relative 32 bit signed sym value. */
+#define R_X86_64_GOT32 3 /* PC-relative 32 bit GOT offset. */
+#define R_X86_64_PLT32 4 /* PC-relative 32 bit PLT offset. */
+#define R_X86_64_COPY 5 /* Copy data from shared object. */
+#define R_X86_64_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_X86_64_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_X86_64_RELATIVE 8 /* Add load address of shared object. */
+#define R_X86_64_GOTPCREL 9 /* Add 32 bit signed pcrel offset to GOT. */
+#define R_X86_64_32 10 /* Add 32 bit zero extended symbol value */
+#define R_X86_64_32S 11 /* Add 32 bit sign extended symbol value */
+#define R_X86_64_16 12 /* Add 16 bit zero extended symbol value */
+#define R_X86_64_PC16 13 /* Add 16 bit sign extended pc relative symbol value */
+#define R_X86_64_8 14 /* Add 8 bit zero extended symbol value */
+#define R_X86_64_PC8 15 /* Add 8 bit sign extended pc relative symbol value */
+#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
+#define R_X86_64_DTPOFF64 17 /* Offset in TLS block */
+#define R_X86_64_TPOFF64 18 /* Offset in static TLS block */
+#define R_X86_64_TLSGD 19 /* PC relative offset to GD GOT entry */
+#define R_X86_64_TLSLD 20 /* PC relative offset to LD GOT entry */
+#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
+#define R_X86_64_GOTTPOFF 22 /* PC relative offset to IE GOT entry */
+#define R_X86_64_TPOFF32 23 /* Offset in static TLS block */
+#define R_X86_64_IRELATIVE 37
+
+
+#endif /* !_SYS_ELF_COMMON_H_ */
diff --git a/core/arch/arm/kernel/elf_load.c b/core/arch/arm/kernel/elf_load.c
new file mode 100644
index 0000000..420ba59
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <types_ext.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <kernel/tee_misc.h>
+#include <tee/tee_cryp_provider.h>
+#include <stdlib.h>
+#include <string.h>
+#include <util.h>
+#include <trace.h>
+#include "elf_load.h"
+#include "elf_common.h"
+#include "elf32.h"
+#include "elf64.h"
+
+struct elf_load_state {
+ bool is_32bit;
+
+ uint8_t *nwdata;
+ size_t nwdata_len;
+
+ void *hash_ctx;
+ uint32_t hash_algo;
+
+ size_t next_offs;
+
+ void *ta_head;
+ size_t ta_head_size;
+
+ void *ehdr;
+ void *phdr;
+
+ size_t vasize;
+ void *shdr;
+};
+
+/* Replicates the fields we need from Elf{32,64}_Ehdr */
+struct elf_ehdr {
+ size_t e_phoff;
+ size_t e_shoff;
+ uint32_t e_phentsize;
+ uint32_t e_phnum;
+ uint32_t e_shentsize;
+ uint32_t e_shnum;
+};
+
+/* Replicates the fields we need from Elf{32,64}_Phdr */
+struct elf_phdr {
+ uint32_t p_type;
+ uint32_t p_flags;
+ uintptr_t p_vaddr;
+ size_t p_filesz;
+ size_t p_memsz;
+ size_t p_offset;
+};
+
+#ifdef ARM64
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) \
+ do { \
+ if ((state)->is_32bit) { \
+ is_32bit_action; \
+ } else { \
+ is_64bit_action; \
+ } \
+ } while (0)
+#else
+/* No need to assert state->is_32bit since that is caught before this is used */
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) is_32bit_action
+#endif
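+
+/*
+ * Illustrative note (not part of the original sources): DO_ACTION() lets the
+ * accessors below share one body for both ELF classes. For example,
+ *
+ *   DO_ACTION(state, return ((Elf32_Shdr *)state->shdr + idx)->sh_type,
+ *                    return ((Elf64_Shdr *)state->shdr + idx)->sh_type);
+ *
+ * expands to only the 32-bit branch on AArch32 builds, and to a runtime
+ * check of state->is_32bit on ARM64 builds where both classes may be loaded.
+ */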
+
+#define COPY_EHDR(dst, src) \
+ do { \
+ (dst)->e_phoff = (src)->e_phoff; \
+ (dst)->e_shoff = (src)->e_shoff; \
+ (dst)->e_phentsize = (src)->e_phentsize; \
+ (dst)->e_phnum = (src)->e_phnum; \
+ (dst)->e_shentsize = (src)->e_shentsize; \
+ (dst)->e_shnum = (src)->e_shnum; \
+ } while (0)
+static void copy_ehdr(struct elf_ehdr *ehdr, struct elf_load_state *state)
+{
+ DO_ACTION(state, COPY_EHDR(ehdr, ((Elf32_Ehdr *)state->ehdr)),
+ COPY_EHDR(ehdr, ((Elf64_Ehdr *)state->ehdr)));
+}
+
+static uint32_t get_shdr_type(struct elf_load_state *state, size_t idx)
+{
+ DO_ACTION(state, return ((Elf32_Shdr *)state->shdr + idx)->sh_type,
+ return ((Elf64_Shdr *)state->shdr + idx)->sh_type);
+}
+
+#define COPY_PHDR(dst, src) \
+ do { \
+ (dst)->p_type = (src)->p_type; \
+ (dst)->p_vaddr = (src)->p_vaddr; \
+ (dst)->p_filesz = (src)->p_filesz; \
+ (dst)->p_memsz = (src)->p_memsz; \
+ (dst)->p_offset = (src)->p_offset; \
+ (dst)->p_flags = (src)->p_flags; \
+ } while (0)
+static void copy_phdr(struct elf_phdr *phdr, struct elf_load_state *state,
+ size_t idx)
+{
+ DO_ACTION(state, COPY_PHDR(phdr, ((Elf32_Phdr *)state->phdr + idx)),
+ COPY_PHDR(phdr, ((Elf64_Phdr *)state->phdr + idx)));
+}
+
+static TEE_Result advance_to(struct elf_load_state *state, size_t offs)
+{
+ TEE_Result res;
+
+ if (offs < state->next_offs)
+ return TEE_ERROR_BAD_STATE;
+ if (offs == state->next_offs)
+ return TEE_SUCCESS;
+
+ if (offs > state->nwdata_len)
+ return TEE_ERROR_SECURITY;
+
+ res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+ state->nwdata + state->next_offs,
+ offs - state->next_offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->next_offs = offs;
+ return res;
+}
+
+static TEE_Result copy_to(struct elf_load_state *state,
+ void *dst, size_t dst_size, size_t dst_offs,
+ size_t offs, size_t len)
+{
+ TEE_Result res;
+
+ res = advance_to(state, offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ if (!len)
+ return TEE_SUCCESS;
+
+ /* Check for integer overflow */
+ if ((len + dst_offs) < dst_offs || (len + dst_offs) > dst_size ||
+ (len + offs) < offs || (len + offs) > state->nwdata_len)
+ return TEE_ERROR_SECURITY;
+
+ memcpy((uint8_t *)dst + dst_offs, state->nwdata + offs, len);
+ res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+ (uint8_t *)dst + dst_offs, len);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->next_offs = offs + len;
+ return res;
+}
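+
+/*
+ * Illustrative note (not part of the original sources): the checks in
+ * copy_to() rely on unsigned wrap-around to catch overflows. If a caller
+ * passed, say, len = SIZE_MAX and dst_offs = 16, then (len + dst_offs)
+ * wraps to 15, which is smaller than dst_offs, so the request is rejected
+ * with TEE_ERROR_SECURITY instead of turning into an out-of-bounds memcpy().
+ */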
+
+static TEE_Result alloc_and_copy_to(void **p, struct elf_load_state *state,
+ size_t offs, size_t len)
+{
+ TEE_Result res;
+ void *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ res = copy_to(state, buf, len, 0, offs, len);
+ if (res == TEE_SUCCESS)
+ *p = buf;
+ else
+ free(buf);
+ return res;
+}
+
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+ size_t nwdata_len, struct elf_load_state **ret_state)
+{
+ struct elf_load_state *state;
+
+ state = calloc(1, sizeof(*state));
+ if (!state)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ state->hash_ctx = hash_ctx;
+ state->hash_algo = hash_algo;
+ state->nwdata = nwdata;
+ state->nwdata_len = nwdata_len;
+ *ret_state = state;
+ return TEE_SUCCESS;
+}
+
+static TEE_Result e32_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *ehdr)
+{
+ if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
+ ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
+ ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
+ ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+ ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
+ (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
+#ifndef CFG_WITH_VFP
+ (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
+#endif
+ ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
+ ehdr->e_shentsize != sizeof(Elf32_Shdr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ state->ehdr = malloc(sizeof(*ehdr));
+ if (!state->ehdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ memcpy(state->ehdr, ehdr, sizeof(*ehdr));
+ state->is_32bit = true;
+ return TEE_SUCCESS;
+}
+
+#ifdef ARM64
+static TEE_Result e64_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *eh32)
+{
+ TEE_Result res;
+ Elf64_Ehdr *ehdr = NULL;
+
+ if (eh32->e_ident[EI_VERSION] != EV_CURRENT ||
+ eh32->e_ident[EI_CLASS] != ELFCLASS64 ||
+ eh32->e_ident[EI_DATA] != ELFDATA2LSB ||
+ eh32->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+ eh32->e_type != ET_DYN || eh32->e_machine != EM_AARCH64)
+ return TEE_ERROR_BAD_FORMAT;
+
+ ehdr = malloc(sizeof(*ehdr));
+ if (!ehdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ state->ehdr = ehdr;
+ memcpy(ehdr, eh32, sizeof(*eh32));
+ res = copy_to(state, ehdr, sizeof(*ehdr), sizeof(*eh32),
+ sizeof(*eh32), sizeof(*ehdr) - sizeof(*eh32));
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
+ ehdr->e_shentsize != sizeof(Elf64_Shdr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ state->ehdr = ehdr;
+ state->is_32bit = false;
+ return TEE_SUCCESS;
+}
+#else /*ARM64*/
+static TEE_Result e64_load_ehdr(struct elf_load_state *state __unused,
+ Elf32_Ehdr *eh32 __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
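+
+/*
+ * Note (descriptive, not in the original sources): elf_load_head() below
+ * always reads the first sizeof(Elf32_Ehdr) bytes of the image. The fields
+ * checked before the class is known (e_ident, e_type, e_machine) live in the
+ * part of the header that ELFCLASS32 and ELFCLASS64 have in common, so they
+ * can safely be inspected through the 32-bit view; e64_load_ehdr() then
+ * fetches the remaining sizeof(Elf64_Ehdr) - sizeof(Elf32_Ehdr) bytes to
+ * complete the 64-bit header.
+ */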
+
+static TEE_Result load_head(struct elf_load_state *state, size_t head_size)
+{
+ TEE_Result res;
+ size_t n;
+ void *p;
+ struct elf_ehdr ehdr;
+ struct elf_phdr phdr;
+ struct elf_phdr phdr0;
+
+ copy_ehdr(&ehdr, state);
+ /*
+ * Program headers are supposed to be arranged as:
+ * PT_LOAD [0] : .ta_head ...
+ * ...
+ * PT_LOAD [n]
+ *
+ * .ta_head must be located first in the first program header,
+ * which also has to be of PT_LOAD type.
+ *
+ * A PT_DYNAMIC segment may appear, but is ignored. Any other
+ * segment except PT_LOAD and PT_DYNAMIC will cause an error. All
+ * sections not included by a PT_LOAD segment are ignored.
+ */
+ if (ehdr.e_phnum < 1)
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check for integer overflow */
+ if (((uint64_t)ehdr.e_phnum * ehdr.e_phentsize) > SIZE_MAX)
+ return TEE_ERROR_SECURITY;
+
+ res = alloc_and_copy_to(&p, state, ehdr.e_phoff,
+ ehdr.e_phnum * ehdr.e_phentsize);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->phdr = p;
+
+ /*
+ * Check that the first program header is a PT_LOAD (not strictly
+ * needed but our link script is supposed to arrange it that way)
+ * and that it starts at virtual address 0.
+ */
+ copy_phdr(&phdr0, state, 0);
+ if (phdr0.p_type != PT_LOAD || phdr0.p_vaddr != 0)
+ return TEE_ERROR_BAD_FORMAT;
+
+ /*
+ * Calculate the amount of virtual memory required for the TA. Find
+ * the max address used by a PT_LOAD segment. Note that the last
+ * PT_LOAD segment dictates the total amount of memory needed. Any
+ * holes in the memory will also be allocated.
+ *
+ * Note that this loop will terminate at n = 0 if not earlier
+ * as we already know from above that state->phdr[0].p_type == PT_LOAD
+ */
+ n = ehdr.e_phnum;
+ do {
+ n--;
+ copy_phdr(&phdr, state, n);
+ } while (phdr.p_type != PT_LOAD);
+ state->vasize = phdr.p_vaddr + phdr.p_memsz;
+
+ /* Check for integer overflow */
+ if (state->vasize < phdr.p_vaddr)
+ return TEE_ERROR_SECURITY;
+
+ /*
+ * Read .ta_head from first segment, make sure the segment is large
+ * enough. We're only interested in seeing that the
+ * TA_FLAG_EXEC_DDR flag is set. If that's true we set that flag in
+ * the TA context to enable mapping the TA. Later when this
+ * function has returned and the hash has been verified, the flags
+ * field will be updated with any other flags.
+ */
+ if (phdr0.p_filesz < head_size)
+ return TEE_ERROR_BAD_FORMAT;
+ res = alloc_and_copy_to(&p, state, phdr0.p_offset, head_size);
+ if (res == TEE_SUCCESS) {
+ state->ta_head = p;
+ state->ta_head_size = head_size;
+ }
+ return res;
+}
+
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+ void **head, size_t *vasize, bool *is_32bit)
+{
+ TEE_Result res;
+ Elf32_Ehdr ehdr;
+
+ /*
+ * The ELF resides in shared memory; to avoid attacks based on
+ * modifying the ELF while we're parsing it here, we only read each
+ * byte from the ELF once. We're also hashing the ELF while reading,
+ * so we're limited to reading the ELF sequentially from start to
+ * end.
+ */
+
+ res = copy_to(state, &ehdr, sizeof(ehdr), 0, 0, sizeof(Elf32_Ehdr));
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (!IS_ELF(ehdr))
+ return TEE_ERROR_BAD_FORMAT;
+ res = e32_load_ehdr(state, &ehdr);
+ if (res == TEE_ERROR_BAD_FORMAT)
+ res = e64_load_ehdr(state, &ehdr);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = load_head(state, head_size);
+ if (res == TEE_SUCCESS) {
+ *head = state->ta_head;
+ *vasize = state->vasize;
+ *is_32bit = state->is_32bit;
+ }
+ return res;
+}
+
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+ vaddr_t *vaddr, size_t *size, uint32_t *flags)
+{
+ struct elf_ehdr ehdr;
+
+ copy_ehdr(&ehdr, state);
+ while (*idx < ehdr.e_phnum) {
+ struct elf_phdr phdr;
+
+ copy_phdr(&phdr, state, *idx);
+ (*idx)++;
+ if (phdr.p_type == PT_LOAD) {
+ if (vaddr)
+ *vaddr = phdr.p_vaddr;
+ if (size)
+ *size = phdr.p_memsz;
+ if (flags)
+ *flags = phdr.p_flags;
+ return TEE_SUCCESS;
+ }
+ }
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
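+
+/*
+ * Usage sketch (illustrative only, map_segment() is a hypothetical
+ * caller-side helper): a loader typically walks the PT_LOAD segments with
+ * something like
+ *
+ *   size_t idx = 0;
+ *   vaddr_t va;
+ *   size_t sz;
+ *   uint32_t flags;
+ *
+ *   while (elf_load_get_next_segment(state, &idx, &va, &sz, &flags) ==
+ *          TEE_SUCCESS)
+ *           map_segment(va, sz, flags);
+ *
+ * before calling elf_load_body() with the base of the reserved area.
+ */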
+
+static TEE_Result e32_process_rel(struct elf_load_state *state, size_t rel_sidx,
+ vaddr_t vabase)
+{
+ Elf32_Ehdr *ehdr = state->ehdr;
+ Elf32_Shdr *shdr = state->shdr;
+ Elf32_Rel *rel;
+ Elf32_Rel *rel_end;
+ size_t sym_tab_idx;
+ Elf32_Sym *sym_tab = NULL;
+ size_t num_syms = 0;
+
+ if (shdr[rel_sidx].sh_type != SHT_REL)
+ return TEE_ERROR_NOT_IMPLEMENTED;
+
+ if (shdr[rel_sidx].sh_entsize != sizeof(Elf32_Rel))
+ return TEE_ERROR_BAD_FORMAT;
+
+ sym_tab_idx = shdr[rel_sidx].sh_link;
+ if (sym_tab_idx) {
+ if (sym_tab_idx >= ehdr->e_shnum)
+ return TEE_ERROR_BAD_FORMAT;
+
+ if (shdr[sym_tab_idx].sh_entsize != sizeof(Elf32_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if (shdr[sym_tab_idx].sh_addr > state->vasize ||
+ (shdr[sym_tab_idx].sh_addr +
+ shdr[sym_tab_idx].sh_size) > state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ sym_tab = (Elf32_Sym *)(vabase + shdr[sym_tab_idx].sh_addr);
+ if (!ALIGNMENT_IS_OK(sym_tab, Elf32_Sym))
+ return TEE_ERROR_BAD_FORMAT;
+
+ num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);
+ }
+
+ /* Check the address is inside TA memory */
+ if (shdr[rel_sidx].sh_addr >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rel = (Elf32_Rel *)(vabase + shdr[rel_sidx].sh_addr);
+ if (!ALIGNMENT_IS_OK(rel, Elf32_Rel))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
+ for (; rel < rel_end; rel++) {
+ Elf32_Addr *where;
+ size_t sym_idx;
+
+ /* Check the address is inside TA memory */
+ if (rel->r_offset >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ where = (Elf32_Addr *)(vabase + rel->r_offset);
+ if (!ALIGNMENT_IS_OK(where, Elf32_Addr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_ARM_ABS32:
+ sym_idx = ELF32_R_SYM(rel->r_info);
+ if (sym_idx >= num_syms)
+ return TEE_ERROR_BAD_FORMAT;
+
+ *where += vabase + sym_tab[sym_idx].st_value;
+ break;
+ case R_ARM_RELATIVE:
+ *where += vabase;
+ break;
+ default:
+ EMSG("Unknown relocation type %d",
+ ELF32_R_TYPE(rel->r_info));
+ return TEE_ERROR_BAD_FORMAT;
+ }
+ }
+ return TEE_SUCCESS;
+}
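+
+/*
+ * Worked example (illustrative, not part of the original sources): assume
+ * the TA is mapped at vabase = 0x104000 and the word being relocated holds
+ * the link-time value 0x1234.
+ *  - R_ARM_RELATIVE: the word becomes 0x1234 + 0x104000 = 0x105234.
+ *  - R_ARM_ABS32 against a symbol with st_value 0x2000: the word becomes
+ *    0x1234 + 0x104000 + 0x2000 = 0x107234.
+ */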
+
+#ifdef ARM64
+static TEE_Result e64_process_rel(struct elf_load_state *state,
+ size_t rel_sidx, vaddr_t vabase)
+{
+ Elf64_Shdr *shdr = state->shdr;
+ Elf64_Rela *rela;
+ Elf64_Rela *rela_end;
+
+ if (shdr[rel_sidx].sh_type != SHT_RELA)
+ return TEE_ERROR_NOT_IMPLEMENTED;
+
+ if (shdr[rel_sidx].sh_entsize != sizeof(Elf64_Rela))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if (shdr[rel_sidx].sh_addr >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rela = (Elf64_Rela *)(vabase + shdr[rel_sidx].sh_addr);
+ if (!ALIGNMENT_IS_OK(rela, Elf64_Rela))
+ return TEE_ERROR_BAD_FORMAT;
+
+ /* Check the address is inside TA memory */
+ if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+ rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
+ for (; rela < rela_end; rela++) {
+ Elf64_Addr *where;
+
+ /* Check the address is inside TA memory */
+ if (rela->r_offset >= state->vasize)
+ return TEE_ERROR_BAD_FORMAT;
+
+ where = (Elf64_Addr *)(vabase + rela->r_offset);
+ if (!ALIGNMENT_IS_OK(where, Elf64_Addr))
+ return TEE_ERROR_BAD_FORMAT;
+
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_AARCH64_RELATIVE:
+ *where = rela->r_addend + vabase;
+ break;
+ default:
+ EMSG("Unknown relocation type %zd",
+ ELF64_R_TYPE(rela->r_info));
+ return TEE_ERROR_BAD_FORMAT;
+ }
+ }
+ return TEE_SUCCESS;
+}
+#else /*ARM64*/
+static TEE_Result e64_process_rel(struct elf_load_state *state __unused,
+ size_t rel_sidx __unused, vaddr_t vabase __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
+
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase)
+{
+ TEE_Result res;
+ size_t n;
+ void *p;
+ uint8_t *dst = (uint8_t *)vabase;
+ struct elf_ehdr ehdr;
+ size_t offs;
+
+ copy_ehdr(&ehdr, state);
+
+ /*
+ * Zero initialize everything to make sure that all memory not
+ * updated from the ELF is zero (covering .bss and any gaps).
+ */
+ memset(dst, 0, state->vasize);
+
+ /*
+ * Copy the segments
+ */
+ memcpy(dst, state->ta_head, state->ta_head_size);
+ offs = state->ta_head_size;
+ for (n = 0; n < ehdr.e_phnum; n++) {
+ struct elf_phdr phdr;
+
+ copy_phdr(&phdr, state, n);
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ res = copy_to(state, dst, state->vasize,
+ phdr.p_vaddr + offs,
+ phdr.p_offset + offs,
+ phdr.p_filesz - offs);
+ if (res != TEE_SUCCESS)
+ return res;
+ offs = 0;
+ }
+
+ /*
+ * We have now loaded all segments into TA memory; now we need to
+ * process relocation information. To find relocation information
+ * we need to locate the section headers. The section headers are
+ * located somewhere between the last segment and the end of the
+ * ELF.
+ */
+ if (ehdr.e_shoff) {
+ /* We have section headers */
+ res = alloc_and_copy_to(&p, state, ehdr.e_shoff,
+ ehdr.e_shnum * ehdr.e_shentsize);
+ if (res != TEE_SUCCESS)
+ return res;
+ state->shdr = p;
+ }
+
+ /* Hash until end of ELF */
+ res = advance_to(state, state->nwdata_len);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (state->shdr) {
+ TEE_Result (*process_rel)(struct elf_load_state *state,
+ size_t rel_sidx, vaddr_t vabase);
+
+ if (state->is_32bit)
+ process_rel = e32_process_rel;
+ else
+ process_rel = e64_process_rel;
+
+ /* Process relocation */
+ for (n = 0; n < ehdr.e_shnum; n++) {
+ uint32_t sh_type = get_shdr_type(state, n);
+
+ if (sh_type == SHT_REL || sh_type == SHT_RELA) {
+ res = process_rel(state, n, vabase);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+void elf_load_final(struct elf_load_state *state)
+{
+ if (state) {
+ free(state->ta_head);
+ free(state->ehdr);
+ free(state->phdr);
+ free(state->shdr);
+ free(state);
+ }
+}
diff --git a/core/arch/arm/kernel/elf_load.h b/core/arch/arm/kernel/elf_load.h
new file mode 100644
index 0000000..4944e3a
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ELF_LOAD_H
+#define ELF_LOAD_H
+
+#include <types_ext.h>
+#include <tee_api_types.h>
+
+struct elf_load_state;
+
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+ size_t nwdata_len, struct elf_load_state **state);
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+ void **head, size_t *vasize, bool *is_32bit);
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase);
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+ vaddr_t *vaddr, size_t *size, uint32_t *flags);
+void elf_load_final(struct elf_load_state *state);
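+
+/*
+ * Typical call sequence (illustrative sketch based on how these functions
+ * are implemented in elf_load.c; memory reservation and hash verification
+ * are the caller's responsibility):
+ *
+ *   elf_load_init(hash_ctx, hash_algo, nwdata, nwdata_len, &state);
+ *   elf_load_head(state, head_size, &head, &vasize, &is_32bit);
+ *   (reserve and map 'vasize' bytes of TA virtual memory at 'vabase',
+ *    optionally using elf_load_get_next_segment() to pick permissions)
+ *   elf_load_body(state, vabase);
+ *   elf_load_final(state);
+ */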
+
+#endif /*ELF_LOAD_H*/
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
new file mode 100644
index 0000000..8f13c36
--- /dev/null
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <compiler.h>
+#include <inttypes.h>
+#include <keep.h>
+#include <kernel/generic_boot.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <kernel/asan.h>
+#include <malloc.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <sm/tee_mon.h>
+#include <trace.h>
+#include <tee/tee_cryp_provider.h>
+#include <utee_defines.h>
+#include <util.h>
+#include <stdio.h>
+
+#include <platform_config.h>
+
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+#include <sm/sm.h>
+#endif
+
+#if defined(CFG_WITH_VFP)
+#include <kernel/vfp.h>
+#endif
+
+#if defined(CFG_DT)
+#include <libfdt.h>
+#endif
+
+/*
+ * In this file we're using unsigned long to represent physical pointers as
+ * they are received in a single register when OP-TEE is initially entered.
+ * This limits 32-bit systems to only making use of the lower 32 bits
+ * of a physical address for initial parameters.
+ *
+ * 64-bit systems on the other hand can use full 64-bit physical pointers.
+ */
+#define PADDR_INVALID ULONG_MAX
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+paddr_t ns_entry_addrs[CFG_TEE_CORE_NB_CORE] __early_bss;
+static uint32_t spin_table[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+#ifdef CFG_BOOT_SYNC_CPU
+/*
+ * Array used when booting to synchronize the CPUs.
+ * When 0, the CPU has not started.
+ * When 1, it has started.
+ */
+uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_late(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_late);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_early(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_early);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_init_gic(void)
+{
+}
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_secondary_init_gic(void)
+{
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+void init_sec_mon(unsigned long nsec_entry __maybe_unused)
+{
+ assert(nsec_entry == PADDR_INVALID);
+ /* Do nothing as we don't have a secure monitor */
+}
+#else
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void init_sec_mon(unsigned long nsec_entry)
+{
+ struct sm_nsec_ctx *nsec_ctx;
+
+ assert(nsec_entry != PADDR_INVALID);
+
+ /* Initialize secure monitor */
+ nsec_ctx = sm_get_nsec_ctx();
+ nsec_ctx->mon_lr = nsec_entry;
+ nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
+
+}
+#endif
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+static void init_vfp_nsec(void)
+{
+}
+#else
+static void init_vfp_nsec(void)
+{
+ /* Normal world can use CP10 and CP11 (SIMD/VFP) */
+ write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
+}
+#endif
+
+#if defined(CFG_WITH_VFP)
+
+#ifdef ARM32
+static void init_vfp_sec(void)
+{
+ uint32_t cpacr = read_cpacr();
+
+ /*
+ * Enable Advanced SIMD functionality.
+ * Enable use of D16-D31 of the Floating-point Extension register
+ * file.
+ */
+ cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
+ /*
+ * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
+ * mode.
+ */
+ cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
+ cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
+ write_cpacr(cpacr);
+}
+#endif /* ARM32 */
+
+#ifdef ARM64
+static void init_vfp_sec(void)
+{
+ /* Not using VFP until thread_kernel_enable_vfp() */
+ vfp_disable();
+}
+#endif /* ARM64 */
+
+#else /* CFG_WITH_VFP */
+
+static void init_vfp_sec(void)
+{
+ /* Not using VFP */
+}
+#endif
+
+#ifdef CFG_WITH_PAGER
+
+static size_t get_block_size(void)
+{
+ struct core_mmu_table_info tbl_info;
+ unsigned l;
+
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
+ panic("can't find mmu tables");
+
+ l = tbl_info.level - 1;
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
+ panic("can't find mmu table upper level");
+
+ return 1 << tbl_info.shift;
+}
+
+static void init_runtime(unsigned long pageable_part)
+{
+ size_t n;
+ size_t init_size = (size_t)__init_size;
+ size_t pageable_size = __pageable_end - __pageable_start;
+ size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
+ TEE_SHA256_HASH_SIZE;
+ tee_mm_entry_t *mm;
+ uint8_t *paged_store;
+ uint8_t *hashes;
+ size_t block_size;
+
+ assert(pageable_size % SMALL_PAGE_SIZE == 0);
+ assert(hash_size == (size_t)__tmp_hashes_size);
+
+ /*
+ * Zero the BSS area. Note that globals which would normally go
+ * into BSS but are used before this point have to be placed in
+ * .nozi.* to avoid getting overwritten.
+ */
+ memset(__bss_start, 0, __bss_end - __bss_start);
+
+ /*
+ * This needs to be initialized early to support address lookup
+ * in MEM_AREA_TEE_RAM
+ */
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
+ &tee_pager_tbl_info))
+ panic("can't find mmu tables");
+
+ if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
+ panic("Unsupported page size in translation table");
+
+ thread_init_boot_thread();
+
+ malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+ malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
+
+ hashes = malloc(hash_size);
+ IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
+ assert(hashes);
+ memcpy(hashes, __tmp_hashes_start, hash_size);
+
+ /*
+ * Need tee_mm_sec_ddr initialized to be able to allocate secure
+ * DDR below.
+ */
+ teecore_init_ta_ram();
+
+ mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
+ assert(mm);
+ paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
+ /* Copy init part into pageable area */
+ memcpy(paged_store, __init_start, init_size);
+ /* Copy pageable part after init part into pageable area */
+ memcpy(paged_store + init_size,
+ phys_to_virt(pageable_part,
+ core_mmu_get_type_by_pa(pageable_part)),
+ __pageable_part_end - __pageable_part_start);
+
+ /* Check that hashes of what's in pageable area is OK */
+ DMSG("Checking hashes of pageable area");
+ for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
+ const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
+ const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
+ TEE_Result res;
+
+ DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
+ res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
+ if (res != TEE_SUCCESS) {
+ EMSG("Hash failed for page %zu at %p: res 0x%x",
+ n, page, res);
+ panic();
+ }
+ }
+
+ /*
+ * Copy what's not initialized in the last init page. Needed
+ * because we're not going to fault in the init pages again. We can't
+ * fault in pages until we've switched to the new vector by calling
+ * thread_init_handlers() below.
+ */
+ if (init_size % SMALL_PAGE_SIZE) {
+ uint8_t *p;
+
+ memcpy(__init_start + init_size, paged_store + init_size,
+ SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));
+
+ p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
+ ~SMALL_PAGE_MASK);
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
+ SMALL_PAGE_SIZE);
+ }
+
+ /*
+ * Initialize the virtual memory pool used for main_mmu_l2_ttb which
+ * is supplied to tee_pager_init() below.
+ */
+ block_size = get_block_size();
+ if (!tee_mm_init(&tee_mm_vcore,
+ ROUNDDOWN(CFG_TEE_RAM_START, block_size),
+ ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
+ block_size),
+ SMALL_PAGE_SHIFT, 0))
+ panic("tee_mm_vcore init failed");
+
+ /*
+ * Assign the pager's alias area at the end of the small page block
+ * that the rest of the binary is loaded into. We're taking more
+ * than needed, but we're guaranteed not to need more than the
+ * physical amount of TZSRAM.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore,
+ (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
+ assert(mm);
+ tee_pager_init(mm);
+
+ /*
+ * Claim virtual memory which isn't paged. Note that there might be
+ * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
+ * claimed so that later allocations don't get that memory.
+ * Linear memory (flat map core memory) ends there.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
+ (vaddr_t)(__pageable_start - tee_mm_vcore.lo));
+ assert(mm);
+
+ /*
+ * Allocate virtual memory for the pageable area and let the pager
+ * take charge of all the pages already assigned to that memory.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
+ pageable_size);
+ assert(mm);
+ if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ TEE_MATTR_PRX, paged_store, hashes))
+ panic("failed to add pageable to vcore");
+
+ tee_pager_add_pages((vaddr_t)__pageable_start,
+ ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
+ tee_pager_add_pages((vaddr_t)__pageable_start +
+ ROUNDUP(init_size, SMALL_PAGE_SIZE),
+ (pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
+ SMALL_PAGE_SIZE, true);
+
+}
+#else
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+static void init_run_constructors(void)
+{
+ vaddr_t *ctor;
+
+ for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
+ ((void (*)(void))(*ctor))();
+}
+
+static void init_asan(void)
+{
+
+ /*
+ * CFG_ASAN_SHADOW_OFFSET is also supplied as
+ * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
+ * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
+ * aren't available to make, we have to calculate it in advance and
+ * hard-code it into the platform conf.mk. Here, where we have all
+ * the needed values, we double check that the compiler was supplied
+ * the correct value.
+ */
+
+#define __ASAN_SHADOW_START \
+ ROUNDUP(CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
+ assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
+#define __CFG_ASAN_SHADOW_OFFSET \
+ (__ASAN_SHADOW_START - (CFG_TEE_RAM_START / 8))
+ COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
+#undef __ASAN_SHADOW_START
+#undef __CFG_ASAN_SHADOW_OFFSET
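+
+ /*
+ * Illustrative note (assumption based on the usual kernel-ASan scheme,
+ * not spelled out in this file): with -fasan-shadow-offset the compiler
+ * maps every 8 bytes of memory to one shadow byte, roughly
+ *
+ *   shadow_byte(addr) = (addr / 8) + CFG_ASAN_SHADOW_OFFSET
+ *
+ * which is why the offset above is derived from CFG_TEE_RAM_START / 8 and
+ * why the shadow area takes about 1/9 of CFG_TEE_RAM_VA_SIZE.
+ */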
+
+ /*
+ * Assign area covered by the shadow area, everything from start up
+ * to the beginning of the shadow area.
+ */
+ asan_set_shadowed((void *)CFG_TEE_LOAD_ADDR, &__asan_shadow_start);
+
+ /*
+ * Add access to areas that aren't opened automatically by a
+ * constructor.
+ */
+ asan_tag_access(&__initcall_start, &__initcall_end);
+ asan_tag_access(&__ctor_list, &__ctor_end);
+ asan_tag_access(__rodata_start, __rodata_end);
+ asan_tag_access(__early_bss_start, __early_bss_end);
+ asan_tag_access(__nozi_start, __nozi_end);
+
+ init_run_constructors();
+
+ /* Everything is tagged correctly, let's start address sanitizing. */
+ asan_start();
+}
+#else /*CFG_CORE_SANITIZE_KADDRESS*/
+static void init_asan(void)
+{
+}
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+static void init_runtime(unsigned long pageable_part __unused)
+{
+ /*
+ * Zero the BSS area. Note that globals which would normally go
+ * into BSS but are used before this point have to be placed in
+ * .nozi.* to avoid getting overwritten.
+ */
+ memset(__bss_start, 0, __bss_end - __bss_start);
+
+ thread_init_boot_thread();
+
+ init_asan();
+ malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+
+ /*
+ * Initialized at this stage in the pager version of this function
+ * above
+ */
+ teecore_init_ta_ram();
+}
+#endif
+
+#ifdef CFG_DT
+static int add_optee_dt_node(void *fdt)
+{
+ int offs;
+ int ret;
+
+ if (fdt_path_offset(fdt, "/firmware/optee") >= 0) {
+ IMSG("OP-TEE Device Tree node already exists!\n");
+ return 0;
+ }
+
+ offs = fdt_path_offset(fdt, "/firmware");
+ if (offs < 0) {
+ offs = fdt_path_offset(fdt, "/");
+ if (offs < 0)
+ return -1;
+ offs = fdt_add_subnode(fdt, offs, "firmware");
+ if (offs < 0)
+ return -1;
+ }
+
+ offs = fdt_add_subnode(fdt, offs, "optee");
+ if (offs < 0)
+ return -1;
+
+ ret = fdt_setprop_string(fdt, offs, "compatible", "linaro,optee-tz");
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop_string(fdt, offs, "method", "smc");
+ if (ret < 0)
+ return -1;
+ return 0;
+}
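+
+/*
+ * For reference (illustrative, derived from the properties set above), the
+ * node added to the device tree looks like:
+ *
+ *   firmware {
+ *           optee {
+ *                   compatible = "linaro,optee-tz";
+ *                   method = "smc";
+ *           };
+ *   };
+ */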
+
+static int get_dt_cell_size(void *fdt, int offs, const char *cell_name,
+ uint32_t *cell_size)
+{
+ int len;
+ const uint32_t *cell = fdt_getprop(fdt, offs, cell_name, &len);
+
+ if (len != sizeof(*cell))
+ return -1;
+ *cell_size = fdt32_to_cpu(*cell);
+ if (*cell_size != 1 && *cell_size != 2)
+ return -1;
+ return 0;
+}
+
+static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
+{
+ if (cell_size == 1) {
+ uint32_t v = cpu_to_fdt32((uint32_t)val);
+
+ memcpy(data, &v, sizeof(v));
+ } else {
+ uint64_t v = cpu_to_fdt64(val);
+
+ memcpy(data, &v, sizeof(v));
+ }
+}
+
+static int add_optee_res_mem_dt_node(void *fdt)
+{
+ int offs;
+ int ret;
+ uint32_t addr_size = 2;
+ uint32_t len_size = 2;
+ vaddr_t shm_va_start;
+ vaddr_t shm_va_end;
+ paddr_t shm_pa;
+ char subnode_name[80];
+
+ offs = fdt_path_offset(fdt, "/reserved-memory");
+ if (offs >= 0) {
+ ret = get_dt_cell_size(fdt, offs, "#address-cells", &addr_size);
+ if (ret < 0)
+ return -1;
+ ret = get_dt_cell_size(fdt, offs, "#size-cells", &len_size);
+ if (ret < 0)
+ return -1;
+ } else {
+ offs = fdt_path_offset(fdt, "/");
+ if (offs < 0)
+ return -1;
+ offs = fdt_add_subnode(fdt, offs, "reserved-memory");
+ if (offs < 0)
+ return -1;
+ ret = fdt_setprop_cell(fdt, offs, "#address-cells", addr_size);
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop_cell(fdt, offs, "#size-cells", len_size);
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop(fdt, offs, "ranges", NULL, 0);
+ if (ret < 0)
+ return -1;
+ }
+
+ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
+ shm_pa = virt_to_phys((void *)shm_va_start);
+ snprintf(subnode_name, sizeof(subnode_name),
+ "optee@0x%" PRIxPA, shm_pa);
+ offs = fdt_add_subnode(fdt, offs, subnode_name);
+ if (offs >= 0) {
+ uint32_t data[addr_size + len_size];
+
+ set_dt_val(data, addr_size, shm_pa);
+ set_dt_val(data + addr_size, len_size,
+ shm_va_end - shm_va_start);
+ ret = fdt_setprop(fdt, offs, "reg", data, sizeof(data));
+ if (ret < 0)
+ return -1;
+ ret = fdt_setprop(fdt, offs, "no-map", NULL, 0);
+ if (ret < 0)
+ return -1;
+ } else {
+ return -1;
+ }
+ return 0;
+}
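+
+/*
+ * For reference (illustrative, the address and size are made up and the
+ * cells shown assume the node had to be created): the resulting
+ * reserved-memory entry looks like:
+ *
+ *   reserved-memory {
+ *           #address-cells = <2>;
+ *           #size-cells = <2>;
+ *           ranges;
+ *
+ *           optee@0x42000000 {
+ *                   reg = <0x0 0x42000000 0x0 0x200000>;
+ *                   no-map;
+ *           };
+ *   };
+ *
+ * covering the non-secure shared memory (MEM_AREA_NSEC_SHM) exported to
+ * normal world.
+ */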
+
+static void init_fdt(unsigned long phys_fdt)
+{
+ void *fdt;
+ int ret;
+
+ if (!phys_fdt) {
+ EMSG("Device Tree missing");
+ /*
+ * No need to panic as we're not using the DT in OP-TEE
+ * yet, we're only adding some nodes for normal world use.
+ * This makes the switch to using DT easier as we can boot
+ * a newer OP-TEE with older boot loaders. Once we start to
+ * initialize devices based on DT we'll likely panic
+ * instead of returning here.
+ */
+ return;
+ }
+
+ if (!core_mmu_add_mapping(MEM_AREA_IO_NSEC, phys_fdt, CFG_DTB_MAX_SIZE))
+ panic("failed to map fdt");
+
+ fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
+ if (!fdt)
+ panic();
+
+ ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
+ if (ret < 0) {
+ EMSG("Invalid Device Tree at 0x%" PRIxPA ": error %d",
+ phys_fdt, ret);
+ panic();
+ }
+
+ if (add_optee_dt_node(fdt))
+ panic("Failed to add OP-TEE Device Tree node");
+
+ if (add_optee_res_mem_dt_node(fdt))
+ panic("Failed to add OP-TEE reserved memory DT node");
+
+ ret = fdt_pack(fdt);
+ if (ret < 0) {
+ EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
+ phys_fdt, ret);
+ panic();
+ }
+}
+#else
+static void init_fdt(unsigned long phys_fdt __unused)
+{
+}
+#endif /*!CFG_DT*/
+
+static void init_primary_helper(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt)
+{
+ /*
+ * Mask asynchronous exceptions before switching to the thread vector
+ * as the thread handler requires those to be masked while
+ * executing with the temporary stack. The thread subsystem also
+ * asserts that IRQ is blocked when using most of its functions.
+ */
+ thread_set_exceptions(THREAD_EXCP_ALL);
+ init_vfp_sec();
+
+ init_runtime(pageable_part);
+
+ IMSG("Initializing (%s)\n", core_v_str);
+
+ thread_init_primary(generic_boot_get_handlers());
+ thread_init_per_cpu();
+ init_sec_mon(nsec_entry);
+ init_fdt(fdt);
+ main_init_gic();
+ init_vfp_nsec();
+
+ if (init_teecore() != TEE_SUCCESS)
+ panic();
+ DMSG("Primary CPU switching to normal world boot\n");
+}
+
+static void init_secondary_helper(unsigned long nsec_entry)
+{
+ /*
+ * Mask asynchronous exceptions before switching to the thread vector
+ * as the thread handler requires those to be masked while
+ * executing with the temporary stack. The thread subsystem also
+ * asserts that IRQ is blocked when using most of its functions.
+ */
+ thread_set_exceptions(THREAD_EXCP_ALL);
+
+ thread_init_per_cpu();
+ init_sec_mon(nsec_entry);
+ main_secondary_init_gic();
+ init_vfp_sec();
+ init_vfp_nsec();
+
+ DMSG("Secondary CPU Switching to normal world boot\n");
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long u __unused,
+ unsigned long fdt)
+{
+ init_primary_helper(pageable_part, PADDR_INVALID, fdt);
+ return &thread_vector_table;
+}
+
+unsigned long generic_boot_cpu_on_handler(unsigned long a0 __maybe_unused,
+ unsigned long a1 __unused)
+{
+ DMSG("cpu %zu: a0 0x%lx", get_core_pos(), a0);
+ init_secondary_helper(PADDR_INVALID);
+ return 0;
+}
+#else
+void generic_boot_init_primary(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt)
+{
+ init_primary_helper(pageable_part, nsec_entry, fdt);
+}
+
+void generic_boot_init_secondary(unsigned long nsec_entry)
+{
+ init_secondary_helper(nsec_entry);
+}
+#endif
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+int generic_boot_core_release(size_t core_idx, paddr_t entry)
+{
+ if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
+ return -1;
+
+ ns_entry_addrs[core_idx] = entry;
+ dmb();
+ spin_table[core_idx] = 1;
+ dsb();
+ sev();
+
+ return 0;
+}
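+
+/*
+ * Note (descriptive, not in the original sources): the dmb() above orders
+ * the store of the entry address before the store that releases the core,
+ * and the dsb()/sev() pair makes the release visible to cores parked in
+ * wfe() inside generic_boot_core_hpen() below.
+ */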
+
+/*
+ * Spin until a secondary boot request arrives, then return
+ * the secondary core's entry address.
+ */
+paddr_t generic_boot_core_hpen(void)
+{
+#ifdef CFG_PSCI_ARM32
+ return ns_entry_addrs[get_core_pos()];
+#else
+ do {
+ wfe();
+ } while (!spin_table[get_core_pos()]);
+ dmb();
+ return ns_entry_addrs[get_core_pos()];
+#endif
+}
+#endif
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
new file mode 100644
index 0000000..27717d5
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+#include <kernel/asan.h>
+
+.section .data
+.balign 4
+
+#ifdef CFG_BOOT_SYNC_CPU
+.equ SEM_CPU_READY, 1
+#endif
+
+#ifdef CFG_PL310
+.section .rodata.init
+panic_boot_file:
+ .asciz __FILE__
+
+/*
+ * void assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
+ */
+.section .text.init
+LOCAL_FUNC __assert_flat_mapped_range , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push { r4-r6, lr }
+ mov r4, r0
+ mov r5, r1
+ bl cpu_mmu_enabled
+ cmp r0, #0
+ beq 1f
+ mov r0, r4
+ bl virt_to_phys
+ cmp r0, r4
+ beq 1f
+ /*
+ * this must be compliant with the panic generic routine:
+ * __do_panic(__FILE__, __LINE__, __func__, str)
+ */
+ ldr r0, =panic_boot_file
+ mov r1, r5
+ mov r2, #0
+ mov r3, #0
+ bl __do_panic
+ b . /* should NOT return */
+1: pop { r4-r6, pc }
+UNWIND( .fnend)
+END_FUNC __assert_flat_mapped_range
+
+ /* panic if the MMU is enabled and vaddr != paddr (scratches lr) */
+ .macro assert_flat_mapped_range va, line
+ ldr r0, =(\va)
+ ldr r1, =\line
+ bl __assert_flat_mapped_range
+ .endm
+#endif /* CFG_PL310 */
+
+.section .text.boot
+FUNC _start , :
+ b reset
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b . /* FIQ */
+END_FUNC _start
+
+ .macro cpu_is_ready
+#ifdef CFG_BOOT_SYNC_CPU
+ bl get_core_pos
+ lsl r0, r0, #2
+ ldr r1,=sem_cpu_sync
+ ldr r2, =SEM_CPU_READY
+ str r2, [r1, r0]
+ dsb
+ sev
+#endif
+ .endm
+
+ .macro wait_primary
+#ifdef CFG_BOOT_SYNC_CPU
+ ldr r0, =sem_cpu_sync
+ mov r2, #SEM_CPU_READY
+ sev
+1:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne 1b
+#endif
+ .endm
+
+ .macro wait_secondary
+#ifdef CFG_BOOT_SYNC_CPU
+ ldr r0, =sem_cpu_sync
+ mov r3, #CFG_TEE_CORE_NB_CORE
+ mov r2, #SEM_CPU_READY
+ sev
+1:
+ subs r3, r3, #1
+ beq 3f
+ add r0, r0, #4
+2:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne 2b
+ b 1b
+3:
+#endif
+ .endm
+
+ /*
+ * Save boot arguments
+ * entry r0, saved r4: pagestore
+ * entry r1, saved r7: (ARMv7 standard bootarg #1)
+ * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
+ * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
+ */
+ .macro bootargs_entry
+#if defined(CFG_NS_ENTRY_ADDR)
+ ldr r5, =CFG_NS_ENTRY_ADDR
+#else
+ mov r5, lr
+#endif
+#if defined(CFG_PAGEABLE_ADDR)
+ ldr r4, =CFG_PAGEABLE_ADDR
+#else
+ mov r4, r0
+#endif
+#if defined(CFG_DT_ADDR)
+ ldr r6, =CFG_DT_ADDR
+#else
+ mov r6, r2
+#endif
+ mov r7, r1
+ .endm
+
+LOCAL_FUNC reset , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ bootargs_entry
+
+ /* Enable alignment checks and disable data and instruction cache. */
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ bic r0, r0, #SCTLR_C
+ bic r0, r0, #SCTLR_I
+ write_sctlr r0
+ isb
+
+ /* Early ARM secure MP specific configuration */
+ bl plat_cpu_reset_early
+
+ ldr r0, =_start
+ write_vbar r0
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ b reset_primary
+#else
+ bl get_core_pos
+ cmp r0, #0
+ beq reset_primary
+ b reset_secondary
+#endif
+UNWIND( .fnend)
+END_FUNC reset
+
+ /*
+ * Setup sp to point to the top of the tmp stack for the current CPU:
+ * sp is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ */
+ .macro set_sp
+ bl get_core_pos
+ cmp r0, #CFG_TEE_CORE_NB_CORE
+ /* Unsupported CPU, park it before it breaks something */
+ bge unhandled_cpu
+ add r0, r0, #1
+ ldr r2, =stack_tmp_stride
+ ldr r1, [r2]
+ mul r2, r0, r1
+ ldr r1, =stack_tmp
+ add r1, r1, r2
+ ldr r2, =stack_tmp_offset
+ ldr r2, [r2]
+ sub sp, r1, r2
+ .endm
+
+ /*
+ * Cache maintenance during entry: handle outer cache.
+ * End address is exclusive: first byte not to be changed.
+ * Note however arm_clX_inv/cleanbyva operate on full cache lines.
+ *
+ * Use ANSI #define to trap source file line number for PL310 assertion
+ */
+ .macro __inval_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_invbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_invbyva
+ .endm
+
+ .macro __flush_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleanbyva
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_cleaninvbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleaninvbyva
+ .endm
+
+#define inval_cache_vrange(vbase, vend) \
+ __inval_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#define flush_cache_vrange(vbase, vend) \
+ __flush_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#ifdef CFG_BOOT_SYNC_CPU
+#define flush_cpu_semaphores \
+ flush_cache_vrange(sem_cpu_sync, \
+ (sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
+#else
+#define flush_cpu_semaphores
+#endif
+
+LOCAL_FUNC reset_primary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ /* preserve r4-r7: bootargs */
+
+#ifdef CFG_WITH_PAGER
+ /*
+ * Move init code into correct location and move hashes to a
+ * temporary safe location until the heap is initialized.
+ *
+ * The binary is built as:
+ * [Pager code, rodata and data] : In correct location
+ * [Init code and rodata] : Should be copied to __text_init_start
+ * [Hashes] : Should be saved before initializing pager
+ *
+ */
+ ldr r0, =__text_init_start /* dst */
+ ldr r1, =__data_end /* src */
+ ldr r2, =__tmp_hashes_end /* dst limit */
+ /* Copy backwards (as memmove) in case we're overlapping */
+ sub r2, r2, r0 /* len */
+ add r0, r0, r2
+ add r1, r1, r2
+ ldr r2, =__text_init_start
+copy_init:
+ ldmdb r1!, {r3, r8-r12, sp}
+ stmdb r0!, {r3, r8-r12, sp}
+ cmp r0, r2
+ bgt copy_init
+#endif
+
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+ /* First initialize the entire shadow area with no access */
+ ldr r0, =__asan_shadow_start /* start */
+ ldr r1, =__asan_shadow_end /* limit */
+ mov r2, #ASAN_DATA_RED_ZONE
+shadow_no_access:
+ str r2, [r0], #4
+ cmp r0, r1
+ bls shadow_no_access
+
+ /* Mark the entire stack area as OK */
+ ldr r2, =CFG_ASAN_SHADOW_OFFSET
+ ldr r0, =__nozi_stack_start /* start */
+ lsr r0, r0, #ASAN_BLOCK_SHIFT
+ add r0, r0, r2
+ ldr r1, =__nozi_stack_end /* limit */
+ lsr r1, r1, #ASAN_BLOCK_SHIFT
+ add r1, r1, r2
+ mov r2, #0
+shadow_stack_access_ok:
+ strb r2, [r0], #1
+ cmp r0, r1
+ bls shadow_stack_access_ok
+#endif
+
+ set_sp
+
+ /* complete ARM secure MP common configuration */
+ bl plat_cpu_reset_late
+
+ /* Enable Console */
+ bl console_init
+
+#ifdef CFG_PL310
+ bl pl310_base
+ bl arm_cl2_config
+#endif
+
+ /*
+ * Invalidate dcache for all memory used during initialization to
+ * avoid nasty surprises when the cache is turned on. We must not
+ * invalidate memory not used by OP-TEE since we may invalidate
+ * entries used by, for instance, ARM Trusted Firmware.
+ */
+#ifdef CFG_WITH_PAGER
+ inval_cache_vrange(__text_start, __tmp_hashes_end)
+#else
+ inval_cache_vrange(__text_start, __end)
+#endif
+
+#ifdef CFG_PL310
+ /* Enable PL310 if not yet enabled */
+ bl pl310_base
+ bl arm_cl2_enable
+#endif
+
+ bl core_init_mmu_map
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov r0, r4 /* pageable part address */
+ mov r1, r5 /* ns-entry address */
+ mov r2, r6 /* DT address */
+ bl generic_boot_init_primary
+ mov r4, r0 /* save entry test vector */
+
+ /*
+ * In case we've touched memory that secondary CPUs will use before
+ * they have turned on their D-cache, clean and invalidate the
+ * D-cache before exiting to normal world.
+ */
+#ifdef CFG_WITH_PAGER
+ flush_cache_vrange(__text_start, __init_end)
+#else
+ flush_cache_vrange(__text_start, __end)
+#endif
+
+ /* release secondary boot cores and sync with them */
+ cpu_is_ready
+ flush_cpu_semaphores
+ wait_secondary
+
+#ifdef CFG_PL310_LOCKED
+ /* lock/invalidate all lines: pl310 behaves as if disabled */
+ bl pl310_base
+ bl arm_cl2_lockallways
+ bl pl310_base
+ bl arm_cl2_cleaninvbyway
+#endif
+
+ /*
+ * Clear current thread id now to allow the thread to be reused on
+ * next entry. Matches the thread_init_boot_thread() in
+ * generic_boot.c.
+ */
+ bl thread_clr_boot_thread
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ /* Pass the vector address returned from generic_boot_init_primary */
+ mov r1, r4
+#else
+ /* relay standard bootargs #1 and #2 to the non-secure entry */
+ mov r4, #0
+ mov r3, r6 /* std bootarg #2 for register R2 */
+ mov r2, r7 /* std bootarg #1 for register R1 */
+ mov r1, #0
+#endif /* CFG_WITH_ARM_TRUSTED_FW */
+
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC reset_primary
+
+
+LOCAL_FUNC unhandled_cpu , :
+UNWIND( .fnstart)
+ wfi
+ b unhandled_cpu
+UNWIND( .fnend)
+END_FUNC unhandled_cpu
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+FUNC cpu_on_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mov r4, r0
+ mov r5, r1
+ mov r6, lr
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ write_sctlr r0
+
+ ldr r0, =_start
+ write_vbar r0
+
+ mov r4, lr
+ set_sp
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov r0, r4
+ mov r1, r5
+ bl generic_boot_cpu_on_handler
+
+ bx r6
+UNWIND( .fnend)
+END_FUNC cpu_on_handler
+
+#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
+
+LOCAL_FUNC reset_secondary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ wait_primary
+
+ set_sp
+
+ bl plat_cpu_reset_late
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+ /* if L1 is not invalidated before, do it here */
+ bl arm_cl1_d_invbysetway
+#endif
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ cpu_is_ready
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+ /* generic_boot_core_hpen return value (r0) is ns entry point */
+ bl generic_boot_core_hpen
+#else
+ mov r0, r5 /* ns-entry address */
+#endif
+ bl generic_boot_init_secondary
+
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ mov r1, #0
+ mov r2, #0
+ mov r3, #0
+ mov r4, #0
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC reset_secondary
+#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
diff --git a/core/arch/arm/kernel/generic_entry_a64.S b/core/arch/arm/kernel/generic_entry_a64.S
new file mode 100644
index 0000000..5a5dd53
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a64.S
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+
+ /*
+ * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
+ * SP_EL0 is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ * SP_EL1 is assigned thread_core_local[cpu_id]
+ */
+ .macro set_sp
+ bl get_core_pos
+ cmp x0, #CFG_TEE_CORE_NB_CORE
+ /* Unsupported CPU, park it before it breaks something */
+ bge unhandled_cpu
+ add x0, x0, #1
+ adr x2, stack_tmp_stride
+ ldr w1, [x2]
+ mul x2, x0, x1
+ adrp x1, stack_tmp
+ add x1, x1, :lo12:stack_tmp
+ add x1, x1, x2
+ adr x2, stack_tmp_offset
+ ldr w2, [x2]
+ sub x1, x1, x2
+ msr spsel, #0
+ mov sp, x1
+ bl thread_get_core_local
+ msr spsel, #1
+ mov sp, x0
+ msr spsel, #0
+ .endm
+
+.section .text.boot
+FUNC _start , :
+ mov x19, x0 /* Save pageable part address */
+ mov x20, x2 /* Save DT address */
+
+ adr x0, reset_vect_table
+ msr vbar_el1, x0
+ isb
+
+ mrs x0, sctlr_el1
+ mov x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+ orr x0, x0, x1
+ msr sctlr_el1, x0
+ isb
+
+#ifdef CFG_WITH_PAGER
+ /*
+ * Move init code into correct location
+ *
+ * The binary is built as:
+ * [Pager code, rodata and data] : In correct location
+ * [Init code and rodata] : Should be copied to __text_init_start
+ * [Hashes] : Should be saved before clearing bss
+ *
+ * When we copy init code and rodata into correct location we don't
+ * need to worry about hashes being overwritten as the size of .bss,
+ * .heap, .nozi and .heap3 is much larger than the size of the init
+ * code, rodata and hashes.
+ */
+ adr x0, __text_init_start /* dst */
+ adr x1, __data_end /* src */
+ adr x2, __rodata_init_end /* dst limit */
+copy_init:
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ cmp x0, x2
+ b.lt copy_init
+#endif
+
+ /* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+ set_sp
+
+ /* Enable aborts now that we can receive exceptions */
+ msr daifclr, #DAIFBIT_ABT
+
+ adr x0, __text_start
+#ifdef CFG_WITH_PAGER
+ adrp x1, __init_end
+ add x1, x1, :lo12:__init_end
+#else
+ adrp x1, __end
+ add x1, x1, :lo12:__end
+#endif
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* Enable Console */
+ bl console_init
+
+ bl core_init_mmu_map
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov x0, x19 /* pageable part address */
+ mov x1, #-1
+ mov x2, x20 /* DT address */
+ bl generic_boot_init_primary
+
+ /*
+ * In case we've touched memory that secondary CPUs will use before
+ * they have turned on their D-cache, clean and invalidate the
+ * D-cache before exiting to normal world.
+ */
+ mov x19, x0
+ adr x0, __text_start
+#ifdef CFG_WITH_PAGER
+ adrp x1, __init_end
+ add x1, x1, :lo12:__init_end
+#else
+ adrp x1, __end
+ add x1, x1, :lo12:__end
+#endif
+ sub x1, x1, x0
+ bl flush_dcache_range
+
+
+ /*
+ * Clear current thread id now to allow the thread to be reused on
+ * next entry. Matches the thread_init_boot_thread in
+ * generic_boot.c.
+ */
+ bl thread_clr_boot_thread
+
+ /* Pass the vector address returned from main_init */
+ mov x1, x19
+ mov x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC _start
+
+
+.section .text.cpu_on_handler
+FUNC cpu_on_handler , :
+ mov x19, x0
+ mov x20, x1
+ mov x21, x30
+
+ adr x0, reset_vect_table
+ msr vbar_el1, x0
+ isb
+
+ mrs x0, sctlr_el1
+ mov x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+ orr x0, x0, x1
+ msr sctlr_el1, x0
+ isb
+
+ /* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+ set_sp
+
+ /* Enable aborts now that we can receive exceptions */
+ msr daifclr, #DAIFBIT_ABT
+
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ mov x0, x19
+ mov x1, x20
+ mov x30, x21
+ b generic_boot_cpu_on_handler
+END_FUNC cpu_on_handler
+
+LOCAL_FUNC unhandled_cpu , :
+ wfi
+ b unhandled_cpu
+END_FUNC unhandled_cpu
+
+ /*
+	 * This macro verifies that a given vector doesn't exceed the
+	 * architectural limit of 32 instructions. It is meant to be placed
+	 * immediately after the last instruction in the vector. It takes the
+	 * vector entry as the parameter.
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+ .align 11
+LOCAL_FUNC reset_vect_table , :
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+SynchronousExceptionSP0:
+ b SynchronousExceptionSP0
+ check_vector_size SynchronousExceptionSP0
+
+ .align 7
+IrqSP0:
+ b IrqSP0
+ check_vector_size IrqSP0
+
+ .align 7
+FiqSP0:
+ b FiqSP0
+ check_vector_size FiqSP0
+
+ .align 7
+SErrorSP0:
+ b SErrorSP0
+ check_vector_size SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionSPx:
+ b SynchronousExceptionSPx
+ check_vector_size SynchronousExceptionSPx
+
+ .align 7
+IrqSPx:
+ b IrqSPx
+ check_vector_size IrqSPx
+
+ .align 7
+FiqSPx:
+ b FiqSPx
+ check_vector_size FiqSPx
+
+ .align 7
+SErrorSPx:
+ b SErrorSPx
+ check_vector_size SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionA64:
+ b SynchronousExceptionA64
+ check_vector_size SynchronousExceptionA64
+
+ .align 7
+IrqA64:
+ b IrqA64
+ check_vector_size IrqA64
+
+ .align 7
+FiqA64:
+ b FiqA64
+ check_vector_size FiqA64
+
+ .align 7
+SErrorA64:
+ b SErrorA64
+ check_vector_size SErrorA64
+
+ /* -----------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x780
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionA32:
+ b SynchronousExceptionA32
+ check_vector_size SynchronousExceptionA32
+
+ .align 7
+IrqA32:
+ b IrqA32
+ check_vector_size IrqA32
+
+ .align 7
+FiqA32:
+ b FiqA32
+ check_vector_size FiqA32
+
+ .align 7
+SErrorA32:
+ b SErrorA32
+ check_vector_size SErrorA32
+
+END_FUNC reset_vect_table
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
new file mode 100644
index 0000000..10dac6e
--- /dev/null
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2008-2010 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <platform_config.h>
+
+OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
+OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)
+
+ENTRY(_start)
+SECTIONS
+{
+ . = CFG_TEE_LOAD_ADDR;
+
+ /* text/read-only data */
+ .text : {
+ __text_start = .;
+ KEEP(*(.text.boot.vectab1))
+ KEEP(*(.text.boot.vectab2))
+ KEEP(*(.text.boot))
+
+ . = ALIGN(8);
+ __initcall_start = .;
+ KEEP(*(.initcall1))
+ KEEP(*(.initcall2))
+ KEEP(*(.initcall3))
+ KEEP(*(.initcall4))
+ __initcall_end = .;
+
+#ifdef CFG_WITH_PAGER
+ *(.text)
+/* Include list of sections needed for paging */
+#include <text_unpaged.ld.S>
+#else
+ *(.text .text.*)
+#endif
+ *(.sram.text.glue_7* .gnu.linkonce.t.*)
+ . = ALIGN(8);
+ __text_end = .;
+ }
+
+ .rodata : ALIGN(8) {
+ __rodata_start = .;
+ *(.gnu.linkonce.r.*)
+#ifdef CFG_WITH_PAGER
+ *(.rodata .rodata.__unpaged)
+#include <rodata_unpaged.ld.S>
+#else
+ *(.rodata .rodata.*)
+
+ /*
+		 * Align to 8 bytes to avoid unwanted padding between
+		 * __start_ta_head_section and the first structure in
+		 * ta_head_section, in 64-bit builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+#endif
+ . = ALIGN(8);
+ __rodata_end = .;
+ }
+
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x9090
+ .plt : { *(.plt) }
+
+ /* .ARM.exidx is sorted, so has to go in its own output section. */
+ .ARM.exidx : {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ }
+
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
+ .data : ALIGN(8) {
+ /* writable data */
+ __data_start_rom = .;
+		/* in one-segment binaries, the ROM data address is on top
+		   of the RAM data address */
+ __early_bss_start = .;
+ *(.early_bss .early_bss.*)
+ . = ALIGN(8);
+ __early_bss_end = .;
+ __data_start = .;
+ *(.data .data.* .gnu.linkonce.d.*)
+ . = ALIGN(8);
+ }
+
+ .ctors : ALIGN(8) {
+ __ctor_list = .;
+ KEEP(*(.ctors .ctors.* .init_array .init_array.*))
+ __ctor_end = .;
+ }
+ .dtors : ALIGN(8) {
+ __dtor_list = .;
+ KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
+ __dtor_end = .;
+ }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+
+ __data_end = .;
+	/* uninitialized data */
+ .bss : ALIGN(8) {
+ __bss_start = .;
+ *(.bss .bss.*)
+ *(.gnu.linkonce.b.*)
+ *(COMMON)
+ . = ALIGN(8);
+ __bss_end = .;
+ }
+
+ .heap1 (NOLOAD) : {
+ /*
+ * We're keeping track of the padding added before the
+ * .nozi section so we can do something useful with
+ * this otherwise wasted memory.
+ */
+ __heap1_start = .;
+#ifndef CFG_WITH_PAGER
+ . += CFG_CORE_HEAP_SIZE;
+#endif
+ . = ALIGN(16 * 1024);
+ __heap1_end = .;
+ }
+
+ /*
+ * Uninitialized data that shouldn't be zero initialized at
+ * runtime.
+ *
+ * L1 mmu table requires 16 KiB alignment
+ */
+ .nozi (NOLOAD) : ALIGN(16 * 1024) {
+ __nozi_start = .;
+ KEEP(*(.nozi .nozi.*))
+ . = ALIGN(16);
+ __nozi_end = .;
+ __nozi_stack_start = .;
+ KEEP(*(.nozi_stack))
+ . = ALIGN(8);
+ __nozi_stack_end = .;
+ }
+
+#ifdef CFG_WITH_PAGER
+ .heap2 (NOLOAD) : {
+ __heap2_start = .;
+ /*
+ * Reserve additional memory for heap, the total should be
+ * at least CFG_CORE_HEAP_SIZE, but count what has already
+ * been reserved in .heap1
+ */
+ . += CFG_CORE_HEAP_SIZE - (__heap1_end - __heap1_start);
+ . = ALIGN(4 * 1024);
+ __heap2_end = .;
+ }
+
+ .text_init : ALIGN(4 * 1024) {
+ __text_init_start = .;
+/*
+ * Include the list of sections needed for boot initialization. This list
+ * overlaps with unpaged.ld.S, but since unpaged.ld.S comes first, those
+ * sections will go into the unpaged area.
+ */
+#include <text_init.ld.S>
+ . = ALIGN(8);
+ __text_init_end = .;
+ }
+
+ .rodata_init : ALIGN(8) {
+ __rodata_init_start = .;
+#include <rodata_init.ld.S>
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+ . = ALIGN(8);
+ __rodata_init_end = .;
+ }
+ __init_start = __text_init_start;
+ __init_end = .;
+ __init_size = __init_end - __text_init_start;
+
+ .text_pageable : ALIGN(8) {
+ __text_pageable_start = .;
+ *(.text*)
+ . = ALIGN(8);
+ __text_pageable_end = .;
+ }
+
+ .rodata_pageable : ALIGN(8) {
+ __rodata_pageable_start = .;
+ *(.rodata*)
+ /*
+		 * Align to 8 bytes to avoid unwanted padding between
+		 * __start_ta_head_section and the first structure in
+		 * ta_head_section, in 64-bit builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(4 * 1024);
+ __rodata_pageable_end = .;
+ }
+
+ __pageable_part_start = __rodata_init_end;
+ __pageable_part_end = __rodata_pageable_end;
+ __pageable_start = __text_init_start;
+ __pageable_end = __pageable_part_end;
+
+ /*
+ * Assign a safe spot to store the hashes of the pages before
+ * heap is initialized.
+ */
+ __tmp_hashes_start = __rodata_init_end;
+ __tmp_hashes_size = ((__pageable_end - __pageable_start) /
+ (4 * 1024)) * 32;
+ __tmp_hashes_end = __tmp_hashes_start + __tmp_hashes_size;
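+
+	/*
+	 * For illustration (hypothetical size): a 1 MiB pageable area holds
+	 * 256 pages of 4 KiB, so the temporary hash area above occupies
+	 * 256 * 32 = 8192 bytes (32 bytes of hash per 4 KiB page).
+	 */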
+
+ __init_mem_usage = __tmp_hashes_end - CFG_TEE_LOAD_ADDR;
+
+ ASSERT(CFG_TEE_LOAD_ADDR >= CFG_TEE_RAM_START,
+ "Load address before start of physical memory")
+ ASSERT(CFG_TEE_LOAD_ADDR < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "Load address after end of physical memory")
+ ASSERT(__tmp_hashes_end < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "OP-TEE can't fit init part into available physical memory")
+ ASSERT((CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE - __init_end) >
+ 1 * 4096, "Too few free pages to initialize paging")
+
+
+#endif /*CFG_WITH_PAGER*/
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+ . = CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8;
+ . = ALIGN(8);
+ .asan_shadow : {
+ __asan_shadow_start = .;
+ . += CFG_TEE_RAM_VA_SIZE / 9;
+ __asan_shadow_end = .;
+ }
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+ __end = .;
+
+#ifndef CFG_WITH_PAGER
+ __init_size = __data_end - CFG_TEE_LOAD_ADDR;
+ __init_mem_usage = __end - CFG_TEE_LOAD_ADDR;
+#endif
+ . = CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE;
+ _end_of_ram = .;
+
+ /DISCARD/ : {
+ /* Strip unnecessary stuff */
+ *(.comment .note .eh_frame)
+ /* Strip meta variables */
+ *(__keep_meta_vars*)
+ }
+
+}
diff --git a/core/arch/arm/kernel/link.mk b/core/arch/arm/kernel/link.mk
new file mode 100644
index 0000000..4a7bd8e
--- /dev/null
+++ b/core/arch/arm/kernel/link.mk
@@ -0,0 +1,241 @@
+link-out-dir = $(out-dir)/core
+
+link-script = $(platform-dir)/kern.ld.S
+link-script-pp = $(link-out-dir)/kern.ld
+link-script-dep = $(link-out-dir)/.kern.ld.d
+
+AWK = awk
+
+
+link-ldflags = $(LDFLAGS)
+link-ldflags += -T $(link-script-pp) -Map=$(link-out-dir)/tee.map
+link-ldflags += --sort-section=alignment
+link-ldflags += --fatal-warnings
+link-ldflags += --gc-sections
+
+link-ldadd = $(LDADD)
+link-ldadd += $(addprefix -L,$(libdirs))
+link-ldadd += $(addprefix -l,$(libnames))
+ldargs-tee.elf := $(link-ldflags) $(objs) $(link-out-dir)/version.o \
+ $(link-ldadd) $(libgcccore)
+
+link-script-cppflags := -DASM=1 \
+ $(filter-out $(CPPFLAGS_REMOVE) $(cppflags-remove), \
+ $(nostdinccore) $(CPPFLAGS) \
+ $(addprefix -I,$(incdirscore) $(link-out-dir)) \
+ $(cppflagscore))
+
+entries-unpaged += thread_init_vbar
+entries-unpaged += sm_init
+entries-unpaged += core_init_mmu_regs
+entries-unpaged += sem_cpu_sync
+entries-unpaged += generic_boot_get_handlers
+
+ldargs-all_objs := -i $(objs) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/all_objs.o
+$(link-out-dir)/all_objs.o: $(objs) $(libdeps) $(MAKEFILE_LIST)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-all_objs) -o $@
+
+cleanfiles += $(link-out-dir)/unpaged_entries.txt
+$(link-out-dir)/unpaged_entries.txt: $(link-out-dir)/all_objs.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) $< | \
+ $(AWK) '/ ____keep_pager/ { printf "-u%s ", $$3 }' > $@
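+
+# For illustration: a symbol such as ____keep_pager_cpu_mmu_enable (the exact
+# names come from the KEEP_PAGER() macro in keep.h) becomes
+# "-u____keep_pager_cpu_mmu_enable" above, so the partial link below keeps the
+# corresponding sections despite --gc-sections.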
+
+objs-unpaged-rem += core/arch/arm/tee/entry_std.o
+objs-unpaged-rem += core/arch/arm/tee/arch_svc.o
+objs-unpaged := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-unpaged-rem)), $(objs))
+ldargs-unpaged = -i --gc-sections $(addprefix -u, $(entries-unpaged))
+ldargs-unpaged-objs := $(objs-unpaged) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/unpaged.o
+$(link-out-dir)/unpaged.o: $(link-out-dir)/unpaged_entries.txt
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-unpaged) \
+ `cat $(link-out-dir)/unpaged_entries.txt` \
+ $(ldargs-unpaged-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_unpaged.ld.S
+$(link-out-dir)/text_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_unpaged.ld.S
+$(link-out-dir)/rodata_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+
+cleanfiles += $(link-out-dir)/init_entries.txt
+$(link-out-dir)/init_entries.txt: $(link-out-dir)/all_objs.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) $< | \
+ $(AWK) '/ ____keep_init/ { printf "-u%s", $$3 }' > $@
+
+objs-init-rem += core/arch/arm/tee/arch_svc.o
+objs-init-rem += core/arch/arm/tee/arch_svc_asm.o
+objs-init-rem += core/arch/arm/tee/init.o
+objs-init-rem += core/arch/arm/tee/entry_std.o
+entries-init += _start
+objs-init := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-init-rem)), $(objs) \
+ $(link-out-dir)/version.o)
+ldargs-init := -i --gc-sections $(addprefix -u, $(entries-init))
+
+ldargs-init-objs := $(objs-init) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/init.o
+$(link-out-dir)/init.o: $(link-out-dir)/init_entries.txt
+ $(call gen-version-o)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-init) \
+ `cat $(link-out-dir)/init_entries.txt` \
+ $(ldargs-init-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_init.ld.S
+$(link-out-dir)/text_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_init.ld.S
+$(link-out-dir)/rodata_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+-include $(link-script-dep)
+
+link-script-extra-deps += $(link-out-dir)/text_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/text_init.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_init.ld.S
+link-script-extra-deps += $(conf-file)
+cleanfiles += $(link-script-pp) $(link-script-dep)
+$(link-script-pp): $(link-script) $(link-script-extra-deps)
+ @$(cmd-echo-silent) ' CPP $@'
+ @mkdir -p $(dir $@)
+ $(q)$(CPPcore) -Wp,-P,-MT,$@,-MD,$(link-script-dep) \
+ $(link-script-cppflags) $< > $@
+
+define update-buildcount
+ @$(cmd-echo-silent) ' UPD $(1)'
+ $(q)if [ ! -f $(1) ]; then \
+ mkdir -p $(dir $(1)); \
+ echo 1 >$(1); \
+ else \
+ expr 0`cat $(1)` + 1 >$(1); \
+ fi
+endef
+
+version-o-cflags = $(filter-out -g3,$(core-platform-cflags) \
+ $(platform-cflags)) # Workaround objdump warning
+DATE_STR = `date -u`
+BUILD_COUNT_STR = `cat $(link-out-dir)/.buildcount`
+define gen-version-o
+ $(call update-buildcount,$(link-out-dir)/.buildcount)
+ @$(cmd-echo-silent) ' GEN $(link-out-dir)/version.o'
+ $(q)echo -e "const char core_v_str[] =" \
+ "\"$(TEE_IMPL_VERSION) \"" \
+ "\"#$(BUILD_COUNT_STR) \"" \
+ "\"$(DATE_STR) \"" \
+ "\"$(CFG_KERN_LINKER_ARCH)\";\n" \
+ | $(CCcore) $(version-o-cflags) \
+ -xc - -c -o $(link-out-dir)/version.o
+endef
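+
+# For illustration only, the resulting core_v_str might read something like
+# "2.4.0 #7 Mon May 29 09:00:00 UTC 2017 arm32" (version, build count, UTC
+# date, linker arch); the actual values come from the variables above.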
+$(link-out-dir)/version.o:
+ $(call gen-version-o)
+
+all: $(link-out-dir)/tee.elf
+cleanfiles += $(link-out-dir)/tee.elf $(link-out-dir)/tee.map
+cleanfiles += $(link-out-dir)/version.o
+cleanfiles += $(link-out-dir)/.buildcount
+$(link-out-dir)/tee.elf: $(objs) $(libdeps) $(link-script-pp)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-tee.elf) -o $@
+
+all: $(link-out-dir)/tee.dmp
+cleanfiles += $(link-out-dir)/tee.dmp
+$(link-out-dir)/tee.dmp: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJDUMP $@'
+ $(q)$(OBJDUMPcore) -l -x -d $< > $@
+
+pageable_sections := .*_pageable
+init_sections := .*_init
+cleanfiles += $(link-out-dir)/tee-pager.bin
+$(link-out-dir)/tee-pager.bin: $(link-out-dir)/tee.elf \
+ $(link-out-dir)/tee-data_end.txt
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --remove-section="$(pageable_sections)" \
+ --remove-section="$(init_sections)" \
+ --pad-to `cat $(link-out-dir)/tee-data_end.txt` \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-pageable.bin
+$(link-out-dir)/tee-pageable.bin: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --only-section="$(init_sections)" \
+ --only-section="$(pageable_sections)" \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-data_end.txt
+$(link-out-dir)/tee-data_end.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __data_end | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_size.txt
+$(link-out-dir)/tee-init_size.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __init_size | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_load_addr.txt
+$(link-out-dir)/tee-init_load_addr.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' _start' | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_mem_usage.txt
+$(link-out-dir)/tee-init_mem_usage.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' __init_mem_usage' | sed 's/ .*$$//' >> $@
+
+all: $(link-out-dir)/tee.bin
+cleanfiles += $(link-out-dir)/tee.bin
+$(link-out-dir)/tee.bin: $(link-out-dir)/tee-pager.bin \
+ $(link-out-dir)/tee-pageable.bin \
+ $(link-out-dir)/tee-init_size.txt \
+ $(link-out-dir)/tee-init_load_addr.txt \
+ $(link-out-dir)/tee-init_mem_usage.txt \
+ ./scripts/gen_hashed_bin.py
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)load_addr=`cat $(link-out-dir)/tee-init_load_addr.txt` && \
+ ./scripts/gen_hashed_bin.py \
+ --arch $(if $(filter y,$(CFG_ARM64_core)),arm64,arm32) \
+ --init_size `cat $(link-out-dir)/tee-init_size.txt` \
+ --init_load_addr_hi $$(($$load_addr >> 32 & 0xffffffff)) \
+ --init_load_addr_lo $$(($$load_addr & 0xffffffff)) \
+ --init_mem_usage `cat $(link-out-dir)/tee-init_mem_usage.txt` \
+ --tee_pager_bin $(link-out-dir)/tee-pager.bin \
+ --tee_pageable_bin $(link-out-dir)/tee-pageable.bin \
+ --out $@
+
+
+all: $(link-out-dir)/tee.symb_sizes
+cleanfiles += $(link-out-dir)/tee.symb_sizes
+$(link-out-dir)/tee.symb_sizes: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) --print-size --reverse-sort --size-sort $< > $@
+
+cleanfiles += $(link-out-dir)/tee.mem_usage
+ifneq ($(filter mem_usage,$(MAKECMDGOALS)),)
+mem_usage: $(link-out-dir)/tee.mem_usage
+
+$(link-out-dir)/tee.mem_usage: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/mem_usage.awk > $@
+endif
diff --git a/core/arch/arm/kernel/misc_a32.S b/core/arch/arm/kernel/misc_a32.S
new file mode 100644
index 0000000..48fd8ba
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a32.S
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+FUNC get_core_pos , :
+UNWIND( .fnstart)
+ read_mpidr r0
+ /* Calculate CorePos = (ClusterId * 4) + CoreId */
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ add r0, r1, r0, LSR #6
+ bx lr
+UNWIND( .fnend)
+END_FUNC get_core_pos
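+
+/*
+ * Illustration (hypothetical MPIDR value): MPIDR = 0x101, i.e. cluster 1,
+ * CPU 1, yields (0x100 >> 6) + 1 = 5, matching CorePos = ClusterId * 4 +
+ * CoreId.
+ */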
+
+/*
+ * uint32_t temp_set_mode(int cpu_mode)
+ * returns cpsr to be set
+ */
+LOCAL_FUNC temp_set_mode , :
+UNWIND( .fnstart)
+ mov r1, r0
+ cmp r1, #CPSR_MODE_USR /* update mode: usr -> sys */
+ moveq r1, #CPSR_MODE_SYS
+ cpsid aif /* disable interrupts */
+	mrs	r0, cpsr		/* get cpsr with interrupts disabled */
+ bic r0, #CPSR_MODE_MASK /* clear mode */
+ orr r0, r1 /* set expected mode */
+ bx lr
+UNWIND( .fnend)
+END_FUNC temp_set_mode
+
+/* uint32_t read_mode_sp(int cpu_mode) */
+FUNC read_mode_sp , :
+UNWIND( .fnstart)
+ push {r4, lr}
+UNWIND( .save {r4, lr})
+ mrs r4, cpsr /* save cpsr */
+ bl temp_set_mode
+ msr cpsr, r0 /* set the new mode */
+ mov r0, sp /* get the function result */
+ msr cpsr, r4 /* back to the old mode */
+ pop {r4, pc}
+UNWIND( .fnend)
+END_FUNC read_mode_sp
+
+/* uint32_t read_mode_lr(int cpu_mode) */
+FUNC read_mode_lr , :
+UNWIND( .fnstart)
+ push {r4, lr}
+UNWIND( .save {r4, lr})
+ mrs r4, cpsr /* save cpsr */
+ bl temp_set_mode
+ msr cpsr, r0 /* set the new mode */
+ mov r0, lr /* get the function result */
+ msr cpsr, r4 /* back to the old mode */
+ pop {r4, pc}
+UNWIND( .fnend)
+END_FUNC read_mode_lr
diff --git a/core/arch/arm/kernel/misc_a64.S b/core/arch/arm/kernel/misc_a64.S
new file mode 100644
index 0000000..2b4da4a
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a64.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+FUNC get_core_pos , :
+ mrs x0, mpidr_el1
+ /* Calculate CorePos = (ClusterId * 4) + CoreId */
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+END_FUNC get_core_pos
diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
new file mode 100644
index 0000000..0e1b836
--- /dev/null
+++ b/core/arch/arm/kernel/mutex.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+void mutex_init(struct mutex *m)
+{
+ *m = (struct mutex)MUTEX_INITIALIZER;
+}
+
+static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
+{
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ while (true) {
+ uint32_t old_itr_status;
+ enum mutex_value old_value;
+ struct wait_queue_elem wqe;
+
+ /*
+ * If the mutex is locked we need to initialize the wqe
+ * before releasing the spinlock to guarantee that we don't
+ * miss the wakeup from mutex_unlock().
+ *
+ * If the mutex is unlocked we don't need to use the wqe at
+ * all.
+ */
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ old_value = m->value;
+ if (old_value == MUTEX_VALUE_LOCKED) {
+ wq_wait_init(&m->wq, &wqe);
+ } else {
+ m->value = MUTEX_VALUE_LOCKED;
+ thread_add_mutex(m);
+ }
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (old_value == MUTEX_VALUE_LOCKED) {
+ /*
+ * Someone else is holding the lock, wait in normal
+ * world for the lock to become available.
+ */
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+ } else
+ return;
+ }
+}
+
+static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ if (m->value != MUTEX_VALUE_LOCKED)
+ panic();
+
+ thread_rem_mutex(m);
+ m->value = MUTEX_VALUE_UNLOCKED;
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ wq_wake_one(&m->wq, m, fname, lineno);
+}
+
+static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
+ int lineno __unused)
+{
+ uint32_t old_itr_status;
+ enum mutex_value old_value;
+
+ assert_have_no_spinlock();
+ assert(thread_get_id_may_fail() != -1);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&m->spin_lock);
+
+ old_value = m->value;
+ if (old_value == MUTEX_VALUE_UNLOCKED) {
+ m->value = MUTEX_VALUE_LOCKED;
+ thread_add_mutex(m);
+ }
+
+ cpu_spin_unlock(&m->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return old_value == MUTEX_VALUE_UNLOCKED;
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ __mutex_unlock(m, fname, lineno);
+}
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ __mutex_lock(m, fname, lineno);
+}
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
+{
+ return __mutex_trylock(m, fname, lineno);
+}
+#else
+void mutex_unlock(struct mutex *m)
+{
+ __mutex_unlock(m, NULL, -1);
+}
+
+void mutex_lock(struct mutex *m)
+{
+ __mutex_lock(m, NULL, -1);
+}
+
+bool mutex_trylock(struct mutex *m)
+{
+ return __mutex_trylock(m, NULL, -1);
+}
+#endif
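+
+/*
+ * Typical usage from a thread context (sketch, names hypothetical):
+ *
+ *	static struct mutex my_mutex = MUTEX_INITIALIZER;
+ *
+ *	mutex_lock(&my_mutex);
+ *	... access the shared state ...
+ *	mutex_unlock(&my_mutex);
+ *
+ * With CFG_MUTEX_DEBUG enabled these calls are expected to expand to the
+ * *_debug variants above, passing the caller's file name and line number.
+ */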
+
+
+
+void mutex_destroy(struct mutex *m)
+{
+ /*
+ * Caller guarantees that no one will try to take the mutex so
+ * there's no need to take the spinlock before accessing it.
+ */
+ if (m->value != MUTEX_VALUE_UNLOCKED)
+ panic();
+ if (!wq_is_empty(&m->wq))
+ panic("waitqueue not empty");
+}
+
+void condvar_init(struct condvar *cv)
+{
+ *cv = (struct condvar)CONDVAR_INITIALIZER;
+}
+
+void condvar_destroy(struct condvar *cv)
+{
+ if (cv->m && wq_have_condvar(&cv->m->wq, cv))
+ panic();
+
+ condvar_init(cv);
+}
+
+static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
+ int lineno)
+{
+ uint32_t old_itr_status;
+ struct mutex *m;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&cv->spin_lock);
+ m = cv->m;
+ cpu_spin_unlock(&cv->spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (m)
+ wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
+
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
+{
+ cv_signal(cv, true /* only one */, fname, lineno);
+}
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
+{
+ cv_signal(cv, false /* all */, fname, lineno);
+}
+
+#else
+void condvar_signal(struct condvar *cv)
+{
+ cv_signal(cv, true /* only one */, NULL, -1);
+}
+
+void condvar_broadcast(struct condvar *cv)
+{
+ cv_signal(cv, false /* all */, NULL, -1);
+}
+#endif /*CFG_MUTEX_DEBUG*/
+
+static void __condvar_wait(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem wqe;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ /* Link this condvar to this mutex until reinitialized */
+ cpu_spin_lock(&cv->spin_lock);
+ if (cv->m && cv->m != m)
+ panic("invalid mutex");
+
+ cv->m = m;
+ cpu_spin_unlock(&cv->spin_lock);
+
+ cpu_spin_lock(&m->spin_lock);
+
+ /* Add to mutex wait queue as a condvar waiter */
+ wq_wait_init_condvar(&m->wq, &wqe, cv);
+
+ /* Unlock the mutex */
+ if (m->value != MUTEX_VALUE_LOCKED)
+ panic();
+
+ thread_rem_mutex(m);
+ m->value = MUTEX_VALUE_UNLOCKED;
+
+ cpu_spin_unlock(&m->spin_lock);
+
+ thread_unmask_exceptions(old_itr_status);
+
+ /* Wake eventual waiters */
+ wq_wake_one(&m->wq, m, fname, lineno);
+
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+
+ mutex_lock(m);
+}
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno)
+{
+ __condvar_wait(cv, m, fname, lineno);
+}
+#else
+void condvar_wait(struct condvar *cv, struct mutex *m)
+{
+ __condvar_wait(cv, m, NULL, -1);
+}
+#endif
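+
+/*
+ * Typical condvar usage (sketch, names hypothetical): the waiter holds the
+ * mutex, loops on the condition and calls condvar_wait(), which releases
+ * the mutex, blocks until signalled and re-acquires the mutex before
+ * returning:
+ *
+ *	mutex_lock(&m);
+ *	while (!ready)
+ *		condvar_wait(&cv, &m);
+ *	... consume ...
+ *	mutex_unlock(&m);
+ *
+ * The signalling side updates the condition with the mutex held and then
+ * calls condvar_signal(&cv) or condvar_broadcast(&cv).
+ */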
diff --git a/core/arch/arm/kernel/pm_stubs.c b/core/arch/arm/kernel/pm_stubs.c
new file mode 100644
index 0000000..db77e7c
--- /dev/null
+++ b/core/arch/arm/kernel/pm_stubs.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <compiler.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+
+unsigned long pm_panic(unsigned long a0 __unused, unsigned long a1 __unused)
+{
+ panic();
+}
+
+unsigned long pm_do_nothing(unsigned long a0 __unused,
+ unsigned long a1 __unused)
+{
+ return 0;
+}
diff --git a/core/arch/arm/kernel/proc_a32.S b/core/arch/arm/kernel/proc_a32.S
new file mode 100644
index 0000000..f0446a6
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a32.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <keep.h>
+#include <kernel/unwind.h>
+
+/*
+ * void cpu_mmu_enable(void) - enable MMU
+ *
+ * TLBs are invalidated before MMU is enabled.
+ * A DSB and an ISB ensure the MMU is enabled before the routine returns
+ */
+FUNC cpu_mmu_enable , :
+UNWIND( .fnstart)
+ /* Invalidate TLB */
+ write_tlbiall
+
+ /* Enable the MMU */
+ read_sctlr r0
+ orr r0, r0, #SCTLR_M
+ write_sctlr r0
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable
+KEEP_PAGER cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) - enable instruction cache */
+FUNC cpu_mmu_enable_icache , :
+UNWIND( .fnstart)
+ /* Invalidate instruction cache and branch predictor */
+ write_iciallu
+ write_bpiall
+
+ /* Enable the instruction cache */
+ read_sctlr r1
+ orr r1, r1, #SCTLR_I
+ write_sctlr r1
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable_icache
+KEEP_PAGER cpu_mmu_enable_icache
+
+/* void cpu_mmu_enable_dcache(void) - enable data cache */
+FUNC cpu_mmu_enable_dcache , :
+UNWIND( .fnstart)
+ read_sctlr r0
+ orr r0, r0, #SCTLR_C
+ write_sctlr r0
+
+ dsb
+ isb
+
+ bx lr
+UNWIND( .fnend)
+END_FUNC cpu_mmu_enable_dcache
+KEEP_PAGER cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/proc_a64.S b/core/arch/arm/kernel/proc_a64.S
new file mode 100644
index 0000000..5db895a
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a64.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm64.h>
+#include <asm.S>
+
+/* void cpu_mmu_enable(void) */
+FUNC cpu_mmu_enable , :
+ /* Invalidate TLB */
+ tlbi vmalle1
+
+ /*
+ * Make sure translation table writes have drained into memory and
+ * the TLB invalidation is complete.
+ */
+ dsb sy
+ isb
+
+ /* Enable the MMU */
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_M
+ msr sctlr_el1, x0
+ isb
+
+ ret
+END_FUNC cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) */
+FUNC cpu_mmu_enable_icache , :
+ /* Invalidate instruction cache and branch predictor */
+ ic iallu
+ isb
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_I
+ msr sctlr_el1, x0
+ isb
+ ret
+END_FUNC cpu_mmu_enable_icache
+
+
+/* void cpu_mmu_enable_dcache(void) */
+FUNC cpu_mmu_enable_dcache , :
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_C
+ msr sctlr_el1, x0
+ isb
+ ret
+END_FUNC cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/pseudo_ta.c b/core/arch/arm/kernel/pseudo_ta.c
new file mode 100644
index 0000000..6352a28
--- /dev/null
+++ b/core/arch/arm/kernel/pseudo_ta.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <initcall.h>
+#include <kernel/panic.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/tee_ta_manager.h>
+#include <mm/core_memprot.h>
+#include <mm/mobj.h>
+#include <sm/tee_mon.h>
+#include <stdlib.h>
+#include <string.h>
+#include <trace.h>
+#include <types_ext.h>
+
+/* Maps pseudo TA params */
+static TEE_Result copy_in_param(struct tee_ta_param *param,
+ TEE_Param tee_param[TEE_NUM_PARAMS])
+{
+ size_t n;
+ void *va;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_VALUE_INPUT:
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ tee_param[n].value.a = param->u[n].val.a;
+ tee_param[n].value.b = param->u[n].val.b;
+ break;
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ va = mobj_get_va(param->u[n].mem.mobj,
+ param->u[n].mem.offs);
+ if (!va)
+ return TEE_ERROR_BAD_PARAMETERS;
+ tee_param[n].memref.buffer = va;
+ tee_param[n].memref.size = param->u[n].mem.size;
+ break;
+ default:
+ memset(tee_param + n, 0, sizeof(TEE_Param));
+ break;
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+static void update_out_param(TEE_Param tee_param[TEE_NUM_PARAMS],
+ struct tee_ta_param *param)
+{
+ size_t n;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ param->u[n].val.a = tee_param[n].value.a;
+ param->u[n].val.b = tee_param[n].value.b;
+ break;
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ param->u[n].mem.size = tee_param[n].memref.size;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static TEE_Result pseudo_ta_enter_open_session(struct tee_ta_session *s,
+ struct tee_ta_param *param, TEE_ErrorOrigin *eo)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+ TEE_Param tee_param[TEE_NUM_PARAMS];
+
+ tee_ta_push_current_session(s);
+ *eo = TEE_ORIGIN_TRUSTED_APP;
+
+ if ((s->ctx->ref_count == 1) && stc->pseudo_ta->create_entry_point) {
+ res = stc->pseudo_ta->create_entry_point();
+ if (res != TEE_SUCCESS)
+ goto out;
+ }
+
+ if (stc->pseudo_ta->open_session_entry_point) {
+ res = copy_in_param(param, tee_param);
+ if (res != TEE_SUCCESS) {
+ *eo = TEE_ORIGIN_TEE;
+ goto out;
+ }
+
+ res = stc->pseudo_ta->open_session_entry_point(param->types,
+ tee_param,
+ &s->user_ctx);
+ update_out_param(tee_param, param);
+ }
+
+out:
+ tee_ta_pop_current_session();
+ return res;
+}
+
+static TEE_Result pseudo_ta_enter_invoke_cmd(struct tee_ta_session *s,
+ uint32_t cmd, struct tee_ta_param *param,
+ TEE_ErrorOrigin *eo)
+{
+ TEE_Result res;
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+ TEE_Param tee_param[TEE_NUM_PARAMS];
+
+ tee_ta_push_current_session(s);
+ res = copy_in_param(param, tee_param);
+ if (res != TEE_SUCCESS) {
+ *eo = TEE_ORIGIN_TEE;
+ goto out;
+ }
+
+ *eo = TEE_ORIGIN_TRUSTED_APP;
+ res = stc->pseudo_ta->invoke_command_entry_point(s->user_ctx, cmd,
+ param->types,
+ tee_param);
+ update_out_param(tee_param, param);
+out:
+ tee_ta_pop_current_session();
+ return res;
+}
+
+static void pseudo_ta_enter_close_session(struct tee_ta_session *s)
+{
+ struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+
+ tee_ta_push_current_session(s);
+
+ if (stc->pseudo_ta->close_session_entry_point)
+ stc->pseudo_ta->close_session_entry_point(s->user_ctx);
+
+ if ((s->ctx->ref_count == 1) && stc->pseudo_ta->destroy_entry_point)
+ stc->pseudo_ta->destroy_entry_point();
+
+ tee_ta_pop_current_session();
+}
+
+static void pseudo_ta_destroy(struct tee_ta_ctx *ctx)
+{
+ free(to_pseudo_ta_ctx(ctx));
+}
+
+static const struct tee_ta_ops pseudo_ta_ops = {
+ .enter_open_session = pseudo_ta_enter_open_session,
+ .enter_invoke_cmd = pseudo_ta_enter_invoke_cmd,
+ .enter_close_session = pseudo_ta_enter_close_session,
+ .destroy = pseudo_ta_destroy,
+};
+
+
+/* Defined in link script */
+extern const struct pseudo_ta_head __start_ta_head_section;
+extern const struct pseudo_ta_head __stop_ta_head_section;
+
+/* Ensures declared pseudo TAs conform with core expectations */
+static TEE_Result verify_pseudo_tas_conformance(void)
+{
+ const struct pseudo_ta_head *start = &__start_ta_head_section;
+ const struct pseudo_ta_head *end = &__stop_ta_head_section;
+ const struct pseudo_ta_head *pta;
+
+ for (pta = start; pta < end; pta++) {
+ const struct pseudo_ta_head *pta2;
+
+ /* PTAs must all have a specific UUID */
+ for (pta2 = pta + 1; pta2 < end; pta2++)
+ if (!memcmp(&pta->uuid, &pta2->uuid, sizeof(TEE_UUID)))
+ goto err;
+
+ if (!pta->name ||
+ (pta->flags & PTA_MANDATORY_FLAGS) != PTA_MANDATORY_FLAGS ||
+ pta->flags & ~PTA_ALLOWED_FLAGS ||
+ !pta->invoke_command_entry_point)
+ goto err;
+ }
+ return TEE_SUCCESS;
+err:
+ DMSG("pseudo TA error at %p", (void *)pta);
+ panic("pta");
+}
+
+service_init(verify_pseudo_tas_conformance);
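+
+/*
+ * For reference, a pseudo TA typically ends up in ta_head_section via the
+ * pseudo_ta_register() macro from <kernel/pseudo_ta.h>, roughly along these
+ * lines (sketch, UUID and names hypothetical):
+ *
+ *	pseudo_ta_register(.uuid = EXAMPLE_PTA_UUID, .name = "example.pta",
+ *			   .flags = PTA_DEFAULT_FLAGS,
+ *			   .invoke_command_entry_point = invoke_command);
+ */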
+
+/*-----------------------------------------------------------------------------
+ * Initialises a session for the pseudo TA matching the given UUID.
+ * Fills in the context of the supplied session (s->ctx) and returns a
+ * TEE_Result.
+ *---------------------------------------------------------------------------*/
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s)
+{
+ struct pseudo_ta_ctx *stc = NULL;
+ struct tee_ta_ctx *ctx;
+ const struct pseudo_ta_head *ta;
+
+ DMSG(" Lookup for Static TA %pUl", (void *)uuid);
+
+ ta = &__start_ta_head_section;
+ while (true) {
+ if (ta >= &__stop_ta_head_section)
+ return TEE_ERROR_ITEM_NOT_FOUND;
+ if (memcmp(&ta->uuid, uuid, sizeof(TEE_UUID)) == 0)
+ break;
+ ta++;
+ }
+
+ /* Load a new TA and create a session */
+ DMSG(" Open %s", ta->name);
+ stc = calloc(1, sizeof(struct pseudo_ta_ctx));
+ if (!stc)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ ctx = &stc->ctx;
+
+ ctx->ref_count = 1;
+ s->ctx = ctx;
+ ctx->flags = ta->flags;
+ stc->pseudo_ta = ta;
+ ctx->uuid = ta->uuid;
+ ctx->ops = &pseudo_ta_ops;
+ TAILQ_INSERT_TAIL(&tee_ctxes, ctx, link);
+
+ DMSG(" %s : %pUl", stc->pseudo_ta->name, (void *)&ctx->uuid);
+
+ return TEE_SUCCESS;
+}
diff --git a/core/arch/arm/kernel/spin_lock_a32.S b/core/arch/arm/kernel/spin_lock_a32.S
new file mode 100644
index 0000000..52d8e9f
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a32.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+#include <kernel/unwind.h>
+
+/* void __cpu_spin_lock(unsigned int *lock) */
+FUNC __cpu_spin_lock , :
+UNWIND( .fnstart)
+	mov	r2, #SPINLOCK_LOCK
+1:
+	ldrex	r1, [r0]		/* read lock, open exclusive monitor */
+	cmp	r1, #SPINLOCK_UNLOCK
+	wfene				/* lock is held: wait for an event */
+	strexeq	r1, r2, [r0]		/* lock was free: try to claim it */
+	cmpeq	r1, #0
+	bne	1b			/* retry if held or the store failed */
+	dmb				/* order accesses after taking the lock */
+	bx	lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_lock
+
+/* int __cpu_spin_trylock(unsigned int *lock) - return 0 on success */
+FUNC __cpu_spin_trylock , :
+UNWIND( .fnstart)
+ mov r2, #SPINLOCK_LOCK
+ mov r1, r0
+1:
+ ldrex r0, [r1]
+ cmp r0, #0
+ bne 1f
+ strex r0, r2, [r1]
+ cmp r0, #0
+ bne 1b
+ dmb
+ bx lr
+1:
+ clrex
+ dmb
+ bx lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock) */
+FUNC __cpu_spin_unlock , :
+UNWIND( .fnstart)
+ dmb
+ mov r1, #SPINLOCK_UNLOCK
+ str r1, [r0]
+ dsb
+ sev
+ bx lr
+UNWIND( .fnend)
+END_FUNC __cpu_spin_unlock
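+
+/*
+ * These primitives back the cpu_spin_lock()/cpu_spin_unlock() wrappers in
+ * <kernel/spinlock.h>. Callers are expected to mask exceptions first, as in
+ * the mutex implementation (sketch):
+ *
+ *	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+ *	cpu_spin_lock(&lock);
+ *	...
+ *	cpu_spin_unlock(&lock);
+ *	thread_unmask_exceptions(exceptions);
+ */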
diff --git a/core/arch/arm/kernel/spin_lock_a64.S b/core/arch/arm/kernel/spin_lock_a64.S
new file mode 100644
index 0000000..97fce42
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a64.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+
+/* void __cpu_spin_lock(unsigned int *lock); */
+FUNC __cpu_spin_lock , :
+ mov w2, #SPINLOCK_LOCK
+ sevl
+l1: wfe
+l2: ldaxr w1, [x0]
+ cbnz w1, l1
+ stxr w1, w2, [x0]
+ cbnz w1, l2
+ ret
+END_FUNC __cpu_spin_lock
+
+/* unsigned int __cpu_spin_trylock(unsigned int *lock); */
+FUNC __cpu_spin_trylock , :
+ mov x1, x0
+ mov w2, #SPINLOCK_LOCK
+.loop: ldaxr w0, [x1]
+ cbnz w0, .cpu_spin_trylock_out
+ stxr w0, w2, [x1]
+ cbnz w0, .loop
+.cpu_spin_trylock_out:
+ ret
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock); */
+FUNC __cpu_spin_unlock , :
+ stlr wzr, [x0]
+ ret
+END_FUNC __cpu_spin_unlock
diff --git a/core/arch/arm/kernel/spin_lock_debug.c b/core/arch/arm/kernel/spin_lock_debug.c
new file mode 100644
index 0000000..2a450a5
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_debug.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/spinlock.h>
+#include "thread_private.h"
+
+void spinlock_count_incr(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ l->locked_count++;
+ assert(l->locked_count);
+}
+
+void spinlock_count_decr(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->locked_count);
+ l->locked_count--;
+}
+
+bool have_spinlock(void)
+{
+ struct thread_core_local *l;
+
+ if (!thread_irq_disabled()) {
+ /*
+ * Normally we can't be holding a spinlock since doing so would
+ * imply IRQs are disabled (or the spinlock logic is flawed).
+ */
+ return false;
+ }
+
+ l = thread_get_core_local();
+
+ return !!l->locked_count;
+}
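+
+/*
+ * Minimal usage sketch (illustrative only, not part of the build): the
+ * public cpu_spin_lock()/cpu_spin_unlock() wrappers in <kernel/spinlock.h>
+ * are expected to combine the raw __cpu_spin_lock()/__cpu_spin_unlock()
+ * primitives with these counters roughly as below. The example_* names are
+ * hypothetical.
+ */
+#if 0
+static void example_lock(unsigned int *lock)
+{
+	assert(thread_irq_disabled()); /* IRQs must already be masked */
+	__cpu_spin_lock(lock);
+	spinlock_count_incr(); /* have_spinlock() now reports true */
+}
+
+static void example_unlock(unsigned int *lock)
+{
+	spinlock_count_decr();
+	__cpu_spin_unlock(lock);
+}
+#endif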
diff --git a/core/arch/arm/kernel/ssvce_a32.S b/core/arch/arm/kernel/ssvce_a32.S
new file mode 100644
index 0000000..e2850f1
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a32.S
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+/*
+ * Variable(s)
+ */
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+
+ .section .text.ssvce
+
+
+
+/*
+ * - MMU maintenance support ---------------------------------------------
+ */
+
+
+/*
+ * void secure_mmu_unifiedtlbinvall(void);
+ */
+FUNC secure_mmu_unifiedtlbinvall , :
+UNWIND( .fnstart)
+
+ write_tlbiallis
+
+ DSB
+ ISB
+
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/*
+ * void secure_mmu_unifiedtlbinvbymva(mva);
+ *
+ * Combine VA and current ASID, and invalidate matching TLB
+ */
+FUNC secure_mmu_unifiedtlbinvbymva , :
+UNWIND( .fnstart)
+
+ b . @ Intentional hang: review and fix this routine before using it
+
+ MRC p15, 0, R1, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
+ ANDS R1, R1, #0xFF /* Get current ASID */
+ ORR R1, R1, R0 /* Combine MVA and ASID */
+
+ MCR p15, 0, R1, c8, c7, 1 /* Invalidate Unified TLB entry by MVA */
+
+ DSB
+ ISB
+
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinvbymva
+
+/*
+ * void secure_mmu_unifiedtlbinv_curasid(void)
+ *
+ * Invalidate TLB matching current ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+UNWIND( .fnstart)
+ read_contextidr r0
+ and r0, r0, #0xff /* Get current ASID */
+ /* Invalidate unified TLB by ASID, Inner Shareable */
+ write_tlbiasidis r0
+ dsb
+ isb
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/*
+ * void secure_mmu_unifiedtlbinv_byasid(unsigned int asid)
+ *
+ * Invalidate TLB entries matching the given ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+UNWIND( .fnstart)
+ and r0, r0, #0xff /* Get ASID */
+ /* Invalidate unified TLB by ASID, Inner Shareable */
+ write_tlbiasidis r0
+ dsb
+ isb
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * void arm_cl1_d_cleanbysetway(void)
+ */
+FUNC arm_cl1_d_cleanbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cl_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cl_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c10, 2 @ ; DCCSW Clean data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cl_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cl_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleanbysetway
+
+FUNC arm_cl1_d_invbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_dcache_off:
+ MOV R0, #0 @ ; set way number to 0
+_inv_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_inv_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c6, 2 @ ; DCISW Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _inv_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _inv_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_invbysetway
+
+FUNC arm_cl1_d_cleaninvbysetway , :
+UNWIND( .fnstart)
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cli_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cli_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c14, 2 @ ; DCCISW Clean and Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cli_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cli_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/*
+ * void arm_cl1_d_cleanbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleanbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cl_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cl_area_nextLine:
+ MCR p15, 0, R0, c7, c10, 1 @ ; Clean data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cl_area_nextLine
+
+_cl_area_exit:
+
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleanbyva
+
+/*
+ * void arm_cl1_d_invbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_invbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _inv_area_dcache_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_area_dcache_off:
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_inv_area_dcache_nl:
+ MCR p15, 0, R0, c7, c6, 1 @ ; Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _inv_area_dcache_nl
+
+_inv_area_dcache_exit:
+ DSB
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_invbyva
+
+/*
+ * void arm_cl1_d_cleaninvbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleaninvbyva , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cli_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cli_area_nextLine:
+ MCR p15, 0, R0, c7, c14, 1 @ ; Clean and Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cli_area_nextLine
+
+_cli_area_exit:
+ DSB @ ; synchronise
+ MOV PC, LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/*
+ * void arm_cl1_i_inv_all( void );
+ *
+ * Invalidates the whole instruction cache.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv_all , :
+UNWIND( .fnstart)
+
+ /* Invalidate Entire Instruction Cache */
+ write_icialluis
+ DSB
+
+ /* Flush entire branch target cache */
+ write_bpiallis
+
+ DSB /* ensure that maintenance operations are seen */
+ ISB /* by the instructions right after the ISB */
+
+ BX LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_i_inv_all
+
+/*
+ * void arm_cl1_i_inv(void *start, void *end);
+ *
+ * Invalidates the instruction cache area whose limits are given as parameters.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv , :
+UNWIND( .fnstart)
+
+ CMP R0, R1 /* Check that end >= start. Otherwise return. */
+ BHI _inv_icache_exit
+
+ BIC R0, R0, #0x1F /* Mask 5 LSBits */
+_inv_icache_nextLine:
+ MCR p15, 0, R0, c7, c5, 1 /* Invalidate ICache single entry (MVA) */
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET /* Next cache line */
+ CMP R1, R0
+ BPL _inv_icache_nextLine
+ DSB
+
+ /* Flush entire branch target cache */
+ MOV R1, #0
+ MCR p15, 0, R1, c7, c5, 6 /* write to Cache operations register */
+ DSB /* ensure that maintenance operations are seen */
+ ISB /* by the instructions right after the ISB */
+
+_inv_icache_exit:
+ BX LR
+UNWIND( .fnend)
+END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/ssvce_a64.S b/core/arch/arm/kernel/ssvce_a64.S
new file mode 100644
index 0000000..6c9bbac
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a64.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tz_ssvce.h>
+#include <arm64.h>
+#include <asm.S>
+
+/* void secure_mmu_unifiedtlbinvall(void); */
+FUNC secure_mmu_unifiedtlbinvall , :
+ tlbi vmalle1
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/* void secure_mmu_unifiedtlbinv_curasid(void) */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+ mrs x0, ttbr0_el1
+ lsr x0, x0, #TTBR_ASID_SHIFT
+ b secure_mmu_unifiedtlbinv_byasid
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+ and x0, x0, #TTBR_ASID_MASK
+ tlbi aside1, x0
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * Compatibility wrappers to be used until the rest of the code stops caring
+ * about which cache level it operates on. CL1 -> Inner cache.
+ */
+
+/* void arm_cl1_d_cleanbysetway(void); */
+FUNC arm_cl1_d_cleanbysetway , :
+ mov x0, #DCCSW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleanbysetway
+
+/* void arm_cl1_d_invbysetway(void); */
+FUNC arm_cl1_d_invbysetway , :
+ mov x0, #DCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_invbysetway
+
+/* void arm_cl1_d_cleaninvbysetway(void); */
+FUNC arm_cl1_d_cleaninvbysetway , :
+ mov x0, #DCCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/* void arm_cl1_d_cleanbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleanbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ /*
+ * flush_dcache_range() does Clean+Invalidate, but that shouldn't
+ * matter to the caller.
+ */
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleanbyva
+
+/* void arm_cl1_d_invbyva(void *s, void *e); */
+FUNC arm_cl1_d_invbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b inv_dcache_range
+END_FUNC arm_cl1_d_invbyva
+
+/* void arm_cl1_d_cleaninvbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleaninvbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/* void arm_cl1_i_inv_all( void ); */
+FUNC arm_cl1_i_inv_all , :
+ ic ialluis
+ isb
+ ret
+END_FUNC arm_cl1_i_inv_all
+
+/* void arm_cl1_i_inv(void *start, void *end); */
+FUNC arm_cl1_i_inv , :
+ /*
+ * Invalidate the entire icache instead, it shouldn't matter to the
+ * caller.
+ */
+ b arm_cl1_i_inv_all
+END_FUNC arm_cl1_i_inv
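+
+/*
+ * Usage note (illustrative): the by-VA helpers take an inclusive end
+ * address, as the "sub x1, x1, x0; add x1, x1, #1" length computation
+ * above shows. A hypothetical C caller cleaning a buffer before sharing
+ * it with normal world could therefore do:
+ *
+ *	arm_cl1_d_cleanbyva(buf, (uint8_t *)buf + size - 1);
+ *
+ * where buf and size are the caller's own variables.
+ */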
diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk
new file mode 100644
index 0000000..cee3aee
--- /dev/null
+++ b/core/arch/arm/kernel/sub.mk
@@ -0,0 +1,45 @@
+srcs-$(CFG_WITH_USER_TA) += user_ta.c
+srcs-y += pseudo_ta.c
+srcs-y += elf_load.c
+srcs-y += tee_time.c
+
+srcs-$(CFG_SECURE_TIME_SOURCE_CNTPCT) += tee_time_arm_cntpct.c
+srcs-$(CFG_SECURE_TIME_SOURCE_REE) += tee_time_ree.c
+
+srcs-$(CFG_ARM32_core) += proc_a32.S
+srcs-$(CFG_ARM32_core) += spin_lock_a32.S
+srcs-$(CFG_ARM64_core) += proc_a64.S
+srcs-$(CFG_ARM64_core) += spin_lock_a64.S
+srcs-$(CFG_TEE_CORE_DEBUG) += spin_lock_debug.c
+srcs-$(CFG_ARM32_core) += ssvce_a32.S
+srcs-$(CFG_ARM64_core) += ssvce_a64.S
+srcs-$(CFG_ARM64_core) += cache_helpers_a64.S
+srcs-$(CFG_PL310) += tz_ssvce_pl310_a32.S
+srcs-$(CFG_PL310) += tee_l2cc_mutex.c
+
+srcs-$(CFG_ARM32_core) += thread_a32.S
+srcs-$(CFG_ARM64_core) += thread_a64.S
+srcs-y += thread.c
+srcs-y += abort.c
+srcs-$(CFG_WITH_VFP) += vfp.c
+ifeq ($(CFG_WITH_VFP),y)
+srcs-$(CFG_ARM32_core) += vfp_a32.S
+srcs-$(CFG_ARM64_core) += vfp_a64.S
+endif
+srcs-y += trace_ext.c
+srcs-$(CFG_ARM32_core) += misc_a32.S
+srcs-$(CFG_ARM64_core) += misc_a64.S
+srcs-y += mutex.c
+srcs-y += wait_queue.c
+srcs-$(CFG_PM_STUBS) += pm_stubs.c
+
+srcs-$(CFG_GENERIC_BOOT) += generic_boot.c
+ifeq ($(CFG_GENERIC_BOOT),y)
+srcs-$(CFG_ARM32_core) += generic_entry_a32.S
+srcs-$(CFG_ARM64_core) += generic_entry_a64.S
+endif
+
+ifeq ($(CFG_CORE_UNWIND),y)
+srcs-$(CFG_ARM32_core) += unwind_arm32.c
+srcs-$(CFG_ARM64_core) += unwind_arm64.c
+endif
diff --git a/core/arch/arm/kernel/tee_l2cc_mutex.c b/core/arch/arm/kernel/tee_l2cc_mutex.c
new file mode 100644
index 0000000..2afda4d
--- /dev/null
+++ b/core/arch/arm/kernel/tee_l2cc_mutex.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tee_common.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/spinlock.h>
+#include <mm/tee_mm.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <tee_api_defines.h>
+#include <trace.h>
+
+/*
+ * l2cc_mutex_va holds teecore virtual address of TZ L2CC mutex or NULL.
+ *
+ * l2cc_mutex_pa holds TZ L2CC mutex physical address. It is relevant only
+ * if 'l2cc_mutex_va' holds a non-NULL address.
+ */
+#define MUTEX_SZ sizeof(uint32_t)
+
+static uint32_t *l2cc_mutex_va;
+static uint32_t l2cc_mutex_pa;
+static uint32_t l2cc_mutex_boot_pa;
+static unsigned int *l2cc_mutex;
+
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa)
+{
+ l2cc_mutex_boot_pa = pa;
+}
+
+/*
+ * Allocate public RAM to get an L2CC mutex to share with NSec.
+ * Return 0 on success.
+ */
+static int l2cc_mutex_alloc(void)
+{
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return -1;
+
+ l2cc_mutex_pa = l2cc_mutex_boot_pa;
+
+ va = phys_to_virt(l2cc_mutex_pa, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return -1;
+
+ *(uint32_t *)va = 0;
+ l2cc_mutex_va = va;
+ return 0;
+}
+
+static void l2cc_mutex_set(void *mutex)
+{
+ l2cc_mutex = (unsigned int *)mutex;
+}
+
+/*
+ * tee_xxx_l2cc_mutex(): Handle L2 mutex configuration requests from NSec
+ *
+ * Policy:
+ * - if NSec did not register an L2 mutex, allocate it in public RAM by default.
+ * - if NSec disables the L2 mutex, disable the current mutex and unregister it.
+ *
+ * Enable L2CC: NSec allows teecore to run safe outer maintenance
+ * with the shared mutex.
+ * Disable L2CC: NSec will run outer maintenance itself, locking the
+ * shared mutex. teecore cannot run outer maintenance.
+ * Set L2CC: NSec proposes a Shared Memory location for the outer
+ * maintenance shared mutex.
+ * Get L2CC: NSec requests the outer maintenance shared mutex
+ * location. If NSec has successfully registered one,
+ * return its location; otherwise, allocate one in non-secure
+ * shared memory and provide NSec with its physical location.
+ */
+TEE_Result tee_enable_l2cc_mutex(void)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ l2cc_mutex_set(l2cc_mutex_va);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_disable_l2cc_mutex(void)
+{
+ l2cc_mutex_va = NULL;
+ l2cc_mutex_set(NULL);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ *mutex = l2cc_mutex_pa;
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex)
+{
+ uint32_t addr;
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+ addr = *mutex;
+ if (core_pbuf_is(CORE_MEM_NSEC_SHM, addr, MUTEX_SZ) == false)
+ return TEE_ERROR_BAD_PARAMETERS;
+ va = phys_to_virt(addr, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return TEE_ERROR_BAD_PARAMETERS;
+ l2cc_mutex_pa = addr;
+ l2cc_mutex_va = va;
+ return TEE_SUCCESS;
+}
+
+void tee_l2cc_mutex_lock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_lock(l2cc_mutex);
+}
+
+void tee_l2cc_mutex_unlock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_unlock(l2cc_mutex);
+}
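+
+/*
+ * Illustrative sketch (not part of this file) of how the mutex is meant to
+ * guard outer cache (PL310) maintenance. outer_cache_maintenance() is a
+ * hypothetical placeholder for whatever PL310 operation the caller needs.
+ */
+#if 0
+static void example_outer_maintenance(void)
+{
+	/* No-op until a mutex has been set up via tee_enable_l2cc_mutex() */
+	tee_l2cc_mutex_lock();
+	outer_cache_maintenance();
+	tee_l2cc_mutex_unlock();
+}
+#endif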
diff --git a/core/arch/arm/kernel/tee_time.c b/core/arch/arm/kernel/tee_time.c
new file mode 100644
index 0000000..671a8e9
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/thread.h>
+#include <optee_msg.h>
+#include <mm/core_mmu.h>
+
+struct time_source _time_source;
+
+TEE_Result tee_time_get_sys_time(TEE_Time *time)
+{
+ return _time_source.get_sys_time(time);
+}
+
+uint32_t tee_time_get_sys_time_protection_level(void)
+{
+ return _time_source.protection_level;
+}
+
+void tee_time_wait(uint32_t milliseconds_delay)
+{
+ struct optee_msg_param params;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params.u.value.a = milliseconds_delay;
+ thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SUSPEND, 1, &params);
+}
+
+/*
+ * tee_time_get_ree_time(): this function implements the GP Internal API
+ * function TEE_GetREETime().
+ * The goal is to get the time of the Rich Execution Environment (REE),
+ * which is why this time is provided through the supplicant.
+ */
+TEE_Result tee_time_get_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+ struct optee_msg_param params;
+
+ if (!time)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT;
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_GET_TIME, 1, &params);
+ if (res == TEE_SUCCESS) {
+ time->seconds = params.u.value.a;
+ time->millis = params.u.value.b / 1000000;
+ }
+
+ return res;
+}
diff --git a/core/arch/arm/kernel/tee_time_arm_cntpct.c b/core/arch/arm/kernel/tee_time_arm_cntpct.c
new file mode 100644
index 0000000..90e7f20
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_arm_cntpct.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014, 2015 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/misc.h>
+#include <kernel/tee_time.h>
+#include <trace.h>
+#include <kernel/time_source.h>
+#include <mm/core_mmu.h>
+#include <utee_defines.h>
+
+#include <tee/tee_cryp_utl.h>
+
+#include <stdint.h>
+#include <mpa.h>
+#include <arm.h>
+
+static TEE_Result arm_cntpct_get_sys_time(TEE_Time *time)
+{
+ uint64_t cntpct = read_cntpct();
+ uint32_t cntfrq = read_cntfrq();
+
+ time->seconds = cntpct / cntfrq;
+ time->millis = (cntpct % cntfrq) / (cntfrq / TEE_TIME_MILLIS_BASE);
+
+ return TEE_SUCCESS;
+}
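+
+/*
+ * Worked example (illustrative): with cntfrq = 1000000 (a 1 MHz counter)
+ * and cntpct = 2500000, seconds = 2500000 / 1000000 = 2 and
+ * millis = (2500000 % 1000000) / (1000000 / TEE_TIME_MILLIS_BASE)
+ *        = 500000 / 1000 = 500, i.e. 2.5 s since the counter started,
+ * assuming TEE_TIME_MILLIS_BASE is 1000 as the name suggests.
+ */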
+
+static const struct time_source arm_cntpct_time_source = {
+ .name = "arm cntpct",
+ .protection_level = 1000,
+ .get_sys_time = arm_cntpct_get_sys_time,
+};
+
+REGISTER_TIME_SOURCE(arm_cntpct_time_source)
+
+/*
+ * We collect jitter using cntpct in 32- or 64-bit mode that is typically
+ * clocked at around 1MHz.
+ *
+ * The first time we are called, we add the low 16 bits of the counter as
+ * entropy.
+ *
+ * Subsequently, we accumulate 2 low bits each time by:
+ *
+ * - rotating the accumulator by 2 bits
+ * - XORing it in 2-bit chunks with the whole CNTPCT contents
+ *
+ * and adding one byte of entropy when we reach 8 rotated bits.
+ */
+
+void plat_prng_add_jitter_entropy(void)
+{
+ uint64_t tsc = read_cntpct();
+ int bytes = 0, n;
+ static uint8_t first, bits;
+ static uint16_t acc;
+
+ if (!first) {
+ acc = tsc;
+ bytes = 2;
+ first = 1;
+ } else {
+ acc = (acc << 2) | ((acc >> 6) & 3);
+ for (n = 0; n < 64; n += 2)
+ acc ^= (tsc >> n) & 3;
+ bits += 2;
+ if (bits >= 8) {
+ bits = 0;
+ bytes = 1;
+ }
+ }
+ if (bytes) {
+ DMSG("%s: 0x%02X\n", __func__,
+ (int)acc & ((1 << (bytes * 8)) - 1));
+ tee_prng_add_entropy((uint8_t *)&acc, bytes);
+ }
+}
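+
+/*
+ * Illustrative note: the first call contributes 2 bytes (the low 16 bits of
+ * CNTPCT); every subsequent call folds 2 bits of jitter into 'acc', and
+ * every fourth such call (8 accumulated bits) pushes one more byte to
+ * tee_prng_add_entropy(). So roughly one byte of jitter entropy is
+ * collected per four invocations after the first.
+ */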
diff --git a/core/arch/arm/kernel/tee_time_ree.c b/core/arch/arm/kernel/tee_time_ree.c
new file mode 100644
index 0000000..d2a9bb1
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_ree.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/mutex.h>
+
+static TEE_Time prev;
+
+static struct mutex time_mu = MUTEX_INITIALIZER;
+
+static TEE_Result get_monotonic_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+
+ res = tee_time_get_ree_time(time);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ mutex_lock(&time_mu);
+ if (time->seconds < prev.seconds ||
+ (time->seconds == prev.seconds &&
+ time->millis < prev.millis))
+ *time = prev; /* REE time was rolled back */
+ else
+ prev = *time;
+ mutex_unlock(&time_mu);
+
+ return res;
+}
+
+static const struct time_source ree_time_source = {
+ .name = "ree",
+ .protection_level = 100,
+ .get_sys_time = get_monotonic_ree_time,
+};
+
+REGISTER_TIME_SOURCE(ree_time_source)
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
new file mode 100644
index 0000000..c988b65
--- /dev/null
+++ b/core/arch/arm/kernel/thread.c
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <arm.h>
+#include <assert.h>
+#include <keep.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread_defs.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <sm/sm.h>
+#include <tee/tee_fs_rpc.h>
+#include <tee/tee_cryp_utl.h>
+#include <trace.h>
+#include <util.h>
+
+#include "thread_private.h"
+
+#ifdef CFG_WITH_ARM_TRUSTED_FW
+#define STACK_TMP_OFFS 0
+#else
+#define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE
+#endif
+
+
+#ifdef ARM32
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS)
+#else
+#define STACK_TMP_SIZE (1024 + STACK_TMP_OFFS)
+#endif
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 2048
+#endif
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+
+#endif /*ARM32*/
+
+#ifdef ARM64
+#define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS)
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+#endif /*ARM64*/
+
+struct thread_ctx threads[CFG_NUM_THREADS];
+
+static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
+
+#ifdef CFG_WITH_STACK_CANARIES
+#ifdef ARM32
+#define STACK_CANARY_SIZE (4 * sizeof(uint32_t))
+#endif
+#ifdef ARM64
+#define STACK_CANARY_SIZE (8 * sizeof(uint32_t))
+#endif
+#define START_CANARY_VALUE 0xdededede
+#define END_CANARY_VALUE 0xabababab
+#define GET_START_CANARY(name, stack_num) name[stack_num][0]
+#define GET_END_CANARY(name, stack_num) \
+ name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
+#else
+#define STACK_CANARY_SIZE 0
+#endif
+
+#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
+linkage uint32_t name[num_stacks] \
+ [ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
+ sizeof(uint32_t)] \
+ __attribute__((section(".nozi_stack"), \
+ aligned(STACK_ALIGNMENT)))
+
+#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)
+
+#define GET_STACK(stack) \
+ ((vaddr_t)(stack) + STACK_SIZE(stack))
+
+DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, /* global */);
+DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
+#ifndef CFG_WITH_PAGER
+DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
+#endif
+
+const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);
+const uint32_t stack_tmp_offset = STACK_TMP_OFFS + STACK_CANARY_SIZE / 2;
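+
+/*
+ * For reference, DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE,
+ * STACK_ABT_SIZE, static) expands to roughly:
+ *
+ *	static uint32_t stack_abt[CFG_TEE_CORE_NB_CORE]
+ *		[ROUNDUP(STACK_ABT_SIZE + STACK_CANARY_SIZE, STACK_ALIGNMENT) /
+ *		 sizeof(uint32_t)]
+ *		__attribute__((section(".nozi_stack"),
+ *			       aligned(STACK_ALIGNMENT)));
+ *
+ * GET_STACK() then returns the initial stack pointer (the stacks grow
+ * downwards), leaving STACK_CANARY_SIZE / 2 bytes above it for the end
+ * canary.
+ */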
+
+/*
+ * This stack setup information is required by secondary boot cores before
+ * they each locally enable the pager (the MMU). Hence it is kept in pager
+ * sections.
+ */
+KEEP_PAGER(stack_tmp);
+KEEP_PAGER(stack_tmp_stride);
+KEEP_PAGER(stack_tmp_offset);
+
+thread_smc_handler_t thread_std_smc_handler_ptr;
+static thread_smc_handler_t thread_fast_smc_handler_ptr;
+thread_fiq_handler_t thread_fiq_handler_ptr;
+thread_pm_handler_t thread_cpu_on_handler_ptr;
+thread_pm_handler_t thread_cpu_off_handler_ptr;
+thread_pm_handler_t thread_cpu_suspend_handler_ptr;
+thread_pm_handler_t thread_cpu_resume_handler_ptr;
+thread_pm_handler_t thread_system_off_handler_ptr;
+thread_pm_handler_t thread_system_reset_handler_ptr;
+
+
+static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
+static bool thread_prealloc_rpc_cache;
+
+static void init_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+#define INIT_CANARY(name) \
+ for (n = 0; n < ARRAY_SIZE(name); n++) { \
+ uint32_t *start_canary = &GET_START_CANARY(name, n); \
+ uint32_t *end_canary = &GET_END_CANARY(name, n); \
+ \
+ *start_canary = START_CANARY_VALUE; \
+ *end_canary = END_CANARY_VALUE; \
+ DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
+ #name, n, (void *)(end_canary - 1)); \
+ DMSG("watch *%p\n", (void *)end_canary); \
+ }
+
+ INIT_CANARY(stack_tmp);
+ INIT_CANARY(stack_abt);
+#ifndef CFG_WITH_PAGER
+ INIT_CANARY(stack_thread);
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+#define CANARY_DIED(stack, loc, n) \
+ do { \
+ EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
+ panic(); \
+ } while (0)
+
+void thread_check_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
+ if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, start, n);
+ if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, end, n);
+ }
+
+ for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
+ if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_abt, start, n);
+ if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_abt, end, n);
+
+ }
+#ifndef CFG_WITH_PAGER
+ for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
+ if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_thread, start, n);
+ if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_thread, end, n);
+ }
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+static void lock_global(void)
+{
+ cpu_spin_lock(&thread_global_lock);
+}
+
+static void unlock_global(void)
+{
+ cpu_spin_unlock(&thread_global_lock);
+}
+
+#ifdef ARM32
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t cpsr = read_cpsr();
+
+ return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t cpsr = read_cpsr();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
+ cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
+ write_cpsr(cpsr);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t daif = read_daif();
+
+ return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t daif = read_daif();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
+ daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
+ write_daif(daif);
+}
+#endif /*ARM64*/
+
+uint32_t thread_mask_exceptions(uint32_t exceptions)
+{
+ uint32_t state = thread_get_exceptions();
+
+ thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
+ return state;
+}
+
+void thread_unmask_exceptions(uint32_t state)
+{
+ thread_set_exceptions(state & THREAD_EXCP_ALL);
+}
+
+
+struct thread_core_local *thread_get_core_local(void)
+{
+ uint32_t cpu_id = get_core_pos();
+
+ /*
+ * IRQs must be disabled before playing with core_local since
+ * we otherwise may be rescheduled to a different core in the
+ * middle of this function.
+ */
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+
+ assert(cpu_id < CFG_TEE_CORE_NB_CORE);
+ return &thread_core_local[cpu_id];
+}
+
+static void thread_lazy_save_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ thr->vfp_state.ns_saved = false;
+#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
+ /*
+ * ARM TF saves and restores CPACR_EL1, so we must assume NS world
+ * uses VFP and always preserve the register file when secure world
+ * is about to use it
+ */
+ thr->vfp_state.ns.force_save = true;
+#endif
+ vfp_lazy_save_state_init(&thr->vfp_state.ns);
+#endif /*CFG_WITH_VFP*/
+}
+
+static void thread_lazy_restore_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
+
+ if (tuv && tuv->lazy_saved && !tuv->saved) {
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+
+ vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
+ thr->vfp_state.ns_saved = false;
+#endif /*CFG_WITH_VFP*/
+}
+
+#ifdef ARM32
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint32_t)thread_std_smc_entry;
+
+ /*
+ * Stdcalls start in SVC mode with IRQ and asynchronous aborts masked
+ * and FIQ unmasked.
+ */
+ thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
+ thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
+ /* Enable thumb mode if it's a thumb instruction */
+ if (thread->regs.pc & 1)
+ thread->regs.cpsr |= CPSR_T;
+ /* Reinitialize stack pointer */
+ thread->regs.svc_sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in r0-r7 when thread is started.
+ */
+ thread->regs.r0 = args->a0;
+ thread->regs.r1 = args->a1;
+ thread->regs.r2 = args->a2;
+ thread->regs.r3 = args->a3;
+ thread->regs.r4 = args->a4;
+ thread->regs.r5 = args->a5;
+ thread->regs.r6 = args->a6;
+ thread->regs.r7 = args->a7;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint64_t)thread_std_smc_entry;
+
+ /*
+ * Stdcalls start at EL1 with IRQ and asynchronous aborts masked and
+ * FIQ unmasked.
+ */
+ thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
+ DAIFBIT_IRQ | DAIFBIT_ABT);
+ /* Reinitialize stack pointer */
+ thread->regs.sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in x0-x7 when thread is started.
+ */
+ thread->regs.x[0] = args->a0;
+ thread->regs.x[1] = args->a1;
+ thread->regs.x[2] = args->a2;
+ thread->regs.x[3] = args->a3;
+ thread->regs.x[4] = args->a4;
+ thread->regs.x[5] = args->a5;
+ thread->regs.x[6] = args->a6;
+ thread->regs.x[7] = args->a7;
+
+ /* Set up frame pointer as per the AArch64 AAPCS */
+ thread->regs.x[29] = 0;
+}
+#endif /*ARM64*/
+
+void thread_init_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ size_t n;
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ TAILQ_INIT(&threads[n].mutexes);
+ TAILQ_INIT(&threads[n].tsd.sess_stack);
+#ifdef CFG_SMALL_PAGE_USER_TA
+ SLIST_INIT(&threads[n].tsd.pgt_cache);
+#endif
+ }
+
+ for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
+ thread_core_local[n].curr_thread = -1;
+
+ l->curr_thread = 0;
+ threads[0].state = THREAD_STATE_ACTIVE;
+}
+
+void thread_clr_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
+ assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
+ assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
+ threads[l->curr_thread].state = THREAD_STATE_FREE;
+ l->curr_thread = -1;
+}
+
+static void thread_alloc_and_run(struct thread_smc_args *args)
+{
+ size_t n;
+ struct thread_core_local *l = thread_get_core_local();
+ bool found_thread = false;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state == THREAD_STATE_FREE) {
+ threads[n].state = THREAD_STATE_ACTIVE;
+ found_thread = true;
+ break;
+ }
+ }
+
+ unlock_global();
+
+ if (!found_thread) {
+ args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ threads[n].flags = 0;
+ init_regs(threads + n, args);
+
+ /* Save Hypervisor Client ID */
+ threads[n].hyp_clnt_id = args->a7;
+
+ thread_lazy_save_ns_vfp();
+ thread_resume(&threads[n].regs);
+}
+
+#ifdef ARM32
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+ struct thread_smc_args *args)
+{
+ /*
+ * Update returned values from RPC, values will appear in
+ * r0-r5 when the thread is resumed.
+ */
+ regs->r0 = args->a0;
+ regs->r1 = args->a1;
+ regs->r2 = args->a2;
+ regs->r3 = args->a3;
+ regs->r4 = args->a4;
+ regs->r5 = args->a5;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+ struct thread_smc_args *args)
+{
+ /*
+ * Update returned values from RPC, values will appear in
+ * x0-x5 when the thread is resumed.
+ */
+ regs->x[0] = args->a0;
+ regs->x[1] = args->a1;
+ regs->x[2] = args->a2;
+ regs->x[3] = args->a3;
+ regs->x[4] = args->a4;
+ regs->x[5] = args->a5;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+static bool is_from_user(uint32_t cpsr)
+{
+ return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif
+
+#ifdef ARM64
+static bool is_from_user(uint32_t cpsr)
+{
+ if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+ return true;
+ if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+ SPSR_64_MODE_EL0)
+ return true;
+ return false;
+}
+#endif
+
+static bool is_user_mode(struct thread_ctx_regs *regs)
+{
+ return is_from_user((uint32_t)regs->cpsr);
+}
+
+static void thread_resume_from_rpc(struct thread_smc_args *args)
+{
+ size_t n = args->a3; /* thread id */
+ struct thread_core_local *l = thread_get_core_local();
+ uint32_t rv = 0;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ if (n < CFG_NUM_THREADS &&
+ threads[n].state == THREAD_STATE_SUSPENDED &&
+ args->a7 == threads[n].hyp_clnt_id)
+ threads[n].state = THREAD_STATE_ACTIVE;
+ else
+ rv = OPTEE_SMC_RETURN_ERESUME;
+
+ unlock_global();
+
+ if (rv) {
+ args->a0 = rv;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ if (is_user_mode(&threads[n].regs))
+ tee_ta_update_session_utime_resume();
+
+ if (threads[n].have_user_map)
+ core_mmu_set_user_map(&threads[n].user_map);
+
+ /*
+ * A return from an RPC that requested service of an IRQ must not
+ * pick up parameters from the non-secure world.
+ */
+ if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
+ copy_a0_to_a5(&threads[n].regs, args);
+ threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+ }
+
+ thread_lazy_save_ns_vfp();
+ thread_resume(&threads[n].regs);
+}
+
+void thread_handle_fast_smc(struct thread_smc_args *args)
+{
+ thread_check_canaries();
+ thread_fast_smc_handler_ptr(args);
+ /* Fast handlers must not unmask any exceptions */
+ assert(thread_get_exceptions() == THREAD_EXCP_ALL);
+}
+
+void thread_handle_std_smc(struct thread_smc_args *args)
+{
+ thread_check_canaries();
+
+ if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
+ thread_resume_from_rpc(args);
+ else
+ thread_alloc_and_run(args);
+}
+
+/* Helper routine for the assembly function thread_std_smc_entry() */
+void __thread_std_smc_entry(struct thread_smc_args *args)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ if (!thr->rpc_arg) {
+ paddr_t parg;
+ uint64_t carg;
+ void *arg;
+
+ thread_rpc_alloc_arg(
+ OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS),
+ &parg, &carg);
+ if (!parg || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
+ !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) {
+ thread_rpc_free_arg(carg);
+ args->a0 = OPTEE_SMC_RETURN_ENOMEM;
+ return;
+ }
+
+ thr->rpc_arg = arg;
+ thr->rpc_carg = carg;
+ }
+
+ thread_std_smc_handler_ptr(args);
+
+ tee_fs_rpc_cache_clear(&thr->tsd);
+ if (!thread_prealloc_rpc_cache) {
+ thread_rpc_free_arg(thr->rpc_carg);
+ thr->rpc_carg = 0;
+ thr->rpc_arg = 0;
+ }
+}
+
+void *thread_get_tmp_sp(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ return (void *)l->tmp_stack_va_end;
+}
+
+#ifdef ARM64
+vaddr_t thread_get_saved_thread_sp(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+ return threads[ct].kern_sp;
+}
+#endif /*ARM64*/
+
+bool thread_addr_is_in_stack(vaddr_t va)
+{
+ struct thread_ctx *thr;
+ int ct = thread_get_id_may_fail();
+
+ if (ct == -1)
+ return false;
+
+ thr = threads + ct;
+ return va < thr->stack_va_end &&
+ va >= (thr->stack_va_end - STACK_THREAD_SIZE);
+}
+
+void thread_state_free(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+ assert(TAILQ_EMPTY(&threads[ct].mutexes));
+
+ thread_lazy_restore_ns_vfp();
+ tee_pager_release_phys(
+ (void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
+ STACK_THREAD_SIZE);
+
+ lock_global();
+
+ assert(threads[ct].state == THREAD_STATE_ACTIVE);
+ threads[ct].state = THREAD_STATE_FREE;
+ threads[ct].flags = 0;
+ l->curr_thread = -1;
+
+ unlock_global();
+}
+
+#ifdef CFG_WITH_PAGER
+static void release_unused_kernel_stack(struct thread_ctx *thr)
+{
+ vaddr_t sp = thr->regs.svc_sp;
+ vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
+ size_t len = sp - base;
+
+ tee_pager_release_phys((void *)base, len);
+}
+#else
+static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
+{
+}
+#endif
+
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+
+ thread_check_canaries();
+
+ release_unused_kernel_stack(threads + ct);
+
+ if (is_from_user(cpsr)) {
+ thread_user_save_vfp();
+ tee_ta_update_session_utime_suspend();
+ tee_ta_gprof_sample_pc(pc);
+ }
+ thread_lazy_restore_ns_vfp();
+
+ lock_global();
+
+ assert(threads[ct].state == THREAD_STATE_ACTIVE);
+ threads[ct].flags |= flags;
+ threads[ct].regs.cpsr = cpsr;
+ threads[ct].regs.pc = pc;
+ threads[ct].state = THREAD_STATE_SUSPENDED;
+
+ threads[ct].have_user_map = core_mmu_user_mapping_is_active();
+ if (threads[ct].have_user_map) {
+ core_mmu_get_user_map(&threads[ct].user_map);
+ core_mmu_set_user_map(NULL);
+ }
+
+ l->curr_thread = -1;
+
+ unlock_global();
+
+ return ct;
+}
+
+#ifdef ARM32
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ l->tmp_stack_va_end = sp;
+ thread_set_irq_sp(sp);
+ thread_set_fiq_sp(sp);
+}
+
+static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
+{
+ thread_set_abt_sp(sp);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ /*
+ * We're already using the tmp stack when this function is called
+ * so there's no need to assign it to any stack pointer. However,
+ * we'll need to restore it at different times so store it here.
+ */
+ l->tmp_stack_va_end = sp;
+}
+
+static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
+{
+ l->abt_stack_va_end = sp;
+}
+#endif /*ARM64*/
+
+bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
+{
+ if (thread_id >= CFG_NUM_THREADS)
+ return false;
+ threads[thread_id].stack_va_end = sp;
+ return true;
+}
+
+int thread_get_id_may_fail(void)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ thread_unmask_exceptions(exceptions);
+ return ct;
+}
+
+int thread_get_id(void)
+{
+ int ct = thread_get_id_may_fail();
+
+ assert(ct >= 0 && ct < CFG_NUM_THREADS);
+ return ct;
+}
+
+static void init_handlers(const struct thread_handlers *handlers)
+{
+ thread_std_smc_handler_ptr = handlers->std_smc;
+ thread_fast_smc_handler_ptr = handlers->fast_smc;
+ thread_fiq_handler_ptr = handlers->fiq;
+ thread_cpu_on_handler_ptr = handlers->cpu_on;
+ thread_cpu_off_handler_ptr = handlers->cpu_off;
+ thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
+ thread_cpu_resume_handler_ptr = handlers->cpu_resume;
+ thread_system_off_handler_ptr = handlers->system_off;
+ thread_system_reset_handler_ptr = handlers->system_reset;
+}
+
+#ifdef CFG_WITH_PAGER
+static void init_thread_stacks(void)
+{
+ size_t n;
+
+ /*
+ * Allocate virtual memory for thread stacks.
+ */
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ tee_mm_entry_t *mm;
+ vaddr_t sp;
+
+ /* Find vmem for thread stack and its protection gap */
+ mm = tee_mm_alloc(&tee_mm_vcore,
+ SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
+ assert(mm);
+
+ /* Claim eventual physical page */
+ tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
+ true);
+
+ /* Add the area to the pager */
+ tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
+ tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
+ TEE_MATTR_PRW | TEE_MATTR_LOCKED,
+ NULL, NULL);
+
+ /* init effective stack */
+ sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
+ if (!thread_init_stack(n, sp))
+ panic("init stack failed");
+ }
+}
+#else
+static void init_thread_stacks(void)
+{
+ size_t n;
+
+ /* Assign the thread stacks */
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
+ panic("thread_init_stack failed");
+ }
+}
+#endif /*CFG_WITH_PAGER*/
+
+void thread_init_primary(const struct thread_handlers *handlers)
+{
+ init_handlers(handlers);
+
+ /* Initialize canaries around the stacks */
+ init_canaries();
+
+ init_thread_stacks();
+ pgt_init();
+}
+
+static void init_sec_mon(size_t pos __maybe_unused)
+{
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+ /* Initialize secure monitor */
+ sm_init(GET_STACK(stack_tmp[pos]));
+#endif
+}
+
+void thread_init_per_cpu(void)
+{
+ size_t pos = get_core_pos();
+ struct thread_core_local *l = thread_get_core_local();
+
+ init_sec_mon(pos);
+
+ set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
+ set_abt_stack(l, GET_STACK(stack_abt[pos]));
+
+ thread_init_vbar();
+}
+
+struct thread_specific_data *thread_get_tsd(void)
+{
+ return &threads[thread_get_id()].tsd;
+}
+
+struct thread_ctx_regs *thread_get_ctx_regs(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+ return &threads[l->curr_thread].regs;
+}
+
+void thread_set_irq(bool enable)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l;
+
+ l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (enable) {
+ threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+ } else {
+ /*
+ * No need to disable IRQ here since it's already disabled
+ * above.
+ */
+ threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
+ }
+}
+
+void thread_restore_irq(void)
+{
+ /* thread_get_core_local() requires IRQs to be disabled */
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_core_local *l;
+
+ l = thread_get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
+ thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+uint32_t thread_kernel_enable_vfp(void)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(!vfp_is_enabled());
+
+ if (!thr->vfp_state.ns_saved) {
+ vfp_lazy_save_state_final(&thr->vfp_state.ns);
+ thr->vfp_state.ns_saved = true;
+ } else if (thr->vfp_state.sec_lazy_saved &&
+ !thr->vfp_state.sec_saved) {
+ /*
+ * This happens when we're handling an abort while the
+ * thread was using the VFP state.
+ */
+ vfp_lazy_save_state_final(&thr->vfp_state.sec);
+ thr->vfp_state.sec_saved = true;
+ } else if (tuv && tuv->lazy_saved && !tuv->saved) {
+ /*
+ * This can happen either during syscall or abort
+ * processing (while processing a syscall).
+ */
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+
+ vfp_enable();
+ return exceptions;
+}
+
+void thread_kernel_disable_vfp(uint32_t state)
+{
+ uint32_t exceptions;
+
+ assert(vfp_is_enabled());
+
+ vfp_disable();
+ exceptions = thread_get_exceptions();
+ assert(exceptions & THREAD_EXCP_IRQ);
+ exceptions &= ~THREAD_EXCP_IRQ;
+ exceptions |= state & THREAD_EXCP_IRQ;
+ thread_set_exceptions(exceptions);
+}
+
+void thread_kernel_save_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ if (vfp_is_enabled()) {
+ vfp_lazy_save_state_init(&thr->vfp_state.sec);
+ thr->vfp_state.sec_lazy_saved = true;
+ }
+}
+
+void thread_kernel_restore_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(!vfp_is_enabled());
+ if (thr->vfp_state.sec_lazy_saved) {
+ vfp_lazy_restore_state(&thr->vfp_state.sec,
+ thr->vfp_state.sec_saved);
+ thr->vfp_state.sec_saved = false;
+ thr->vfp_state.sec_lazy_saved = false;
+ }
+}
+
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ assert(!vfp_is_enabled());
+
+ if (!thr->vfp_state.ns_saved) {
+ vfp_lazy_save_state_final(&thr->vfp_state.ns);
+ thr->vfp_state.ns_saved = true;
+ } else if (tuv && uvfp != tuv) {
+ if (tuv->lazy_saved && !tuv->saved) {
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+ }
+
+ if (uvfp->lazy_saved)
+ vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
+ uvfp->lazy_saved = false;
+ uvfp->saved = false;
+
+ thr->vfp_state.uvfp = uvfp;
+ vfp_enable();
+}
+
+void thread_user_save_vfp(void)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+ if (!vfp_is_enabled())
+ return;
+
+ assert(tuv && !tuv->lazy_saved && !tuv->saved);
+ vfp_lazy_save_state_init(&tuv->vfp);
+ tuv->lazy_saved = true;
+}
+
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ if (uvfp == thr->vfp_state.uvfp)
+ thr->vfp_state.uvfp = NULL;
+ uvfp->lazy_saved = false;
+ uvfp->saved = false;
+}
+#endif /*CFG_WITH_VFP*/
+
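+/*
+ * get_spsr() builds the SPSR value used when entering user mode: user
+ * mode (EL0 on ARM64), and AArch32/Thumb or AArch64 execution state as
+ * requested by is_32bit and entry_func, with the exception mask bits
+ * preserved.
+ */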
+#ifdef ARM32
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+ uint32_t s;
+
+ if (!is_32bit)
+ return false;
+
+ s = read_spsr();
+ s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
+ s |= CPSR_MODE_USR;
+ if (entry_func & 1)
+ s |= CPSR_T;
+ *spsr = s;
+ return true;
+}
+#endif
+
+#ifdef ARM64
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+ uint32_t s;
+
+ if (is_32bit) {
+ s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
+ s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
+ s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
+ } else {
+ s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
+ }
+
+ *spsr = s;
+ return true;
+}
+#endif
+
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long entry_func, bool is_32bit,
+ uint32_t *exit_status0, uint32_t *exit_status1)
+{
+ uint32_t spsr;
+
+ tee_ta_update_session_utime_resume();
+
+ if (!get_spsr(is_32bit, entry_func, &spsr)) {
+ *exit_status0 = 1; /* panic */
+ *exit_status1 = 0xbadbadba;
+ return 0;
+ }
+ return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
+ spsr, exit_status0, exit_status1);
+}
+
+void thread_add_mutex(struct mutex *m)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+ assert(m->owner_id == -1);
+ m->owner_id = ct;
+ TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
+}
+
+void thread_rem_mutex(struct mutex *m)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+ assert(m->owner_id == ct);
+ m->owner_id = -1;
+ TAILQ_REMOVE(&threads[ct].mutexes, m, link);
+}
+
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
+{
+ bool rv;
+ size_t n;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state != THREAD_STATE_FREE) {
+ rv = false;
+ goto out;
+ }
+ }
+
+ rv = true;
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].rpc_arg) {
+ *cookie = threads[n].rpc_carg;
+ threads[n].rpc_carg = 0;
+ threads[n].rpc_arg = NULL;
+ goto out;
+ }
+ }
+
+ *cookie = 0;
+ thread_prealloc_rpc_cache = false;
+out:
+ unlock_global();
+ thread_unmask_exceptions(exceptions);
+ return rv;
+}
+
+bool thread_enable_prealloc_rpc_cache(void)
+{
+ bool rv;
+ size_t n;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state != THREAD_STATE_FREE) {
+ rv = false;
+ goto out;
+ }
+ }
+
+ rv = true;
+ thread_prealloc_rpc_cache = true;
+out:
+ unlock_global();
+ thread_unmask_exceptions(exceptions);
+ return rv;
+}
+
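+/*
+ * Issues an RPC command to normal world using the thread's preallocated
+ * message argument buffer: the parameters are copied into the buffer,
+ * thread_rpc() suspends the thread until normal world returns, and any
+ * output/inout parameters are copied back before the result is returned.
+ */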
+static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ const size_t params_size = sizeof(struct optee_msg_param) * num_params;
+ size_t n;
+
+ assert(arg && carg && num_params <= THREAD_RPC_MAX_NUM_PARAMS);
+
+ plat_prng_add_jitter_entropy();
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS));
+ arg->cmd = cmd;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = num_params;
+ memcpy(OPTEE_MSG_GET_PARAMS(arg), params, params_size);
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ for (n = 0; n < num_params; n++) {
+ switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ memcpy(params + n, OPTEE_MSG_GET_PARAMS(arg) + n,
+ sizeof(struct optee_msg_param));
+ break;
+ default:
+ break;
+ }
+ }
+ return arg->ret;
+}
+
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params)
+{
+ uint32_t ret;
+
+ ret = rpc_cmd_nolock(cmd, num_params, params);
+
+ return ret;
+}
+
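+/*
+ * Checks that a buffer supplied by normal world is properly aligned and
+ * lies entirely within non-secure shared memory before it is used.
+ */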
+static bool check_alloced_shm(paddr_t pa, size_t len, size_t align)
+{
+ if (pa & (align - 1))
+ return false;
+ return core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len);
+}
+
+void thread_rpc_free_arg(uint64_t cookie)
+{
+ if (cookie) {
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ OPTEE_SMC_RETURN_RPC_FREE
+ };
+
+ reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ }
+}
+
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie)
+{
+ paddr_t pa;
+ uint64_t co;
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ OPTEE_SMC_RETURN_RPC_ALLOC, size
+ };
+
+ thread_rpc(rpc_args);
+
+ pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
+ co = reg_pair_to_64(rpc_args[4], rpc_args[5]);
+ if (!check_alloced_shm(pa, size, sizeof(uint64_t))) {
+ thread_rpc_free_arg(co);
+ pa = 0;
+ co = 0;
+ }
+
+ *arg = pa;
+ *cookie = co;
+}
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc()
+ *
+ * @cookie: cookie received when allocating the buffer
+ * @bt: must be the same as supplied when allocating
+ */
+static void thread_rpc_free(unsigned int bt, uint64_t cookie)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = 1;
+
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = bt;
+ params[0].u.value.b = cookie;
+ params[0].u.value.c = 0;
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+}
+
+/**
+ * Allocates a shared memory buffer via RPC
+ *
+ * @size: size in bytes of shared memory buffer
+ * @align: required alignment of buffer
+ * @bt: buffer type OPTEE_MSG_RPC_SHM_TYPE_*
+ * @payload: returned physical pointer to buffer, 0 if allocation
+ * failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
+ paddr_t *payload, uint64_t *cookie)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+ uint64_t carg = thr->rpc_carg;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+ memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+ arg->num_params = 1;
+
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = bt;
+ params[0].u.value.b = size;
+ params[0].u.value.c = align;
+
+ reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+ thread_rpc(rpc_args);
+ if (arg->ret != TEE_SUCCESS)
+ goto fail;
+
+ if (arg->num_params != 1)
+ goto fail;
+
+ if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
+ goto fail;
+
+ if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) {
+ thread_rpc_free(bt, params[0].u.tmem.shm_ref);
+ goto fail;
+ }
+
+ *payload = params[0].u.tmem.buf_ptr;
+ *cookie = params[0].u.tmem.shm_ref;
+ return;
+fail:
+ *payload = 0;
+ *cookie = 0;
+}
+
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie)
+{
+ thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, payload, cookie);
+}
+
+void thread_rpc_free_payload(uint64_t cookie)
+{
+ thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
+}
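+
+/*
+ * A minimal usage sketch of the two helpers above (illustrative only;
+ * real callers also translate the physical address before accessing the
+ * buffer):
+ *
+ *	paddr_t pa = 0;
+ *	uint64_t cookie = 0;
+ *
+ *	thread_rpc_alloc_payload(size, &pa, &cookie);
+ *	if (!pa)
+ *		return TEE_ERROR_OUT_OF_MEMORY;
+ *	... use the buffer at pa ...
+ *	thread_rpc_free_payload(cookie);
+ */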
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
new file mode 100644
index 0000000..6d3ac35
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/abort.h>
+#include <kernel/thread_defs.h>
+#include <kernel/unwind.h>
+
+ .section .text.thread_asm
+
+LOCAL_FUNC vector_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7}
+ mov r0, sp
+ bl thread_handle_std_smc
+ /*
+ * Normally thread_handle_std_smc() should return via
+	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
+ * hasn't switched stack (error detected) it will do a normal "C"
+ * return.
+ */
+ pop {r1-r8}
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_std_smc_entry
+
+LOCAL_FUNC vector_fast_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7}
+ mov r0, sp
+ bl thread_handle_fast_smc
+ pop {r1-r8}
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* Secure Monitor received a FIQ and passed control to us. */
+ bl thread_check_canaries
+ ldr lr, =thread_fiq_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_fiq_entry
+
+LOCAL_FUNC vector_cpu_on_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_on_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_ON_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_off_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_suspend_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_cpu_resume_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_system_off_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr lr, =thread_system_reset_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ mov r1, r0
+ ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization. Also used when compiled with the internal monitor, but
+ * the cpu_*_entry and system_*_entry are not used then.
+ *
+ * Note that ARM-TF depends on the layout of this vector table, any change
+ * in layout has to be synced with ARM-TF.
+ */
+FUNC thread_vector_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ b vector_std_smc_entry
+ b vector_fast_smc_entry
+ b vector_cpu_on_entry
+ b vector_cpu_off_entry
+ b vector_cpu_resume_entry
+ b vector_cpu_suspend_entry
+ b vector_fiq_entry
+ b vector_system_off_entry
+ b vector_system_reset_entry
+UNWIND( .fnend)
+END_FUNC thread_vector_table
+
+FUNC thread_set_abt_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_ABT
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_abt_sp
+
+FUNC thread_set_irq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_IRQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_irq_sp
+
+FUNC thread_set_fiq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ mrs r1, cpsr
+ cps #CPSR_MODE_FIQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_set_fiq_sp
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ add r12, r0, #(13 * 4) /* Restore registers r0-r12 later */
+
+ cps #CPSR_MODE_SYS
+ ldm r12!, {sp, lr}
+
+ cps #CPSR_MODE_SVC
+ ldm r12!, {r1, sp, lr}
+ msr spsr_fsxc, r1
+
+ cps #CPSR_MODE_SVC
+ ldm r12, {r1, r2}
+ push {r1, r2}
+
+ ldm r0, {r0-r12}
+
+ /* Restore CPSR and jump to the instruction to resume at */
+ rfefd sp!
+UNWIND( .fnend)
+END_FUNC thread_resume
+
+/*
+ * Disables IRQ and FIQ and saves state of thread, returns original
+ * CPSR.
+ */
+LOCAL_FUNC thread_save_state , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r12, lr}
+ /*
+ * Uses stack for temporary storage, while storing needed
+ * context in the thread context struct.
+ */
+
+ mrs r12, cpsr
+
+ cpsid aif /* Disable Async abort, IRQ and FIQ */
+
+ push {r4-r7}
+ push {r0-r3}
+
+ mov r5, r12 /* Save CPSR in a preserved register */
+ mrs r6, cpsr /* Save current CPSR */
+
+ bl thread_get_ctx_regs
+
+ pop {r1-r4} /* r0-r3 pushed above */
+ stm r0!, {r1-r4}
+ pop {r1-r4} /* r4-r7 pushed above */
+ stm r0!, {r1-r4}
+ stm r0!, {r8-r11}
+
+ pop {r12, lr}
+ stm r0!, {r12}
+
+ cps #CPSR_MODE_SYS
+ stm r0!, {sp, lr}
+
+ cps #CPSR_MODE_SVC
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+
+ orr r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
+ msr cpsr, r6 /* Restore mode */
+
+ mov r0, r5 /* Return original CPSR */
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_save_state
+
+FUNC thread_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* Pass r0-r7 in a struct thread_smc_args */
+ push {r0-r7}
+ mov r0, sp
+ bl __thread_std_smc_entry
+ /*
+ * Load the returned r0-r3 into preserved registers and skip the
+ * "returned" r4-r7 since they will not be returned to normal
+ * world.
+ */
+ pop {r4-r7}
+ add sp, #(4 * 4)
+
+ /* Disable interrupts before switching to temporary stack */
+ cpsid aif
+ bl thread_get_tmp_sp
+ mov sp, r0
+
+ bl thread_state_free
+
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ mov r1, r4
+ mov r2, r5
+ mov r3, r6
+ mov r4, r7
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC thread_std_smc_entry
+
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+/*
+ * r0-r2 are used to pass parameters to normal world
+ * r0-r5 are used to pass the return value back from normal world
+ *
+ * Note that r3 is used to pass "resume information", that is, which
+ * thread it is that should resume.
+ *
+ * Since this function follows the AAPCS we need to preserve r4-r5,
+ * which are otherwise modified when returning back from normal world.
+ */
+UNWIND( .fnstart)
+ push {r4-r5, lr}
+UNWIND( .save {r4-r5, lr})
+ push {r0}
+UNWIND( .save {r0})
+
+ bl thread_save_state
+ mov r4, r0 /* Save original CPSR */
+
+ /*
+ * Switch to temporary stack and SVC mode. Save CPSR to resume into.
+ */
+ bl thread_get_tmp_sp
+ ldr r5, [sp] /* Get pointer to rv[] */
+ cps #CPSR_MODE_SVC /* Change to SVC mode */
+ mov sp, r0 /* Switch to tmp stack */
+
+ mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ mov r1, r4 /* CPSR to restore */
+ ldr r2, =.thread_rpc_return
+ bl thread_state_suspend
+ mov r4, r0 /* Supply thread index */
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	ldm	r5, {r1-r3}	/* Load rv[] into r1-r3 */
+ smc #0
+ b . /* SMC should not return */
+
+.thread_rpc_return:
+ /*
+	 * At this point the stack pointer has been restored to the value
+	 * it had when thread_save_state() was called above.
+	 *
+	 * Execution jumps here from thread_resume above when the RPC has
+	 * returned. The IRQ and FIQ bits are restored to what they were
+	 * when this function was originally entered.
+ */
+ pop {r12} /* Get pointer to rv[] */
+ stm r12, {r0-r5} /* Store r0-r5 into rv[] */
+ pop {r4-r5, pc}
+UNWIND( .fnend)
+END_FUNC thread_rpc
+
+LOCAL_FUNC thread_fiq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ /*
+ * We're saving {r0-r3} and the banked fiq registers {r8-r12}. The
+ * banked fiq registers need to be saved because the secure monitor
+ * doesn't save those. The treatment of the banked fiq registers is
+ * somewhat analogous to the lazy save of VFP registers.
+ */
+ push {r0-r3, r8-r12, lr}
+ bl thread_check_canaries
+ ldr lr, =thread_fiq_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ pop {r0-r3, r8-r12, lr}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_fiq_handler
+
+LOCAL_FUNC thread_irq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * IRQ mode is set up to use tmp stack so FIQ has to be
+ * disabled before touching the stack. We can also assign
+ * SVC sp from IRQ sp to get SVC mode into the state we
+ * need when doing the SMC below.
+ */
+ cpsid f /* Disable FIQ also */
+ sub lr, lr, #4
+ push {lr}
+ push {r12}
+
+ bl thread_save_state
+
+ mov r0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mrs r1, spsr
+ pop {r12}
+ pop {r2}
+ blx thread_state_suspend
+ mov r4, r0 /* Supply thread index */
+
+ /*
+ * Switch to SVC mode and copy current stack pointer as it already
+ * is the tmp stack.
+ */
+ mov r0, sp
+ cps #CPSR_MODE_SVC
+ mov sp, r0
+
+ ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ ldr r1, =OPTEE_SMC_RETURN_RPC_IRQ
+ mov r2, #0
+ mov r3, #0
+ /* r4 is already filled in above */
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC thread_irq_handler
+
+FUNC thread_init_vbar , :
+UNWIND( .fnstart)
+ /* Set vector (VBAR) */
+ ldr r0, =thread_vect_table
+ write_vbar r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_init_vbar
+
+/*
+ * Below are low level routines handling entry and return from user mode.
+ *
+ * thread_enter_user_mode() saves all the registers that user mode can
+ * change so kernel mode can restore the needed registers when resuming
+ * execution after the call to thread_enter_user_mode() has returned.
+ * thread_enter_user_mode() doesn't return directly since it enters user
+ * mode instead, it's thread_unwind_user_mode() that does the
+ * returning by restoring the registers saved by thread_enter_user_mode().
+ *
+ * There are three ways for thread_enter_user_mode() to return to its
+ * caller: the user TA calls utee_return, the user TA calls utee_panic,
+ * or an abort occurs.
+ *
+ * Calls to utee_return or utee_panic are handled as:
+ * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
+ * calls syscall_return() or syscall_panic().
+ *
+ * These function calls return normally, except thread_svc_handler()
+ * which is an exception handling routine and therefore reads the return
+ * address and SPSR to restore from the stack. syscall_return() and
+ * syscall_panic() change the return address and SPSR used by
+ * thread_svc_handler() so that, instead of returning into user mode as
+ * with other syscalls, it returns into thread_unwind_user_mode() in
+ * kernel mode. When thread_svc_handler() returns, the stack pointer is
+ * at the point where thread_enter_user_mode() left it, which is where
+ * thread_unwind_user_mode() can operate.
+ *
+ * Aborts are handled in a similar way, but by thread_abort_handler()
+ * instead: when the pager sees an abort from user mode that can't be
+ * handled, it updates the SPSR and return address used by
+ * thread_abort_handler() so that it returns into
+ * thread_unwind_user_mode() instead.
+ */
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ * unsigned long a2, unsigned long a3, unsigned long user_sp,
+ * unsigned long user_func, unsigned long spsr,
+ * uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * Save all registers to allow syscall_return() to resume execution
+ * as if this function would have returned. This is also used in
+ * syscall_panic().
+ *
+ * If stack usage of this function is changed
+ * thread_unwind_user_mode() has to be updated.
+ */
+ push {r4-r12,lr}
+
+ ldr r4, [sp, #(10 * 0x4)] /* user stack pointer */
+ ldr r5, [sp, #(11 * 0x4)] /* user function */
+ ldr r6, [sp, #(12 * 0x4)] /* spsr */
+
+ /*
+	 * Set the Saved Processor Status Register to user mode to allow
+	 * entry into user mode via the movs instruction below.
+ */
+ msr spsr_cxsf, r6
+
+ /*
+ * Save old user sp and set new user sp.
+ */
+ cps #CPSR_MODE_SYS
+ mov r6, sp
+ mov sp, r4
+ cps #CPSR_MODE_SVC
+ push {r6,r7}
+
+ /*
+	 * Don't allow return from this function; the return is done through
+	 * thread_unwind_user_mode() below.
+ */
+ mov lr, #0
+ /* Call the user function with its arguments */
+ movs pc, r5
+UNWIND( .fnend)
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ ldr ip, [sp, #(15 * 0x4)] /* &ctx->panicked */
+ str r1, [ip]
+ ldr ip, [sp, #(16 * 0x4)] /* &ctx->panic_code */
+ str r2, [ip]
+
+ /* Restore old user sp */
+ pop {r4,r7}
+ cps #CPSR_MODE_SYS
+ mov sp, r4
+ cps #CPSR_MODE_SVC
+
+	pop	{r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
+UNWIND( .fnend)
+END_FUNC thread_unwind_user_mode
+
+LOCAL_FUNC thread_abort_handler , :
+thread_abort_handler:
+thread_und_handler:
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /*
+ * Switch to abort mode to use that stack instead.
+ */
+ cps #CPSR_MODE_ABT
+ push {r0-r11, ip}
+ cps #CPSR_MODE_UND
+ mrs r0, spsr
+ tst r0, #CPSR_T
+ subne r1, lr, #2
+ subeq r1, lr, #4
+ cps #CPSR_MODE_ABT
+ push {r0, r1}
+ msr spsr_fsxc, r0 /* In case some code reads spsr directly */
+ mov r0, #ABORT_TYPE_UNDEF
+ b .thread_abort_generic
+
+thread_dabort_handler:
+ push {r0-r11, ip}
+ sub r1, lr, #8
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #ABORT_TYPE_DATA
+ b .thread_abort_generic
+
+thread_pabort_handler:
+ push {r0-r11, ip}
+ sub r1, lr, #4
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #ABORT_TYPE_PREFETCH
+ b .thread_abort_generic
+
+.thread_abort_generic:
+ cps #CPSR_MODE_SYS
+ mov r1, sp
+ mov r2, lr
+ cps #CPSR_MODE_ABT
+ push {r1-r3}
+ mov r1, sp
+ bl abort_handler
+ pop {r1-r3}
+ cps #CPSR_MODE_SYS
+ mov sp, r1
+ mov lr, r2
+ cps #CPSR_MODE_ABT
+ pop {r0, r1}
+ mov lr, r1
+ msr spsr_fsxc, r0
+ pop {r0-r11, ip}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_abort_handler
+
+LOCAL_FUNC thread_svc_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r0-r7, lr}
+ mrs r0, spsr
+ push {r0}
+ mov r0, sp
+ bl tee_svc_handler
+ pop {r0}
+ msr spsr_fsxc, r0
+ pop {r0-r7, lr}
+ movs pc, lr
+UNWIND( .fnend)
+END_FUNC thread_svc_handler
+
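+/*
+ * Exception vector table for secure world, installed into VBAR by
+ * thread_init_vbar() above.
+ */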
+ .align 5
+LOCAL_FUNC thread_vect_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ b . /* Reset */
+ b thread_und_handler /* Undefined instruction */
+ b thread_svc_handler /* System call */
+ b thread_pabort_handler /* Prefetch abort */
+ b thread_dabort_handler /* Data abort */
+ b . /* Reserved */
+ b thread_irq_handler /* IRQ */
+ b thread_fiq_handler /* FIQ */
+UNWIND( .fnend)
+END_FUNC thread_vect_table
diff --git a/core/arch/arm/kernel/thread_a64.S b/core/arch/arm/kernel/thread_a64.S
new file mode 100644
index 0000000..abd482b
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a64.S
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm64_macros.S>
+#include <arm64.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <asm-defines.h>
+#include <kernel/thread_defs.h>
+#include "thread_private.h"
+
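+	/*
+	 * Loads the address of the current thread's struct thread_ctx into
+	 * x\res: reads curr_thread from the core local structure pointed to
+	 * by \core_local and indexes the threads[] array with it.
+	 * Registers \tmp0 and \tmp1 are clobbered.
+	 */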
+ .macro get_thread_ctx core_local, res, tmp0, tmp1
+ ldr w\tmp0, [\core_local, \
+ #THREAD_CORE_LOCAL_CURR_THREAD]
+ adr x\res, threads
+ mov x\tmp1, #THREAD_CTX_SIZE
+ madd x\res, x\tmp0, x\tmp1, x\res
+ .endm
+
+ .section .text.thread_asm
+LOCAL_FUNC vector_std_smc_entry , :
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+ bl thread_handle_std_smc
+ /*
+ * Normally thread_handle_std_smc() should return via
+	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
+ * hasn't switched stack (error detected) it will do a normal "C"
+ * return.
+ */
+ load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_std_smc_entry
+
+LOCAL_FUNC vector_fast_smc_entry , :
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+ bl thread_handle_fast_smc
+ load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+ /* Secure Monitor received a FIQ and passed control to us. */
+ bl thread_check_canaries
+ adr x16, thread_fiq_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ ldr x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_fiq_entry
+
+LOCAL_FUNC vector_cpu_on_entry , :
+ adr x16, thread_cpu_on_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_ON_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+ adr x16, thread_cpu_off_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+ adr x16, thread_cpu_suspend_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+ adr x16, thread_cpu_resume_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+ adr x16, thread_system_off_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+ adr x16, thread_system_reset_handler_ptr
+ ldr x16, [x16]
+ blr x16
+ mov x1, x0
+ ldr x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ smc #0
+ b . /* SMC should not return */
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization.
+ *
+ * Note that ARM-TF depends on the layout of this vector table, any change
+ * in layout has to be synced with ARM-TF.
+ */
+FUNC thread_vector_table , :
+ b vector_std_smc_entry
+ b vector_fast_smc_entry
+ b vector_cpu_on_entry
+ b vector_cpu_off_entry
+ b vector_cpu_resume_entry
+ b vector_cpu_suspend_entry
+ b vector_fiq_entry
+ b vector_system_off_entry
+ b vector_system_reset_entry
+END_FUNC thread_vector_table
+
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+ load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
+ mov sp, x1
+ msr elr_el1, x2
+ msr spsr_el1, x3
+ load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
+ ldr x0, [x0, THREAD_CTX_REGS_X0]
+ eret
+END_FUNC thread_resume
+
+FUNC thread_std_smc_entry , :
+ /* pass x0-x7 in a struct thread_smc_args */
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+
+ /* Call the registered handler */
+ bl __thread_std_smc_entry
+
+ /*
+ * Load the returned x0-x3 into preserved registers and skip the
+ * "returned" x4-x7 since they will not be returned to normal
+ * world.
+ */
+ load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ bl thread_get_tmp_sp
+ mov sp, x0
+
+ bl thread_state_free
+
+ ldr x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ mov x1, x20
+ mov x2, x21
+ mov x3, x22
+ mov x4, x23
+ smc #0
+ b . /* SMC should not return */
+END_FUNC thread_std_smc_entry
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+ /* Read daif and create an SPSR */
+ mrs x1, daif
+ orr x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ push x0, xzr
+ push x1, x30
+ bl thread_get_ctx_regs
+ ldr x30, [sp, #8]
+ store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
+ mov x19, x0
+
+ bl thread_get_tmp_sp
+ pop x1, xzr /* Match "push x1, x30" above */
+ mov x2, sp
+ str x2, [x19, #THREAD_CTX_REGS_SP]
+ ldr x20, [sp] /* Get pointer to rv[] */
+ mov sp, x0 /* Switch to tmp stack */
+
+ adr x2, .thread_rpc_return
+ mov w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ bl thread_state_suspend
+ mov x4, x0 /* Supply thread index */
+ ldr w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
+ smc #0
+ b . /* SMC should not return */
+
+.thread_rpc_return:
+ /*
+	 * At this point the stack pointer has been restored to the value
+	 * stored in THREAD_CTX above.
+	 *
+	 * Execution jumps here from thread_resume above when the RPC has
+	 * returned. The IRQ and FIQ bits are restored to what they were
+	 * when this function was originally entered.
+ */
+ pop x16, xzr /* Get pointer to rv[] */
+ store_wregs x16, 0, 0, 5 /* Store w0-w5 into rv[] */
+ ret
+END_FUNC thread_rpc
+
+FUNC thread_init_vbar , :
+ adr x0, thread_vect_table
+ msr vbar_el1, x0
+ ret
+END_FUNC thread_init_vbar
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ * unsigned long a2, unsigned long a3, unsigned long user_sp,
+ * unsigned long user_func, unsigned long spsr,
+ * uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
+ ldr x8, [sp]
+ /*
+	 * Create and fill in the struct thread_user_mode_rec
+ */
+ sub sp, sp, #THREAD_USER_MODE_REC_SIZE
+ store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
+ store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+
+ /*
+ * Switch to SP_EL1
+ * Disable exceptions
+ * Save kern sp in x19
+ */
+ msr daifset, #DAIFBIT_ALL
+ mov x19, sp
+ msr spsel, #1
+
+ /*
+ * Save the kernel stack pointer in the thread context
+ */
+ /* get pointer to current thread context */
+ get_thread_ctx sp, 21, 20, 22
+ /*
+ * Save kernel stack pointer to ensure that el0_svc() uses
+ * correct stack pointer
+ */
+ str x19, [x21, #THREAD_CTX_KERN_SP]
+
+ /*
+ * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
+ */
+ msr spsr_el1, x6
+ /* Set user sp */
+ mov x13, x4 /* Used when running TA in Aarch32 */
+ msr sp_el0, x4 /* Used when running TA in Aarch64 */
+ /* Set user function */
+ msr elr_el1, x5
+
+ /* Jump into user mode */
+ eret
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+ /* Store the exit status */
+ ldp x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
+ str w1, [x3]
+ str w2, [x4]
+ /* Restore x19..x30 */
+ load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+ add sp, sp, #THREAD_USER_MODE_REC_SIZE
+ /* Return from the call of thread_enter_user_mode() */
+ ret
+END_FUNC thread_unwind_user_mode
+
+ /*
+	 * This macro verifies that a given vector doesn't exceed the
+	 * architectural limit of 32 instructions. It is meant to be placed
+	 * immediately after the last instruction in the vector and takes the
+	 * vector entry as its parameter.
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+
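+/*
+ * Exception vector table for EL1, installed into VBAR_EL1 by
+ * thread_init_vbar() above.
+ */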
+ .align 11
+LOCAL_FUNC thread_vect_table , :
+ /* -----------------------------------------------------
+ * EL1 with SP0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b el1_sync_abort
+ check_vector_size sync_el1_sp0
+
+ .align 7
+irq_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size irq_el1_sp0
+
+ .align 7
+fiq_el1_sp0:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size fiq_el1_sp0
+
+ .align 7
+SErrorSP0:
+ b SErrorSP0
+ check_vector_size SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+SynchronousExceptionSPx:
+ b SynchronousExceptionSPx
+ check_vector_size SynchronousExceptionSPx
+
+ .align 7
+IrqSPx:
+ b IrqSPx
+ check_vector_size IrqSPx
+
+ .align 7
+FiqSPx:
+ b FiqSPx
+ check_vector_size FiqSPx
+
+ .align 7
+SErrorSPx:
+ b SErrorSPx
+ check_vector_size SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+el0_sync_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ mrs x2, esr_el1
+ mrs x3, sp_el0
+ lsr x2, x2, #ESR_EC_SHIFT
+ cmp x2, #ESR_EC_AARCH64_SVC
+ b.eq el0_svc
+ b el0_sync_abort
+ check_vector_size el0_sync_a64
+
+ .align 7
+el0_irq_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size el0_irq_a64
+
+ .align 7
+el0_fiq_a64:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size el0_fiq_a64
+
+ .align 7
+SErrorA64:
+ b SErrorA64
+ check_vector_size SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+ .align 7
+el0_sync_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ mrs x2, esr_el1
+ mrs x3, sp_el0
+ lsr x2, x2, #ESR_EC_SHIFT
+ cmp x2, #ESR_EC_AARCH32_SVC
+ b.eq el0_svc
+ b el0_sync_abort
+ check_vector_size el0_sync_a32
+
+ .align 7
+el0_irq_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_irq
+ check_vector_size el0_irq_a32
+
+ .align 7
+el0_fiq_a32:
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ b elx_fiq
+ check_vector_size el0_fiq_a32
+
+ .align 7
+SErrorA32:
+ b SErrorA32
+ check_vector_size SErrorA32
+
+END_FUNC thread_vect_table
+
+LOCAL_FUNC el0_svc , :
+ /* get pointer to current thread context in x0 */
+ get_thread_ctx sp, 0, 1, 2
+ /* load saved kernel sp */
+ ldr x0, [x0, #THREAD_CTX_KERN_SP]
+	/* Keep pointer to initial record in x1 */
+ mov x1, sp
+ /* Switch to SP_EL0 and restore kernel sp */
+ msr spsel, #0
+ mov x2, sp /* Save SP_EL0 */
+ mov sp, x0
+
+ /* Make room for struct thread_svc_regs */
+ sub sp, sp, #THREAD_SVC_REG_SIZE
+ stp x30,x2, [sp, #THREAD_SVC_REG_X30]
+
+ /* Restore x0-x3 */
+ ldp x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
+ ldp x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
+
+ /* Prepare the argument for the handler */
+ store_xregs sp, THREAD_SVC_REG_X0, 0, 14
+ mrs x0, elr_el1
+ mrs x1, spsr_el1
+ store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+ mov x0, sp
+
+ /*
+ * Unmask FIQ, Serror, and debug exceptions since we have nothing
+ * left in sp_el1. Note that the SVC handler is excepted to
+ * re-enable IRQs by itself.
+ */
+ msr daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
+
+ /* Call the handler */
+ bl tee_svc_handler
+
+ /* Mask all maskable exceptions since we're switching back to sp_el1 */
+ msr daifset, #DAIFBIT_ALL
+
+ /*
+	 * Save the kernel sp we had at the beginning of this function.
+	 * This matters when this TA has called another TA, because
+ * __thread_enter_user_mode() also saves the stack pointer in this
+ * field.
+ */
+ msr spsel, #1
+ get_thread_ctx sp, 0, 1, 2
+ msr spsel, #0
+ add x1, sp, #THREAD_SVC_REG_SIZE
+ str x1, [x0, #THREAD_CTX_KERN_SP]
+
+	/* Restore registers to the required state and return */
+ load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+ msr elr_el1, x0
+ msr spsr_el1, x1
+ load_xregs sp, THREAD_SVC_REG_X0, 0, 14
+ mov x30, sp
+ ldr x0, [x30, #THREAD_SVC_REG_SP_EL0]
+ mov sp, x0
+	ldr	x0, [x30, #THREAD_SVC_REG_X0]
+ ldr x30, [x30, #THREAD_SVC_REG_X30]
+
+ eret
+END_FUNC el0_svc
+
+LOCAL_FUNC el1_sync_abort , :
+ mov x0, sp
+ msr spsel, #0
+ mov x3, sp /* Save original sp */
+
+ /*
+ * Update core local flags.
+ * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
+ */
+ ldr w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_ABORT
+ tbnz w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
+ .Lsel_tmp_sp
+
+ /* Select abort stack */
+ ldr x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+ b .Lset_sp
+
+.Lsel_tmp_sp:
+ /* Select tmp stack */
+ ldr x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ orr w1, w1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
+
+.Lset_sp:
+ mov sp, x2
+ str w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+
+ /*
+ * Save state on stack
+ */
+ sub sp, sp, #THREAD_ABT_REGS_SIZE
+ mrs x2, spsr_el1
+ /* Store spsr, sp_el0 */
+ stp x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+ /* Store original x0, x1 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+ stp x2, x3, [sp, #THREAD_ABT_REG_X0]
+ /* Store original x2, x3 and x4 to x29 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+ store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+ /* Store x30, elr_el1 */
+ mrs x0, elr_el1
+ stp x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+ /*
+ * Call handler
+ */
+ mov x0, #0
+ mov x1, sp
+ bl abort_handler
+
+ /*
+ * Restore state from stack
+ */
+ /* Load x30, elr_el1 */
+ ldp x30, x0, [sp, #THREAD_ABT_REG_X30]
+ msr elr_el1, x0
+ /* Load x0 to x29 */
+ load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ /* Save x0 to x3 in CORE_LOCAL */
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ /* Restore spsr_el1 and sp_el0 */
+ mrs x3, sp_el0
+ ldp x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+ msr spsr_el1, x0
+ msr sp_el0, x1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0 to x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC el1_sync_abort
+
+ /* sp_el0 in x3 */
+LOCAL_FUNC el0_sync_abort , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_ABORT
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /*
+ * Save state on stack
+ */
+
+ /* load abt_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+ /* Keep pointer to initial record in x0 */
+ mov x0, sp
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+ sub sp, sp, #THREAD_ABT_REGS_SIZE
+ mrs x2, spsr_el1
+ /* Store spsr, sp_el0 */
+ stp x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+ /* Store original x0, x1 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+ stp x2, x3, [sp, #THREAD_ABT_REG_X0]
+ /* Store original x2, x3 and x4 to x29 */
+ ldp x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+ store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+ /* Store x30, elr_el1 */
+ mrs x0, elr_el1
+ stp x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+ /*
+ * Call handler
+ */
+ mov x0, #0
+ mov x1, sp
+ bl abort_handler
+
+ /*
+ * Restore state from stack
+ */
+
+ /* Load x30, elr_el1 */
+ ldp x30, x0, [sp, #THREAD_ABT_REG_X30]
+ msr elr_el1, x0
+ /* Load x0 to x29 */
+ load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ /* Save x0 to x3 in EL1_REC */
+ store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+ /* Restore spsr_el1 and sp_el0 */
+ mrs x3, sp_el0
+ ldp x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+ msr spsr_el1, x0
+ msr sp_el0, x1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0 to x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC el0_sync_abort
+
+LOCAL_FUNC elx_irq , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_TMP
+ orr w1, w1, #THREAD_CLF_IRQ
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* get pointer to current thread context in x0 */
+ get_thread_ctx sp, 0, 1, 2
+ /* Keep original SP_EL0 */
+ mrs x2, sp_el0
+
+ /* Store original sp_el0 */
+ str x2, [x0, #THREAD_CTX_REGS_SP]
+ /* store x4..x30 */
+ store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
+ /* Load original x0..x3 into x10..x13 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
+ /* Save original x0..x3 */
+ store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
+
+ /* load tmp_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+
+ /*
+ * Mark current thread as suspended
+ */
+ mov w0, #THREAD_FLAGS_EXIT_ON_IRQ
+ mrs x1, spsr_el1
+ mrs x2, elr_el1
+ bl thread_state_suspend
+ mov w4, w0 /* Supply thread index */
+
+ /* Update core local flags */
+ /* Switch to SP_EL1 */
+ msr spsel, #1
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ msr spsel, #0
+
+ /*
+ * Note that we're exiting with SP_EL0 selected since the entry
+	 * functions expect to have SP_EL0 selected with the tmp stack
+ * set.
+ */
+
+ ldr w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+ ldr w1, =OPTEE_SMC_RETURN_RPC_IRQ
+ mov w2, #0
+ mov w3, #0
+ /* w4 is already filled in above */
+ smc #0
+ b . /* SMC should not return */
+END_FUNC elx_irq
+
+/*
+ * This struct is never used from C; it's only here to visualize the
+ * layout.
+ *
+ * struct elx_fiq_rec {
+ * uint64_t x[19 - 4]; x4..x18
+ * uint64_t lr;
+ * uint64_t sp_el0;
+ * };
+ */
+#define ELX_FIQ_REC_X(x) (8 * ((x) - 4))
+#define ELX_FIQ_REC_LR (8 + ELX_FIQ_REC_X(19))
+#define ELX_FIQ_REC_SP_EL0 (8 + ELX_FIQ_REC_LR)
+#define ELX_FIQ_REC_SIZE (8 + ELX_FIQ_REC_SP_EL0)
+
+LOCAL_FUNC elx_fiq , :
+ /*
+ * Update core local flags
+ */
+ ldr w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsl w1, w1, #THREAD_CLF_SAVED_SHIFT
+ orr w1, w1, #THREAD_CLF_FIQ
+ orr w1, w1, #THREAD_CLF_TMP
+ str w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* load tmp_stack_va_end */
+ ldr x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+ /* Keep original SP_EL0 */
+ mrs x2, sp_el0
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+ mov sp, x1
+
+ /*
+ * Save registers on stack that can be corrupted by a call to
+ * a C function
+ */
+ /* Make room for struct elx_fiq_rec */
+ sub sp, sp, #ELX_FIQ_REC_SIZE
+ /* Store x4..x18 */
+ store_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+ /* Store lr and original sp_el0 */
+ stp x30, x2, [sp, #ELX_FIQ_REC_LR]
+
+ bl thread_check_canaries
+ adr x16, thread_fiq_handler_ptr
+ ldr x16, [x16]
+ blr x16
+
+ /*
+ * Restore registers
+ */
+ /* Restore x4..x18 */
+ load_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+ /* Load lr and original sp_el0 */
+ ldp x30, x2, [sp, #ELX_FIQ_REC_LR]
+ /* Restore SP_El0 */
+ mov sp, x2
+ /* Switch back to SP_EL1 */
+ msr spsel, #1
+
+ /* Update core local flags */
+ ldr w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+ lsr w0, w0, #THREAD_CLF_SAVED_SHIFT
+ str w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+ /* Restore x0..x3 */
+ load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+ /* Return from exception */
+ eret
+END_FUNC elx_fiq
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
new file mode 100644
index 0000000..3d87c88
--- /dev/null
+++ b/core/arch/arm/kernel/thread_private.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_PRIVATE_H
+#define THREAD_PRIVATE_H
+
+#ifndef ASM
+
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <kernel/vfp.h>
+#include <kernel/mutex.h>
+#include <kernel/thread.h>
+
+enum thread_state {
+ THREAD_STATE_FREE,
+ THREAD_STATE_SUSPENDED,
+ THREAD_STATE_ACTIVE,
+};
+
+#ifdef ARM32
+struct thread_ctx_regs {
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t pc;
+ uint32_t cpsr;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+struct thread_ctx_regs {
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t cpsr;
+ uint64_t x[31];
+};
+#endif /*ARM64*/
+
+#ifdef ARM64
+struct thread_user_mode_rec {
+ uint64_t exit_status0_ptr;
+ uint64_t exit_status1_ptr;
+ uint64_t x[31 - 19]; /* x19..x30 */
+};
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+struct thread_vfp_state {
+ bool ns_saved;
+ bool sec_saved;
+ bool sec_lazy_saved;
+ struct vfp_state ns;
+ struct vfp_state sec;
+ struct thread_user_vfp_state *uvfp;
+};
+
+#endif /*CFG_WITH_VFP*/
+
+struct thread_ctx {
+ struct thread_ctx_regs regs;
+ enum thread_state state;
+ vaddr_t stack_va_end;
+ uint32_t hyp_clnt_id;
+ uint32_t flags;
+ struct core_mmu_user_map user_map;
+ bool have_user_map;
+#ifdef ARM64
+ vaddr_t kern_sp; /* Saved kernel SP during user TA execution */
+#endif
+#ifdef CFG_WITH_VFP
+ struct thread_vfp_state vfp_state;
+#endif
+ void *rpc_arg;
+ uint64_t rpc_carg;
+ struct mutex_head mutexes;
+ struct thread_specific_data tsd;
+};
+
+#ifdef ARM64
+/*
+ * struct thread_core_local needs to have alignment suitable for a stack
+ * pointer since SP_EL1 points to this
+ */
+#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
+#else
+#define THREAD_CORE_LOCAL_ALIGNED
+#endif
+
+struct thread_core_local {
+ vaddr_t tmp_stack_va_end;
+ int curr_thread;
+#ifdef ARM64
+ uint32_t flags;
+ vaddr_t abt_stack_va_end;
+ uint64_t x[4];
+#endif
+#ifdef CFG_TEE_CORE_DEBUG
+ unsigned int locked_count; /* Number of spinlocks held */
+#endif
+} THREAD_CORE_LOCAL_ALIGNED;
+
+#endif /*ASM*/
+
+#ifdef ARM64
+#ifdef CFG_WITH_VFP
+#define THREAD_VFP_STATE_SIZE \
+ (16 + (16 * 32 + 16) * 2 + 16)
+#else
+#define THREAD_VFP_STATE_SIZE 0
+#endif
+
+/* Describes the flags field of struct thread_core_local */
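+/*
+ * On exception entry the current flags are shifted up by
+ * THREAD_CLF_SAVED_SHIFT to make room for the flags describing the new
+ * (nested) context; they are shifted back down on the way out (see for
+ * instance el1_sync_abort and elx_fiq in thread_a64.S).
+ */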
+#define THREAD_CLF_SAVED_SHIFT 4
+#define THREAD_CLF_CURR_SHIFT 0
+#define THREAD_CLF_MASK 0xf
+#define THREAD_CLF_TMP_SHIFT 0
+#define THREAD_CLF_ABORT_SHIFT 1
+#define THREAD_CLF_IRQ_SHIFT 2
+#define THREAD_CLF_FIQ_SHIFT 3
+
+#define THREAD_CLF_TMP (1 << THREAD_CLF_TMP_SHIFT)
+#define THREAD_CLF_ABORT (1 << THREAD_CLF_ABORT_SHIFT)
+#define THREAD_CLF_IRQ (1 << THREAD_CLF_IRQ_SHIFT)
+#define THREAD_CLF_FIQ (1 << THREAD_CLF_FIQ_SHIFT)
+
+#endif /*ARM64*/
+
+#ifndef ASM
+/*
+ * Initializes VBAR for the current CPU (called by thread_init_per_cpu())
+ */
+void thread_init_vbar(void);
+
+/* Handles a stdcall, r0-r7 holds the parameters */
+void thread_std_smc_entry(void);
+
+struct thread_core_local *thread_get_core_local(void);
+
+/*
+ * Resumes execution of currently active thread by restoring context and
+ * jumping to the instruction where to continue execution.
+ *
+ * Arguments supplied by non-secure world will be copied into the saved
+ * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
+ * in the flags field in the thread context.
+ */
+void thread_resume(struct thread_ctx_regs *regs);
+
+uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long user_func, unsigned long spsr,
+ uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * Private functions made available for thread_asm.S
+ */
+
+/* Returns the temp stack for current CPU */
+void *thread_get_tmp_sp(void);
+
+/*
+ * Marks the current thread as suspended and updates the flags for the
+ * thread context (see thread_resume() for how the flags are used).
+ * Returns the thread index of the thread that was suspended.
+ */
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
+
+/*
+ * Marks the current thread as free.
+ */
+void thread_state_free(void);
+
+/* Returns a pointer to the saved registers in current thread context. */
+struct thread_ctx_regs *thread_get_ctx_regs(void);
+
+#ifdef ARM32
+/* Sets sp for abort mode */
+void thread_set_abt_sp(vaddr_t sp);
+
+/* Sets sp for irq mode */
+void thread_set_irq_sp(vaddr_t sp);
+
+/* Sets sp for fiq mode */
+void thread_set_fiq_sp(vaddr_t sp);
+#endif /*ARM32*/
+
+/* Handles a fast SMC by dispatching it to the registered fast SMC handler */
+void thread_handle_fast_smc(struct thread_smc_args *args);
+
+/* Handles a std SMC by dispatching it to the registered std SMC handler */
+void thread_handle_std_smc(struct thread_smc_args *args);
+
+/*
+ * Suspends the current thread and temporarily exits to non-secure world.
+ * This function returns later when non-secure world returns.
+ *
+ * The purpose of this function is to request services from non-secure
+ * world.
+ */
+#define THREAD_RPC_NUM_ARGS 6
+void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
+
+/* Checks stack canaries */
+void thread_check_canaries(void);
+
+void __thread_std_smc_entry(struct thread_smc_args *args);
+
+#endif /*ASM*/
+
+#endif /*THREAD_PRIVATE_H*/
diff --git a/core/arch/arm/kernel/trace_ext.c b/core/arch/arm/kernel/trace_ext.c
new file mode 100644
index 0000000..8b8454c
--- /dev/null
+++ b/core/arch/arm/kernel/trace_ext.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdbool.h>
+#include <trace.h>
+#include <console.h>
+#include <kernel/thread.h>
+
+const char trace_ext_prefix[] = "TEE-CORE";
+int trace_level = TRACE_LEVEL;
+
+void trace_ext_puts(const char *str)
+{
+ const char *p;
+
+ console_flush();
+
+ for (p = str; *p; p++)
+ console_putc(*p);
+
+ console_flush();
+}
+
+int trace_ext_get_thread_id(void)
+{
+ return thread_get_id_may_fail();
+}
diff --git a/core/arch/arm/kernel/tz_ssvce_pl310_a32.S b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
new file mode 100644
index 0000000..184e936
--- /dev/null
+++ b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+#define PL310_LOCKDOWN_NBREGS 8
+#define PL310_LOCKDOWN_SZREG 4
+
+#define PL310_8WAYS_MASK 0x00FF
+#define PL310_16WAYS_UPPERMASK 0xFF00
+
+/*
+ * void arm_cl2_lockallways(vaddr_t base)
+ *
+ * Lock all L2 cache ways for both data and instructions
+ */
+FUNC arm_cl2_lockallways , :
+UNWIND( .fnstart)
+ add r1, r0, #PL310_DCACHE_LOCKDOWN_BASE
+ ldr r2, [r0, #PL310_AUX_CTRL]
+ tst r2, #PL310_AUX_16WAY_BIT
+ mov r2, #PL310_8WAYS_MASK
+ orrne r2, #PL310_16WAYS_UPPERMASK
+ mov r0, #PL310_LOCKDOWN_NBREGS
+1: /* lock Dcache and Icache */
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ subs r0, r0, #1
+ bne 1b
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_lockallways
+
+/*
+ * Set sync operation mask according to the cache's way associativity.
+ * Preserve r0 = pl310 iomem base address
+ */
+.macro syncbyway_set_mask reg
+ ldr \reg, [r0, #PL310_AUX_CTRL]
+ tst \reg, #PL310_AUX_16WAY_BIT
+ mov \reg, #PL310_8WAYS_MASK
+ orrne \reg, \reg, #PL310_16WAYS_UPPERMASK
+.endm
+
+/*
+ * void arm_cl2_cleaninvbyway(vaddr_t base)
+ * clean & invalidate the whole L2 cache.
+ */
+FUNC arm_cl2_cleaninvbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_FLUSH_BY_WAY]
+
+ /* Wait for all cache ways to be cleaned and invalidated */
+loop_cli_way_done:
+ ldr r2, [r0, #PL310_FLUSH_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cli_way_done
+
+ /* Cache Sync */
+
+	/* Wait for any pending cache sync operation to complete */
+loop_cli_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_cli_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbyway
+
+/* void arm_cl2_invbyway(vaddr_t base) */
+FUNC arm_cl2_invbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_INV_BY_WAY]
+
+loop_inv_way_done:
+ ldr r2, [r0, #PL310_INV_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_inv_way_done
+
+loop_inv_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_inv_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbyway
+
+/* void arm_cl2_cleanbyway(vaddr_t base) */
+FUNC arm_cl2_cleanbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_CLEAN_BY_WAY]
+
+loop_cl_way_done:
+ ldr r2, [r0, #PL310_CLEAN_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cl_way_done
+
+loop_cl_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_cl_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbyway
+
+/*
+ * void _arm_cl2_xxxbypa(vaddr_t pl310_base, paddr_t start, paddr_t end,
+ * int pl310value);
+ * pl310value is one of PL310_CLEAN_BY_PA, PL310_INV_BY_PA or PL310_FLUSH_BY_PA
+ */
+LOCAL_FUNC _arm_cl2_xxxbypa , :
+UNWIND( .fnstart)
+ /* Align start address on PL310 line size */
+ and r1, #(~(PL310_LINE_SIZE - 1))
+
+ /*
+ * ARM ERRATA #764369
+	 * Undocumented SCU Diagnostic Control Register
+ */
+ /*
+ * NOTE:
+ * We're assuming that if mmu is enabled PL310_BASE and SCU_BASE
+ * still have the same relative offsets from each other.
+ */
+ sub r0, r0, #(PL310_BASE - SCU_BASE)
+ mov r12, #1
+ str r12, [r0, #SCU_ERRATA744369]
+ dsb
+ add r0, r0, #(PL310_BASE - SCU_BASE)
+
+loop_cl2_xxxbypa:
+ str r1, [r0, r3]
+
+loop_xxx_pa_done:
+ ldr r12, [r0, r3]
+ and r12, r12, r1
+ cmp r12, #0
+ bne loop_xxx_pa_done
+
+ add r1, r1, #PL310_LINE_SIZE
+ cmp r2, r1
+ bpl loop_cl2_xxxbypa
+
+loop_xxx_pa_sync:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync
+
+ mov r12, #1
+ str r12, [r0, #PL310_SYNC]
+
+loop_xxx_pa_sync_done:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC _arm_cl2_xxxbypa
+
+/*
+ * void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleanbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_CLEAN_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbypa
+
+/*
+ * void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_invbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_INV_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbypa
+
+/*
+ * void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean and invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleaninvbypa , :
+UNWIND( .fnstart)
+ mov r3, #PL310_FLUSH_BY_PA
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbypa
+
diff --git a/core/arch/arm/kernel/unwind_arm32.c b/core/arch/arm/kernel/unwind_arm32.c
new file mode 100644
index 0000000..7efe94b
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm32.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2015 Linaro Limited
+ * Copyright 2013-2014 Andrew Turner.
+ * Copyright 2013-2014 Ian Lepore.
+ * Copyright 2013-2014 Rui Paulo.
+ * Copyright 2013 Eitan Adler.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/misc.h>
+#include <kernel/unwind.h>
+#include <string.h>
+#include <trace.h>
+
+/* The register names */
+#define FP 11
+#define SP 13
+#define LR 14
+#define PC 15
+
+/*
+ * Definitions for the instruction interpreter.
+ *
+ * The ARM EABI specifies how to perform the frame unwinding in the
+ * Exception Handling ABI for the ARM Architecture document. To perform
+ * the unwind we need to know the initial frame pointer, stack pointer,
+ * link register and program counter. We then find the entry within the
+ * index table that points to the function the program counter is within.
+ * This gives us either a list of three instructions to process, a 31-bit
+ * relative offset to a table of instructions, or a value telling us
+ * we can't unwind any further.
+ *
+ * When we have the instructions to process we need to decode them
+ * following table 4 in section 9.3. This table describes a collection of
+ * bit patterns that encode the steps to take to restore the stack pointer
+ * and link register to the values they had at the start of the function.
+ */
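+
+/*
+ * Worked example (illustrative, not from the EHABI document): the unwind
+ * byte sequence 0x84 0x80 0xb0 decodes as "pop {r11, r14}" followed by
+ * "finish", i.e. restore the frame pointer and link register from the
+ * stack and stop unwinding.
+ */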
+
+/* A special case when we are unable to unwind past this function */
+#define EXIDX_CANTUNWIND 1
+
+/*
+ * Entry types.
+ * These are the only entry types that have been seen in the kernel.
+ */
+#define ENTRY_MASK 0xff000000
+#define ENTRY_ARM_SU16 0x80000000
+#define ENTRY_ARM_LU16 0x81000000
+
+/* Instruction masks. */
+#define INSN_VSP_MASK 0xc0
+#define INSN_VSP_SIZE_MASK 0x3f
+#define INSN_STD_MASK 0xf0
+#define INSN_STD_DATA_MASK 0x0f
+#define INSN_POP_TYPE_MASK 0x08
+#define INSN_POP_COUNT_MASK 0x07
+#define INSN_VSP_LARGE_INC_MASK 0xff
+
+/* Instruction definitions */
+#define INSN_VSP_INC 0x00
+#define INSN_VSP_DEC 0x40
+#define INSN_POP_MASKED 0x80
+#define INSN_VSP_REG 0x90
+#define INSN_POP_COUNT 0xa0
+#define INSN_FINISH 0xb0
+#define INSN_POP_REGS 0xb1
+#define INSN_VSP_LARGE_INC 0xb2
+
+/* An item in the exception index table */
+struct unwind_idx {
+ uint32_t offset;
+ uint32_t insn;
+};
+
+/*
+ * These are set in the linker script. Their addresses will be
+ * either the start or end of the exception table or index.
+ */
+extern struct unwind_idx __exidx_start;
+extern struct unwind_idx __exidx_end;
+
+/* Expand a 31-bit signed value to a 32-bit signed value */
+static int32_t expand_prel31(uint32_t prel31)
+{
+
+ return ((int32_t)(prel31 & 0x7fffffffu) << 1) / 2;
+}
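+
+/*
+ * For example (illustrative only): expand_prel31(0x00000001) == 1 and
+ * expand_prel31(0x7ffffffe) == -2; bit 30 of the prel31 value is the sign
+ * bit and is propagated to bit 31.
+ */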
+
+/*
+ * Perform a binary search of the index table to find the function
+ * with the largest address that doesn't exceed addr.
+ */
+static struct unwind_idx *find_index(uint32_t addr)
+{
+ vaddr_t idx_start, idx_end;
+ unsigned int min, mid, max;
+ struct unwind_idx *start;
+ struct unwind_idx *item;
+ int32_t prel31_addr;
+ uint32_t func_addr;
+
+ start = &__exidx_start;
+ idx_start = (vaddr_t)&__exidx_start;
+ idx_end = (vaddr_t)&__exidx_end;
+
+ min = 0;
+ max = (idx_end - idx_start) / sizeof(struct unwind_idx);
+
+ while (min != max) {
+ mid = min + (max - min + 1) / 2;
+
+ item = &start[mid];
+
+ prel31_addr = expand_prel31(item->offset);
+ func_addr = (uint32_t)&item->offset + prel31_addr;
+
+ if (func_addr <= addr) {
+ min = mid;
+ } else {
+ max = mid - 1;
+ }
+ }
+
+ return &start[min];
+}
+
+/* Reads the next byte from the instruction list */
+static uint8_t unwind_exec_read_byte(struct unwind_state *state)
+{
+ uint8_t insn;
+
+ /* Read the unwind instruction */
+ insn = (*state->insn) >> (state->byte * 8);
+
+ /* Update the location of the next instruction */
+ if (state->byte == 0) {
+ state->byte = 3;
+ state->insn++;
+ state->entries--;
+ } else
+ state->byte--;
+
+ return insn;
+}
+
+/* Executes the next instruction on the list */
+static bool unwind_exec_insn(struct unwind_state *state)
+{
+ unsigned int insn;
+ uint32_t *vsp = (uint32_t *)state->registers[SP];
+ int update_vsp = 0;
+
+ /* This should never happen */
+ if (state->entries == 0)
+ return false;
+
+ /* Read the next instruction */
+ insn = unwind_exec_read_byte(state);
+
+ if ((insn & INSN_VSP_MASK) == INSN_VSP_INC) {
+ state->registers[SP] += ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_VSP_MASK) == INSN_VSP_DEC) {
+ state->registers[SP] -= ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_MASKED) {
+ unsigned int mask, reg;
+
+ /* Load the mask */
+ mask = unwind_exec_read_byte(state);
+ mask |= (insn & INSN_STD_DATA_MASK) << 8;
+
+ /* We have a refuse to unwind instruction */
+ if (mask == 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 4; mask && reg < 16; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+
+				/* If we have updated SP, keep its value */
+ if (reg == SP)
+ update_vsp = 0;
+ }
+ }
+
+ } else if ((insn & INSN_STD_MASK) == INSN_VSP_REG &&
+ ((insn & INSN_STD_DATA_MASK) != 13) &&
+ ((insn & INSN_STD_DATA_MASK) != 15)) {
+ /* sp = register */
+ state->registers[SP] =
+ state->registers[insn & INSN_STD_DATA_MASK];
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_COUNT) {
+ unsigned int count, reg;
+
+ /* Read how many registers to load */
+ count = insn & INSN_POP_COUNT_MASK;
+
+ /* Update sp */
+ update_vsp = 1;
+
+ /* Pop the registers */
+ for (reg = 4; reg <= 4 + count; reg++) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+
+ /* Check if we are in the pop r14 version */
+ if ((insn & INSN_POP_TYPE_MASK) != 0) {
+ state->registers[14] = *vsp++;
+ }
+
+ } else if (insn == INSN_FINISH) {
+ /* Stop processing */
+ state->entries = 0;
+
+ } else if (insn == INSN_POP_REGS) {
+ unsigned int mask, reg;
+
+ mask = unwind_exec_read_byte(state);
+ if (mask == 0 || (mask & 0xf0) != 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 0; mask && reg < 4; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+ }
+
+ } else if ((insn & INSN_VSP_LARGE_INC_MASK) == INSN_VSP_LARGE_INC) {
+ unsigned int uleb128;
+
+ /* Read the increment value */
+ uleb128 = unwind_exec_read_byte(state);
+
+ state->registers[SP] += 0x204 + (uleb128 << 2);
+
+ } else {
+ /* We hit a new instruction that needs to be implemented */
+ DMSG("Unhandled instruction %.2x\n", insn);
+ return false;
+ }
+
+ if (update_vsp) {
+ state->registers[SP] = (uint32_t)vsp;
+ }
+
+ return true;
+}
+
+/* Performs the unwind of a function */
+static bool unwind_tab(struct unwind_state *state)
+{
+ uint32_t entry;
+
+ /* Set PC to a known value */
+ state->registers[PC] = 0;
+
+ /* Read the personality */
+ entry = *state->insn & ENTRY_MASK;
+
+ if (entry == ENTRY_ARM_SU16) {
+ state->byte = 2;
+ state->entries = 1;
+ } else if (entry == ENTRY_ARM_LU16) {
+ state->byte = 1;
+ state->entries = ((*state->insn >> 16) & 0xFF) + 1;
+ } else {
+ DMSG("Unknown entry: %x\n", entry);
+ return true;
+ }
+
+ while (state->entries > 0) {
+ if (!unwind_exec_insn(state))
+ return true;
+ }
+
+ /*
+	 * If the program counter was not updated, load it from the link register.
+ */
+ if (state->registers[PC] == 0) {
+ state->registers[PC] = state->registers[LR];
+
+ /*
+ * If the program counter changed, flag it in the update mask.
+ */
+ if (state->start_pc != state->registers[PC])
+ state->update_mask |= 1 << PC;
+ }
+
+ return false;
+}
+
+bool unwind_stack(struct unwind_state *state)
+{
+ struct unwind_idx *index;
+ bool finished;
+
+ /* Reset the mask of updated registers */
+ state->update_mask = 0;
+
+ /* The pc value is correct and will be overwritten, save it */
+ state->start_pc = state->registers[PC];
+
+ /* Find the item to run */
+ index = find_index(state->start_pc);
+
+ finished = false;
+ if (index->insn != EXIDX_CANTUNWIND) {
+ if (index->insn & (1U << 31)) {
+ /* The data is within the instruction */
+ state->insn = &index->insn;
+ } else {
+ /* A prel31 offset to the unwind table */
+ state->insn = (uint32_t *)
+ ((uintptr_t)&index->insn +
+ expand_prel31(index->insn));
+ }
+ /* Run the unwind function */
+ finished = unwind_tab(state);
+ }
+
+ /* This is the top of the stack, finish */
+ if (index->insn == EXIDX_CANTUNWIND)
+ finished = true;
+
+ return !finished;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(state.registers, 0, sizeof(state.registers));
+ /* r7: Thumb-style frame pointer */
+ state.registers[7] = read_r7();
+ /* r11: ARM-style frame pointer */
+ state.registers[FP] = read_fp();
+ state.registers[SP] = read_sp();
+ state.registers[LR] = read_lr();
+ state.registers[PC] = (uint32_t)print_stack;
+
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
+
+/*
+ * These functions are referenced but never used
+ */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void);
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
diff --git a/core/arch/arm/kernel/unwind_arm64.c b/core/arch/arm/kernel/unwind_arm64.c
new file mode 100644
index 0000000..10b70ef
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm64.c
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/unwind.h>
+#include <kernel/thread.h>
+#include <string.h>
+#include <trace.h>
+
+bool unwind_stack(struct unwind_state *frame)
+{
+ uint64_t fp;
+
+ fp = frame->fp;
+ if (!thread_addr_is_in_stack(fp))
+ return false;
+
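+	/*
+	 * Descriptive note: with the AAPCS64 frame record layout, the pair
+	 * {x29, x30} is stored at the address held in fp.
+	 */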
+ frame->sp = fp + 0x10;
+ /* FP to previous frame (X29) */
+ frame->fp = *(uint64_t *)(fp);
+ /* LR (X30) */
+ frame->pc = *(uint64_t *)(fp + 8) - 4;
+
+ return true;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = read_pc();
+ state.fp = read_fp();
+
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
new file mode 100644
index 0000000..a63fb22
--- /dev/null
+++ b/core/arch/arm/kernel/user_ta.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015-2017 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include <keep.h>
+#include <kernel/panic.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <kernel/user_ta.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg_supplicant.h>
+#include <signed_hdr.h>
+#include <stdlib.h>
+#include <ta_pub_key.h>
+#include <tee/tee_cryp_provider.h>
+#include <tee/tee_cryp_utl.h>
+#include <tee/tee_obj.h>
+#include <tee/tee_svc_cryp.h>
+#include <tee/tee_svc.h>
+#include <tee/tee_svc_storage.h>
+#include <tee/uuid.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <utee_defines.h>
+#include <util.h>
+
+#include "elf_load.h"
+#include "elf_common.h"
+
+#define STACK_ALIGNMENT (sizeof(long) * 2)
+
+static TEE_Result load_header(const struct shdr *signed_ta,
+ struct shdr **sec_shdr)
+{
+ size_t s;
+
+ if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
+ return TEE_ERROR_SECURITY;
+
+ s = SHDR_GET_SIZE(signed_ta);
+ if (!tee_vbuf_is_non_sec(signed_ta, s))
+ return TEE_ERROR_SECURITY;
+
+ /* Copy signed header into secure memory */
+ *sec_shdr = malloc(s);
+ if (!*sec_shdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ memcpy(*sec_shdr, signed_ta, s);
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result check_shdr(struct shdr *shdr)
+{
+ struct rsa_public_key key;
+ TEE_Result res;
+ uint32_t e = TEE_U32_TO_BIG_ENDIAN(ta_pub_key_exponent);
+ size_t hash_size;
+
+ if (shdr->magic != SHDR_MAGIC || shdr->img_type != SHDR_TA)
+ return TEE_ERROR_SECURITY;
+
+ if (TEE_ALG_GET_MAIN_ALG(shdr->algo) != TEE_MAIN_ALGO_RSA)
+ return TEE_ERROR_SECURITY;
+
+ res = tee_hash_get_digest_size(TEE_DIGEST_HASH_TO_ALGO(shdr->algo),
+ &hash_size);
+ if (res != TEE_SUCCESS)
+ return res;
+ if (hash_size != shdr->hash_size)
+ return TEE_ERROR_SECURITY;
+
+ if (!crypto_ops.acipher.alloc_rsa_public_key ||
+ !crypto_ops.acipher.free_rsa_public_key ||
+ !crypto_ops.acipher.rsassa_verify ||
+ !crypto_ops.bignum.bin2bn)
+ return TEE_ERROR_NOT_SUPPORTED;
+
+ res = crypto_ops.acipher.alloc_rsa_public_key(&key, shdr->sig_size);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = crypto_ops.bignum.bin2bn((uint8_t *)&e, sizeof(e), key.e);
+ if (res != TEE_SUCCESS)
+ goto out;
+ res = crypto_ops.bignum.bin2bn(ta_pub_key_modulus,
+ ta_pub_key_modulus_size, key.n);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = crypto_ops.acipher.rsassa_verify(shdr->algo, &key, -1,
+ SHDR_GET_HASH(shdr), shdr->hash_size,
+ SHDR_GET_SIG(shdr), shdr->sig_size);
+out:
+ crypto_ops.acipher.free_rsa_public_key(&key);
+ if (res != TEE_SUCCESS)
+ return TEE_ERROR_SECURITY;
+ return TEE_SUCCESS;
+}
+
+static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
+{
+ uint32_t mattr = 0;
+
+ if (init_attrs)
+ mattr = TEE_MATTR_PRW;
+ else {
+ if (flags & PF_X)
+ mattr |= TEE_MATTR_UX;
+ if (flags & PF_W)
+ mattr |= TEE_MATTR_UW;
+ if (flags & PF_R)
+ mattr |= TEE_MATTR_UR;
+ }
+
+ return mattr;
+}
+
+#ifdef CFG_PAGED_USER_TA
+static TEE_Result config_initial_paging(struct user_ta_ctx *utc)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ if (!tee_pager_add_uta_area(utc, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size))
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+
+static TEE_Result config_final_paging(struct user_ta_ctx *utc)
+{
+ size_t n;
+ uint32_t flags;
+
+ tee_pager_assign_uta_tables(utc);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ flags = utc->mmu->regions[n].attr &
+ (TEE_MATTR_PRW | TEE_MATTR_URWX);
+ if (!tee_pager_set_uta_area_attr(utc, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size,
+ flags))
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+#else /*!CFG_PAGED_USER_TA*/
+static TEE_Result config_initial_paging(struct user_ta_ctx *utc __unused)
+{
+ return TEE_SUCCESS;
+}
+
+static TEE_Result config_final_paging(struct user_ta_ctx *utc)
+{
+ void *va = (void *)utc->mmu->ta_private_vmem_start;
+ size_t vasize = utc->mmu->ta_private_vmem_end -
+ utc->mmu->ta_private_vmem_start;
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va, vasize);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va, vasize);
+ return TEE_SUCCESS;
+}
+#endif /*!CFG_PAGED_USER_TA*/
+
+static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
+ struct elf_load_state *elf_state, bool init_attrs)
+{
+ TEE_Result res;
+ uint32_t mattr;
+ size_t idx = 0;
+
+ tee_mmu_map_clear(utc);
+
+ /*
+ * Add stack segment
+ */
+ tee_mmu_map_stack(utc, utc->mobj_stack);
+
+ /*
+ * Add code segment
+ */
+ while (true) {
+ vaddr_t offs;
+ size_t size;
+ uint32_t flags;
+
+ res = elf_load_get_next_segment(elf_state, &idx, &offs, &size,
+ &flags);
+ if (res == TEE_ERROR_ITEM_NOT_FOUND)
+ break;
+ if (res != TEE_SUCCESS)
+ return res;
+
+ mattr = elf_flags_to_mattr(flags, init_attrs);
+ res = tee_mmu_map_add_segment(utc, utc->mobj_code, offs, size,
+ mattr);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ if (init_attrs)
+ return config_initial_paging(utc);
+ else
+ return config_final_paging(utc);
+}
+
+static struct mobj *alloc_ta_mem(size_t size)
+{
+#ifdef CFG_PAGED_USER_TA
+ return mobj_paged_alloc(size);
+#else
+ return mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr);
+#endif
+}
+
+static TEE_Result load_elf(struct user_ta_ctx *utc, struct shdr *shdr,
+ const struct shdr *nmem_shdr)
+{
+ TEE_Result res;
+ size_t hash_ctx_size;
+ void *hash_ctx = NULL;
+ uint32_t hash_algo;
+ uint8_t *nwdata = (uint8_t *)nmem_shdr + SHDR_GET_SIZE(shdr);
+ size_t nwdata_len = shdr->img_size;
+ void *digest = NULL;
+ struct elf_load_state *elf_state = NULL;
+ struct ta_head *ta_head;
+ void *p;
+ size_t vasize;
+
+ if (!tee_vbuf_is_non_sec(nwdata, nwdata_len))
+ return TEE_ERROR_SECURITY;
+
+ if (!crypto_ops.hash.get_ctx_size || !crypto_ops.hash.init ||
+ !crypto_ops.hash.update || !crypto_ops.hash.final) {
+ res = TEE_ERROR_NOT_IMPLEMENTED;
+ goto out;
+ }
+ hash_algo = TEE_DIGEST_HASH_TO_ALGO(shdr->algo);
+ res = crypto_ops.hash.get_ctx_size(hash_algo, &hash_ctx_size);
+ if (res != TEE_SUCCESS)
+ goto out;
+ hash_ctx = malloc(hash_ctx_size);
+ if (!hash_ctx) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+ res = crypto_ops.hash.init(hash_ctx, hash_algo);
+ if (res != TEE_SUCCESS)
+ goto out;
+ res = crypto_ops.hash.update(hash_ctx, hash_algo,
+ (uint8_t *)shdr, sizeof(struct shdr));
+ if (res != TEE_SUCCESS)
+ goto out;
+
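+	/*
+	 * Descriptive note: the elf_load_*() calls below extend this same
+	 * hash over the ELF image, so the final digest computed further down
+	 * can be compared against the hash carried in the signed header.
+	 */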
+ res = elf_load_init(hash_ctx, hash_algo, nwdata, nwdata_len,
+ &elf_state);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = elf_load_head(elf_state, sizeof(struct ta_head), &p, &vasize,
+ &utc->is_32bit);
+ if (res != TEE_SUCCESS)
+ goto out;
+ ta_head = p;
+
+ utc->mobj_code = alloc_ta_mem(vasize);
+ if (!utc->mobj_code) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+	/* Currently all TAs must execute from DDR */
+ if (!(ta_head->flags & TA_FLAG_EXEC_DDR)) {
+ res = TEE_ERROR_BAD_FORMAT;
+ goto out;
+ }
+ /* Temporary assignment to setup memory mapping */
+ utc->ctx.flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+
+	/* Ensure proper alignment of the stack */
+ utc->mobj_stack = alloc_ta_mem(ROUNDUP(ta_head->stack_size,
+ STACK_ALIGNMENT));
+ if (!utc->mobj_stack) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ /*
+ * Map physical memory into TA virtual memory
+ */
+
+ res = tee_mmu_init(utc);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = load_elf_segments(utc, elf_state, true /* init attrs */);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ tee_mmu_set_ctx(&utc->ctx);
+
+ res = elf_load_body(elf_state, tee_mmu_get_load_addr(&utc->ctx));
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ digest = malloc(shdr->hash_size);
+ if (!digest) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ res = crypto_ops.hash.final(hash_ctx, hash_algo, digest,
+ shdr->hash_size);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ if (memcmp(digest, SHDR_GET_HASH(shdr), shdr->hash_size) != 0) {
+ res = TEE_ERROR_SECURITY;
+ goto out;
+ }
+
+ /*
+ * Replace the init attributes with attributes used when the TA is
+ * running.
+ */
+ res = load_elf_segments(utc, elf_state, false /* final attrs */);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+out:
+ elf_load_final(elf_state);
+ free(digest);
+ free(hash_ctx);
+ return res;
+}
+
+/*-----------------------------------------------------------------------------
+ * Loads TA header and hashes.
+ * Verifies the TA signature.
+ * Returns context ptr and TEE_Result.
+ *---------------------------------------------------------------------------*/
+static TEE_Result ta_load(const TEE_UUID *uuid, const struct shdr *signed_ta,
+ struct tee_ta_ctx **ta_ctx)
+{
+ TEE_Result res;
+ /* man_flags: mandatory flags */
+ uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+ /* opt_flags: optional flags */
+ uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
+ TA_FLAG_MULTI_SESSION | TA_FLAG_UNSAFE_NW_PARAMS |
+ TA_FLAG_INSTANCE_KEEP_ALIVE | TA_FLAG_CACHE_MAINTENANCE;
+ struct user_ta_ctx *utc = NULL;
+ struct shdr *sec_shdr = NULL;
+ struct ta_head *ta_head;
+
+ res = load_header(signed_ta, &sec_shdr);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ res = check_shdr(sec_shdr);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ /*
+ * ------------------------------------------------------------------
+ * 2nd step: Register context
+ * Alloc and init the ta context structure, alloc physical/virtual
+ * memories to store/map the TA.
+ * ------------------------------------------------------------------
+ */
+
+ /*
+ * Register context
+ */
+
+ /* code below must be protected by mutex (multi-threaded) */
+ utc = calloc(1, sizeof(struct user_ta_ctx));
+ if (!utc) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+ TAILQ_INIT(&utc->open_sessions);
+ TAILQ_INIT(&utc->cryp_states);
+ TAILQ_INIT(&utc->objects);
+ TAILQ_INIT(&utc->storage_enums);
+#if defined(CFG_SE_API)
+ utc->se_service = NULL;
+#endif
+
+ res = load_elf(utc, sec_shdr, signed_ta);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+ utc->load_addr = tee_mmu_get_load_addr(&utc->ctx);
+ ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;
+
+ if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
+ res = TEE_ERROR_SECURITY;
+ goto error_return;
+ }
+
+ /* check input flags bitmask consistency and save flags */
+ if ((ta_head->flags & opt_flags) != ta_head->flags ||
+ (ta_head->flags & man_flags) != man_flags) {
+ EMSG("TA flag issue: flags=%x opt=%X man=%X",
+ ta_head->flags, opt_flags, man_flags);
+ res = TEE_ERROR_BAD_FORMAT;
+ goto error_return;
+ }
+
+ utc->ctx.flags = ta_head->flags;
+ utc->ctx.uuid = ta_head->uuid;
+ utc->entry_func = ta_head->entry.ptr64;
+
+ utc->ctx.ref_count = 1;
+
+ condvar_init(&utc->ctx.busy_cv);
+ TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link);
+ *ta_ctx = &utc->ctx;
+
+ DMSG("ELF load address 0x%x", utc->load_addr);
+
+ tee_mmu_set_ctx(NULL);
+ /* end thread protection (multi-threaded) */
+
+ free(sec_shdr);
+ return TEE_SUCCESS;
+
+error_return:
+ free(sec_shdr);
+ tee_mmu_set_ctx(NULL);
+ if (utc) {
+ pgt_flush_ctx(&utc->ctx);
+ tee_pager_rem_uta_areas(utc);
+ tee_mmu_final(utc);
+ mobj_free(utc->mobj_code);
+ mobj_free(utc->mobj_stack);
+ free(utc);
+ }
+ return res;
+}
+
+static void init_utee_param(struct utee_params *up,
+ const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
+{
+ size_t n;
+
+ up->types = p->types;
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uintptr_t a;
+ uintptr_t b;
+
+ switch (TEE_PARAM_TYPE_GET(p->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ a = (uintptr_t)va[n];
+ b = p->u[n].mem.size;
+ break;
+ case TEE_PARAM_TYPE_VALUE_INPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ a = p->u[n].val.a;
+ b = p->u[n].val.b;
+ break;
+ default:
+ a = 0;
+ b = 0;
+ break;
+ }
+ /* See comment for struct utee_params in utee_types.h */
+ up->vals[n * 2] = a;
+ up->vals[n * 2 + 1] = b;
+ }
+}
+
+static void update_from_utee_param(struct tee_ta_param *p,
+ const struct utee_params *up)
+{
+ size_t n;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(p->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ /* See comment for struct utee_params in utee_types.h */
+ p->u[n].mem.size = up->vals[n * 2 + 1];
+ break;
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ /* See comment for struct utee_params in utee_types.h */
+ p->u[n].val.a = up->vals[n * 2];
+ p->u[n].val.b = up->vals[n * 2 + 1];
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void clear_vfp_state(struct user_ta_ctx *utc __unused)
+{
+#ifdef CFG_WITH_VFP
+ thread_user_clear_vfp(&utc->vfp);
+#endif
+}
+
+static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
+ struct tee_ta_session *session,
+ enum utee_entry_func func, uint32_t cmd,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+ struct utee_params *usr_params;
+ uaddr_t usr_stack;
+ struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
+ TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
+ struct tee_ta_session *s __maybe_unused;
+ void *param_va[TEE_NUM_PARAMS] = { NULL };
+
+ if (!(utc->ctx.flags & TA_FLAG_EXEC_DDR))
+ panic("TA does not exec in DDR");
+
+ /* Map user space memory */
+ res = tee_mmu_map_param(utc, param, param_va);
+ if (res != TEE_SUCCESS)
+ goto cleanup_return;
+
+ /* Switch to user ctx */
+ tee_ta_push_current_session(session);
+
+ /* Make room for usr_params at top of stack */
+ usr_stack = (uaddr_t)utc->mmu->regions[0].va + utc->mobj_stack->size;
+ usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
+ usr_params = (struct utee_params *)usr_stack;
+ init_utee_param(usr_params, param, param_va);
+
+ res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session),
+ (vaddr_t)usr_params, cmd, usr_stack,
+ utc->entry_func, utc->is_32bit,
+ &utc->ctx.panicked, &utc->ctx.panic_code);
+
+ clear_vfp_state(utc);
+ /*
+	 * According to the GP spec, the origin should always be set to the
+	 * TA after TA execution.
+ */
+ serr = TEE_ORIGIN_TRUSTED_APP;
+
+ if (utc->ctx.panicked) {
+ DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
+ utc->ctx.panic_code);
+ serr = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_TARGET_DEAD;
+ }
+
+ /* Copy out value results */
+ update_from_utee_param(param, usr_params);
+
+ s = tee_ta_pop_current_session();
+ assert(s == session);
+cleanup_return:
+
+ /*
+ * Clear the cancel state now that the user TA has returned. The next
+ * time the TA will be invoked will be with a new operation and should
+ * not have an old cancellation pending.
+ */
+ session->cancel = false;
+
+ /*
+ * Can't update *err until now since it may point to an address
+ * mapped for the user mode TA.
+ */
+ *err = serr;
+
+ return res;
+}
+
+/*
+ * Load a TA via RPC with the UUID given by the input parameter uuid. The
+ * virtual address of the TA is received in the output parameter ta.
+ *
+ * Function is not thread safe
+ */
+static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
+ uint64_t *cookie_ta)
+{
+ TEE_Result res;
+ struct optee_msg_param params[2];
+ paddr_t phta = 0;
+ uint64_t cta = 0;
+
+
+ if (!uuid || !ta || !cookie_ta)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(params, 0, sizeof(params));
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ tee_uuid_to_octets((void *)&params[0].u.value, uuid);
+ params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ params[1].u.tmem.buf_ptr = 0;
+ params[1].u.tmem.size = 0;
+ params[1].u.tmem.shm_ref = 0;
+
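+	/*
+	 * First call: with buf_ptr == 0 the supplicant only reports the
+	 * required buffer size in params[1].u.tmem.size.
+	 */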
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ thread_rpc_alloc_payload(params[1].u.tmem.size, &phta, &cta);
+ if (!phta)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ *ta = phys_to_virt(phta, MEM_AREA_NSEC_SHM);
+ if (!*ta) {
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+ *cookie_ta = cta;
+
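+	/*
+	 * Second call: the supplicant copies the TA binary into the shared
+	 * memory buffer allocated above.
+	 */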
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ tee_uuid_to_octets((void *)&params[0].u.value, uuid);
+ params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ params[1].u.tmem.buf_ptr = phta;
+ params[1].u.tmem.shm_ref = cta;
+ /* Note that params[1].u.tmem.size is already assigned */
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
+out:
+ if (res != TEE_SUCCESS)
+ thread_rpc_free_payload(cta);
+ return res;
+}
+
+static TEE_Result init_session_with_signed_ta(const TEE_UUID *uuid,
+ const struct shdr *signed_ta,
+ struct tee_ta_session *s)
+{
+ TEE_Result res;
+
+ DMSG(" Load dynamic TA");
+ /* load and verify */
+ res = ta_load(uuid, signed_ta, &s->ctx);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ DMSG(" dyn TA : %pUl", (void *)&s->ctx->uuid);
+
+ return res;
+}
+
+static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s,
+ struct tee_ta_param *param, TEE_ErrorOrigin *eo)
+{
+ return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param);
+}
+
+static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s,
+ uint32_t cmd, struct tee_ta_param *param,
+ TEE_ErrorOrigin *eo)
+{
+ return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param);
+}
+
+static void user_ta_enter_close_session(struct tee_ta_session *s)
+{
+ TEE_ErrorOrigin eo;
+ struct tee_ta_param param = { 0 };
+
+ user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, &param);
+}
+
+static void user_ta_dump_state(struct tee_ta_ctx *ctx)
+{
+ struct user_ta_ctx *utc __maybe_unused = to_user_ta_ctx(ctx);
+ size_t n;
+
+ EMSG_RAW("- load addr : 0x%x ctx-idr: %d",
+ utc->load_addr, utc->context);
+ EMSG_RAW("- stack: 0x%" PRIxVA " %zu",
+ utc->mmu->regions[0].va, utc->mobj_stack->size);
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ paddr_t pa = 0;
+
+ if (utc->mmu->regions[n].mobj)
+ mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset, 0, &pa);
+
+ EMSG_RAW("sect %zu : va %#" PRIxVA " pa %#" PRIxPA " %#zx",
+ n, utc->mmu->regions[n].va, pa,
+ utc->mmu->regions[n].size);
+ }
+}
+KEEP_PAGER(user_ta_dump_state);
+
+static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
+{
+ struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
+
+ tee_pager_rem_uta_areas(utc);
+
+ /*
+ * Clean all traces of the TA, both RO and RW data.
+ * No L2 cache maintenance to avoid sync problems
+ */
+ if (ctx->flags & TA_FLAG_EXEC_DDR) {
+ void *va;
+
+ if (utc->mobj_code) {
+ va = mobj_get_va(utc->mobj_code, 0);
+ if (va) {
+ memset(va, 0, utc->mobj_code->size);
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ utc->mobj_code->size);
+ }
+ }
+
+ if (utc->mobj_stack) {
+ va = mobj_get_va(utc->mobj_stack, 0);
+ if (va) {
+ memset(va, 0, utc->mobj_stack->size);
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ utc->mobj_stack->size);
+ }
+ }
+ }
+
+ /*
+ * Close sessions opened by this TA
+ * Note that tee_ta_close_session() removes the item
+ * from the utc->open_sessions list.
+ */
+ while (!TAILQ_EMPTY(&utc->open_sessions)) {
+ tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
+ &utc->open_sessions, KERN_IDENTITY);
+ }
+
+ tee_mmu_final(utc);
+ mobj_free(utc->mobj_code);
+ mobj_free(utc->mobj_stack);
+
+ /* Free cryp states created by this TA */
+ tee_svc_cryp_free_states(utc);
+ /* Close cryp objects opened by this TA */
+ tee_obj_close_all(utc);
+	/* Free enums created by this TA */
+ tee_svc_storage_close_all_enum(utc);
+ free(utc);
+}
+
+static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
+{
+ return to_user_ta_ctx(ctx)->context;
+}
+
+static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
+ .enter_open_session = user_ta_enter_open_session,
+ .enter_invoke_cmd = user_ta_enter_invoke_cmd,
+ .enter_close_session = user_ta_enter_close_session,
+ .dump_state = user_ta_dump_state,
+ .destroy = user_ta_ctx_destroy,
+ .get_instance_id = user_ta_get_instance_id,
+};
+
+TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s)
+{
+ TEE_Result res;
+ struct shdr *ta = NULL;
+ uint64_t cookie_ta = 0;
+
+
+ /* Request TA from tee-supplicant */
+ res = rpc_load(uuid, &ta, &cookie_ta);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = init_session_with_signed_ta(uuid, ta, s);
+ /*
+ * Free normal world shared memory now that the TA either has been
+ * copied into secure memory or the TA failed to be initialized.
+ */
+ thread_rpc_free_payload(cookie_ta);
+
+ if (res == TEE_SUCCESS)
+ s->ctx->ops = &user_ta_ops;
+ return res;
+}
diff --git a/core/arch/arm/kernel/vfp.c b/core/arch/arm/kernel/vfp.c
new file mode 100644
index 0000000..9903642
--- /dev/null
+++ b/core/arch/arm/kernel/vfp.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/vfp.h>
+#include "vfp_private.h"
+
+#ifdef ARM32
+bool vfp_is_enabled(void)
+{
+ return !!(vfp_read_fpexc() & FPEXC_EN);
+}
+
+void vfp_enable(void)
+{
+ vfp_write_fpexc(vfp_read_fpexc() | FPEXC_EN);
+}
+
+void vfp_disable(void)
+{
+ vfp_write_fpexc(vfp_read_fpexc() & ~FPEXC_EN);
+}
+
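+/*
+ * Descriptive note on the lazy scheme: _init records FPEXC and disables VFP
+ * access, _final then saves the register bank only if VFP was enabled when
+ * _init ran, so the registers of a thread that never enabled VFP are never
+ * copied.
+ */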
+void vfp_lazy_save_state_init(struct vfp_state *state)
+{
+ uint32_t fpexc = vfp_read_fpexc();
+
+ state->fpexc = fpexc;
+ vfp_write_fpexc(fpexc & ~FPEXC_EN);
+}
+
+void vfp_lazy_save_state_final(struct vfp_state *state)
+{
+ if (state->fpexc & FPEXC_EN) {
+ uint32_t fpexc = vfp_read_fpexc();
+
+ assert(!(fpexc & FPEXC_EN));
+ vfp_write_fpexc(fpexc | FPEXC_EN);
+ state->fpscr = vfp_read_fpscr();
+ vfp_save_extension_regs(state->reg);
+ vfp_write_fpexc(fpexc);
+ }
+}
+
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state)
+{
+
+ if (full_state) {
+ /*
+ * Only restore VFP registers if they have been touched as they
+ * otherwise are intact.
+ */
+
+ /* FPEXC is restored to what's in state->fpexc below */
+ vfp_write_fpexc(vfp_read_fpexc() | FPEXC_EN);
+
+ vfp_write_fpscr(state->fpscr);
+ vfp_restore_extension_regs(state->reg);
+ }
+ vfp_write_fpexc(state->fpexc);
+}
+#endif /* ARM32 */
+
+#ifdef ARM64
+bool vfp_is_enabled(void)
+{
+ return (CPACR_EL1_FPEN(read_cpacr_el1()) & CPACR_EL1_FPEN_EL0EL1);
+}
+
+void vfp_enable(void)
+{
+ uint32_t val = read_cpacr_el1();
+
+ val |= (CPACR_EL1_FPEN_EL0EL1 << CPACR_EL1_FPEN_SHIFT);
+ write_cpacr_el1(val);
+ isb();
+}
+
+void vfp_disable(void)
+{
+ uint32_t val = read_cpacr_el1();
+
+ val &= ~(CPACR_EL1_FPEN_MASK << CPACR_EL1_FPEN_SHIFT);
+ write_cpacr_el1(val);
+ isb();
+}
+
+void vfp_lazy_save_state_init(struct vfp_state *state)
+{
+ state->cpacr_el1 = read_cpacr_el1();
+ vfp_disable();
+}
+
+void vfp_lazy_save_state_final(struct vfp_state *state)
+{
+ if ((CPACR_EL1_FPEN(state->cpacr_el1) & CPACR_EL1_FPEN_EL0EL1) ||
+ state->force_save) {
+ assert(!vfp_is_enabled());
+ vfp_enable();
+ state->fpcr = read_fpcr();
+ state->fpsr = read_fpsr();
+ vfp_save_extension_regs(state->reg);
+ vfp_disable();
+ }
+}
+
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state)
+{
+ if (full_state) {
+ /*
+ * Only restore VFP registers if they have been touched as they
+ * otherwise are intact.
+ */
+
+ /* CPACR_EL1 is restored to what's in state->cpacr_el1 below */
+ vfp_enable();
+ write_fpcr(state->fpcr);
+ write_fpsr(state->fpsr);
+ vfp_restore_extension_regs(state->reg);
+ }
+ write_cpacr_el1(state->cpacr_el1);
+ isb();
+}
+#endif /* ARM64 */
diff --git a/core/arch/arm/kernel/vfp_a32.S b/core/arch/arm/kernel/vfp_a32.S
new file mode 100644
index 0000000..6cc3e77
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_a32.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/unwind.h>
+
+ .section .text.vfp_asm
+
+/* void vfp_save_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
+FUNC vfp_save_extension_regs , :
+UNWIND( .fnstart)
+ vstm r0!, {d0-d15}
+ vstm r0, {d16-d31}
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_save_extension_regs
+
+/* void vfp_restore_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
+FUNC vfp_restore_extension_regs , :
+UNWIND( .fnstart)
+ vldm r0!, {d0-d15}
+ vldm r0, {d16-d31}
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_restore_extension_regs
+
+/* void vfp_write_fpexc(uint32_t fpexc) */
+FUNC vfp_write_fpexc , :
+UNWIND( .fnstart)
+ vmsr fpexc, r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_write_fpexc
+
+/* uint32_t vfp_read_fpexc(void) */
+FUNC vfp_read_fpexc , :
+UNWIND( .fnstart)
+ vmrs r0, fpexc
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_read_fpexc
+
+/* void vfp_write_fpscr(uint32_t fpscr) */
+FUNC vfp_write_fpscr , :
+UNWIND( .fnstart)
+ vmsr fpscr, r0
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_write_fpscr
+
+/* uint32_t vfp_read_fpscr(void) */
+FUNC vfp_read_fpscr , :
+UNWIND( .fnstart)
+ vmrs r0, fpscr
+ bx lr
+UNWIND( .fnend)
+END_FUNC vfp_read_fpscr
diff --git a/core/arch/arm/kernel/vfp_a64.S b/core/arch/arm/kernel/vfp_a64.S
new file mode 100644
index 0000000..53210c5
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_a64.S
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+
+ .section .text.vfp_asm
+
+/* void vfp_save_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]); */
+FUNC vfp_save_extension_regs , :
+ stp q0, q1, [x0, #16 * 0]
+ stp q2, q3, [x0, #16 * 2]
+ stp q4, q5, [x0, #16 * 4]
+ stp q6, q7, [x0, #16 * 6]
+ stp q8, q9, [x0, #16 * 8]
+ stp q10, q11, [x0, #16 * 10]
+ stp q12, q13, [x0, #16 * 12]
+ stp q14, q15, [x0, #16 * 14]
+ stp q16, q17, [x0, #16 * 16]
+ stp q18, q19, [x0, #16 * 18]
+ stp q20, q21, [x0, #16 * 20]
+ stp q22, q23, [x0, #16 * 22]
+ stp q24, q25, [x0, #16 * 24]
+ stp q26, q27, [x0, #16 * 26]
+ stp q28, q29, [x0, #16 * 28]
+ stp q30, q31, [x0, #16 * 30]
+ ret
+END_FUNC vfp_save_extension_regs
+
+/* void vfp_restore_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]); */
+FUNC vfp_restore_extension_regs , :
+ ldp q0, q1, [x0, #16 * 0]
+ ldp q2, q3, [x0, #16 * 2]
+ ldp q4, q5, [x0, #16 * 4]
+ ldp q6, q7, [x0, #16 * 6]
+ ldp q8, q9, [x0, #16 * 8]
+ ldp q10, q11, [x0, #16 * 10]
+ ldp q12, q13, [x0, #16 * 12]
+ ldp q14, q15, [x0, #16 * 14]
+ ldp q16, q17, [x0, #16 * 16]
+ ldp q18, q19, [x0, #16 * 18]
+ ldp q20, q21, [x0, #16 * 20]
+ ldp q22, q23, [x0, #16 * 22]
+ ldp q24, q25, [x0, #16 * 24]
+ ldp q26, q27, [x0, #16 * 26]
+ ldp q28, q29, [x0, #16 * 28]
+ ldp q30, q31, [x0, #16 * 30]
+ ret
+END_FUNC vfp_restore_extension_regs
diff --git a/core/arch/arm/kernel/vfp_private.h b/core/arch/arm/kernel/vfp_private.h
new file mode 100644
index 0000000..0c0ffba
--- /dev/null
+++ b/core/arch/arm/kernel/vfp_private.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VFP_PRIVATE
+#define VFP_PRIVATE
+
+#include <kernel/vfp.h>
+
+void vfp_save_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]);
+void vfp_restore_extension_regs(struct vfp_reg regs[VFP_NUM_REGS]);
+void vfp_clear_extension_regs(void);
+
+#ifdef ARM32
+
+#define FPEXC_EN (1 << 30)
+
+/*
+ * These functions can't be implemented in inline assembly when compiling
+ * for Thumb mode, so to keep things simple they are always implemented as
+ * ordinary functions in ARM assembly.
+ */
+void vfp_write_fpexc(uint32_t fpexc);
+uint32_t vfp_read_fpexc(void);
+void vfp_write_fpscr(uint32_t fpscr);
+uint32_t vfp_read_fpscr(void);
+
+#endif /* ARM32 */
+
+#endif /*VFP_PRIVATE*/
diff --git a/core/arch/arm/kernel/wait_queue.c b/core/arch/arm/kernel/wait_queue.c
new file mode 100644
index 0000000..a96e0fe
--- /dev/null
+++ b/core/arch/arm/kernel/wait_queue.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <types_ext.h>
+#include <tee_api_defines.h>
+#include <string.h>
+#include <optee_msg.h>
+#include <kernel/spinlock.h>
+#include <kernel/wait_queue.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+static unsigned wq_spin_lock;
+
+
+void wq_init(struct wait_queue *wq)
+{
+ *wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
+}
+
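+/*
+ * Descriptive note: sleeping and waking are delegated to normal world via
+ * OPTEE_MSG_RPC_CMD_WAIT_QUEUE RPCs; the secure side only maintains the
+ * list of waiters under wq_spin_lock.
+ */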
+static void wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
+ const char *fname, int lineno __maybe_unused)
+{
+ uint32_t ret;
+ struct optee_msg_param params;
+ const char *cmd_str __maybe_unused =
+ func == OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
+
+ if (fname)
+ DMSG("%s thread %u %p %s:%d", cmd_str, id,
+ sync_obj, fname, lineno);
+ else
+ DMSG("%s thread %u %p", cmd_str, id, sync_obj);
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params.u.value.a = func;
+ params.u.value.b = id;
+
+ ret = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_WAIT_QUEUE, 1, &params);
+ if (ret != TEE_SUCCESS)
+ DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
+}
+
+static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
+{
+ struct wait_queue_elem *wqe_iter;
+
+ /* Add elem to end of wait queue */
+ wqe_iter = SLIST_FIRST(wq);
+ if (wqe_iter) {
+ while (SLIST_NEXT(wqe_iter, link))
+ wqe_iter = SLIST_NEXT(wqe_iter, link);
+ SLIST_INSERT_AFTER(wqe_iter, wqe, link);
+ } else
+ SLIST_INSERT_HEAD(wq, wqe, link);
+}
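slist_add_tail() turns the singly linked SLIST into a FIFO by walking to the last element before inserting, since the list head only tracks the first element. Below is a minimal host-side sketch of the same pattern, assuming a POSIX-style <sys/queue.h>; the node type and values are made up for illustration and are not part of these sources.

#include <stdio.h>
#include <sys/queue.h>

struct node {
	int val;
	SLIST_ENTRY(node) link;
};

SLIST_HEAD(node_list, node);

/* Append to the end of the singly linked list, like slist_add_tail() */
static void list_add_tail(struct node_list *head, struct node *n)
{
	struct node *iter = SLIST_FIRST(head);

	if (!iter) {
		SLIST_INSERT_HEAD(head, n, link);
		return;
	}
	while (SLIST_NEXT(iter, link))
		iter = SLIST_NEXT(iter, link);
	SLIST_INSERT_AFTER(iter, n, link);
}

int main(void)
{
	struct node_list head = SLIST_HEAD_INITIALIZER(head);
	struct node n[3] = { { .val = 1 }, { .val = 2 }, { .val = 3 } };
	struct node *it;

	for (int i = 0; i < 3; i++)
		list_add_tail(&head, &n[i]);

	SLIST_FOREACH(it, &head, link)
		printf("%d\n", it->val);	/* prints 1 2 3: FIFO order */
	return 0;
}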
+
+void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ struct condvar *cv)
+{
+ uint32_t old_itr_status;
+
+ wqe->handle = thread_get_id();
+ wqe->done = false;
+ wqe->cv = cv;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ slist_add_tail(wq, wqe);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+}
+
+void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ const void *sync_obj, const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ unsigned done;
+
+ do {
+ wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
+ sync_obj, fname, lineno);
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ done = wqe->done;
+ if (done)
+ SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+ } while (!done);
+}
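The wait is split in two phases: wq_wait_init_condvar() registers the element (done = false) while the caller still holds its own lock, and wq_wait_final() then sleeps via RPC in a loop until a waker has marked the element done. A wake issued before the corresponding sleep is expected to be remembered by the normal-world wait-queue object, much like a semaphore, so the handshake has no lost-wakeup window. The following is a host-side model of that pattern using pthreads, with a semaphore standing in for the OPTEE_MSG_RPC_WAIT_QUEUE_* calls; it is purely illustrative and not OP-TEE code.

#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t sleep_sem;			/* stands in for the sleep/wake RPCs */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool registered;
static bool done;

static void *waiter(void *arg)
{
	bool d;

	/* Phase 1: register, cf. wq_wait_init_condvar() under wq_spin_lock */
	pthread_mutex_lock(&wq_lock);
	registered = true;
	done = false;
	pthread_mutex_unlock(&wq_lock);

	/* Phase 2: sleep until marked done, cf. wq_wait_final() */
	do {
		sem_wait(&sleep_sem);		/* "sleep" RPC */
		pthread_mutex_lock(&wq_lock);
		d = done;
		pthread_mutex_unlock(&wq_lock);
	} while (!d);

	printf("waiter woken\n");
	return arg;
}

static bool wake_one(void)
{
	bool do_wakeup = false;

	/* cf. wq_wake_one() under wq_spin_lock */
	pthread_mutex_lock(&wq_lock);
	if (registered && !done) {
		done = true;
		do_wakeup = true;
	}
	pthread_mutex_unlock(&wq_lock);

	if (do_wakeup)
		sem_post(&sleep_sem);		/* "wake" RPC */
	return do_wakeup;
}

int main(void)
{
	pthread_t t;

	sem_init(&sleep_sem, 0, 0);
	pthread_create(&t, NULL, waiter, NULL);
	while (!wake_one())		/* retry until the waiter registered */
		sched_yield();
	pthread_join(t, NULL);
	return 0;
}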
+
+void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
+ const char *fname, int lineno)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+ int handle = -1;
+ bool do_wakeup = false;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ SLIST_FOREACH(wqe, wq, link) {
+ if (!wqe->cv) {
+ do_wakeup = !wqe->done;
+ wqe->done = true;
+ handle = wqe->handle;
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ if (do_wakeup)
+ wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP, handle,
+ sync_obj, fname, lineno);
+}
+
+void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
+ bool only_one, const void *sync_obj __unused,
+ const char *fname, int lineno __maybe_unused)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+
+ if (!cv)
+ return;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ /*
+ * Find condvar waiter(s) and promote each to an active waiter.
+	 * This is a bit unfair to any other active waiters since a
+	 * condvar waiter was already added to the queue when it started
+	 * waiting for the condvar.
+ */
+ SLIST_FOREACH(wqe, wq, link) {
+ if (wqe->cv == cv) {
+ if (fname)
+ FMSG("promote thread %u %p %s:%d",
+ wqe->handle, (void *)cv->m, fname, lineno);
+ else
+ FMSG("promote thread %u %p",
+ wqe->handle, (void *)cv->m);
+
+ wqe->cv = NULL;
+ if (only_one)
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+}
+
+bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
+{
+ uint32_t old_itr_status;
+ struct wait_queue_elem *wqe;
+ bool rc = false;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ SLIST_FOREACH(wqe, wq, link) {
+ if (wqe->cv == cv) {
+ rc = true;
+ break;
+ }
+ }
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return rc;
+}
+
+bool wq_is_empty(struct wait_queue *wq)
+{
+ uint32_t old_itr_status;
+ bool ret;
+
+ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+ cpu_spin_lock(&wq_spin_lock);
+
+ ret = SLIST_EMPTY(wq);
+
+ cpu_spin_unlock(&wq_spin_lock);
+ thread_unmask_exceptions(old_itr_status);
+
+ return ret;
+}
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
new file mode 100644
index 0000000..62dda73
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu.c
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This core MMU code supports static section mapping (1 MByte) and finer
+ * mapping with 4 KByte pages.
+ * It should also allow the core to map/unmap regions (and translate va/pa)
+ * at run-time.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <kernel/tz_ssvce.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <platform_config.h>
+#include <stdlib.h>
+#include <trace.h>
+#include <util.h>
+
+#include "core_mmu_private.h"
+
+#define MAX_MMAP_REGIONS 10
+#define RES_VASPACE_SIZE (CORE_MMU_PGDIR_SIZE * 10)
+
+/*
+ * These variables are initialized before .bss is cleared. To avoid
+ * resetting them when .bss is cleared we're storing them in .data instead,
+ * even if they initially are zero.
+ */
+
+/* Default NSec shared memory allocated from NSec world */
+unsigned long default_nsec_shm_size __early_bss;
+unsigned long default_nsec_shm_paddr __early_bss;
+
+static struct tee_mmap_region
+ static_memory_map[MAX_MMAP_REGIONS + 1] __early_bss;
+static bool mem_map_inited __early_bss;
+
+static struct tee_mmap_region *map_tee_ram __early_bss;
+static struct tee_mmap_region *map_ta_ram __early_bss;
+static struct tee_mmap_region *map_nsec_shm __early_bss;
+
+/* Define the platform's memory layout. */
+struct memaccess_area {
+ paddr_t paddr;
+ size_t size;
+};
+#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
+
+static struct memaccess_area ddr[] = {
+ MEMACCESS_AREA(DRAM0_BASE, DRAM0_SIZE),
+#ifdef DRAM1_BASE
+ MEMACCESS_AREA(DRAM1_BASE, DRAM1_SIZE),
+#endif
+};
+
+static struct memaccess_area secure_only[] = {
+#ifdef TZSRAM_BASE
+ MEMACCESS_AREA(TZSRAM_BASE, TZSRAM_SIZE),
+#endif
+ MEMACCESS_AREA(TZDRAM_BASE, TZDRAM_SIZE),
+};
+
+static struct memaccess_area nsec_shared[] = {
+ MEMACCESS_AREA(CFG_SHMEM_START, CFG_SHMEM_SIZE),
+};
+
+register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
+register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
+register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
+#ifdef DEVICE0_PA_BASE
+register_phys_mem(DEVICE0_TYPE, DEVICE0_PA_BASE, DEVICE0_SIZE);
+#endif
+#ifdef DEVICE1_PA_BASE
+register_phys_mem(DEVICE1_TYPE, DEVICE1_PA_BASE, DEVICE1_SIZE);
+#endif
+#ifdef DEVICE2_PA_BASE
+register_phys_mem(DEVICE2_TYPE, DEVICE2_PA_BASE, DEVICE2_SIZE);
+#endif
+#ifdef DEVICE3_PA_BASE
+register_phys_mem(DEVICE3_TYPE, DEVICE3_PA_BASE, DEVICE3_SIZE);
+#endif
+#ifdef DEVICE4_PA_BASE
+register_phys_mem(DEVICE4_TYPE, DEVICE4_PA_BASE, DEVICE4_SIZE);
+#endif
+#ifdef DEVICE5_PA_BASE
+register_phys_mem(DEVICE5_TYPE, DEVICE5_PA_BASE, DEVICE5_SIZE);
+#endif
+#ifdef DEVICE6_PA_BASE
+register_phys_mem(DEVICE6_TYPE, DEVICE6_PA_BASE, DEVICE6_SIZE);
+#endif
+
+static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
+ paddr_t pa, size_t size)
+{
+ size_t n;
+
+ for (n = 0; n < alen; n++)
+ if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
+ return true;
+ return false;
+}
+#define pbuf_intersects(a, pa, size) \
+ _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
+
+static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
+ paddr_t pa, size_t size)
+{
+ size_t n;
+
+ for (n = 0; n < alen; n++)
+ if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
+ return true;
+ return false;
+}
+#define pbuf_is_inside(a, pa, size) \
+ _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
+
+static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa)
+{
+ if (!map)
+ return false;
+ return (pa >= map->pa && pa <= (map->pa + map->size - 1));
+}
+
+static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
+{
+ if (!map)
+ return false;
+ return (va >= map->va && va <= (map->va + map->size - 1));
+}
+
+/* check if target buffer fits in a core default map area */
+static bool pbuf_inside_map_area(unsigned long p, size_t l,
+ struct tee_mmap_region *map)
+{
+ return core_is_buffer_inside(p, l, map->pa, map->size);
+}
+
+static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
+{
+ struct tee_mmap_region *map;
+
+ for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++)
+ if (map->type == type)
+ return map;
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_type_and_pa(
+ enum teecore_memtypes type, paddr_t pa)
+{
+ struct tee_mmap_region *map;
+
+ for (map = static_memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ if (map->type != type)
+ continue;
+ if (pa_is_in_map(map, pa))
+ return map;
+ }
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_va(void *va)
+{
+ struct tee_mmap_region *map = static_memory_map;
+ unsigned long a = (unsigned long)va;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((a >= map->va) && (a <= (map->va - 1 + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
+{
+ struct tee_mmap_region *map = static_memory_map;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((pa >= map->pa) && (pa < (map->pa + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+extern const struct core_mmu_phys_mem __start_phys_mem_map_section;
+extern const struct core_mmu_phys_mem __end_phys_mem_map_section;
+
+static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
+ const struct core_mmu_phys_mem *mem, size_t *last)
+{
+ size_t n = 0;
+ paddr_t pa;
+ size_t size;
+
+ /*
+	 * When all entries have been added we'd like to have them in an
+	 * array sorted first on memory type and second on physical
+	 * address. If some ranges of memory of the same type overlap or
+	 * are adjacent to each other they are coalesced into one entry.
+	 * This makes it easier later when building the translation tables.
+ *
+ * Note that it's valid to have the same physical memory as several
+ * different memory types, for instance the same device memory
+ * mapped as both secure and non-secure. This will probably not
+ * happen often in practice.
+ */
+ DMSG("%s %d 0x%08" PRIxPA " size 0x%08zx",
+ mem->name, mem->type, mem->addr, mem->size);
+ while (true) {
+ if (n >= (num_elems - 1)) {
+ EMSG("Out of entries (%zu) in memory_map", num_elems);
+ panic();
+ }
+ if (n == *last)
+ break;
+ pa = memory_map[n].pa;
+ size = memory_map[n].size;
+ if (mem->addr >= pa && mem->addr <= (pa + (size - 1)) &&
+ mem->type == memory_map[n].type) {
+ DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr);
+			memory_map[n].pa = MIN(pa, mem->addr);
+			memory_map[n].size = MAX(pa + size,
+						 mem->addr + mem->size) -
+					     memory_map[n].pa;
+ return;
+ }
+ if (mem->type < memory_map[n].type ||
+ (mem->type == memory_map[n].type && mem->addr < pa))
+			break; /* found the spot where to insert this memory */
+ n++;
+ }
+
+ memmove(memory_map + n + 1, memory_map + n,
+ sizeof(struct tee_mmap_region) * (*last - n));
+ (*last)++;
+ memset(memory_map + n, 0, sizeof(memory_map[0]));
+ memory_map[n].type = mem->type;
+ memory_map[n].pa = mem->addr;
+ memory_map[n].size = mem->size;
+}
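add_phys_mem() keeps the array sorted by (type, physical address) and merges a new range into an existing entry of the same type when the ranges overlap. Below is a standalone sketch of the same sorted-insert-with-coalescing logic; the types and addresses are made up for illustration.

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 8

struct range {
	int type;
	unsigned long pa;
	unsigned long size;
};

static struct range map[MAX_ENTRIES];
static size_t last;

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static void add_range(int type, unsigned long pa, unsigned long size)
{
	size_t n = 0;

	while (n < last) {
		/* Coalesce with an overlapping entry of the same type */
		if (type == map[n].type && pa >= map[n].pa &&
		    pa <= map[n].pa + map[n].size - 1) {
			map[n].size = max_ul(map[n].pa + map[n].size,
					     pa + size) - map[n].pa;
			return;
		}
		/* Keep the array sorted by (type, pa) */
		if (type < map[n].type ||
		    (type == map[n].type && pa < map[n].pa))
			break;
		n++;
	}

	memmove(map + n + 1, map + n, sizeof(*map) * (last - n));
	map[n] = (struct range){ .type = type, .pa = pa, .size = size };
	last++;
}

int main(void)
{
	add_range(2, 0x40000000, 0x100000);
	add_range(1, 0x00100000, 0x200000);
	add_range(2, 0x40080000, 0x100000);	/* overlaps: merged into one */

	for (size_t n = 0; n < last; n++)
		printf("type %d pa 0x%lx size 0x%lx\n",
		       map[n].type, map[n].pa, map[n].size);
	return 0;
}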
+
+static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
+			 unsigned int type, size_t size, size_t *last)
+{
+ size_t n = 0;
+
+ DMSG("type %d size 0x%08zx", type, size);
+ while (true) {
+ if (n >= (num_elems - 1)) {
+ EMSG("Out of entries (%zu) in memory_map", num_elems);
+ panic();
+ }
+ if (n == *last)
+ break;
+ if (type < memory_map[n].type)
+ break;
+ n++;
+ }
+
+ memmove(memory_map + n + 1, memory_map + n,
+ sizeof(struct tee_mmap_region) * (*last - n));
+ (*last)++;
+ memset(memory_map + n, 0, sizeof(memory_map[0]));
+ memory_map[n].type = type;
+ memory_map[n].size = size;
+}
+
+uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
+{
+ const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+ TEE_MATTR_GLOBAL;
+ const uint32_t cached = TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT;
+ const uint32_t noncache = TEE_MATTR_CACHE_NONCACHE <<
+ TEE_MATTR_CACHE_SHIFT;
+
+ switch (t) {
+ case MEM_AREA_TEE_RAM:
+ return attr | TEE_MATTR_SECURE | TEE_MATTR_PX | cached;
+ case MEM_AREA_TA_RAM:
+ return attr | TEE_MATTR_SECURE | cached;
+ case MEM_AREA_NSEC_SHM:
+ return attr | cached;
+ case MEM_AREA_IO_NSEC:
+ return attr | noncache;
+ case MEM_AREA_IO_SEC:
+ return attr | TEE_MATTR_SECURE | noncache;
+ case MEM_AREA_RAM_NSEC:
+ return attr | cached;
+ case MEM_AREA_RAM_SEC:
+ return attr | TEE_MATTR_SECURE | cached;
+ case MEM_AREA_RES_VASPACE:
+ return 0;
+ default:
+ panic("invalid type");
+ }
+}
+
+static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
+{
+ const struct core_mmu_phys_mem *mem;
+ struct tee_mmap_region *map;
+ size_t last = 0;
+ vaddr_t va;
+ size_t n;
+
+ for (mem = &__start_phys_mem_map_section;
+ mem < &__end_phys_mem_map_section; mem++) {
+ struct core_mmu_phys_mem m = *mem;
+
+ if (m.type == MEM_AREA_IO_NSEC || m.type == MEM_AREA_IO_SEC) {
+ m.addr = ROUNDDOWN(m.addr, CORE_MMU_PGDIR_SIZE);
+ m.size = ROUNDUP(m.size + (mem->addr - m.addr),
+ CORE_MMU_PGDIR_SIZE);
+ }
+ add_phys_mem(memory_map, num_elems, &m, &last);
+ }
+
+ add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
+ RES_VASPACE_SIZE, &last);
+
+ memory_map[last].type = MEM_AREA_NOTYPE;
+
+ /*
+ * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
+ * SMALL_PAGE_SIZE if paging is enabled.
+ */
+ for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ paddr_t mask = map->pa | map->size;
+
+ if (!(mask & CORE_MMU_PGDIR_MASK))
+ map->region_size = CORE_MMU_PGDIR_SIZE;
+ else if (!(mask & SMALL_PAGE_MASK))
+ map->region_size = SMALL_PAGE_SIZE;
+ else
+ panic("Impossible memory alignment");
+ }
+
+	/*
+	 * The memory map is sorted first by type and then by address.
+	 * This puts TEE_RAM first and TA_RAM second.
+	 */
+ map = memory_map;
+ assert(map->type == MEM_AREA_TEE_RAM);
+ map->va = map->pa;
+#ifdef CFG_WITH_PAGER
+	map->region_size = SMALL_PAGE_SIZE;
+#endif
+ map->attr = core_mmu_type_to_attr(map->type);
+
+
+ if (core_mmu_place_tee_ram_at_top(map->pa)) {
+ va = map->va;
+ map++;
+ while (map->type != MEM_AREA_NOTYPE) {
+ map->attr = core_mmu_type_to_attr(map->type);
+ va -= map->size;
+ map->va = va;
+ map++;
+ }
+ /*
+ * The memory map should be sorted by virtual address
+ * when this function returns. As we're assigning va in
+		 * the opposite direction we need to reverse the list.
+ */
+ for (n = 0; n < last / 2; n++) {
+ struct tee_mmap_region r;
+
+ r = memory_map[last - n - 1];
+ memory_map[last - n - 1] = memory_map[n];
+ memory_map[n] = r;
+ }
+ } else {
+ va = ROUNDUP(map->va + map->size, CORE_MMU_PGDIR_SIZE);
+ map++;
+ while (map->type != MEM_AREA_NOTYPE) {
+ map->attr = core_mmu_type_to_attr(map->type);
+ map->va = va;
+ va += map->size;
+ map++;
+ }
+ }
+
+ for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
+ vaddr_t __maybe_unused vstart;
+
+ vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
+		DMSG("type %d va 0x%08" PRIxVA "..0x%08" PRIxVA
+ " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size %#zx",
+ map->type, vstart, vstart + map->size - 1,
+ (paddr_t)map->pa, (paddr_t)map->pa + map->size - 1,
+ map->size);
+ }
+}
+
+/*
+ * core_init_mmu_map - init tee core default memory mapping
+ *
+ * This routine sets the static default TEE core mapping.
+ *
+ * If an error happens, core_init_mmu_map() is expected to reset.
+ */
+void core_init_mmu_map(void)
+{
+ struct tee_mmap_region *map;
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
+ if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
+ secure_only[n].size))
+ panic("Invalid memory access config: sec/nsec");
+ }
+
+ if (!mem_map_inited)
+ init_mem_map(static_memory_map, ARRAY_SIZE(static_memory_map));
+
+ map = static_memory_map;
+ while (map->type != MEM_AREA_NOTYPE) {
+ switch (map->type) {
+ case MEM_AREA_TEE_RAM:
+ if (!pbuf_is_inside(secure_only, map->pa, map->size))
+ panic("TEE_RAM can't fit in secure_only");
+
+ map_tee_ram = map;
+ break;
+ case MEM_AREA_TA_RAM:
+ if (!pbuf_is_inside(secure_only, map->pa, map->size))
+ panic("TA_RAM can't fit in secure_only");
+ map_ta_ram = map;
+ break;
+ case MEM_AREA_NSEC_SHM:
+ if (!pbuf_is_inside(nsec_shared, map->pa, map->size))
+ panic("NS_SHM can't fit in nsec_shared");
+ map_nsec_shm = map;
+ break;
+ case MEM_AREA_IO_SEC:
+ case MEM_AREA_IO_NSEC:
+ case MEM_AREA_RAM_SEC:
+ case MEM_AREA_RAM_NSEC:
+ case MEM_AREA_RES_VASPACE:
+ break;
+ default:
+			EMSG("Unhandled memtype %d", map->type);
+ panic();
+ }
+ map++;
+ }
+
+ /* Check that we have the mandatory memory areas defined */
+ if (!map_tee_ram || !map_ta_ram || !map_nsec_shm)
+ panic("mandatory area(s) not found");
+
+ core_init_mmu_tables(static_memory_map);
+}
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void)
+{
+ if (!map_nsec_shm)
+ return false;
+ return map_nsec_shm->attr >> TEE_MATTR_CACHE_SHIFT ==
+ TEE_MATTR_CACHE_CACHED;
+}
+
+bool core_mmu_mattr_is_ok(uint32_t mattr)
+{
+ /*
+ * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
+ * core_mmu_v7.c:mattr_to_texcb
+ */
+
+ switch ((mattr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
+ case TEE_MATTR_CACHE_NONCACHE:
+ case TEE_MATTR_CACHE_CACHED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Test attributes of target physical buffer.
+ *
+ * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
+ */
+bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
+{
+ struct tee_mmap_region *map;
+
+	/* An empty buffer complies with anything */
+ if (len == 0)
+ return true;
+
+ switch (attr) {
+ case CORE_MEM_SEC:
+ return pbuf_is_inside(secure_only, pbuf, len);
+ case CORE_MEM_NON_SEC:
+ return pbuf_is_inside(nsec_shared, pbuf, len);
+ case CORE_MEM_TEE_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_tee_ram);
+ case CORE_MEM_TA_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_ta_ram);
+ case CORE_MEM_NSEC_SHM:
+ return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
+ case CORE_MEM_EXTRAM:
+ return pbuf_is_inside(ddr, pbuf, len);
+ case CORE_MEM_CACHED:
+ map = find_map_by_pa(pbuf);
+ if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
+ return false;
+ return map->attr >> TEE_MATTR_CACHE_SHIFT ==
+ TEE_MATTR_CACHE_CACHED;
+ default:
+ return false;
+ }
+}
+
+/* test attributes of target virtual buffer (in core mapping) */
+bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
+{
+ paddr_t p;
+
+	/* An empty buffer complies with anything */
+ if (len == 0)
+ return true;
+
+ p = virt_to_phys((void *)vbuf);
+ if (!p)
+ return false;
+
+ return core_pbuf_is(attr, p, len);
+}
+
+
+/* core_va2pa_helper - teecore exported service */
+int core_va2pa_helper(void *va, paddr_t *pa)
+{
+ struct tee_mmap_region *map;
+
+ map = find_map_by_va(va);
+ if (!va_is_in_map(map, (vaddr_t)va))
+ return -1;
+
+ *pa = ((uintptr_t)va & (map->region_size - 1)) |
+ ((map->pa + (uintptr_t)va - map->va) & ~(map->region_size - 1));
+ return 0;
+}
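Within a mapped region the virtual and physical addresses share the same offset inside a region_size-aligned block, so the translation keeps the low bits of va and takes the high bits from the region's physical base plus the block offset. A worked example of the same arithmetic with made-up region values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t region_va = 0xe0000000;	/* map->va   (illustrative) */
	uintptr_t region_pa = 0x80000000;	/* map->pa   (illustrative) */
	uintptr_t region_size = 0x00200000;	/* 2 MiB region granule */
	uintptr_t va = 0xe0123456;

	uintptr_t pa = (va & (region_size - 1)) |
		       ((region_pa + va - region_va) & ~(region_size - 1));

	printf("va 0x%lx -> pa 0x%lx\n", (unsigned long)va, (unsigned long)pa);
	/* Prints pa 0x80123456: low 21 bits copied from va, high bits from
	 * the region's physical base plus the block offset. */
	return 0;
}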
+
+static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa)
+{
+ if (!pa_is_in_map(map, pa))
+ return NULL;
+ return (void *)((pa & (map->region_size - 1)) |
+ ((map->va + pa - map->pa) & ~((vaddr_t)map->region_size - 1)));
+}
+
+/*
+ * Return the virtual address range [*s, *e) of the memory area of the
+ * given type, or set both to 0 if no such area is defined.
+ */
+void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
+{
+ struct tee_mmap_region *map = find_map_by_type(type);
+
+ if (map) {
+ *s = map->va;
+ *e = map->va + map->size;
+ } else {
+ *s = 0;
+ *e = 0;
+ }
+}
+
+enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
+{
+ struct tee_mmap_region *map = find_map_by_pa(pa);
+
+ if (!map)
+ return MEM_AREA_NOTYPE;
+ return map->type;
+}
+
+int core_tlb_maintenance(int op, unsigned int a)
+{
+ /*
+ * We're doing TLB invalidation because we've changed mapping.
+ * The dsb() makes sure that written data is visible.
+ */
+ dsb();
+
+ switch (op) {
+ case TLBINV_UNIFIEDTLB:
+ secure_mmu_unifiedtlbinvall();
+ break;
+ case TLBINV_CURRENT_ASID:
+ secure_mmu_unifiedtlbinv_curasid();
+ break;
+ case TLBINV_BY_ASID:
+ secure_mmu_unifiedtlbinv_byasid(a);
+ break;
+ case TLBINV_BY_MVA:
+ EMSG("TLB_INV_SECURE_MVA is not yet supported!");
+ while (1)
+ ;
+ secure_mmu_unifiedtlbinvbymva(a);
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+unsigned int cache_maintenance_l1(int op, void *va, size_t len)
+{
+ switch (op) {
+ case DCACHE_CLEAN:
+ arm_cl1_d_cleanbysetway();
+ break;
+ case DCACHE_AREA_CLEAN:
+ if (len)
+ arm_cl1_d_cleanbyva(va, (char *)va + len - 1);
+ break;
+ case DCACHE_INVALIDATE:
+ arm_cl1_d_invbysetway();
+ break;
+ case DCACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl1_d_invbyva(va, (char *)va + len - 1);
+ break;
+ case ICACHE_INVALIDATE:
+ arm_cl1_i_inv_all();
+ break;
+ case ICACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl1_i_inv(va, (char *)va + len - 1);
+ break;
+ case WRITE_BUFFER_DRAIN:
+ DMSG("unsupported operation 0x%X (WRITE_BUFFER_DRAIN)",
+ (unsigned int)op);
+ return -1;
+ case DCACHE_CLEAN_INV:
+ arm_cl1_d_cleaninvbysetway();
+ break;
+ case DCACHE_AREA_CLEAN_INV:
+ if (len)
+ arm_cl1_d_cleaninvbyva(va, (char *)va + len - 1);
+ break;
+ default:
+ return TEE_ERROR_NOT_IMPLEMENTED;
+ }
+ return TEE_SUCCESS;
+}
+
+#ifdef CFG_PL310
+unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
+{
+ unsigned int ret = TEE_SUCCESS;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+ tee_l2cc_mutex_lock();
+ switch (op) {
+ case L2CACHE_INVALIDATE:
+ arm_cl2_invbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_INVALIDATE:
+ if (len)
+ arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ case L2CACHE_CLEAN:
+ arm_cl2_cleanbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_CLEAN:
+ if (len)
+ arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ case L2CACHE_CLEAN_INV:
+ arm_cl2_cleaninvbyway(pl310_base());
+ break;
+ case L2CACHE_AREA_CLEAN_INV:
+ if (len)
+ arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
+ break;
+ default:
+ ret = TEE_ERROR_NOT_IMPLEMENTED;
+ }
+
+ tee_l2cc_mutex_unlock();
+ thread_set_exceptions(exceptions);
+ return ret;
+}
+#endif /*CFG_PL310*/
+
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr)
+{
+ assert(idx < tbl_info->num_entries);
+ core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
+ idx, pa, attr);
+}
+
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ assert(idx < tbl_info->num_entries);
+ core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
+ idx, pa, attr);
+}
+
+static void set_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned end;
+ unsigned idx;
+ paddr_t pa;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+ pa = region->pa;
+
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, pa, region->attr);
+ idx++;
+ pa += 1 << tbl_info->shift;
+ }
+}
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+static void set_pg_region(struct core_mmu_table_info *dir_info,
+ struct tee_ta_region *region, struct pgt **pgt,
+ struct core_mmu_table_info *pg_info)
+{
+ struct tee_mmap_region r = {
+ .va = region->va,
+ .size = region->size,
+ .attr = region->attr,
+ };
+ vaddr_t end = r.va + r.size;
+ uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
+
+ while (r.va < end) {
+ if (!pg_info->table ||
+ r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
+ /*
+ * We're assigning a new translation table.
+ */
+ unsigned int idx;
+
+			assert(*pgt); /* We should have allocated enough */
+
+ /* Virtual addresses must grow */
+ assert(r.va > pg_info->va_base);
+
+ idx = core_mmu_va2idx(dir_info, r.va);
+ pg_info->table = (*pgt)->tbl;
+ pg_info->va_base = core_mmu_idx2va(dir_info, idx);
+#ifdef CFG_PAGED_USER_TA
+ assert((*pgt)->vabase == pg_info->va_base);
+#endif
+ *pgt = SLIST_NEXT(*pgt, link);
+
+ core_mmu_set_entry(dir_info, idx,
+ virt_to_phys(pg_info->table),
+ pgt_attr);
+ }
+
+ r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
+ end - r.va);
+ if (!mobj_is_paged(region->mobj)) {
+ size_t granule = BIT(pg_info->shift);
+ size_t offset = r.va - region->va + region->offset;
+
+ if (mobj_get_pa(region->mobj, offset, granule,
+ &r.pa) != TEE_SUCCESS)
+ panic("Failed to get PA of unpaged mobj");
+ set_region(pg_info, &r);
+ }
+ r.va += r.size;
+ }
+}
+
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc)
+{
+ struct core_mmu_table_info pg_info;
+ struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
+ struct pgt *pgt;
+ size_t n;
+
+ /* Find the last valid entry */
+ n = ARRAY_SIZE(utc->mmu->regions);
+ while (true) {
+ n--;
+ if (utc->mmu->regions[n].size)
+ break;
+ if (!n)
+ return; /* Nothing to map */
+ }
+
+ /*
+ * Allocate all page tables in advance.
+ */
+ pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->regions[0].va,
+ utc->mmu->regions[n].va + utc->mmu->regions[n].size - 1);
+ pgt = SLIST_FIRST(pgt_cache);
+
+ core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++)
+ mobj_update_mapping(utc->mmu->regions[n].mobj, utc,
+ utc->mmu->regions[n].va);
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+ set_pg_region(dir_info, utc->mmu->regions + n, &pgt, &pg_info);
+ }
+}
+
+#else
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc)
+{
+ unsigned n;
+ struct tee_mmap_region r;
+ size_t offset;
+ size_t granule = BIT(dir_info->shift);
+
+ memset(&r, 0, sizeof(r));
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].size)
+ continue;
+
+ offset = utc->mmu->regions[n].offset;
+ r.va = utc->mmu->regions[n].va;
+ r.size = utc->mmu->regions[n].size;
+ r.attr = utc->mmu->regions[n].attr;
+
+ if (mobj_get_pa(utc->mmu->regions[n].mobj, offset, granule,
+ &r.pa) != TEE_SUCCESS)
+ panic("Failed to get PA of unpaged mobj");
+
+ set_region(dir_info, &r);
+ }
+}
+#endif
+
+bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
+{
+ struct core_mmu_table_info tbl_info;
+ struct tee_mmap_region *map;
+ size_t n;
+ size_t granule;
+ paddr_t p;
+ size_t l;
+
+ if (!len)
+ return true;
+
+ /* Check if the memory is already mapped */
+ map = find_map_by_type_and_pa(type, addr);
+ if (map && pbuf_inside_map_area(addr, len, map))
+ return true;
+
+ /* Find the reserved va space used for late mappings */
+ map = find_map_by_type(MEM_AREA_RES_VASPACE);
+ if (!map)
+ return false;
+
+ if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
+ return false;
+
+ granule = 1 << tbl_info.shift;
+ p = ROUNDDOWN(addr, granule);
+ l = ROUNDUP(len + addr - p, granule);
+ /*
+	 * Something is wrong if we can't fit the va range into the
+	 * selected table. The reserved va range is possibly misaligned
+	 * with the granule.
+ */
+ if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
+ return false;
+
+ /* Find end of the memory map */
+ n = 0;
+ while (static_memory_map[n].type != MEM_AREA_NOTYPE)
+ n++;
+
+ if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
+ /* There's room for another entry */
+ static_memory_map[n].va = map->va;
+ static_memory_map[n].size = l;
+ static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
+ map->va += l;
+ map->size -= l;
+ map = static_memory_map + n;
+ } else {
+ /*
+ * There isn't room for another entry, steal the reserved
+ * entry as it's not useful for anything else any longer.
+ */
+ map->size = l;
+ }
+ map->type = type;
+ map->region_size = granule;
+ map->attr = core_mmu_type_to_attr(type);
+ map->pa = p;
+
+ set_region(&tbl_info, map);
+ return true;
+}
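The requested range is widened to the translation granule of the table backing the reserved va space: the physical address is rounded down to a granule boundary and the length is rounded up so the whole range stays covered. A quick worked example with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1e00f100UL, len = 0x300UL;	/* illustrative */
	unsigned long granule = 0x200000UL;			/* 2 MiB */
	unsigned long p = addr & ~(granule - 1);		/* ROUNDDOWN */
	unsigned long l = (len + addr - p + granule - 1) & ~(granule - 1); /* ROUNDUP */

	printf("pa 0x%lx len 0x%lx\n", p, l);	/* pa 0x1e000000 len 0x200000 */
	return 0;
}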
+
+static bool arm_va2pa_helper(void *va, paddr_t *pa)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+ paddr_t par;
+ paddr_t par_pa_mask;
+ bool ret = false;
+
+#ifdef ARM32
+ write_ats1cpr((vaddr_t)va);
+ isb();
+#ifdef CFG_WITH_LPAE
+ par = read_par64();
+ par_pa_mask = PAR64_PA_MASK;
+#else
+ par = read_par32();
+ par_pa_mask = PAR32_PA_MASK;
+#endif
+#endif /*ARM32*/
+
+#ifdef ARM64
+ write_at_s1e1r((vaddr_t)va);
+ isb();
+ par = read_par_el1();
+ par_pa_mask = PAR_PA_MASK;
+#endif
+ if (par & PAR_F)
+ goto out;
+ *pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
+ ((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));
+
+ ret = true;
+out:
+ thread_unmask_exceptions(exceptions);
+ return ret;
+}
+
+#ifdef CFG_WITH_PAGER
+static vaddr_t get_linear_map_end(void)
+{
+ /* this is synced with the generic linker file kern.ld.S */
+ return (vaddr_t)__heap2_end;
+}
+#endif
+
+#if defined(CFG_TEE_CORE_DEBUG)
+static void check_pa_matches_va(void *va, paddr_t pa)
+{
+ TEE_Result res;
+ vaddr_t v = (vaddr_t)va;
+ paddr_t p = 0;
+
+ if (core_mmu_user_va_range_is_defined()) {
+ vaddr_t user_va_base;
+ size_t user_va_size;
+
+ core_mmu_get_user_va_range(&user_va_base, &user_va_size);
+ if (v >= user_va_base &&
+ v <= (user_va_base - 1 + user_va_size)) {
+ if (!core_mmu_user_mapping_is_active()) {
+ if (pa)
+ panic("issue in linear address space");
+ return;
+ }
+
+ res = tee_mmu_user_va2pa_helper(
+ to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
+ if (res == TEE_SUCCESS && pa != p)
+ panic("bad pa");
+ if (res != TEE_SUCCESS && pa)
+ panic("false pa");
+ return;
+ }
+ }
+#ifdef CFG_WITH_PAGER
+ if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
+ if (v != pa)
+ panic("issue in linear address space");
+ return;
+ }
+ if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
+ v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ uint32_t a;
+
+ /*
+		 * Lookups in the page table managed by the pager are
+		 * dangerous for addresses in the paged area since those
+		 * pages change all the time. But some ranges are safe,
+		 * for instance rw-locked areas while the page is populated.
+ */
+ core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
+ if (a & TEE_MATTR_VALID_BLOCK) {
+ paddr_t mask = ((1 << ti->shift) - 1);
+
+ p |= v & mask;
+ if (pa != p)
+ panic();
+ } else
+ if (pa)
+ panic();
+ return;
+ }
+#endif
+ if (!core_va2pa_helper(va, &p)) {
+ if (pa != p)
+ panic();
+ } else {
+ if (pa)
+ panic();
+ }
+}
+#else
+static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
+{
+}
+#endif
+
+paddr_t virt_to_phys(void *va)
+{
+ paddr_t pa;
+
+ if (!arm_va2pa_helper(va, &pa))
+ pa = 0;
+ check_pa_matches_va(va, pa);
+ return pa;
+}
+
+#if defined(CFG_TEE_CORE_DEBUG)
+static void check_va_matches_pa(paddr_t pa, void *va)
+{
+ if (va && virt_to_phys(va) != pa)
+ panic();
+}
+#else
+static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
+{
+}
+#endif
+
+static void *phys_to_virt_ta_vaspace(paddr_t pa)
+{
+ TEE_Result res;
+ void *va = NULL;
+
+ if (!core_mmu_user_mapping_is_active())
+ return NULL;
+
+ res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
+ pa, &va);
+ if (res != TEE_SUCCESS)
+ return NULL;
+ return va;
+}
+
+#ifdef CFG_WITH_PAGER
+static void *phys_to_virt_tee_ram(paddr_t pa)
+{
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ unsigned idx;
+ unsigned end_idx;
+ uint32_t a;
+ paddr_t p;
+
+ if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
+ return (void *)(vaddr_t)pa;
+
+ end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
+ CFG_TEE_RAM_VA_SIZE);
+	/* Most addresses are mapped linearly, try that first if possible. */
+ idx = core_mmu_va2idx(ti, pa);
+ if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
+ idx < end_idx) {
+ core_mmu_get_entry(ti, idx, &p, &a);
+ if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
+ return (void *)core_mmu_idx2va(ti, idx);
+ }
+
+ for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
+ idx < end_idx; idx++) {
+ core_mmu_get_entry(ti, idx, &p, &a);
+ if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
+ return (void *)core_mmu_idx2va(ti, idx);
+ }
+
+ return NULL;
+}
+#else
+static void *phys_to_virt_tee_ram(paddr_t pa)
+{
+ return map_pa2va(find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa), pa);
+}
+#endif
+
+void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
+{
+ void *va;
+
+ switch (m) {
+ case MEM_AREA_TA_VASPACE:
+ va = phys_to_virt_ta_vaspace(pa);
+ break;
+ case MEM_AREA_TEE_RAM:
+ va = phys_to_virt_tee_ram(pa);
+ break;
+ default:
+ va = map_pa2va(find_map_by_type_and_pa(m, pa), pa);
+ }
+ check_va_matches_pa(pa, va);
+ return va;
+}
+
+bool cpu_mmu_enabled(void)
+{
+ uint32_t sctlr;
+
+#ifdef ARM32
+ sctlr = read_sctlr();
+#else
+ sctlr = read_sctlr_el1();
+#endif
+
+ return sctlr & SCTLR_M ? true : false;
+}
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
new file mode 100644
index 0000000..eb96c70
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -0,0 +1,890 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+
+#include <arm.h>
+#include <assert.h>
+#include <compiler.h>
+#include <inttypes.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <mm/core_memprot.h>
+#include <mm/pgt_cache.h>
+#include <string.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <util.h>
+
+#include "core_mmu_private.h"
+
+#ifndef DEBUG_XLAT_TABLE
+#define DEBUG_XLAT_TABLE 0
+#endif
+
+#if DEBUG_XLAT_TABLE
+#define debug_print(...) DMSG_RAW(__VA_ARGS__)
+#else
+#define debug_print(...) ((void)0)
+#endif
+
+
+/*
+ * Miscellaneous MMU related constants
+ */
+
+#define INVALID_DESC 0x0
+#define BLOCK_DESC 0x1
+#define L3_BLOCK_DESC 0x3
+#define TABLE_DESC 0x3
+#define DESC_ENTRY_TYPE_MASK 0x3
+
+#define HIDDEN_DESC 0x4
+#define HIDDEN_DIRTY_DESC 0x8
+
+#define XN (1ull << 2)
+#define PXN (1ull << 1)
+#define CONT_HINT (1ull << 0)
+
+#define UPPER_ATTRS(x) (((x) & 0x7) << 52)
+#define NON_GLOBAL (1ull << 9)
+#define ACCESS_FLAG (1ull << 8)
+#define NSH (0x0 << 6)
+#define OSH (0x2 << 6)
+#define ISH (0x3 << 6)
+
+#define AP_RO (0x1 << 5)
+#define AP_RW (0x0 << 5)
+#define AP_UNPRIV (0x1 << 4)
+
+#define NS (0x1 << 3)
+#define LOWER_ATTRS_SHIFT 2
+#define LOWER_ATTRS(x) (((x) & 0xfff) << LOWER_ATTRS_SHIFT)
+
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_NTR_INDEX 0x1
+#define ATTR_INDEX_MASK 0x7
+
+#define ATTR_DEVICE (0x4)
+#define ATTR_IWBWA_OWBWA_NTR (0xff)
+
+#define MAIR_ATTR_SET(attr, index) (((uint64_t)attr) << ((index) << 3))
+
+#define OUTPUT_ADDRESS_MASK (0x0000FFFFFFFFF000ULL)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB (0x0)
+#define TCR_PS_BITS_64GB (0x1)
+#define TCR_PS_BITS_1TB (0x2)
+#define TCR_PS_BITS_4TB (0x3)
+#define TCR_PS_BITS_16TB (0x4)
+#define TCR_PS_BITS_256TB (0x5)
+
+#define ADDR_MASK_48_TO_63 0xFFFF000000000000ULL
+#define ADDR_MASK_44_TO_47 0x0000F00000000000ULL
+#define ADDR_MASK_42_TO_43 0x00000C0000000000ULL
+#define ADDR_MASK_40_TO_41 0x0000030000000000ULL
+#define ADDR_MASK_36_TO_39 0x000000F000000000ULL
+#define ADDR_MASK_32_TO_35 0x0000000F00000000ULL
+
+#define UNSET_DESC ((uint64_t)-1)
+
+#define FOUR_KB_SHIFT 12
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
+
+#define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
+#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
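With the 4 KiB granule each table holds 512 entries, so the shifts above resolve to 12, 21 and 30 and the L3/L2/L1 indices are taken from va[20:12], va[29:21] and va[38:30]. A quick standalone check with a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t va = 0x00000000fe123456ULL;	/* illustrative */

	unsigned l1 = (va >> 30) & 0x1ff;	/* L1_XLAT_ADDRESS_SHIFT */
	unsigned l2 = (va >> 21) & 0x1ff;	/* L2_XLAT_ADDRESS_SHIFT */
	unsigned l3 = (va >> 12) & 0x1ff;	/* L3_XLAT_ADDRESS_SHIFT */

	printf("L1 %u L2 %u L3 %u page offset 0x%llx\n",
	       l1, l2, l3, (unsigned long long)(va & 0xfff));
	/* va 0xfe123456 -> L1 3, L2 496, L3 291, offset 0x456 */
	return 0;
}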
+
+#define MAX_MMAP_REGIONS 16
+#define NUM_L1_ENTRIES \
+ (CFG_LPAE_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES 5
+#endif
+
+/* MMU L1 table, one for each core */
+static uint64_t l1_xlation_table[CFG_TEE_CORE_NB_CORE][NUM_L1_ENTRIES]
+ __aligned(NUM_L1_ENTRIES * XLAT_ENTRY_SIZE) __section(".nozi.mmu.l1");
+
+static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+/* MMU L2 table for TAs, one for each thread */
+static uint64_t xlat_tables_ul1[CFG_NUM_THREADS][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+
+static unsigned next_xlat __early_bss;
+static uint64_t tcr_ps_bits __early_bss;
+static int user_va_idx = -1;
+
+static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
+{
+ uint32_t a;
+
+ if (!(desc & 1)) {
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & HIDDEN_DIRTY_DESC)
+ return TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+ return 0;
+ }
+
+ if (level == 3) {
+ if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
+ return 0;
+ } else {
+ if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
+ return TEE_MATTR_TABLE;
+ }
+
+ a = TEE_MATTR_VALID_BLOCK;
+
+ if (desc & LOWER_ATTRS(ACCESS_FLAG))
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & LOWER_ATTRS(AP_RO)))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
+ a &= ~TEE_MATTR_URWX;
+
+ if (desc & UPPER_ATTRS(XN))
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & UPPER_ATTRS(PXN))
+ a &= ~TEE_MATTR_PX;
+
+ COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
+ COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
+ TEE_MATTR_CACHE_CACHED);
+
+ a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
+ TEE_MATTR_CACHE_SHIFT;
+
+ if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & LOWER_ATTRS(NS)))
+ a |= TEE_MATTR_SECURE;
+
+ return a;
+}
+
+static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint64_t desc;
+ uint32_t a = attr;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_HIDDEN_DIRTY_BLOCK)
+ return INVALID_DESC | HIDDEN_DIRTY_DESC;
+
+ if (a & TEE_MATTR_TABLE)
+ return TABLE_DESC;
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+ if (level == 3)
+ desc = L3_BLOCK_DESC;
+ else
+ desc = BLOCK_DESC;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= UPPER_ATTRS(XN);
+ if (!(a & TEE_MATTR_PX))
+ desc |= UPPER_ATTRS(PXN);
+
+ if (a & TEE_MATTR_UR)
+ desc |= LOWER_ATTRS(AP_UNPRIV);
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= LOWER_ATTRS(AP_RO);
+
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ switch ((a >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
+ case TEE_MATTR_CACHE_NONCACHE:
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ break;
+ case TEE_MATTR_CACHE_CACHED:
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ break;
+ default:
+ /*
+		 * "Can't happen": the attribute is supposed to have been
+		 * checked with core_mmu_mattr_is_ok() before.
+ */
+ panic();
+ }
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= LOWER_ATTRS(NON_GLOBAL);
+
+ desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
+
+ return desc;
+}
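As a concrete example of the encoding above, a secure, cached, global, privileged read/write (non-executable) level-3 page ends up with XN/PXN set in the upper attributes, the access flag, inner-shareable and the cached MAIR index in the lower attributes, and the AP/NS/nG bits left clear. A standalone sketch reproducing that descriptor with the bit positions written out; the physical address is made up.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t desc = 0;

	desc |= 0x3;			/* L3_BLOCK_DESC */
	desc |= UINT64_C(1) << 54;	/* UPPER_ATTRS(XN): not executable */
	desc |= UINT64_C(1) << 53;	/* UPPER_ATTRS(PXN) */
	desc |= UINT64_C(1) << 10;	/* LOWER_ATTRS(ACCESS_FLAG) */
	desc |= UINT64_C(0x3) << 8;	/* LOWER_ATTRS(ISH) */
	desc |= UINT64_C(0x1) << 2;	/* ATTR_IWBWA_OWBWA_NTR_INDEX: cached */
	/* AP_RO/AP_UNPRIV clear: privileged RW; NS/NON_GLOBAL clear:
	 * secure and global */

	desc |= UINT64_C(0x40100000);	/* output address (illustrative) */

	printf("0x%016" PRIx64 "\n", desc);	/* 0x0060000040100707 */
	return 0;
}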
+
+static uint64_t mmap_desc(uint32_t attr, uint64_t addr_pa,
+ unsigned level)
+{
+ return mattr_to_desc(level, attr) | addr_pa;
+}
+
+static int mmap_region_attr(struct tee_mmap_region *mm, uint64_t base_va,
+ uint64_t size)
+{
+ uint32_t attr = mm->attr;
+
+ for (;;) {
+ mm++;
+
+ if (!mm->size)
+ return attr; /* Reached end of list */
+
+ if (mm->va >= base_va + size)
+ return attr; /* Next region is after area so end */
+
+ if (mm->va + mm->size <= base_va)
+ continue; /* Next region has already been overtaken */
+
+ if (mm->attr == attr)
+ continue; /* Region doesn't override attribs so skip */
+
+ if (mm->va > base_va ||
+ mm->va + mm->size < base_va + size)
+ return -1; /* Region doesn't fully cover our area */
+ }
+}
+
+static struct tee_mmap_region *init_xlation_table(struct tee_mmap_region *mm,
+ uint64_t base_va, uint64_t *table, unsigned level)
+{
+ unsigned int level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned int level_size = BIT32(level_size_shift);
+ uint64_t level_index_mask = SHIFT_U64(XLAT_TABLE_ENTRIES_MASK,
+ level_size_shift);
+
+ assert(level <= 3);
+
+ debug_print("New xlat table (level %u):", level);
+
+ do {
+ uint64_t desc = UNSET_DESC;
+
+ if (mm->va + mm->size <= base_va) {
+			/* Area is now past this region so skip the region */
+ mm++;
+ continue;
+ }
+
+
+ if (mm->va >= base_va + level_size) {
+ /* Next region is after area so nothing to map yet */
+ desc = INVALID_DESC;
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ } else if (mm->va <= base_va &&
+ mm->va + mm->size >= base_va + level_size &&
+ !(mm->pa & (level_size - 1))) {
+ /* Next region covers all of area */
+ int attr = mmap_region_attr(mm, base_va, level_size);
+
+ if (attr >= 0) {
+ desc = mmap_desc(attr,
+ base_va - mm->va + mm->pa,
+ level);
+ debug_print("%*s%010" PRIx64 " %8x %s-%s-%s-%s",
+ level * 2, "", base_va, level_size,
+ attr & (TEE_MATTR_CACHE_CACHED <<
+ TEE_MATTR_CACHE_SHIFT) ?
+ "MEM" : "DEV",
+ attr & TEE_MATTR_PW ? "RW" : "RO",
+ attr & TEE_MATTR_PX ? "X" : "XN",
+ attr & TEE_MATTR_SECURE ? "S" : "NS");
+ } else {
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ }
+ }
+		/*
+		 * else the next region only partially covers the area, so
+		 * a finer-grained table is needed
+		 */
+
+ if (desc == UNSET_DESC) {
+ /* Area not covered by a region so need finer table */
+			uint64_t *new_table = xlat_tables[next_xlat++];
+
+			if (next_xlat > MAX_XLAT_TABLES)
+				panic("running out of xlat tables");
+			/* Clear table before use */
+			memset(new_table, 0, XLAT_TABLE_SIZE);
+
+ desc = TABLE_DESC | virt_to_phys(new_table);
+
+ /* Recurse to fill in new table */
+ mm = init_xlation_table(mm, base_va, new_table,
+ level + 1);
+ }
+
+ *table++ = desc;
+ base_va += level_size;
+ } while (mm->size && (base_va & level_index_mask));
+
+ return mm;
+}
+
+static unsigned int calc_physical_addr_size_bits(uint64_t max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert(!(max_addr & ADDR_MASK_48_TO_63));
+
+ /* 48 bits address */
+ if (max_addr & ADDR_MASK_44_TO_47)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if (max_addr & ADDR_MASK_42_TO_43)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if (max_addr & ADDR_MASK_40_TO_41)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if (max_addr & ADDR_MASK_36_TO_39)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if (max_addr & ADDR_MASK_32_TO_35)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
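The helper above simply picks the smallest physical-address-size encoding whose range covers the highest physical address seen in the memory map. A host-side restatement with a few made-up sample addresses:

#include <stdint.h>
#include <stdio.h>

static unsigned ps_bits(uint64_t max_addr)
{
	if (max_addr & 0x0000F00000000000ULL)	/* bits 44-47 -> 48-bit PA */
		return 5;			/* TCR_PS_BITS_256TB */
	if (max_addr & 0x00000C0000000000ULL)	/* bits 42-43 -> 44-bit PA */
		return 4;			/* TCR_PS_BITS_16TB */
	if (max_addr & 0x0000030000000000ULL)	/* bits 40-41 -> 42-bit PA */
		return 3;			/* TCR_PS_BITS_4TB */
	if (max_addr & 0x000000F000000000ULL)	/* bits 36-39 -> 40-bit PA */
		return 2;			/* TCR_PS_BITS_1TB */
	if (max_addr & 0x0000000F00000000ULL)	/* bits 32-35 -> 36-bit PA */
		return 1;			/* TCR_PS_BITS_64GB */
	return 0;				/* TCR_PS_BITS_4GB */
}

int main(void)
{
	printf("0x%x\n", ps_bits(0x00000000fe000000ULL));	/* 0: 4 GB */
	printf("0x%x\n", ps_bits(0x0000000880000000ULL));	/* 1: 64 GB */
	printf("0x%x\n", ps_bits(0x0000008000000000ULL));	/* 2: 1 TB */
	return 0;
}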
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ paddr_t max_pa = 0;
+ uint64_t max_va = 0;
+ size_t n;
+
+ for (n = 0; mm[n].size; n++) {
+ paddr_t pa_end;
+ vaddr_t va_end;
+
+ debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
+ mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);
+
+ if (!IS_PAGE_ALIGNED(mm[n].pa) || !IS_PAGE_ALIGNED(mm[n].size))
+ panic("unaligned region");
+
+ pa_end = mm[n].pa + mm[n].size - 1;
+ va_end = mm[n].va + mm[n].size - 1;
+ if (pa_end > max_pa)
+ max_pa = pa_end;
+ if (va_end > max_va)
+ max_va = va_end;
+ }
+
+ /* Clear table before use */
+ memset(l1_xlation_table[0], 0, NUM_L1_ENTRIES * XLAT_ENTRY_SIZE);
+ init_xlation_table(mm, 0, l1_xlation_table[0], 1);
+ for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
+ memcpy(l1_xlation_table[n], l1_xlation_table[0],
+ XLAT_ENTRY_SIZE * NUM_L1_ENTRIES);
+
+ for (n = 1; n < NUM_L1_ENTRIES; n++) {
+ if (!l1_xlation_table[0][n]) {
+ user_va_idx = n;
+ break;
+ }
+ }
+ assert(user_va_idx != -1);
+
+ tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+ COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_SIZE > 0);
+ assert(max_va < CFG_LPAE_ADDR_SPACE_SIZE);
+}
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
+{
+ size_t l1size = (1 << L1_XLAT_ADDRESS_SHIFT);
+ paddr_t l1mask = l1size - 1;
+
+ return (paddr & l1mask) > (l1size / 2);
+}
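In other words, TEE RAM is mapped "at the top" when its load address falls in the upper half of its naturally aligned 1 GiB L1 region, which makes init_mem_map() in core_mmu.c assign the remaining virtual addresses downwards. A quick check with a made-up load address:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long l1size = 1UL << 30;	/* 1 GiB L1 region */
	unsigned long paddr = 0x7f000000UL;	/* illustrative load address */
	bool at_top = (paddr & (l1size - 1)) > l1size / 2;

	printf("at_top: %d\n", at_top);	/* 0x3f000000 > 0x20000000 -> 1 */
	return 0;
}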
+
+#ifdef ARM32
+void core_init_mmu_regs(void)
+{
+ uint32_t ttbcr = TTBCR_EAE;
+ uint32_t mair;
+ paddr_t ttbr0;
+
+ ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);
+
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ write_mair0(mair);
+
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
+ ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
+
+ /* Disable the use of TTBR1 */
+ ttbcr |= TTBCR_EPD1;
+
+ /* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
+
+ write_ttbcr(ttbcr);
+ write_ttbr0_64bit(ttbr0);
+ write_ttbr1_64bit(0);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+void core_init_mmu_regs(void)
+{
+ uint64_t mair;
+ uint64_t tcr;
+ paddr_t ttbr0;
+
+ ttbr0 = virt_to_phys(l1_xlation_table[get_core_pos()]);
+
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ write_mair_el1(mair);
+
+ tcr = TCR_RES1;
+ tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
+ tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
+ tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
+ tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
+ tcr |= 64 - __builtin_ctzl(CFG_LPAE_ADDR_SPACE_SIZE);
+
+ /* Disable the use of TTBR1 */
+ tcr |= TCR_EPD1;
+
+ /*
+ * TCR.A1 = 0 => ASID is stored in TTBR0
+	 * TCR.AS = 0 => Same ASID size as in AArch32/ARMv7
+ */
+
+ write_tcr_el1(tcr);
+ write_ttbr0_el1(ttbr0);
+ write_ttbr1_el1(0);
+}
+#endif /*ARM64*/
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table)
+{
+ tbl_info->level = level;
+ tbl_info->table = table;
+ tbl_info->va_base = va_base;
+ tbl_info->shift = L1_XLAT_ADDRESS_SHIFT -
+ (level - 1) * XLAT_TABLE_ENTRIES_SHIFT;
+ assert(level <= 3);
+ if (level == 1)
+ tbl_info->num_entries = NUM_L1_ENTRIES;
+ else
+ tbl_info->num_entries = XLAT_TABLE_ENTRIES;
+}
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
+{
+ vaddr_t va_range_base;
+ void *tbl = xlat_tables_ul1[thread_get_id()];
+
+ core_mmu_get_user_va_range(&va_range_base, NULL);
+ core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
+}
+
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map)
+{
+ struct core_mmu_table_info dir_info;
+
+ COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
+
+ core_mmu_get_user_pgdir(&dir_info);
+ memset(dir_info.table, 0, PGT_SIZE);
+ core_mmu_populate_user_map(&dir_info, utc);
+ map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
+ map->asid = utc->context & TTBR_ASID_MASK;
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint64_t *tbl = l1_xlation_table[get_core_pos()];
+ uintptr_t ntbl;
+ unsigned level = 1;
+ vaddr_t va_base = 0;
+ unsigned num_entries = NUM_L1_ENTRIES;
+
+ while (true) {
+ unsigned level_size_shift =
+ L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned n = (va - va_base) >> level_size_shift;
+
+ if (n >= num_entries)
+ return false;
+
+ if (level == max_level || level == 3 ||
+ (tbl[n] & TABLE_DESC) != TABLE_DESC) {
+ /*
+ * We've either reached max_level, level 3, a block
+ * mapping entry or an "invalid" mapping entry.
+ */
+ tbl_info->table = tbl;
+ tbl_info->va_base = va_base;
+ tbl_info->level = level;
+ tbl_info->shift = level_size_shift;
+ tbl_info->num_entries = num_entries;
+ return true;
+ }
+
+ /* Copy bits 39:12 from tbl[n] to ntbl */
+ ntbl = (tbl[n] & ((1ULL << 40) - 1)) & ~((1 << 12) - 1);
+
+ tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM);
+ if (!tbl)
+ return false;
+
+ va_base += n << level_size_shift;
+ level++;
+ num_entries = XLAT_TABLE_ENTRIES;
+ }
+}
+
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx)
+{
+ uint64_t *new_table;
+ uint64_t *entry;
+ uint64_t new_table_desc;
+ size_t new_entry_size;
+ paddr_t paddr;
+ uint32_t attr;
+ int i;
+
+ if (tbl_info->level >= 3)
+ return false;
+
+ if (next_xlat >= MAX_XLAT_TABLES)
+ return false;
+
+ if (tbl_info->level == 1 && idx >= NUM_L1_ENTRIES)
+ return false;
+
+ if (tbl_info->level > 1 && idx >= XLAT_TABLE_ENTRIES)
+ return false;
+
+ entry = (uint64_t *)tbl_info->table + idx;
+ assert((*entry & DESC_ENTRY_TYPE_MASK) == BLOCK_DESC);
+
+ new_table = xlat_tables[next_xlat++];
+ new_table_desc = TABLE_DESC | (uint64_t)(uintptr_t)new_table;
+
+ /* store attributes of original block */
+ attr = desc_to_mattr(tbl_info->level, *entry);
+ paddr = *entry & OUTPUT_ADDRESS_MASK;
+ new_entry_size = 1 << (tbl_info->shift - XLAT_TABLE_ENTRIES_SHIFT);
+
+ /* Fill new xlat table with entries pointing to the same memory */
+ for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
+ *new_table = paddr | mattr_to_desc(tbl_info->level + 1, attr);
+ paddr += new_entry_size;
+ new_table++;
+ }
+
+ /* Update descriptor at current level */
+ *entry = new_table_desc;
+ return true;
+}
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint64_t *tbl = table;
+ uint64_t desc = mattr_to_desc(level, attr);
+
+ tbl[idx] = desc | pa;
+}
+
+void core_mmu_get_entry_primitive(const void *table, size_t level,
+ size_t idx, paddr_t *pa, uint32_t *attr)
+{
+ const uint64_t *tbl = table;
+
+ if (pa)
+ *pa = (tbl[idx] & ((1ull << 40) - 1)) & ~((1 << 12) - 1);
+
+ if (attr)
+ *attr = desc_to_mattr(level, tbl[idx]);
+}
+
+bool core_mmu_user_va_range_is_defined(void)
+{
+ return user_va_idx != -1;
+}
+
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
+{
+ assert(user_va_idx != -1);
+
+ if (base)
+ *base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
+ if (size)
+ *size = 1 << L1_XLAT_ADDRESS_SHIFT;
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ assert(user_va_idx != -1);
+ return !!l1_xlation_table[get_core_pos()][user_va_idx];
+}
+
+#ifdef ARM32
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ assert(user_va_idx != -1);
+
+ map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
+ if (map->user_map) {
+ map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
+ TTBR_ASID_MASK;
+ } else {
+ map->asid = 0;
+ }
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint64_t ttbr;
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ assert(user_va_idx != -1);
+
+ ttbr = read_ttbr0_64bit();
+ /* Clear ASID */
+ ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
+ write_ttbr0_64bit(ttbr);
+ isb();
+
+ /* Set the new map */
+ if (map && map->user_map) {
+ l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
+ dsb(); /* Make sure the write above is visible */
+ ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
+ write_ttbr0_64bit(ttbr);
+ isb();
+ } else {
+ l1_xlation_table[get_core_pos()][user_va_idx] = 0;
+ dsb(); /* Make sure the write above is visible */
+ }
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ thread_unmask_exceptions(exceptions);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
+{
+ assert(fault_descr & FSR_LPAE);
+
+ switch (fault_descr & FSR_STATUS_MASK) {
+ case 0x21: /* b100001 Alignment fault */
+ return CORE_MMU_FAULT_ALIGNMENT;
+	case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
+		return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+	case 0x22: /* b100010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ default:
+ break;
+ }
+
+ switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
+ case 0x1: /* b0001LL Translation fault */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0x2: /* b0010LL Access flag fault */
+ case 0x3: /* b0011LL Permission fault */
+ if (fault_descr & FSR_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ assert(user_va_idx != -1);
+
+ map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
+ if (map->user_map) {
+ map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
+ TTBR_ASID_MASK;
+ } else {
+ map->asid = 0;
+ }
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint64_t ttbr;
+ uint32_t daif = read_daif();
+
+ write_daif(daif | DAIF_AIF);
+
+ ttbr = read_ttbr0_el1();
+ /* Clear ASID */
+ ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
+ write_ttbr0_el1(ttbr);
+ isb();
+
+ /* Set the new map */
+ if (map && map->user_map) {
+ l1_xlation_table[get_core_pos()][user_va_idx] = map->user_map;
+ dsb(); /* Make sure the write above is visible */
+ ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
+ write_ttbr0_el1(ttbr);
+ isb();
+ } else {
+ l1_xlation_table[get_core_pos()][user_va_idx] = 0;
+ dsb(); /* Make sure the write above is visible */
+ }
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ write_daif(daif);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
+{
+ switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_SP_ALIGN:
+ case ESR_EC_PC_ALIGN:
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case ESR_EC_IABT_EL0:
+ case ESR_EC_DABT_EL0:
+ case ESR_EC_IABT_EL1:
+ case ESR_EC_DABT_EL1:
+ switch (fault_descr & ESR_FSC_MASK) {
+ case ESR_FSC_TRANS_L0:
+ case ESR_FSC_TRANS_L1:
+ case ESR_FSC_TRANS_L2:
+ case ESR_FSC_TRANS_L3:
+ return CORE_MMU_FAULT_TRANSLATION;
+ case ESR_FSC_ACCF_L1:
+ case ESR_FSC_ACCF_L2:
+ case ESR_FSC_ACCF_L3:
+ case ESR_FSC_PERMF_L1:
+ case ESR_FSC_PERMF_L2:
+ case ESR_FSC_PERMF_L3:
+ if (fault_descr & ESR_ABT_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ case ESR_FSC_ALIGN:
+ return CORE_MMU_FAULT_ALIGNMENT;
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
+#endif /*ARM64*/
diff --git a/core/arch/arm/mm/core_mmu_private.h b/core/arch/arm/mm/core_mmu_private.h
new file mode 100644
index 0000000..5bcb9ea
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_private.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MMU_PRIVATE_H
+#define CORE_MMU_PRIVATE_H
+
+#include <mm/core_mmu.h>
+#include <mm/tee_mmu_types.h>
+
+
+void core_init_mmu_tables(struct tee_mmap_region *mm);
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table);
+void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
+ struct user_ta_ctx *utc);
+
+
+#endif /*CORE_MMU_PRIVATE_H*/
+
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
new file mode 100644
index 0000000..54f6caa
--- /dev/null
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/panic.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <platform_config.h>
+#include <stdlib.h>
+#include <string.h>
+#include <trace.h>
+#include <util.h>
+#include "core_mmu_private.h"
+
+#ifdef CFG_WITH_LPAE
+#error This file is not to be used with LPAE
+#endif
+
+/*
+ * MMU related values
+ */
+
+/* Shareable */
+#define TEE_MMU_TTB_S (1 << 1)
+
+/* Not Outer Shareable */
+#define TEE_MMU_TTB_NOS (1 << 5)
+
+/* Normal memory, Inner Non-cacheable */
+#define TEE_MMU_TTB_IRGN_NC 0
+
+/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
+
+/* Normal memory, Inner Write-Through Cacheable */
+#define TEE_MMU_TTB_IRGN_WT 1
+
+/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
+
+/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_RNG_WBWA (1 << 3)
+
+#define TEE_MMU_DEFAULT_ATTRS \
+ (TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
+ TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
+
+
+#define INVALID_DESC 0x0
+#define HIDDEN_DESC 0x4
+#define HIDDEN_DIRTY_DESC 0x8
+
+#define SECTION_SHIFT 20
+#define SECTION_MASK 0x000fffff
+#define SECTION_SIZE 0x00100000
+
+/* armv7 memory mapping attributes: section mapping */
+#define SECTION_SECURE (0 << 19)
+#define SECTION_NOTSECURE (1 << 19)
+#define SECTION_SHARED (1 << 16)
+#define SECTION_NOTGLOBAL (1 << 17)
+#define SECTION_ACCESS_FLAG (1 << 10)
+#define SECTION_UNPRIV (1 << 11)
+#define SECTION_RO (1 << 15)
+#define SECTION_TEXCB(texcb) ((((texcb) >> 2) << 12) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SECTION_DEVICE SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL_CACHED SECTION_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+
+#define SECTION_XN (1 << 4)
+#define SECTION_PXN (1 << 0)
+#define SECTION_SECTION (2 << 0)
+
+#define SECTION_PT_NOTSECURE (1 << 3)
+#define SECTION_PT_PT (1 << 0)
+
+#define SMALL_PAGE_SMALL_PAGE (1 << 1)
+#define SMALL_PAGE_SHARED (1 << 10)
+#define SMALL_PAGE_NOTGLOBAL (1 << 11)
+#define SMALL_PAGE_TEXCB(texcb) ((((texcb) >> 2) << 6) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SMALL_PAGE_DEVICE SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL_CACHED SMALL_PAGE_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+#define SMALL_PAGE_ACCESS_FLAG (1 << 4)
+#define SMALL_PAGE_UNPRIV (1 << 5)
+#define SMALL_PAGE_RO (1 << 9)
+#define SMALL_PAGE_XN (1 << 0)
+
+
+/* The TEX, C and B bits concatenated */
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_INDEX 0x1
+
+#define PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \
+ ((uint32_t)(nos) << ((idx) + 24)))
+#define NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \
+ ((uint32_t)(or) << (2 * (idx) + 16)))
+#define PRRR_DS0 (1 << 16)
+#define PRRR_DS1 (1 << 17)
+#define PRRR_NS0 (1 << 18)
+#define PRRR_NS1 (1 << 19)
+
+#define ATTR_DEVICE_PRRR PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
+#define ATTR_DEVICE_NMRR NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)
+
+#define ATTR_IWBWA_OWBWA_PRRR PRRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 2, 1)
+#define ATTR_IWBWA_OWBWA_NMRR NMRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 1, 1)
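+
+/*
+ * With TEX remap enabled (SCTLR.TRE) the {TEX[0], C, B} bits of a descriptor
+ * form an index into PRRR/NMRR. core_init_mmu_regs() below programs index 0
+ * as Device memory (PRRR type 1) and index 1 as Normal memory, Inner/Outer
+ * Write-Back Write-Allocate (PRRR type 2, NMRR attribute 1).
+ */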
+
+#define NUM_L1_ENTRIES 4096
+#define NUM_L2_ENTRIES 256
+
+#define L1_TBL_SIZE (NUM_L1_ENTRIES * 4)
+#define L2_TBL_SIZE (NUM_L2_ENTRIES * 4)
+#define L1_ALIGNMENT L1_TBL_SIZE
+#define L2_ALIGNMENT L2_TBL_SIZE
+
+/* Defined to the smallest possible secondary L1 MMU table */
+#define TTBCR_N_VALUE 7
+
+/* Number of sections in ttbr0 when user mapping activated */
+#define NUM_UL1_ENTRIES (1 << (12 - TTBCR_N_VALUE))
+#define UL1_ALIGNMENT (NUM_UL1_ENTRIES * 4)
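+
+/*
+ * With TTBCR.N = 7, TTBR0 only translates the lowest 2^(32 - 7) bytes of the
+ * address space, i.e. a 32 MiB user window of NUM_UL1_ENTRIES (32) section
+ * entries; all higher addresses are translated via TTBR1.
+ */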
+/* TTB attributes */
+
+/* TTB0 of TTBR0 (depends on TTBCR_N_VALUE) */
+#define TTB_UL1_MASK (~(UL1_ALIGNMENT - 1))
+/* TTB1 of TTBR1 */
+#define TTB_L1_MASK (~(L1_ALIGNMENT - 1))
+
+#ifndef MAX_XLAT_TABLES
+#define MAX_XLAT_TABLES 4
+#endif
+
+enum desc_type {
+ DESC_TYPE_PAGE_TABLE,
+ DESC_TYPE_SECTION,
+ DESC_TYPE_SUPER_SECTION,
+ DESC_TYPE_LARGE_PAGE,
+ DESC_TYPE_SMALL_PAGE,
+ DESC_TYPE_INVALID,
+};
+
+/* Main MMU L1 table for teecore */
+static uint32_t main_mmu_l1_ttb[NUM_L1_ENTRIES]
+ __aligned(L1_ALIGNMENT) __section(".nozi.mmu.l1");
+
+/* L2 MMU tables */
+static uint32_t main_mmu_l2_ttb[MAX_XLAT_TABLES][NUM_L2_ENTRIES]
+ __aligned(L2_ALIGNMENT) __section(".nozi.mmu.l2");
+
+/* MMU L1 table for TAs, one for each thread */
+static uint32_t main_mmu_ul1_ttb[CFG_NUM_THREADS][NUM_UL1_ENTRIES]
+ __aligned(UL1_ALIGNMENT) __section(".nozi.mmu.ul1");
+
+static vaddr_t core_mmu_get_main_ttb_va(void)
+{
+ return (vaddr_t)main_mmu_l1_ttb;
+}
+
+static paddr_t core_mmu_get_main_ttb_pa(void)
+{
+ paddr_t pa = virt_to_phys((void *)core_mmu_get_main_ttb_va());
+
+ if (pa & ~TTB_L1_MASK)
+ panic("invalid core l1 table");
+ return pa;
+}
+
+static vaddr_t core_mmu_get_ul1_ttb_va(void)
+{
+ return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
+}
+
+static paddr_t core_mmu_get_ul1_ttb_pa(void)
+{
+ paddr_t pa = virt_to_phys((void *)core_mmu_get_ul1_ttb_va());
+
+ if (pa & ~TTB_UL1_MASK)
+ panic("invalid user l1 table");
+ return pa;
+}
+
+static void *core_mmu_alloc_l2(size_t size)
+{
+ /* Can't have this in .bss since it's not initialized yet */
+ static uint32_t tables_used __early_bss;
+ uint32_t to_alloc = ROUNDUP(size, NUM_L2_ENTRIES * SMALL_PAGE_SIZE) /
+ (NUM_L2_ENTRIES * SMALL_PAGE_SIZE);
+
+ if (tables_used + to_alloc > MAX_XLAT_TABLES)
+ return NULL;
+
+ tables_used += to_alloc;
+ return main_mmu_l2_ttb[tables_used - to_alloc];
+}
+
+static enum desc_type get_desc_type(unsigned level, uint32_t desc)
+{
+ assert(level >= 1 && level <= 2);
+
+ if (level == 1) {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_PAGE_TABLE;
+
+ if ((desc & 0x2) == 0x2) {
+ if (desc & (1 << 18))
+ return DESC_TYPE_SUPER_SECTION;
+ return DESC_TYPE_SECTION;
+ }
+ } else {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_LARGE_PAGE;
+
+ if ((desc & 0x2) == 0x2)
+ return DESC_TYPE_SMALL_PAGE;
+ }
+
+ return DESC_TYPE_INVALID;
+}
+
+static uint32_t texcb_to_mattr(uint32_t texcb)
+{
+ COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
+ COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_INDEX == TEE_MATTR_CACHE_CACHED);
+
+ return texcb << TEE_MATTR_CACHE_SHIFT;
+}
+
+static uint32_t mattr_to_texcb(uint32_t attr)
+{
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ return (attr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK;
+}
+
+
+static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
+{
+ uint32_t a;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_PAGE_TABLE:
+ a = TEE_MATTR_TABLE;
+ if (!(desc & SECTION_PT_NOTSECURE))
+ a |= TEE_MATTR_SECURE;
+ break;
+ case DESC_TYPE_SECTION:
+ a = TEE_MATTR_VALID_BLOCK;
+ if (desc & SECTION_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SECTION_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SECTION_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & SECTION_PXN)
+ a &= ~TEE_MATTR_PX;
+
+ a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SECTION_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & SECTION_NOTSECURE))
+ a |= TEE_MATTR_SECURE;
+
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ a = TEE_MATTR_VALID_BLOCK;
+ if (desc & SMALL_PAGE_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SMALL_PAGE_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SMALL_PAGE_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SMALL_PAGE_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+ break;
+ case DESC_TYPE_INVALID:
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & HIDDEN_DIRTY_DESC)
+ return TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+ return 0;
+ default:
+ return 0;
+ }
+
+ return a;
+}
+
+static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint32_t desc;
+ uint32_t a = attr;
+ unsigned texcb;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_HIDDEN_DIRTY_BLOCK)
+ return INVALID_DESC | HIDDEN_DIRTY_DESC;
+
+ if (level == 1 && (a & TEE_MATTR_TABLE)) {
+ desc = SECTION_PT_PT;
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_PT_NOTSECURE;
+ return desc;
+ }
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+
+ texcb = mattr_to_texcb(a);
+
+ if (level == 1) { /* Section */
+ desc = SECTION_SECTION | SECTION_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SECTION_XN;
+
+#ifdef CFG_HWSUPP_MEM_PERM_PXN
+ if (!(a & TEE_MATTR_PX))
+ desc |= SECTION_PXN;
+#endif
+
+ if (a & TEE_MATTR_UR)
+ desc |= SECTION_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SECTION_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SECTION_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SECTION_NOTGLOBAL;
+
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_NOTSECURE;
+
+ desc |= SECTION_TEXCB(texcb);
+ } else {
+ desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SMALL_PAGE_XN;
+
+ if (a & TEE_MATTR_UR)
+ desc |= SMALL_PAGE_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SMALL_PAGE_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SMALL_PAGE_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SMALL_PAGE_NOTGLOBAL;
+
+ desc |= SMALL_PAGE_TEXCB(texcb);
+ }
+
+ return desc;
+}
+
+void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table)
+{
+ tbl_info->level = level;
+ tbl_info->table = table;
+ tbl_info->va_base = va_base;
+ assert(level <= 2);
+ if (level == 1) {
+ tbl_info->shift = SECTION_SHIFT;
+ tbl_info->num_entries = NUM_L1_ENTRIES;
+ } else {
+ tbl_info->shift = SMALL_PAGE_SHIFT;
+ tbl_info->num_entries = NUM_L2_ENTRIES;
+ }
+}
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
+{
+ void *tbl = (void *)core_mmu_get_ul1_ttb_va();
+
+ core_mmu_set_info_table(pgd_info, 1, 0, tbl);
+ pgd_info->num_entries = NUM_UL1_ENTRIES;
+}
+
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map)
+{
+ struct core_mmu_table_info dir_info;
+
+ COMPILE_TIME_ASSERT(L2_TBL_SIZE == PGT_SIZE);
+
+ core_mmu_get_user_pgdir(&dir_info);
+ memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
+ core_mmu_populate_user_map(&dir_info, utc);
+ map->ttbr0 = core_mmu_get_ul1_ttb_pa() | TEE_MMU_DEFAULT_ATTRS;
+ map->ctxid = utc->context & 0xff;
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint32_t *tbl = (uint32_t *)core_mmu_get_main_ttb_va();
+ unsigned n = va >> SECTION_SHIFT;
+
+ if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
+ core_mmu_set_info_table(tbl_info, 1, 0, tbl);
+ } else {
+ paddr_t ntbl = tbl[n] & ~((1 << 10) - 1);
+ void *l2tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM);
+
+ if (!l2tbl)
+ return false;
+
+ core_mmu_set_info_table(tbl_info, 2, n << SECTION_SHIFT, l2tbl);
+ }
+ return true;
+}
+
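+/*
+ * Replace a 1 MiB section descriptor with an L2 table that maps the same
+ * physical range with the same attributes, so that individual small pages
+ * within the section can be remapped later.
+ */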
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx)
+{
+ uint32_t *new_table;
+ uint32_t *entry;
+ uint32_t new_table_desc;
+ paddr_t paddr;
+ uint32_t attr;
+ int i;
+
+ if (tbl_info->level != 1)
+ return false;
+
+ if (idx >= NUM_L1_ENTRIES)
+ return false;
+
+ new_table = core_mmu_alloc_l2(NUM_L2_ENTRIES * SMALL_PAGE_SIZE);
+ if (!new_table)
+ return false;
+
+ entry = (uint32_t *)tbl_info->table + idx;
+ assert(get_desc_type(1, *entry) == DESC_TYPE_SECTION);
+
+ new_table_desc = SECTION_PT_PT | (uint32_t)new_table;
+ if (*entry & SECTION_NOTSECURE)
+ new_table_desc |= SECTION_PT_NOTSECURE;
+
+ /* store attributes of original block */
+ attr = desc_to_mattr(1, *entry);
+ paddr = *entry & ~SECTION_MASK;
+
+ /* Fill new xlat table with entries pointing to the same memory */
+ for (i = 0; i < NUM_L2_ENTRIES; i++) {
+ *new_table = paddr | mattr_to_desc(tbl_info->level + 1, attr);
+ paddr += SMALL_PAGE_SIZE;
+ new_table++;
+ }
+
+ /* Update descriptor at current level */
+ *entry = new_table_desc;
+ return true;
+}
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint32_t *tbl = table;
+ uint32_t desc = mattr_to_desc(level, attr);
+
+ tbl[idx] = desc | pa;
+}
+
+static paddr_t desc_to_pa(unsigned level, uint32_t desc)
+{
+ unsigned shift_mask;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_PAGE_TABLE:
+ shift_mask = 10;
+ break;
+ case DESC_TYPE_SECTION:
+ shift_mask = 20;
+ break;
+ case DESC_TYPE_SUPER_SECTION:
+ shift_mask = 24; /* We're ignoring bits 32 and above. */
+ break;
+ case DESC_TYPE_LARGE_PAGE:
+ shift_mask = 16;
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ shift_mask = 12;
+ break;
+ default:
+ /* Invalid section, HIDDEN_DESC, HIDDEN_DIRTY_DESC */
+ shift_mask = 4;
+ }
+
+ return desc & ~((1 << shift_mask) - 1);
+}
+
+void core_mmu_get_entry_primitive(const void *table, size_t level,
+ size_t idx, paddr_t *pa, uint32_t *attr)
+{
+ const uint32_t *tbl = table;
+
+ if (pa)
+ *pa = desc_to_pa(level, tbl[idx]);
+
+ if (attr)
+ *attr = desc_to_mattr(level, tbl[idx]);
+}
+
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
+{
+ if (base) {
+ /* Leaving the first entry unmapped to make NULL unmapped */
+ *base = 1 << SECTION_SHIFT;
+ }
+
+ if (size)
+ *size = (NUM_UL1_ENTRIES - 1) << SECTION_SHIFT;
+}
+
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ map->ttbr0 = read_ttbr0();
+ map->ctxid = read_contextidr();
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ /*
+ * Update the reserved Context ID and TTBR0
+ */
+
+ dsb(); /* ARM erratum 754322 */
+ write_contextidr(0);
+ isb();
+
+ if (map) {
+ write_ttbr0(map->ttbr0);
+ isb();
+ write_contextidr(map->ctxid);
+ } else {
+ write_ttbr0(read_ttbr1());
+ }
+ isb();
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ /* Restore interrupts */
+ thread_unmask_exceptions(exceptions);
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ return read_ttbr0() != read_ttbr1();
+}
+
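+/*
+ * Allocate and fill an L2 table with small-page mappings for the region
+ * described by @mm and return the physical address of the table, ready to
+ * be installed as an L1 page-table descriptor by map_memarea().
+ */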
+static paddr_t map_page_memarea(struct tee_mmap_region *mm)
+{
+ uint32_t *l2 = core_mmu_alloc_l2(mm->size);
+ size_t pg_idx;
+ uint32_t attr;
+
+ if (!l2)
+ panic("no l2 table");
+
+ attr = mattr_to_desc(2, mm->attr);
+
+ /* Zero fill initial entries */
+ pg_idx = 0;
+ while ((pg_idx * SMALL_PAGE_SIZE) < (mm->va & SECTION_MASK)) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ /* Fill in the entries */
+ while ((pg_idx * SMALL_PAGE_SIZE) <
+ (mm->size + (mm->va & SECTION_MASK))) {
+ l2[pg_idx] = ((mm->pa & ~SECTION_MASK) +
+ pg_idx * SMALL_PAGE_SIZE) | attr;
+ pg_idx++;
+ }
+
+ /* Zero fill the rest */
+ while (pg_idx < ROUNDUP(mm->size, SECTION_SIZE) / SMALL_PAGE_SIZE) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ return virt_to_phys(l2);
+}
+
+/*
+ * map_memarea - load a mapping into the target L1 table
+ * Regions that are section (1 MiB) aligned are mapped directly with section
+ * descriptors; anything with finer granularity is mapped through an L2 table
+ * of small pages allocated by map_page_memarea().
+ */
+static void map_memarea(struct tee_mmap_region *mm, uint32_t *ttb)
+{
+ size_t m, n;
+ uint32_t attr;
+ paddr_t pa;
+ uint32_t region_size;
+
+ assert(mm && ttb);
+
+ /*
+ * If mm->va is smaller than 32M, then mm->va will conflict with
+ * user TA address space. This mapping will be overridden/hidden
+ * later when a user TA is loaded since these low addresses are
+ * used as TA virtual address space.
+ *
+ * Some SoCs have devices at low addresses, so we need to map at
+ * least those devices at a virtual address which isn't the same
+ * as the physical.
+ *
+ * TODO: support mapping devices at a virtual address which isn't
+ * the same as the physical address.
+ */
+ if (mm->va < (NUM_UL1_ENTRIES * SECTION_SIZE))
+ panic("va conflicts with user ta address");
+
+ if ((mm->va | mm->pa | mm->size) & SECTION_MASK) {
+ region_size = SMALL_PAGE_SIZE;
+
+ /*
+ * Need finer grained mapping, if small pages aren't
+ * good enough, panic.
+ */
+ if ((mm->va | mm->pa | mm->size) & SMALL_PAGE_MASK)
+ panic("memarea can't be mapped");
+
+ attr = mattr_to_desc(1, mm->attr | TEE_MATTR_TABLE);
+ pa = map_page_memarea(mm);
+ } else {
+ region_size = SECTION_SIZE;
+
+ attr = mattr_to_desc(1, mm->attr);
+ pa = mm->pa;
+ }
+
+ m = (mm->va >> SECTION_SHIFT);
+ n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
+ while (n--) {
+ ttb[m] = pa | attr;
+ m++;
+ if (region_size == SECTION_SIZE)
+ pa += SECTION_SIZE;
+ else
+ pa += L2_TBL_SIZE;
+ }
+}
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ void *ttb1 = (void *)core_mmu_get_main_ttb_va();
+ size_t n;
+
+ /* reset L1 table */
+ memset(ttb1, 0, L1_TBL_SIZE);
+
+ for (n = 0; mm[n].size; n++)
+ map_memarea(mm + n, ttb1);
+}
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
+{
+ return paddr > 0x80000000;
+}
+
+void core_init_mmu_regs(void)
+{
+ uint32_t prrr;
+ uint32_t nmrr;
+ paddr_t ttb_pa = core_mmu_get_main_ttb_pa();
+
+ /* Enable Access flag (simplified access permissions) and TEX remap */
+ write_sctlr(read_sctlr() | SCTLR_AFE | SCTLR_TRE);
+
+ prrr = ATTR_DEVICE_PRRR | ATTR_IWBWA_OWBWA_PRRR;
+ nmrr = ATTR_DEVICE_NMRR | ATTR_IWBWA_OWBWA_NMRR;
+
+ prrr |= PRRR_NS1 | PRRR_DS1;
+
+ write_prrr(prrr);
+ write_nmrr(nmrr);
+
+
+ /*
+ * Program Domain access control register with two domains:
+ * domain 0: teecore
+ * domain 1: TA
+ */
+ write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
+ DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT));
+
+ /*
+ * Enable lookups using TTBR0 and TTBR1 with the split of addresses
+ * defined by TEE_MMU_TTBCR_N_VALUE.
+ */
+ write_ttbcr(TTBCR_N_VALUE);
+
+ write_ttbr0(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+ write_ttbr1(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
+{
+ assert(!(fsr & FSR_LPAE));
+
+ switch (fsr & FSR_FS_MASK) {
+ case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ case 0x4: /* DFSR[10,3:0] b00100 Fault on instr cache maintenance */
+ case 0x5: /* DFSR[10,3:0] b00101 Translation fault first level */
+ case 0x7: /* DFSR[10,3:0] b00111 Translation fault second level */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0xd: /* DFSR[10,3:0] b01101 Permission fault first level */
+ case 0xf: /* DFSR[10,3:0] b01111 Permission fault second level */
+ if (fsr & FSR_WNR)
+ return CORE_MMU_FAULT_WRITE_PERMISSION;
+ else
+ return CORE_MMU_FAULT_READ_PERMISSION;
+ case 0x3: /* DFSR[10,3:0] b00011 access bit fault on section */
+ case 0x6: /* DFSR[10,3:0] b00110 access bit fault on page */
+ return CORE_MMU_FAULT_ACCESS_BIT;
+ case (1 << 10) | 0x6:
+ /* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
+ return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
diff --git a/core/arch/arm/mm/mobj.c b/core/arch/arm/mm/mobj.c
new file mode 100644
index 0000000..5458638
--- /dev/null
+++ b/core/arch/arm/mm/mobj.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <keep.h>
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <stdlib.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+struct mobj *mobj_sec_ddr;
+
+/*
+ * mobj_phys implementation
+ */
+
+struct mobj_phys {
+ struct mobj mobj;
+ enum buf_is_attr battr;
+ uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
+ vaddr_t va;
+ paddr_t pa;
+};
+
+static struct mobj_phys *to_mobj_phys(struct mobj *mobj);
+
+static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ if (!moph->va)
+ return NULL;
+
+ return (void *)(moph->va + offset);
+}
+
+static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+ paddr_t p;
+
+ if (!pa)
+ return TEE_ERROR_GENERIC;
+
+ p = moph->pa + offs;
+
+ if (granule) {
+ if (granule != SMALL_PAGE_SIZE &&
+ granule != CORE_MMU_PGDIR_SIZE)
+ return TEE_ERROR_GENERIC;
+ p &= ~(granule - 1);
+ }
+
+ *pa = p;
+ return TEE_SUCCESS;
+}
+/* ifndef due to an asserting AArch64 linker */
+#ifndef ARM64
+KEEP_PAGER(mobj_phys_get_pa);
+#endif
+
+static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ if (!cattr)
+ return TEE_ERROR_GENERIC;
+
+ *cattr = moph->cattr;
+ return TEE_SUCCESS;
+}
+
+static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+ enum buf_is_attr a;
+
+ a = moph->battr;
+
+ switch (attr) {
+ case CORE_MEM_SEC:
+ return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
+ a == CORE_MEM_TA_RAM;
+ case CORE_MEM_NON_SEC:
+ return a == CORE_MEM_NSEC_SHM;
+ case CORE_MEM_TEE_RAM:
+ case CORE_MEM_TA_RAM:
+ case CORE_MEM_NSEC_SHM:
+ return attr == a;
+ default:
+ return false;
+ }
+}
+
+static void mobj_phys_free(struct mobj *mobj)
+{
+ struct mobj_phys *moph = to_mobj_phys(mobj);
+
+ free(moph);
+}
+
+static const struct mobj_ops mobj_phys_ops __rodata_unpaged = {
+ .get_va = mobj_phys_get_va,
+ .get_pa = mobj_phys_get_pa,
+ .get_cattr = mobj_phys_get_cattr,
+ .matches = mobj_phys_matches,
+ .free = mobj_phys_free,
+};
+
+static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_phys_ops);
+ return container_of(mobj, struct mobj_phys, mobj);
+}
+
+struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
+ enum buf_is_attr battr)
+{
+ struct mobj_phys *moph;
+ void *va;
+
+ if ((pa & CORE_MMU_USER_PARAM_MASK) ||
+ (size & CORE_MMU_USER_PARAM_MASK)) {
+ DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
+ return NULL;
+ }
+
+ va = phys_to_virt(pa, battr);
+ if (!va)
+ return NULL;
+
+ moph = calloc(1, sizeof(*moph));
+ if (!moph)
+ return NULL;
+
+ moph->battr = battr;
+ moph->cattr = cattr;
+ moph->mobj.size = size;
+ moph->mobj.ops = &mobj_phys_ops;
+ moph->pa = pa;
+ moph->va = (vaddr_t)va;
+
+ return &moph->mobj;
+}
+
+/*
+ * mobj_virt implementation
+ */
+
+static void mobj_virt_assert_type(struct mobj *mobj);
+
+static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
+{
+ mobj_virt_assert_type(mobj);
+
+ return (void *)(vaddr_t)offset;
+}
+
+static const struct mobj_ops mobj_virt_ops __rodata_unpaged = {
+ .get_va = mobj_virt_get_va,
+};
+
+static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
+{
+ assert(mobj->ops == &mobj_virt_ops);
+}
+
+struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };
+
+/*
+ * mobj_mm implementation
+ */
+
+struct mobj_mm {
+ tee_mm_entry_t *mm;
+ struct mobj *parent_mobj;
+ struct mobj mobj;
+};
+
+static struct mobj_mm *to_mobj_mm(struct mobj *mobj);
+
+static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
+{
+ tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;
+
+ return (mm->offset << mm->pool->shift) + offs;
+}
+
+static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
+{
+ return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
+ mobj_mm_offs(mobj, offs));
+}
+
+
+static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
+ mobj_mm_offs(mobj, offs), granule, pa);
+}
+/* ifndef due to an asserting AArch64 linker */
+#ifndef ARM64
+KEEP_PAGER(mobj_mm_get_pa);
+#endif
+
+static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
+}
+
+static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
+}
+
+static void mobj_mm_free(struct mobj *mobj)
+{
+ struct mobj_mm *m = to_mobj_mm(mobj);
+
+ tee_mm_free(m->mm);
+ free(m);
+}
+
+static const struct mobj_ops mobj_mm_ops __rodata_unpaged = {
+ .get_va = mobj_mm_get_va,
+ .get_pa = mobj_mm_get_pa,
+ .get_cattr = mobj_mm_get_cattr,
+ .matches = mobj_mm_matches,
+ .free = mobj_mm_free,
+};
+
+static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_mm_ops);
+ return container_of(mobj, struct mobj_mm, mobj);
+}
+
+struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
+ tee_mm_pool_t *pool)
+{
+ struct mobj_mm *m = calloc(1, sizeof(*m));
+
+ if (!m)
+ return NULL;
+
+ m->mm = tee_mm_alloc(pool, size);
+ if (!m->mm) {
+ free(m);
+ return NULL;
+ }
+
+ m->parent_mobj = mobj_parent;
+ m->mobj.size = size;
+ m->mobj.ops = &mobj_mm_ops;
+
+ return &m->mobj;
+}
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * mobj_paged implementation
+ */
+
+static void mobj_paged_free(struct mobj *mobj);
+
+static const struct mobj_ops mobj_paged_ops __rodata_unpaged = {
+ .free = mobj_paged_free,
+};
+
+static void mobj_paged_free(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_paged_ops);
+ free(mobj);
+}
+
+struct mobj *mobj_paged_alloc(size_t size)
+{
+ struct mobj *mobj = calloc(1, sizeof(*mobj));
+
+ if (mobj) {
+ mobj->size = size;
+ mobj->ops = &mobj_paged_ops;
+ }
+ return mobj;
+}
+
+/*
+ * mobj_seccpy_shm implementation
+ */
+
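+/*
+ * A mobj backed by pager-managed memory in the owning user TA's address
+ * space; as the name suggests it is meant to hold a secure copy of a shared
+ * memory buffer. get_va() only returns a pointer when called from the owning
+ * TA's context, and update_mapping() transfers the backing page tables when
+ * the buffer is handed over to another TA context.
+ */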
+struct mobj_seccpy_shm {
+ struct user_ta_ctx *utc;
+ vaddr_t va;
+ size_t pgdir_offset;
+ struct mobj mobj;
+};
+
+static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);
+
+static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
+{
+ assert(mobj_is_seccpy_shm(mobj));
+ return container_of(mobj, struct mobj_seccpy_shm, mobj);
+}
+
+static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
+{
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+
+ if (&m->utc->ctx != thread_get_tsd()->ctx)
+ return NULL;
+
+ if (offs >= mobj->size)
+ return NULL;
+ return (void *)(m->va + offs);
+}
+
+static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
+ enum buf_is_attr attr)
+{
+ assert(mobj_is_seccpy_shm(mobj));
+
+ return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
+}
+
+static void mobj_seccpy_shm_free(struct mobj *mobj)
+{
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+
+ tee_pager_rem_uta_region(m->utc, m->va, mobj->size);
+ tee_mmu_rem_rwmem(m->utc, mobj, m->va);
+ free(m);
+}
+
+static void mobj_seccpy_shm_update_mapping(struct mobj *mobj,
+ struct user_ta_ctx *utc, vaddr_t va)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);
+ size_t s;
+
+ if (utc == m->utc && va == m->va)
+ return;
+
+ s = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
+ pgt_transfer(&tsd->pgt_cache, &m->utc->ctx, m->va, &utc->ctx, va, s);
+
+ m->va = va;
+ m->utc = utc;
+}
+
+static const struct mobj_ops mobj_seccpy_shm_ops __rodata_unpaged = {
+ .get_va = mobj_seccpy_shm_get_va,
+ .matches = mobj_seccpy_shm_matches,
+ .free = mobj_seccpy_shm_free,
+ .update_mapping = mobj_seccpy_shm_update_mapping,
+};
+
+static bool mobj_is_seccpy_shm(struct mobj *mobj)
+{
+ return mobj && mobj->ops == &mobj_seccpy_shm_ops;
+}
+
+struct mobj *mobj_seccpy_shm_alloc(size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct mobj_seccpy_shm *m;
+ struct user_ta_ctx *utc;
+ vaddr_t va = 0;
+
+ if (!is_user_ta_ctx(tsd->ctx))
+ return NULL;
+ utc = to_user_ta_ctx(tsd->ctx);
+
+ m = calloc(1, sizeof(*m));
+ if (!m)
+ return NULL;
+
+ m->mobj.size = size;
+ m->mobj.ops = &mobj_seccpy_shm_ops;
+
+ if (tee_mmu_add_rwmem(utc, &m->mobj, -1, &va) != TEE_SUCCESS)
+ goto bad;
+
+ if (!tee_pager_add_uta_area(utc, va, size))
+ goto bad;
+
+ m->va = va;
+ m->pgdir_offset = va & CORE_MMU_PGDIR_MASK;
+ m->utc = to_user_ta_ctx(tsd->ctx);
+ return &m->mobj;
+bad:
+ if (va)
+ tee_mmu_rem_rwmem(utc, &m->mobj, va);
+ free(m);
+ return NULL;
+}
+
+bool mobj_is_paged(struct mobj *mobj)
+{
+ return mobj->ops == &mobj_paged_ops ||
+ mobj->ops == &mobj_seccpy_shm_ops;
+}
+#endif /*CFG_PAGED_USER_TA*/
diff --git a/core/arch/arm/mm/pager_aes_gcm.c b/core/arch/arm/mm/pager_aes_gcm.c
new file mode 100644
index 0000000..e0ff286
--- /dev/null
+++ b/core/arch/arm/mm/pager_aes_gcm.c
@@ -0,0 +1,348 @@
+/*
+ * Galois/Counter Mode (GCM) and GMAC with AES
+ *
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2012, Jouni Malinen <j@w1.fi>
+ *
+ * This software may be distributed under the terms of the BSD license.
+ * See README for more details.
+ *
+ * The license part of what was the "README" above:
+ * License
+ * -------
+ *
+ * This software may be distributed, used, and modified under the terms of
+ * BSD license:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name(s) of the above-listed copyright holder(s) nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include "pager_private.h"
+#include <tomcrypt.h>
+#include <trace.h>
+#include <utee_defines.h>
+#include <util.h>
+
+/*
+ * Source copied from git://w1.fi/srv/git/hostap.git files
+ * src/utils/common.h and src/crypto/aes-gcm.c
+ *
+ * The source has been modified for the pager use case.
+ */
+
+#define BLOCK_ALIGNMENT sizeof(uint64_t)
+
+static uint32_t get_be32(const void *a)
+{
+ return TEE_U32_FROM_BIG_ENDIAN(*(const uint32_t *)a);
+}
+
+static void put_be32(void *a, uint32_t val)
+{
+ *(uint32_t *)a = TEE_U32_TO_BIG_ENDIAN(val);
+}
+
+static void put_be64(void *a, uint64_t val)
+{
+ *(uint64_t *)a = TEE_U64_TO_BIG_ENDIAN(val);
+}
+
+static void aes_encrypt(symmetric_key *skey, const uint8_t *plain,
+ uint8_t *crypt)
+{
+ aes_ecb_encrypt(plain, crypt, skey);
+}
+
+static void inc32(uint8_t *block)
+{
+ uint32_t val;
+
+ val = get_be32(block + TEE_AES_BLOCK_SIZE - 4);
+ val++;
+ put_be32(block + TEE_AES_BLOCK_SIZE - 4, val);
+}
+
+static void xor_block(void *dst, const void *src)
+{
+ uint64_t *d = dst;
+ const uint64_t *s = src;
+
+ *d++ ^= *s++;
+ *d++ ^= *s++;
+}
+
+static void shift_right_block(uint8_t *v)
+{
+ uint32_t next_val;
+ uint32_t val;
+
+ val = get_be32(v + 12);
+ next_val = get_be32(v + 8);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 12, val);
+
+ val = next_val;
+ next_val = get_be32(v + 4);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 8, val);
+
+ val = next_val;
+ next_val = get_be32(v);
+ val >>= 1;
+ val |= next_val << 31;
+ put_be32(v + 4, val);
+
+ val = next_val;
+ val >>= 1;
+ put_be32(v, val);
+}
+
+/* Multiplication in GF(2^128) */
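+/*
+ * Bitwise implementation: for each bit of x (most significant bit of byte 0
+ * first) conditionally XOR V into Z, then shift V right one bit, folding in
+ * GCM's reduction polynomial x^128 + x^7 + x^2 + x + 1 (the 0xe1 constant
+ * below, in GCM's bit-reflected representation) when a bit is shifted out.
+ */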
+static void gf_mult(const uint8_t *x, const uint8_t *y, uint8_t *z)
+{
+ uint8_t v[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ unsigned i;
+ unsigned j;
+
+ memset(z, 0, TEE_AES_BLOCK_SIZE); /* Z_0 = 0^128 */
+ memcpy(v, y, TEE_AES_BLOCK_SIZE); /* V_0 = Y */
+
+ for (i = 0; i < TEE_AES_BLOCK_SIZE; i++) {
+ for (j = 0; j < 8; j++) {
+ if (x[i] & BIT(7 - j)) {
+ /* Z_(i + 1) = Z_i XOR V_i */
+ xor_block(z, v);
+ } else {
+ /* Z_(i + 1) = Z_i */
+ }
+
+ if (v[15] & 0x01) {
+ /* V_(i + 1) = (V_i >> 1) XOR R */
+ shift_right_block(v);
+ /* R = 11100001 || 0^120 */
+ v[0] ^= 0xe1;
+ } else {
+ /* V_(i + 1) = V_i >> 1 */
+ shift_right_block(v);
+ }
+ }
+ }
+}
+
+static void ghash_start(uint8_t *y)
+{
+ /* Y_0 = 0^128 */
+ memset(y, 0, TEE_AES_BLOCK_SIZE);
+}
+
+
+static void ghash(const uint8_t *h, const uint8_t *in, size_t len, uint8_t *out)
+{
+ size_t n;
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ /* We're only dealing with complete blocks */
+ assert(!(len % TEE_AES_BLOCK_SIZE));
+
+ for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
+ /* Y_i = (Y^(i-1) XOR X_i) dot H */
+ xor_block(out, in + n);
+
+ /* dot operation:
+ * multiplication operation for binary Galois (finite) field of
+ * 2^128 elements */
+ gf_mult(out, h, tmp);
+ memcpy(out, tmp, TEE_AES_BLOCK_SIZE);
+ }
+ /* Return Y_m */
+}
+
+static bool aes_gcm_init_hash_subkey(symmetric_key *skey, const uint8_t *key,
+ size_t key_len, uint8_t *H)
+{
+ if (aes_setup(key, key_len, 0, skey) != CRYPT_OK)
+ return false;
+
+ /* Generate hash subkey H = AES_K(0^128) */
+ memset(H, 0, TEE_AES_BLOCK_SIZE);
+ aes_encrypt(skey, H, H);
+ return true;
+}
+
+
+static void aes_gcm_prepare_j0(const struct pager_aes_gcm_iv *iv, uint8_t *J0)
+{
+ /* Prepare block J_0 = IV || 0^31 || 1 [len(IV) = 96] */
+ memcpy(J0, iv, sizeof(*iv));
+ memset(J0 + sizeof(*iv), 0, TEE_AES_BLOCK_SIZE - sizeof(*iv));
+ J0[TEE_AES_BLOCK_SIZE - 1] = 0x01;
+}
+
+static void aes_gcm_core(symmetric_key *skey, bool enc, const uint8_t *J0,
+ const uint8_t *H, const uint8_t *in, size_t len,
+ uint8_t *out, uint8_t *tmp, uint8_t *S)
+{
+ uint8_t J0inc[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ size_t n;
+
+ /* We're only dealing with complete blocks */
+ assert(len && !(len % TEE_AES_BLOCK_SIZE));
+
+ /*
+ * Below in the loop we're doing the encryption and hashing
+ * on each block interleaved since the encrypted data is stored
+ * in less secure memory.
+ */
+
+ /*
+ * u = 128 * ceil[len(C)/128] - len(C)
+ * v = 128 * ceil[len(A)/128] - len(A)
+ * S = GHASH_H(A || 0^v || C || 0^u || [len(A)]64 || [len(C)]64)
+ * (i.e., zero padded to block size A || C and lengths of each in bits)
+ */
+ ghash_start(S);
+
+
+ memcpy(J0inc, J0, TEE_AES_BLOCK_SIZE);
+ inc32(J0inc);
+
+ /* Full blocks */
+ for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
+ aes_encrypt(skey, J0inc, tmp);
+ xor_block(tmp, in + n);
+ memcpy(out + n, tmp, TEE_AES_BLOCK_SIZE);
+ inc32(J0inc);
+
+ /* Hash */
+ if (enc)
+ xor_block(S, tmp);
+ else
+ xor_block(S, in + n);
+ gf_mult(S, H, tmp);
+ memcpy(S, tmp, TEE_AES_BLOCK_SIZE);
+ }
+
+ put_be64(tmp, 0); /* no aad */
+ put_be64(tmp + 8, len * 8);
+ ghash(H, tmp, TEE_AES_BLOCK_SIZE, S);
+}
+
+/**
+ * aes_gcm_ae - GCM-AE_K(IV, P, A)
+ */
+static bool aes_gcm_ae(const uint8_t *key, size_t key_len,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t *plain, size_t plain_len,
+ uint8_t *crypt, uint8_t *tag)
+{
+ symmetric_key skey;
+ uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
+ return false;
+
+ aes_gcm_prepare_j0(iv, J0);
+
+ /* C = GCTR_K(inc_32(J_0), P) */
+ aes_gcm_core(&skey, true, J0, H, plain, plain_len, crypt, tmp, S);
+
+ /* T = MSB_t(GCTR_K(J_0, S)) */
+ aes_encrypt(&skey, J0, tag);
+ xor_block(tag, S);
+
+ /* Return (C, T) */
+
+ aes_done(&skey);
+
+ return true;
+}
+
+/**
+ * aes_gcm_ad - GCM-AD_K(IV, C, A, T)
+ */
+static bool aes_gcm_ad(const uint8_t *key, size_t key_len,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t *crypt, size_t crypt_len,
+ const uint8_t *tag, uint8_t *plain)
+{
+ symmetric_key skey;
+ uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+ uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
+
+ if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
+ return false;
+
+ aes_gcm_prepare_j0(iv, J0);
+
+ /* P = GCTR_K(inc_32(J_0), C) */
+ aes_gcm_core(&skey, false, J0, H, crypt, crypt_len, plain, tmp, S);
+
+ /* T' = MSB_t(GCTR_K(J_0, S)) */
+ aes_encrypt(&skey, J0, tmp);
+ xor_block(tmp, S);
+
+ aes_done(&skey);
+
+ return !buf_compare_ct(tag, tmp, TEE_AES_BLOCK_SIZE);
+}
+
+static bool check_block_alignment(const void *p)
+{
+ return !((vaddr_t)p % BLOCK_ALIGNMENT);
+}
+
+bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen)
+{
+ if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
+ !check_block_alignment(src) || !check_block_alignment(dst))
+ return false;
+ return aes_gcm_ad(key, keylen, iv, src, datalen, tag, dst);
+}
+
+bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen)
+{
+ if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
+ !check_block_alignment(src) || !check_block_alignment(dst))
+ return false;
+ return aes_gcm_ae(key, keylen, iv, src, datalen, dst, tag);
+}
diff --git a/core/arch/arm/mm/pager_private.h b/core/arch/arm/mm/pager_private.h
new file mode 100644
index 0000000..e7acf95
--- /dev/null
+++ b/core/arch/arm/mm/pager_private.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <types_ext.h>
+
+struct pager_aes_gcm_iv {
+ uint32_t iv[3];
+};
+
+#define PAGER_AES_GCM_TAG_LEN 16
+
+bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen);
+
+bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
+ const struct pager_aes_gcm_iv *iv,
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN],
+ const void *src, void *dst, size_t datalen);
+
diff --git a/core/arch/arm/mm/pgt_cache.c b/core/arch/arm/mm/pgt_cache.c
new file mode 100644
index 0000000..76c9e6e
--- /dev/null
+++ b/core/arch/arm/mm/pgt_cache.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/mutex.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_pager.h>
+#include <stdlib.h>
+#include <trace.h>
+#include <util.h>
+
+/*
+ * With the pager enabled we allocate page tables from the pager.
+ *
+ * For LPAE each page table is a complete page which is allocated and freed
+ * using the interface provided by the pager.
+ *
+ * For compat v7 page tables there's room for four page tables in one page,
+ * so we need to keep track of how much of an allocated page is used. When
+ * a page is completely unused it's returned to the pager.
+ *
+ * With the pager disabled we have a static allocation of page tables instead.
+ *
+ * In all cases we limit the number of active page tables to
+ * PGT_CACHE_SIZE. This pool of page tables is shared between all
+ * threads. If a thread can't allocate the needed number of page tables it
+ * releases all its current tables and waits for some more to be freed. A
+ * thread's allocated tables are freed each time a TA is unmapped, so each
+ * thread should be able to allocate the needed tables in turn if needed.
+ */
+
+#if defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
+struct pgt_parent {
+ size_t num_used;
+ struct pgt_cache pgt_cache;
+};
+
+static struct pgt_parent pgt_parents[PGT_CACHE_SIZE / PGT_NUM_PGT_PER_PAGE];
+#else
+
+static struct pgt_cache pgt_free_list = SLIST_HEAD_INITIALIZER(pgt_free_list);
+#endif
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * When a user TA context is temporarily unmapped the used struct pgt's of
+ * the context (page tables holding valid physical pages) are saved in this
+ * cache in the hope that some of the valid physical pages may still be
+ * valid when the context is mapped again.
+ */
+static struct pgt_cache pgt_cache_list = SLIST_HEAD_INITIALIZER(pgt_cache_list);
+#endif
+
+static struct pgt pgt_entries[PGT_CACHE_SIZE];
+
+static struct mutex pgt_mu = MUTEX_INITIALIZER;
+static struct condvar pgt_cv = CONDVAR_INITIALIZER;
+
+#if defined(CFG_WITH_PAGER) && defined(CFG_WITH_LPAE)
+void pgt_init(void)
+{
+ size_t n;
+
+ for (n = 0; n < PGT_CACHE_SIZE; n++) {
+ struct pgt *p = pgt_entries + n;
+
+ p->tbl = tee_pager_alloc(PGT_SIZE, TEE_MATTR_LOCKED);
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+ }
+}
+#elif defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
+void pgt_init(void)
+{
+ size_t n;
+ size_t m;
+
+ COMPILE_TIME_ASSERT(PGT_CACHE_SIZE % PGT_NUM_PGT_PER_PAGE == 0);
+ COMPILE_TIME_ASSERT(PGT_SIZE * PGT_NUM_PGT_PER_PAGE == SMALL_PAGE_SIZE);
+
+ for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
+ uint8_t *tbl = tee_pager_alloc(SMALL_PAGE_SIZE,
+ TEE_MATTR_LOCKED);
+
+ SLIST_INIT(&pgt_parents[n].pgt_cache);
+ for (m = 0; m < PGT_NUM_PGT_PER_PAGE; m++) {
+ struct pgt *p = pgt_entries +
+ n * PGT_NUM_PGT_PER_PAGE + m;
+
+ p->tbl = tbl + m * PGT_SIZE;
+ p->parent = &pgt_parents[n];
+ SLIST_INSERT_HEAD(&pgt_parents[n].pgt_cache, p, link);
+ }
+ }
+}
+#else
+void pgt_init(void)
+{
+ /*
+ * We're putting this in .nozi.* instead of .bss because .nozi.* already
+ * has a large alignment, while .bss has a small alignment. The current
+ * link script is optimized for small alignment in .bss
+ */
+ static uint8_t pgt_tables[PGT_CACHE_SIZE][PGT_SIZE]
+ __aligned(PGT_SIZE) __section(".nozi.pgt_cache");
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(pgt_tables); n++) {
+ struct pgt *p = pgt_entries + n;
+
+ p->tbl = pgt_tables[n];
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+ }
+}
+#endif
+
+#if defined(CFG_WITH_LPAE) || !defined(CFG_WITH_PAGER)
+static struct pgt *pop_from_free_list(void)
+{
+ struct pgt *p = SLIST_FIRST(&pgt_free_list);
+
+ if (p) {
+ SLIST_REMOVE_HEAD(&pgt_free_list, link);
+ memset(p->tbl, 0, PGT_SIZE);
+ }
+ return p;
+}
+
+static void push_to_free_list(struct pgt *p)
+{
+ SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+#if defined(CFG_WITH_PAGER)
+ tee_pager_release_phys(p->tbl, PGT_SIZE);
+#endif
+}
+#else
+static struct pgt *pop_from_free_list(void)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
+ struct pgt *p = SLIST_FIRST(&pgt_parents[n].pgt_cache);
+
+ if (p) {
+ SLIST_REMOVE_HEAD(&pgt_parents[n].pgt_cache, link);
+ pgt_parents[n].num_used++;
+ memset(p->tbl, 0, PGT_SIZE);
+ return p;
+ }
+ }
+ return NULL;
+}
+
+static void push_to_free_list(struct pgt *p)
+{
+ SLIST_INSERT_HEAD(&p->parent->pgt_cache, p, link);
+ assert(p->parent->num_used > 0);
+ p->parent->num_used--;
+ if (!p->parent->num_used) {
+ vaddr_t va = (vaddr_t)p->tbl & ~SMALL_PAGE_MASK;
+
+ tee_pager_release_phys((void *)va, SMALL_PAGE_SIZE);
+ }
+}
+#endif
+
+#ifdef CFG_PAGED_USER_TA
+static void push_to_cache_list(struct pgt *pgt)
+{
+ SLIST_INSERT_HEAD(&pgt_cache_list, pgt, link);
+}
+
+static bool match_pgt(struct pgt *pgt, vaddr_t vabase, void *ctx)
+{
+ return pgt->ctx == ctx && pgt->vabase == vabase;
+}
+
+static struct pgt *pop_from_cache_list(vaddr_t vabase, void *ctx)
+{
+ struct pgt *pgt;
+ struct pgt *p;
+
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ if (!pgt)
+ return NULL;
+ if (match_pgt(pgt, vabase, ctx)) {
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ return pgt;
+ }
+
+ while (true) {
+ p = SLIST_NEXT(pgt, link);
+ if (!p)
+ break;
+ if (match_pgt(p, vabase, ctx)) {
+ SLIST_REMOVE_AFTER(pgt, link);
+ break;
+ }
+ pgt = p;
+ }
+ return p;
+}
+
+static struct pgt *pop_least_used_from_cache_list(void)
+{
+ struct pgt *pgt;
+ struct pgt *p_prev = NULL;
+ size_t least_used;
+
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ if (!pgt)
+ return NULL;
+ if (!pgt->num_used_entries)
+ goto out;
+ least_used = pgt->num_used_entries;
+
+ while (true) {
+ if (!SLIST_NEXT(pgt, link))
+ break;
+ if (SLIST_NEXT(pgt, link)->num_used_entries <= least_used) {
+ p_prev = pgt;
+ least_used = SLIST_NEXT(pgt, link)->num_used_entries;
+ }
+ pgt = SLIST_NEXT(pgt, link);
+ }
+
+out:
+ if (p_prev) {
+ pgt = SLIST_NEXT(p_prev, link);
+ SLIST_REMOVE_AFTER(p_prev, link);
+ } else {
+ pgt = SLIST_FIRST(&pgt_cache_list);
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ }
+ return pgt;
+}
+
+static void pgt_free_unlocked(struct pgt_cache *pgt_cache, bool save_ctx)
+{
+ while (!SLIST_EMPTY(pgt_cache)) {
+ struct pgt *p = SLIST_FIRST(pgt_cache);
+
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ if (save_ctx && p->num_used_entries) {
+ push_to_cache_list(p);
+ } else {
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+
+ push_to_free_list(p);
+ }
+ }
+}
+
+static struct pgt *pop_from_some_list(vaddr_t vabase, void *ctx)
+{
+ struct pgt *p = pop_from_cache_list(vabase, ctx);
+
+ if (p)
+ return p;
+ p = pop_from_free_list();
+ if (!p) {
+ p = pop_least_used_from_cache_list();
+ if (!p)
+ return NULL;
+ tee_pager_pgt_save_and_release_entries(p);
+ }
+ assert(!p->num_used_entries);
+ p->ctx = ctx;
+ p->vabase = vabase;
+ return p;
+}
+
+void pgt_flush_ctx(struct tee_ta_ctx *ctx)
+{
+ struct pgt *p;
+ struct pgt *pp = NULL;
+
+ mutex_lock(&pgt_mu);
+
+ while (true) {
+ p = SLIST_FIRST(&pgt_cache_list);
+ if (!p)
+ goto out;
+ if (p->ctx != ctx)
+ break;
+ SLIST_REMOVE_HEAD(&pgt_cache_list, link);
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+ push_to_free_list(p);
+ }
+
+ pp = p;
+ while (true) {
+ p = SLIST_NEXT(pp, link);
+ if (!p)
+ break;
+ if (p->ctx == ctx) {
+ SLIST_REMOVE_AFTER(pp, link);
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+ push_to_free_list(p);
+ } else {
+ pp = p;
+ }
+ }
+
+out:
+ mutex_unlock(&pgt_mu);
+}
+
+static void flush_pgt_entry(struct pgt *p)
+{
+ tee_pager_pgt_save_and_release_entries(p);
+ assert(!p->num_used_entries);
+ p->ctx = NULL;
+ p->vabase = 0;
+}
+
+static bool pgt_entry_matches(struct pgt *p, void *ctx, vaddr_t begin,
+ vaddr_t last)
+{
+ if (!p)
+ return false;
+ if (p->ctx != ctx)
+ return false;
+ if (last <= begin)
+ return false;
+ if (!core_is_buffer_inside(p->vabase, SMALL_PAGE_SIZE, begin,
+ last - begin))
+ return false;
+
+ return true;
+}
+
+static void flush_ctx_range_from_list(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ struct pgt *p;
+ struct pgt *next_p;
+
+ /*
+ * Do the special case where the first element in the list is
+ * removed first.
+ */
+ p = SLIST_FIRST(pgt_cache);
+ while (pgt_entry_matches(p, ctx, begin, last)) {
+ flush_pgt_entry(p);
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ push_to_free_list(p);
+ p = SLIST_FIRST(pgt_cache);
+ }
+
+ /*
+ * p either points to the first element in the list or it's NULL,
+ * if NULL the list is empty and we're done.
+ */
+ if (!p)
+ return;
+
+ /*
+ * Do the common case where the next element in the list is
+ * removed.
+ */
+ while (true) {
+ next_p = SLIST_NEXT(p, link);
+ if (!next_p)
+ break;
+ if (pgt_entry_matches(next_p, ctx, begin, last)) {
+ flush_pgt_entry(next_p);
+ SLIST_REMOVE_AFTER(p, link);
+ push_to_free_list(next_p);
+ continue;
+ }
+
+ p = SLIST_NEXT(p, link);
+ }
+}
+
+void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ mutex_lock(&pgt_mu);
+
+ flush_ctx_range_from_list(pgt_cache, ctx, begin, last);
+ flush_ctx_range_from_list(&pgt_cache_list, ctx, begin, last);
+
+ condvar_broadcast(&pgt_cv);
+ mutex_unlock(&pgt_mu);
+}
+
+static void transfer_tables(struct pgt_cache *pgt_cache, void *old_ctx,
+ vaddr_t old_va, void *new_ctx, vaddr_t new_va,
+ size_t size)
+{
+ const size_t pgtsize = CORE_MMU_PGDIR_SIZE;
+ const vaddr_t new_base = ROUNDDOWN(new_va, pgtsize);
+ const vaddr_t old_base = ROUNDDOWN(old_va, pgtsize);
+ const size_t num_new_pgt = (size - 1 + new_va - new_base) / pgtsize + 1;
+ const size_t num_old_pgt = (size - 1 + old_va - old_base) / pgtsize + 1;
+ struct pgt *new_pgt[num_new_pgt];
+ struct pgt *old_pgt[num_old_pgt];
+ struct pgt *pgt;
+ size_t n;
+
+ /*
+	 * Fill in new_pgt based on pgt_cache. Note that the page tables
+	 * should already have been allocated.
+ */
+ SLIST_FOREACH(pgt, pgt_cache, link) {
+ if (pgt->vabase < new_base)
+ continue;
+ n = (pgt->vabase - new_base) / pgtsize;
+ if (n < num_new_pgt)
+ new_pgt[n] = pgt;
+ }
+ for (n = 0; n < num_new_pgt; n++) {
+ assert(new_pgt[n]);
+ assert(new_pgt[n]->ctx == new_ctx);
+ }
+
+ mutex_lock(&pgt_mu);
+
+ /* Extract the array of pgts that need their content transferred */
+ for (n = 0; n < num_old_pgt; n++) {
+ /*
+ * If the pgt isn't in the cache list there's nothing to
+ * transfer, so NULL here is OK.
+ */
+ old_pgt[n] = pop_from_cache_list(old_base + n * pgtsize,
+ old_ctx);
+ }
+
+ tee_pager_transfer_uta_region(to_user_ta_ctx(old_ctx), old_va,
+ to_user_ta_ctx(new_ctx), new_va, new_pgt,
+ size);
+
+ for (n = 0; n < num_old_pgt; n++) {
+ if (!old_pgt[n])
+ continue;
+
+ if (old_pgt[n]->num_used_entries)
+ push_to_cache_list(old_pgt[n]);
+ else
+ push_to_free_list(old_pgt[n]);
+ }
+
+ mutex_unlock(&pgt_mu);
+}
+
+void pgt_transfer(struct pgt_cache *pgt_cache, void *old_ctx, vaddr_t old_va,
+ void *new_ctx, vaddr_t new_va, size_t size)
+{
+ if (size)
+ transfer_tables(pgt_cache, old_ctx, old_va, new_ctx,
+ new_va, size);
+}
+
+#else /*!CFG_PAGED_USER_TA*/
+
+static void pgt_free_unlocked(struct pgt_cache *pgt_cache,
+ bool save_ctx __unused)
+{
+ while (!SLIST_EMPTY(pgt_cache)) {
+ struct pgt *p = SLIST_FIRST(pgt_cache);
+
+ SLIST_REMOVE_HEAD(pgt_cache, link);
+ push_to_free_list(p);
+ }
+}
+
+static struct pgt *pop_from_some_list(vaddr_t vabase __unused,
+ void *ctx __unused)
+{
+ return pop_from_free_list();
+}
+#endif /*!CFG_PAGED_USER_TA*/
+
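+/*
+ * Allocate one table per CORE_MMU_PGDIR_SIZE chunk of [begin, last] and
+ * link them onto @pgt_cache in ascending address order. On failure the
+ * tables gathered so far are handed back and false is returned.
+ */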
+static bool pgt_alloc_unlocked(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ const vaddr_t base = ROUNDDOWN(begin, CORE_MMU_PGDIR_SIZE);
+ const size_t num_tbls = ((last - base) >> CORE_MMU_PGDIR_SHIFT) + 1;
+ size_t n = 0;
+ struct pgt *p;
+ struct pgt *pp = NULL;
+
+ while (n < num_tbls) {
+ p = pop_from_some_list(base + n * CORE_MMU_PGDIR_SIZE, ctx);
+ if (!p) {
+ pgt_free_unlocked(pgt_cache, ctx);
+ return false;
+ }
+
+ if (pp)
+ SLIST_INSERT_AFTER(pp, p, link);
+ else
+ SLIST_INSERT_HEAD(pgt_cache, p, link);
+ pp = p;
+ n++;
+ }
+
+ return true;
+}
+
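+/*
+ * Populate @pgt_cache with tables covering [begin, last]. If not enough
+ * tables are available, wake any waiters and sleep on the condvar until
+ * tables are freed, then retry.
+ */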
+void pgt_alloc(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last)
+{
+ if (last <= begin)
+ return;
+
+ mutex_lock(&pgt_mu);
+
+ pgt_free_unlocked(pgt_cache, ctx);
+ while (!pgt_alloc_unlocked(pgt_cache, ctx, begin, last)) {
+ DMSG("Waiting for page tables");
+ condvar_broadcast(&pgt_cv);
+ condvar_wait(&pgt_cv, &pgt_mu);
+ }
+
+ mutex_unlock(&pgt_mu);
+}
+
+void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx)
+{
+ if (SLIST_EMPTY(pgt_cache))
+ return;
+
+ mutex_lock(&pgt_mu);
+
+ pgt_free_unlocked(pgt_cache, save_ctx);
+
+ condvar_broadcast(&pgt_cv);
+ mutex_unlock(&pgt_mu);
+}
diff --git a/core/arch/arm/mm/sub.mk b/core/arch/arm/mm/sub.mk
new file mode 100644
index 0000000..71f70f3
--- /dev/null
+++ b/core/arch/arm/mm/sub.mk
@@ -0,0 +1,12 @@
+srcs-y += core_mmu.c
+srcs-$(CFG_WITH_PAGER) += tee_pager.c
+srcs-$(CFG_WITH_PAGER) += pager_aes_gcm.c
+srcs-y += tee_mmu.c
+ifeq ($(CFG_WITH_LPAE),y)
+srcs-y += core_mmu_lpae.c
+else
+srcs-y += core_mmu_v7.c
+endif
+srcs-y += tee_mm.c
+srcs-$(CFG_SMALL_PAGE_USER_TA) += pgt_cache.c
+srcs-y += mobj.c
diff --git a/core/arch/arm/mm/tee_mm.c b/core/arch/arm/mm/tee_mm.c
new file mode 100644
index 0000000..22a39df
--- /dev/null
+++ b/core/arch/arm/mm/tee_mm.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/panic.h>
+#include <kernel/tee_common.h>
+#include <util.h>
+#include <trace.h>
+
+#include <mm/tee_mm.h>
+#include <mm/tee_pager.h>
+
+bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_t hi, uint8_t shift,
+ uint32_t flags)
+{
+ if (pool == NULL)
+ return false;
+
+ lo = ROUNDUP(lo, 1 << shift);
+ hi = ROUNDDOWN(hi, 1 << shift);
+
+ assert(((uint64_t)(hi - lo) >> shift) < (uint64_t)UINT32_MAX);
+
+ pool->lo = lo;
+ pool->hi = hi;
+ pool->shift = shift;
+ pool->flags = flags;
+ pool->entry = calloc(1, sizeof(tee_mm_entry_t));
+
+ if (pool->entry == NULL)
+ return false;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ pool->entry->offset = ((hi - lo - 1) >> shift) + 1;
+ pool->entry->pool = pool;
+
+ return true;
+}
+
+void tee_mm_final(tee_mm_pool_t *pool)
+{
+ if (pool == NULL || pool->entry == NULL)
+ return;
+
+ while (pool->entry->next != NULL)
+ tee_mm_free(pool->entry->next);
+ free(pool->entry);
+ pool->entry = NULL;
+}
+
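+/*
+ * Insert a new entry directly after @p in the pool list; offset, size
+ * and pool are filled in by the caller.
+ */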
+static tee_mm_entry_t *tee_mm_add(tee_mm_entry_t *p)
+{
+ /* add to list */
+ if (p->next == NULL) {
+ p->next = malloc(sizeof(tee_mm_entry_t));
+ if (p->next == NULL)
+ return NULL;
+ p->next->next = NULL;
+ } else {
+ tee_mm_entry_t *nn = malloc(sizeof(tee_mm_entry_t));
+
+ if (nn == NULL)
+ return NULL;
+ nn->next = p->next;
+ p->next = nn;
+ }
+ return p->next;
+}
+
+#ifdef CFG_WITH_STATS
+static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
+{
+ tee_mm_entry_t *entry;
+ uint32_t sz = 0;
+
+ if (!pool)
+ return 0;
+
+ entry = pool->entry;
+ while (entry) {
+ sz += entry->size;
+ entry = entry->next;
+ }
+
+ return sz << pool->shift;
+}
+
+void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct malloc_stats *stats,
+ bool reset)
+{
+ memset(stats, 0, sizeof(*stats));
+
+ stats->size = pool->hi - pool->lo;
+ stats->max_allocated = pool->max_allocated;
+ stats->allocated = tee_mm_stats_allocated(pool);
+
+ if (reset)
+ pool->max_allocated = 0;
+}
+
+static void update_max_allocated(tee_mm_pool_t *pool)
+{
+ size_t sz = tee_mm_stats_allocated(pool);
+
+ if (sz > pool->max_allocated)
+ pool->max_allocated = sz;
+}
+#else /* CFG_WITH_STATS */
+static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
+{
+}
+#endif /* CFG_WITH_STATS */
+
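+/*
+ * Allocate @size bytes from @pool using a first-fit scan of the gaps
+ * between existing entries. Offsets and sizes are kept in units of
+ * (1 << pool->shift); TEE_MM_POOL_HI_ALLOC pools allocate downwards
+ * from the top of the pool.
+ */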
+tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
+{
+ size_t psize;
+ tee_mm_entry_t *entry;
+ tee_mm_entry_t *nn;
+ size_t remaining;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ entry = pool->entry;
+ if (size == 0)
+ psize = 0;
+ else
+ psize = ((size - 1) >> pool->shift) + 1;
+ /* Protect with mutex (multi thread) */
+
+ /* find free slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL && psize >
+ (entry->offset - entry->next->offset -
+ entry->next->size))
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && psize >
+ (entry->next->offset - entry->size - entry->offset))
+ entry = entry->next;
+ }
+
+ /* check if we have enough memory */
+ if (entry->next == NULL) {
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ /*
+ * entry->offset is a "block count" offset from
+ * pool->lo. The byte offset is
+ * (entry->offset << pool->shift).
+			 * In the HI_ALLOC allocation scheme the memory is
+			 * allocated from the end of the segment, so to check
+			 * that there is enough memory verify that
+			 * (entry->offset << pool->shift) >= size.
+ */
+ if ((entry->offset << pool->shift) < size)
+ /* out of memory */
+ return NULL;
+ } else {
+ if (pool->hi <= pool->lo)
+ panic("invalid pool");
+
+ remaining = (pool->hi - pool->lo);
+ remaining -= ((entry->offset + entry->size) <<
+ pool->shift);
+
+ if (remaining < size)
+ /* out of memory */
+ return NULL;
+ }
+ }
+
+ nn = tee_mm_add(entry);
+ if (nn == NULL)
+ return NULL;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ nn->offset = entry->offset - psize;
+ else
+ nn->offset = entry->offset + entry->size;
+ nn->size = psize;
+ nn->pool = pool;
+
+ update_max_allocated(pool);
+
+ /* Protect with mutex end (multi thread) */
+
+ return nn;
+}
+
+static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
+ paddr_t offslo, paddr_t offshi)
+{
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ if (offshi > e->offset ||
+ (e->next != NULL &&
+ (offslo < e->next->offset + e->next->size)) ||
+ (offshi << pool->shift) - 1 > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ } else {
+ if (offslo < (e->offset + e->size) ||
+ (e->next != NULL && (offshi > e->next->offset)) ||
+ (offshi << pool->shift) > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ }
+
+ return true;
+}
+
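+/*
+ * Allocate the fixed range [base, base + size) from @pool, failing if it
+ * lies outside the pool or overlaps an already allocated entry.
+ */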
+tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
+{
+ tee_mm_entry_t *entry;
+ paddr_t offslo;
+ paddr_t offshi;
+ tee_mm_entry_t *mm;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ /* Wrapping and sanity check */
+ if ((base + size) < base || base < pool->lo)
+ return NULL;
+
+ entry = pool->entry;
+ offslo = (base - pool->lo) >> pool->shift;
+ offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;
+
+ /* find slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL &&
+ offshi < entry->next->offset + entry->next->size)
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && offslo > entry->next->offset)
+ entry = entry->next;
+ }
+
+ /* Check that memory is available */
+ if (!fit_in_gap(pool, entry, offslo, offshi))
+ return NULL;
+
+ mm = tee_mm_add(entry);
+ if (mm == NULL)
+ return NULL;
+
+ mm->offset = offslo;
+ mm->size = offshi - offslo;
+ mm->pool = pool;
+
+ update_max_allocated(pool);
+
+ return mm;
+}
+
+void tee_mm_free(tee_mm_entry_t *p)
+{
+ tee_mm_entry_t *entry;
+
+ if (!p || !p->pool)
+ return;
+
+ entry = p->pool->entry;
+
+ /* Protect with mutex (multi thread) */
+
+ /* remove entry from list */
+ while (entry->next != NULL && entry->next != p)
+ entry = entry->next;
+
+ if (!entry->next)
+ panic("invalid mm_entry");
+
+ entry->next = entry->next->next;
+
+ free(p);
+
+ /* Protect with mutex end (multi thread) */
+}
+
+size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
+{
+ if (!mm || !mm->pool)
+ return 0;
+ else
+ return mm->size << mm->pool->shift;
+}
+
+bool tee_mm_addr_is_within_range(tee_mm_pool_t *pool, paddr_t addr)
+{
+ return (pool && ((addr >= pool->lo) && (addr <= pool->hi)));
+}
+
+bool tee_mm_is_empty(tee_mm_pool_t *pool)
+{
+ return pool == NULL || pool->entry == NULL || pool->entry->next == NULL;
+}
+
+/* Physical Secure DDR pool */
+tee_mm_pool_t tee_mm_sec_ddr __early_bss;
+
+/* Virtual eSRAM pool */
+tee_mm_pool_t tee_mm_vcore __early_bss;
+
+tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
+{
+ tee_mm_entry_t *entry = pool->entry;
+ uint16_t offset = (addr - pool->lo) >> pool->shift;
+
+ if (addr > pool->hi || addr < pool->lo)
+ return NULL;
+
+ while (entry->next != NULL) {
+ entry = entry->next;
+
+ if ((offset >= entry->offset) &&
+ (offset < (entry->offset + entry->size))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
+{
+ return (mm->offset << mm->pool->shift) + mm->pool->lo;
+}
diff --git a/core/arch/arm/mm/tee_mmu.c b/core/arch/arm/mm/tee_mmu.c
new file mode 100644
index 0000000..f5c6dde
--- /dev/null
+++ b/core/arch/arm/mm/tee_mmu.c
@@ -0,0 +1,896 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/panic.h>
+#include <kernel/tee_common.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tz_ssvce.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_mmu_types.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mm.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/tee_pager.h>
+#include <sm/optee_smc.h>
+#include <stdlib.h>
+#include <tee_api_defines_extensions.h>
+#include <tee_api_types.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <user_ta_header.h>
+#include <util.h>
+
+#ifdef CFG_PL310
+#include <kernel/tee_l2cc_mutex.h>
+#endif
+
+#define TEE_MMU_UDATA_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URW | \
+ TEE_MATTR_SECURE)
+#define TEE_MMU_UCODE_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URWX | \
+ TEE_MATTR_SECURE)
+
+#define TEE_MMU_UCACHE_DEFAULT_ATTR (TEE_MATTR_CACHE_CACHED << \
+ TEE_MATTR_CACHE_SHIFT)
+
+/* Support for 31 concurrent sessions */
+static uint32_t g_asid = 0xffffffff;
+
+static TEE_Result tee_mmu_umap_add_param(struct tee_mmu_info *mmu,
+ struct param_mem *mem)
+{
+ TEE_Result res;
+ struct tee_ta_region *last_entry = NULL;
+ size_t n;
+ uint32_t attr = TEE_MMU_UDATA_ATTR;
+ size_t nsz;
+ size_t noffs;
+
+ if (!mobj_is_paged(mem->mobj)) {
+ uint32_t cattr;
+
+ res = mobj_get_cattr(mem->mobj, &cattr);
+ if (res != TEE_SUCCESS)
+ return res;
+ attr |= cattr << TEE_MATTR_CACHE_SHIFT;
+ }
+
+ if (!mobj_is_secure(mem->mobj))
+ attr &= ~TEE_MATTR_SECURE;
+
+ /* Check that we can map memory using this attribute */
+ if (!core_mmu_mattr_is_ok(attr))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ /* Find empty entry */
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++)
+ if (!mmu->regions[n].size)
+ break;
+
+ if (n == TEE_MMU_UMAP_MAX_ENTRIES) {
+ /* No entries left "can't happen" */
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ mmu->regions[n].mobj = mem->mobj;
+ mmu->regions[n].offset = ROUNDDOWN(mem->offs, CORE_MMU_USER_PARAM_SIZE);
+ mmu->regions[n].size = ROUNDUP(mem->offs - mmu->regions[n].offset +
+ mem->size,
+ CORE_MMU_USER_PARAM_SIZE);
+ mmu->regions[n].attr = attr;
+
+ /* Try to coalesce some entries */
+ while (true) {
+ /* Find last param */
+ n = TEE_MMU_UMAP_MAX_ENTRIES - 1;
+
+ while (!mmu->regions[n].size) {
+ n--;
+ if (n < TEE_MMU_UMAP_PARAM_IDX) {
+ /* No param entries found, "can't happen" */
+ return TEE_ERROR_BAD_STATE;
+ }
+ }
+
+ if (last_entry == mmu->regions + n)
+			return TEE_SUCCESS; /* Can't coalesce more */
+ last_entry = mmu->regions + n;
+
+ n--;
+ while (n >= TEE_MMU_UMAP_PARAM_IDX) {
+ struct tee_ta_region *entry = mmu->regions + n;
+
+ n--;
+ if (last_entry->mobj != entry->mobj)
+ continue;
+
+ if ((last_entry->offset + last_entry->size) ==
+ entry->offset ||
+ (entry->offset + entry->size) ==
+ last_entry->offset ||
+ core_is_buffer_intersect(last_entry->offset,
+ last_entry->size,
+ entry->offset,
+ entry->size)) {
+ noffs = MIN(last_entry->offset, entry->offset);
+ nsz = MAX(last_entry->offset + last_entry->size,
+ entry->offset + entry->size) - noffs;
+ entry->offset = noffs;
+ entry->size = nsz;
+ last_entry->mobj = NULL;
+ last_entry->size = 0;
+ last_entry->attr = 0;
+ break;
+ }
+ }
+ }
+}
+
+static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
+{
+ const size_t granule = CORE_MMU_USER_PARAM_SIZE;
+ vaddr_t va_range_base;
+ vaddr_t va;
+ size_t va_range_size;
+ size_t n;
+
+ /* Find last table entry used to map code and data */
+ n = TEE_MMU_UMAP_PARAM_IDX - 1;
+ while (n && !mmu->regions[n].size)
+ n--;
+ va = mmu->regions[n].va + mmu->regions[n].size;
+ assert(va);
+
+ core_mmu_get_user_va_range(&va_range_base, &va_range_size);
+ assert(va_range_base == mmu->ta_private_vmem_start);
+
+ /*
+ * Assign parameters in secure memory.
+ */
+ va = ROUNDUP(va, granule);
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ if (!mmu->regions[n].size ||
+ !(mmu->regions[n].attr & TEE_MATTR_SECURE))
+ continue;
+ mmu->regions[n].va = va;
+ va += mmu->regions[n].size;
+ /* Put some empty space between each area */
+ va += granule;
+ if ((va - va_range_base) >= va_range_size)
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ /*
+ * Assign parameters in nonsecure shared memory.
+ * Note that we're making sure that they will reside in a new page
+ * directory as they are to be mapped nonsecure.
+ */
+ va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ if (!mmu->regions[n].size ||
+ (mmu->regions[n].attr & TEE_MATTR_SECURE))
+ continue;
+ mmu->regions[n].va = va;
+ va += mmu->regions[n].size;
+ /* Put some empty space between each area */
+ va += granule;
+ if ((va - va_range_base) >= va_range_size)
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ return TEE_SUCCESS;
+}
+
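+/*
+ * Allocate an ASID for the context (one bit in g_asid, recorded as a
+ * 1-based bit position in utc->context) and the per-context mmu state.
+ */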
+TEE_Result tee_mmu_init(struct user_ta_ctx *utc)
+{
+ uint32_t asid = 1;
+
+ if (!utc->context) {
+ utc->context = 1;
+
+ /* Find available ASID */
+ while (!(asid & g_asid) && (asid != 0)) {
+ utc->context++;
+ asid = asid << 1;
+ }
+
+ if (asid == 0) {
+ DMSG("Failed to allocate ASID");
+ return TEE_ERROR_GENERIC;
+ }
+ g_asid &= ~asid;
+ }
+
+ utc->mmu = calloc(1, sizeof(struct tee_mmu_info));
+ if (!utc->mmu)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ core_mmu_get_user_va_range(&utc->mmu->ta_private_vmem_start, NULL);
+ return TEE_SUCCESS;
+}
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+static TEE_Result alloc_pgt(struct user_ta_ctx *utc __maybe_unused,
+ vaddr_t base, vaddr_t end)
+{
+ struct thread_specific_data *tsd __maybe_unused;
+ vaddr_t b = ROUNDDOWN(base, CORE_MMU_PGDIR_SIZE);
+ vaddr_t e = ROUNDUP(end, CORE_MMU_PGDIR_SIZE);
+ size_t ntbl = (e - b) >> CORE_MMU_PGDIR_SHIFT;
+
+ if (!pgt_check_avail(ntbl)) {
+ EMSG("%zu page tables not available", ntbl);
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+
+#ifdef CFG_PAGED_USER_TA
+ tsd = thread_get_tsd();
+ if (&utc->ctx == tsd->ctx) {
+ /*
+ * The supplied utc is the current active utc, allocate the
+ * page tables too as the pager needs to use them soon.
+ */
+ pgt_alloc(&tsd->pgt_cache, &utc->ctx, b, e - 1);
+ }
+#endif
+
+ return TEE_SUCCESS;
+}
+
+static void free_pgt(struct user_ta_ctx *utc, vaddr_t base, size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct pgt_cache *pgt_cache = NULL;
+
+ if (&utc->ctx == tsd->ctx)
+ pgt_cache = &tsd->pgt_cache;
+
+ pgt_flush_ctx_range(pgt_cache, &utc->ctx, base, base + size);
+}
+
+#else
+static TEE_Result alloc_pgt(struct user_ta_ctx *utc __unused,
+ vaddr_t base __unused, vaddr_t end __unused)
+{
+ return TEE_SUCCESS;
+}
+
+static void free_pgt(struct user_ta_ctx *utc __unused, vaddr_t base __unused,
+ size_t size __unused)
+{
+}
+#endif
+
+void tee_mmu_map_stack(struct user_ta_ctx *utc, struct mobj *mobj)
+{
+ const size_t granule = CORE_MMU_USER_CODE_SIZE;
+ struct tee_ta_region *region = utc->mmu->regions +
+ TEE_MMU_UMAP_STACK_IDX;
+
+ region->mobj = mobj;
+ region->offset = 0;
+ region->va = utc->mmu->ta_private_vmem_start;
+ region->size = ROUNDUP(utc->mobj_stack->size, granule);
+ region->attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ TEE_MATTR_URW | TEE_MATTR_PRW |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
+}
+
+TEE_Result tee_mmu_map_add_segment(struct user_ta_ctx *utc, struct mobj *mobj,
+ size_t offs, size_t size, uint32_t prot)
+{
+ const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
+ const size_t granule = CORE_MMU_USER_CODE_SIZE;
+ struct tee_ta_region *tbl = utc->mmu->regions;
+ vaddr_t va;
+ vaddr_t end_va;
+ size_t n = TEE_MMU_UMAP_CODE_IDX;
+ size_t o;
+
+ if (!tbl[n].size) {
+ /* We're continuing the va space from previous entry. */
+ assert(tbl[n - 1].size);
+
+ /* This is the first segment */
+ va = tbl[n - 1].va + tbl[n - 1].size;
+ end_va = ROUNDUP((offs & (granule - 1)) + size, granule) + va;
+ o = ROUNDDOWN(offs, granule);
+ goto set_entry;
+ }
+
+ /*
+ * mobj of code segments must not change once the first is
+ * assigned.
+ */
+ if (mobj != tbl[n].mobj)
+ return TEE_ERROR_SECURITY;
+
+ /*
+ * Let's find an entry we overlap with or if we need to add a new
+ * entry.
+ */
+ o = offs - tbl[n].offset;
+ va = ROUNDDOWN(o, granule) + tbl[n].va;
+ end_va = ROUNDUP(o + size, granule) + tbl[n].va;
+ o = ROUNDDOWN(offs, granule);
+ while (true) {
+ if (va >= (tbl[n].va + tbl[n].size)) {
+ n++;
+ if (n >= TEE_MMU_UMAP_PARAM_IDX)
+ return TEE_ERROR_SECURITY;
+ if (!tbl[n].size)
+ goto set_entry;
+ continue;
+ }
+
+ /*
+ * There's at least partial overlap with this entry
+ *
+ * Since we're overlapping there should be at least one
+ * free entry after this.
+ */
+ if (((n + 1) >= TEE_MMU_UMAP_PARAM_IDX) || tbl[n + 1].size)
+ return TEE_ERROR_SECURITY;
+
+ /* offset must match or the segments aren't added in order */
+ if (o != (va - tbl[n].va + tbl[n].offset))
+ return TEE_ERROR_SECURITY;
+ /* We should only overlap in the last granule of the entry. */
+ if ((va + granule) < (tbl[n].va + tbl[n].size))
+ return TEE_ERROR_SECURITY;
+
+ /* Merge protection attribute for this entry */
+ tbl[n].attr |= prot;
+
+ va += granule;
+ /* If the segment was completely overlapped, we're done. */
+ if (va == end_va)
+ return TEE_SUCCESS;
+ o += granule;
+ n++;
+ goto set_entry;
+ }
+
+set_entry:
+ tbl[n].mobj = mobj;
+ tbl[n].va = va;
+ tbl[n].offset = o;
+ tbl[n].size = end_va - va;
+ tbl[n].attr = prot | attr;
+
+ utc->mmu->ta_private_vmem_end = tbl[n].va + tbl[n].size;
+ /*
+ * Check that we have enough translation tables available to map
+ * this TA.
+ */
+ return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end);
+}
+
+void tee_mmu_map_clear(struct user_ta_ctx *utc)
+{
+ utc->mmu->ta_private_vmem_end = 0;
+ memset(utc->mmu->regions, 0, sizeof(utc->mmu->regions));
+}
+
+static void clear_param_map(struct user_ta_ctx *utc)
+{
+ const size_t n = TEE_MMU_UMAP_PARAM_IDX;
+ const size_t array_size = ARRAY_SIZE(utc->mmu->regions);
+
+ memset(utc->mmu->regions + n, 0,
+ (array_size - n) * sizeof(utc->mmu->regions[0]));
+}
+
+static TEE_Result param_mem_to_user_va(struct user_ta_ctx *utc,
+ struct param_mem *mem, void **user_va)
+{
+ size_t n;
+
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ struct tee_ta_region *region = utc->mmu->regions + n;
+ vaddr_t va;
+
+ if (mem->mobj != region->mobj)
+ continue;
+ if (mem->offs < region->offset)
+ continue;
+ if (mem->offs >= (region->offset + region->size))
+ continue;
+ va = region->va + mem->offs - region->offset;
+ *user_va = (void *)va;
+ return TEE_SUCCESS;
+ }
+ return TEE_ERROR_GENERIC;
+}
+
+TEE_Result tee_mmu_map_param(struct user_ta_ctx *utc,
+ struct tee_ta_param *param, void *param_va[TEE_NUM_PARAMS])
+{
+ TEE_Result res = TEE_SUCCESS;
+ size_t n;
+
+ /* Clear all the param entries as they can hold old information */
+ clear_param_map(utc);
+
+ /* Map secure memory params first then nonsecure memory params */
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (!mem->size)
+ continue;
+ if (mobj_is_nonsec(mem->mobj))
+ continue;
+
+ res = tee_mmu_umap_add_param(utc->mmu, mem);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (!mem->size)
+ continue;
+ if (!mobj_is_nonsec(mem->mobj))
+ continue;
+
+ res = tee_mmu_umap_add_param(utc->mmu, mem);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ res = tee_mmu_umap_set_vas(utc->mmu);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ struct param_mem *mem = &param->u[n].mem;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (mem->size == 0)
+ continue;
+
+ res = param_mem_to_user_va(utc, mem, param_va + n);
+ if (res != TEE_SUCCESS)
+ return res;
+ }
+
+ utc->mmu->ta_private_vmem_start = utc->mmu->regions[0].va;
+
+ n = ARRAY_SIZE(utc->mmu->regions);
+ do {
+ n--;
+ } while (n && !utc->mmu->regions[n].size);
+
+ return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
+ utc->mmu->regions[n].va + utc->mmu->regions[n].size);
+}
+
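+/*
+ * Find a free region slot and a virtual address gap large enough for
+ * @mobj, keeping secure and non-secure mappings in separate page
+ * directories on non-LPAE configurations. A non-negative @pgdir_offset
+ * pins the offset into the first page directory so a mobj spanning
+ * several page directories keeps the layout the pager expects.
+ */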
+TEE_Result tee_mmu_add_rwmem(struct user_ta_ctx *utc, struct mobj *mobj,
+ int pgdir_offset, vaddr_t *va)
+{
+ struct tee_ta_region *reg = NULL;
+ struct tee_ta_region *last_reg;
+ vaddr_t v;
+ vaddr_t end_v;
+ size_t n;
+
+ assert(pgdir_offset < CORE_MMU_PGDIR_SIZE);
+
+ /*
+ * Avoid the corner case when no regions are assigned, currently
+ * stack and code areas are always assigned before we end up here.
+ */
+ if (!utc->mmu->regions[0].size)
+ return TEE_ERROR_GENERIC;
+
+ for (n = 1; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!reg && utc->mmu->regions[n].size)
+ continue;
+ last_reg = utc->mmu->regions + n;
+
+ if (!reg) {
+ reg = last_reg;
+ v = ROUNDUP((reg - 1)->va + (reg - 1)->size,
+ SMALL_PAGE_SIZE);
+#ifndef CFG_WITH_LPAE
+ /*
+ * Non-LPAE mappings can't mix secure and
+ * non-secure in a single pgdir.
+ */
+ if (mobj_is_secure((reg - 1)->mobj) !=
+ mobj_is_secure(mobj))
+ v = ROUNDUP(v, CORE_MMU_PGDIR_SIZE);
+#endif
+
+ /*
+ * If mobj needs to span several page directories
+ * the offset into the first pgdir need to match
+ * the supplied offset or some area used by the
+ * pager may not fit into a single pgdir.
+ */
+ if (pgdir_offset >= 0 &&
+ mobj->size > CORE_MMU_PGDIR_SIZE) {
+ if ((v & CORE_MMU_PGDIR_MASK) <
+ (size_t)pgdir_offset)
+ v = ROUNDDOWN(v, CORE_MMU_PGDIR_SIZE);
+ else
+ v = ROUNDUP(v, CORE_MMU_PGDIR_SIZE);
+ v += pgdir_offset;
+ }
+ end_v = ROUNDUP(v + mobj->size, SMALL_PAGE_SIZE);
+ continue;
+ }
+
+ if (!last_reg->size)
+ continue;
+ /*
+ * There's one registered region after our selected spot,
+ * check if we can still fit or if we need a later spot.
+ */
+ if (end_v > last_reg->va) {
+ reg = NULL;
+ continue;
+ }
+#ifndef CFG_WITH_LPAE
+ if (mobj_is_secure(mobj) != mobj_is_secure(last_reg->mobj) &&
+ end_v > ROUNDDOWN(last_reg->va, CORE_MMU_PGDIR_SIZE))
+ reg = NULL;
+#endif
+ }
+
+ if (reg) {
+ TEE_Result res;
+
+ end_v = MAX(end_v, last_reg->va + last_reg->size);
+ res = alloc_pgt(utc, utc->mmu->ta_private_vmem_start, end_v);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ *va = v;
+ reg->va = v;
+ reg->mobj = mobj;
+ reg->offset = 0;
+ reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
+ if (mobj_is_secure(mobj))
+ reg->attr = TEE_MATTR_SECURE;
+ else
+ reg->attr = 0;
+ return TEE_SUCCESS;
+ }
+
+ return TEE_ERROR_OUT_OF_MEMORY;
+}
+
+void tee_mmu_rem_rwmem(struct user_ta_ctx *utc, struct mobj *mobj, vaddr_t va)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ struct tee_ta_region *reg = utc->mmu->regions + n;
+
+ if (reg->mobj == mobj && reg->va == va) {
+ free_pgt(utc, reg->va, reg->size);
+ memset(reg, 0, sizeof(*reg));
+ return;
+ }
+ }
+}
+
+/*
+ * tee_mmu_final - finalise and free ctx mmu
+ */
+void tee_mmu_final(struct user_ta_ctx *utc)
+{
+ uint32_t asid = 1 << ((utc->context - 1) & 0xff);
+
+ /* return ASID */
+ g_asid |= asid;
+
+ /* clear MMU entries to avoid clash when asid is reused */
+ secure_mmu_unifiedtlbinv_byasid(utc->context & 0xff);
+ utc->context = 0;
+
+ free(utc->mmu);
+ utc->mmu = NULL;
+}
+
+/* return true only if buffer fits inside TA private memory */
+bool tee_mmu_is_vbuf_inside_ta_private(const struct user_ta_ctx *utc,
+ const void *va, size_t size)
+{
+ return core_is_buffer_inside(va, size,
+ utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+}
+
+/* return true only if buffer intersects TA private memory */
+bool tee_mmu_is_vbuf_intersect_ta_private(const struct user_ta_ctx *utc,
+ const void *va, size_t size)
+{
+ return core_is_buffer_intersect(va, size,
+ utc->mmu->ta_private_vmem_start,
+ utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+}
+
+TEE_Result tee_mmu_vbuf_to_mobj_offs(const struct user_ta_ctx *utc,
+ const void *va, size_t size,
+ struct mobj **mobj, size_t *offs)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].mobj)
+ continue;
+ if (core_is_buffer_inside(va, size, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size)) {
+ *mobj = utc->mmu->regions[n].mobj;
+ *offs = (vaddr_t)va - utc->mmu->regions[n].va +
+ utc->mmu->regions[n].offset;
+ return TEE_SUCCESS;
+ }
+ }
+
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+static TEE_Result tee_mmu_user_va2pa_attr(const struct user_ta_ctx *utc,
+ void *ua, paddr_t *pa, uint32_t *attr)
+{
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (core_is_buffer_inside(ua, 1, utc->mmu->regions[n].va,
+ utc->mmu->regions[n].size)) {
+ if (pa) {
+ TEE_Result res;
+ paddr_t p;
+
+ res = mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset,
+ 0, &p);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ *pa = (paddr_t)ua - utc->mmu->regions[n].va + p;
+ }
+ if (attr)
+ *attr = utc->mmu->regions[n].attr;
+ return TEE_SUCCESS;
+ }
+ }
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
+TEE_Result tee_mmu_user_va2pa_helper(const struct user_ta_ctx *utc, void *ua,
+ paddr_t *pa)
+{
+ return tee_mmu_user_va2pa_attr(utc, ua, pa, NULL);
+}
+
+TEE_Result tee_mmu_user_pa2va_helper(const struct user_ta_ctx *utc,
+ paddr_t pa, void **va)
+{
+ TEE_Result res;
+ paddr_t p;
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+ if (!utc->mmu->regions[n].mobj)
+ continue;
+
+ res = mobj_get_pa(utc->mmu->regions[n].mobj,
+ utc->mmu->regions[n].offset, 0, &p);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (core_is_buffer_inside(pa, 1, p,
+ utc->mmu->regions[n].size)) {
+ *va = (void *)(pa - p + utc->mmu->regions[n].va);
+ return TEE_SUCCESS;
+ }
+ }
+
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
+TEE_Result tee_mmu_check_access_rights(const struct user_ta_ctx *utc,
+ uint32_t flags, uaddr_t uaddr,
+ size_t len)
+{
+ uaddr_t a;
+ size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
+ CORE_MMU_USER_PARAM_SIZE);
+
+ /* Address wrap */
+ if ((uaddr + len) < uaddr)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
+ (flags & TEE_MEMORY_ACCESS_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ /*
+ * Rely on TA private memory test to check if address range is private
+ * to TA or not.
+ */
+ if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
+ !tee_mmu_is_vbuf_inside_ta_private(utc, (void *)uaddr, len))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ for (a = uaddr; a < (uaddr + len); a += addr_incr) {
+ uint32_t attr;
+ TEE_Result res;
+
+ res = tee_mmu_user_va2pa_attr(utc, (void *)a, NULL, &attr);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
+ (attr & TEE_MATTR_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
+ !(attr & TEE_MATTR_SECURE))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
+ return TEE_ERROR_ACCESS_DENIED;
+ if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
+ return TEE_ERROR_ACCESS_DENIED;
+ }
+
+ return TEE_SUCCESS;
+}
+
+void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+
+ core_mmu_set_user_map(NULL);
+#ifdef CFG_SMALL_PAGE_USER_TA
+ /*
+ * No matter what happens below, the current user TA will not be
+ * current any longer. Make sure pager is in sync with that.
+ * This function has to be called before there's a chance that
+ * pgt_free_unlocked() is called.
+ *
+ * Save translation tables in a cache if it's a user TA.
+ */
+ pgt_free(&tsd->pgt_cache, tsd->ctx && is_user_ta_ctx(tsd->ctx));
+#endif
+
+ if (ctx && is_user_ta_ctx(ctx)) {
+ struct core_mmu_user_map map;
+ struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
+
+ core_mmu_create_user_map(utc, &map);
+ core_mmu_set_user_map(&map);
+ tee_pager_assign_uta_tables(utc);
+ }
+ tsd->ctx = ctx;
+}
+
+struct tee_ta_ctx *tee_mmu_get_ctx(void)
+{
+ return thread_get_tsd()->ctx;
+}
+
+uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
+{
+ const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);
+
+ assert(utc->mmu);
+ return utc->mmu->regions[TEE_MMU_UMAP_CODE_IDX].va;
+}
+
+void teecore_init_ta_ram(void)
+{
+ vaddr_t s;
+ vaddr_t e;
+ paddr_t ps;
+ paddr_t pe;
+
+	/* get virtual addr/size of RAM where TAs are loaded/executed */
+ core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
+ ps = virt_to_phys((void *)s);
+ pe = virt_to_phys((void *)(e - 1)) + 1;
+
+ if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
+ !pe || (pe & CORE_MMU_USER_CODE_MASK))
+ panic("invalid TA RAM");
+
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ if (!tee_pbuf_is_sec(ps, pe - ps))
+ panic("TA RAM is not secure");
+
+ if (!tee_mm_is_empty(&tee_mm_sec_ddr))
+ panic("TA RAM pool is not empty");
+
+ /* remove previous config and init TA ddr memory pool */
+ tee_mm_final(&tee_mm_sec_ddr);
+ tee_mm_init(&tee_mm_sec_ddr, ps, pe, CORE_MMU_USER_CODE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS);
+}
+
+void teecore_init_pub_ram(void)
+{
+ vaddr_t s;
+ vaddr_t e;
+
+	/* get virtual addr/size of NSec shared mem allocated from teecore */
+ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
+
+ if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
+ panic("invalid PUB RAM");
+
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ if (!tee_vbuf_is_non_sec(s, e - s))
+ panic("PUB RAM is not non-secure");
+
+#ifdef CFG_PL310
+ /* Allocate statically the l2cc mutex */
+ tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
+ s += sizeof(uint32_t); /* size of a pl310 mutex */
+ s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */
+#endif
+
+ default_nsec_shm_paddr = virt_to_phys((void *)s);
+ default_nsec_shm_size = e - s;
+}
+
+uint32_t tee_mmu_user_get_cache_attr(struct user_ta_ctx *utc, void *va)
+{
+ uint32_t attr;
+
+ if (tee_mmu_user_va2pa_attr(utc, va, NULL, &attr) != TEE_SUCCESS)
+ panic("cannot get attr");
+
+ return (attr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK;
+}
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
new file mode 100644
index 0000000..c7238fe
--- /dev/null
+++ b/core/arch/arm/mm/tee_pager.c
@@ -0,0 +1,1473 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <keep.h>
+#include <sys/queue.h>
+#include <kernel/abort.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_pager.h>
+#include <types_ext.h>
+#include <stdlib.h>
+#include <tee_api_defines.h>
+#include <tee/tee_cryp_provider.h>
+#include <trace.h>
+#include <utee_defines.h>
+#include <util.h>
+
+#include "pager_private.h"
+
+#define PAGER_AE_KEY_BITS 256
+
+struct pager_rw_pstate {
+ uint64_t iv;
+ uint8_t tag[PAGER_AES_GCM_TAG_LEN];
+};
+
+enum area_type {
+ AREA_TYPE_RO,
+ AREA_TYPE_RW,
+ AREA_TYPE_LOCK,
+};
+
+struct tee_pager_area {
+ union {
+ const uint8_t *hashes;
+ struct pager_rw_pstate *rwp;
+ } u;
+ uint8_t *store;
+ enum area_type type;
+ uint32_t flags;
+ vaddr_t base;
+ size_t size;
+ struct pgt *pgt;
+ TAILQ_ENTRY(tee_pager_area) link;
+};
+
+TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
+
+static struct tee_pager_area_head tee_pager_area_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
+
+#define INVALID_PGIDX UINT_MAX
+
+/*
+ * struct tee_pager_pmem - Represents a physical page used for paging.
+ *
+ * @pgidx	index of the page's entry in the area's translation table.
+ * @va_alias	Virtual address where the physical page is always aliased.
+ *		Used during remapping of the page when the content needs to
+ *		be updated before it's available at the new location.
+ * @area a pointer to the pager area
+ */
+struct tee_pager_pmem {
+ unsigned pgidx;
+ void *va_alias;
+ struct tee_pager_area *area;
+ TAILQ_ENTRY(tee_pager_pmem) link;
+};
+
+/* The list of physical pages. The first page in the list is the oldest */
+TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
+
+static struct tee_pager_pmem_head tee_pager_pmem_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
+
+static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
+
+static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
+
+/* number of pages hidden */
+#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
+
+/* Number of registered physical pages, used when hiding pages. */
+static size_t tee_pager_npages;
+
+#ifdef CFG_WITH_STATS
+static struct tee_pager_stats pager_stats;
+
+static inline void incr_ro_hits(void)
+{
+ pager_stats.ro_hits++;
+}
+
+static inline void incr_rw_hits(void)
+{
+ pager_stats.rw_hits++;
+}
+
+static inline void incr_hidden_hits(void)
+{
+ pager_stats.hidden_hits++;
+}
+
+static inline void incr_zi_released(void)
+{
+ pager_stats.zi_released++;
+}
+
+static inline void incr_npages_all(void)
+{
+ pager_stats.npages_all++;
+}
+
+static inline void set_npages(void)
+{
+ pager_stats.npages = tee_pager_npages;
+}
+
+void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ *stats = pager_stats;
+
+ pager_stats.hidden_hits = 0;
+ pager_stats.ro_hits = 0;
+ pager_stats.rw_hits = 0;
+ pager_stats.zi_released = 0;
+}
+
+#else /* CFG_WITH_STATS */
+static inline void incr_ro_hits(void) { }
+static inline void incr_rw_hits(void) { }
+static inline void incr_hidden_hits(void) { }
+static inline void incr_zi_released(void) { }
+static inline void incr_npages_all(void) { }
+static inline void set_npages(void) { }
+
+void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ memset(stats, 0, sizeof(struct tee_pager_stats));
+}
+#endif /* CFG_WITH_STATS */
+
+static struct pgt pager_core_pgt;
+struct core_mmu_table_info tee_pager_tbl_info;
+static struct core_mmu_table_info pager_alias_tbl_info;
+
+static unsigned pager_spinlock = SPINLOCK_UNLOCK;
+
+/* Defines the range of the alias area */
+static tee_mm_entry_t *pager_alias_area;
+/*
+ * Physical pages are added in a stack like fashion to the alias area,
+ * @pager_alias_next_free gives the address of next free entry if
+ * @pager_alias_next_free is != 0
+ */
+static uintptr_t pager_alias_next_free;
+
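+/*
+ * Mask all exceptions and take the pager spinlock. The returned value is
+ * the previous exception mask and must be passed back to pager_unlock().
+ */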
+static uint32_t pager_lock(void)
+{
+ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+ cpu_spin_lock(&pager_spinlock);
+ return exceptions;
+}
+
+static void pager_unlock(uint32_t exceptions)
+{
+ cpu_spin_unlock(&pager_spinlock);
+ thread_set_exceptions(exceptions);
+}
+
+static void set_alias_area(tee_mm_entry_t *mm)
+{
+ struct core_mmu_table_info *ti = &pager_alias_tbl_info;
+ size_t tbl_va_size;
+ unsigned idx;
+ unsigned last_idx;
+ vaddr_t smem = tee_mm_get_smem(mm);
+ size_t nbytes = tee_mm_get_bytes(mm);
+
+ DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
+
+ if (pager_alias_area)
+ panic("null pager_alias_area");
+
+ if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
+ panic("Can't find translation table");
+
+ if ((1 << ti->shift) != SMALL_PAGE_SIZE)
+ panic("Unsupported page size in translation table");
+
+ tbl_va_size = (1 << ti->shift) * ti->num_entries;
+ if (!core_is_buffer_inside(smem, nbytes,
+ ti->va_base, tbl_va_size)) {
+ EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
+ smem, nbytes, ti->va_base, tbl_va_size);
+ panic();
+ }
+
+ if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
+ panic("invalid area alignment");
+
+ pager_alias_area = mm;
+ pager_alias_next_free = smem;
+
+ /* Clear all mapping in the alias area */
+ idx = core_mmu_va2idx(ti, smem);
+ last_idx = core_mmu_va2idx(ti, smem + nbytes);
+ for (; idx < last_idx; idx++)
+ core_mmu_set_entry(ti, idx, 0, 0);
+
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+static void generate_ae_key(void)
+{
+ if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
+ panic("failed to generate random");
+}
+
+void tee_pager_init(tee_mm_entry_t *mm_alias)
+{
+ set_alias_area(mm_alias);
+ generate_ae_key();
+}
+
+static void *pager_add_alias_page(paddr_t pa)
+{
+ unsigned idx;
+ struct core_mmu_table_info *ti = &pager_alias_tbl_info;
+ uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
+ (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
+ TEE_MATTR_SECURE | TEE_MATTR_PRW;
+
+ DMSG("0x%" PRIxPA, pa);
+
+ if (!pager_alias_next_free || !ti->num_entries)
+ panic("invalid alias entry");
+
+ idx = core_mmu_va2idx(ti, pager_alias_next_free);
+ core_mmu_set_entry(ti, idx, pa, attr);
+ pgt_inc_used_entries(&pager_core_pgt);
+ pager_alias_next_free += SMALL_PAGE_SIZE;
+ if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
+ tee_mm_get_bytes(pager_alias_area)))
+ pager_alias_next_free = 0;
+ return (void *)core_mmu_idx2va(ti, idx);
+}
+
+static struct tee_pager_area *alloc_area(struct pgt *pgt,
+ vaddr_t base, size_t size,
+ uint32_t flags, const void *store,
+ const void *hashes)
+{
+ struct tee_pager_area *area = calloc(1, sizeof(*area));
+ enum area_type at;
+ tee_mm_entry_t *mm_store = NULL;
+
+ if (!area)
+ return NULL;
+
+ if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
+ if (flags & TEE_MATTR_LOCKED) {
+ at = AREA_TYPE_LOCK;
+ goto out;
+ }
+ mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
+ if (!mm_store)
+ goto bad;
+ area->store = phys_to_virt(tee_mm_get_smem(mm_store),
+ MEM_AREA_TA_RAM);
+ if (!area->store)
+ goto bad;
+ area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
+ sizeof(struct pager_rw_pstate));
+ if (!area->u.rwp)
+ goto bad;
+ at = AREA_TYPE_RW;
+ } else {
+ area->store = (void *)store;
+ area->u.hashes = hashes;
+ at = AREA_TYPE_RO;
+ }
+out:
+ area->pgt = pgt;
+ area->base = base;
+ area->size = size;
+ area->flags = flags;
+ area->type = at;
+ return area;
+bad:
+ tee_mm_free(mm_store);
+ free(area->u.rwp);
+ free(area);
+ return NULL;
+}
+
+static void area_insert_tail(struct tee_pager_area *area)
+{
+ uint32_t exceptions = pager_lock();
+
+ TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(area_insert_tail);
+
+static size_t tbl_usage_count(struct pgt *pgt)
+{
+ size_t n;
+ paddr_t pa;
+ size_t usage = 0;
+
+ for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
+ core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
+ n, &pa, NULL);
+ if (pa)
+ usage++;
+ }
+ return usage;
+}
+
+bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
+ const void *store, const void *hashes)
+{
+ struct tee_pager_area *area;
+ size_t tbl_va_size;
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+
+ DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
+ base, base + size, flags, store, hashes);
+
+ if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
+ EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
+ panic();
+ }
+
+ if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
+ panic("write pages cannot provide store or hashes");
+
+ if ((flags & TEE_MATTR_PW) && (store || hashes))
+ panic("non-write pages must provide store and hashes");
+
+ if (!pager_core_pgt.tbl) {
+ pager_core_pgt.tbl = ti->table;
+ pgt_set_used_entries(&pager_core_pgt,
+ tbl_usage_count(&pager_core_pgt));
+ }
+
+ tbl_va_size = (1 << ti->shift) * ti->num_entries;
+ if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
+ DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
+ base, size, ti->va_base, tbl_va_size);
+ return false;
+ }
+
+ area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
+ if (!area)
+ return false;
+
+ area_insert_tail(area);
+ return true;
+}
+
+static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
+ vaddr_t va)
+{
+ struct tee_pager_area *area;
+
+ if (!areas)
+ return NULL;
+
+ TAILQ_FOREACH(area, areas, link) {
+ if (core_is_buffer_inside(va, 1, area->base, area->size))
+ return area;
+ }
+ return NULL;
+}
+
+#ifdef CFG_PAGED_USER_TA
+static struct tee_pager_area *find_uta_area(vaddr_t va)
+{
+ struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
+
+ if (!ctx || !is_user_ta_ctx(ctx))
+ return NULL;
+ return find_area(to_user_ta_ctx(ctx)->areas, va);
+}
+#else
+static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
+{
+ return NULL;
+}
+#endif /*CFG_PAGED_USER_TA*/
+
+
+static uint32_t get_area_mattr(uint32_t area_flags)
+{
+ uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
+ TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
+ (area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
+
+ if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
+ attr |= TEE_MATTR_GLOBAL;
+
+ return attr;
+}
+
+static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
+{
+ paddr_t pa;
+ unsigned idx;
+
+ idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
+ core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
+ return pa;
+}
+
+static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
+ void *dst)
+{
+ struct pager_aes_gcm_iv iv = {
+ { (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
+ };
+
+ return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
+ &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
+}
+
+static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
+{
+ struct pager_aes_gcm_iv iv;
+
+ assert((rwp->iv + 1) > rwp->iv);
+ rwp->iv++;
+ /*
+ * IV is constructed as recommended in section "8.2.1 Deterministic
+ * Construction" of "Recommendation for Block Cipher Modes of
+ * Operation: Galois/Counter Mode (GCM) and GMAC",
+ * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
+ */
+ iv.iv[0] = (vaddr_t)rwp;
+ iv.iv[1] = rwp->iv >> 32;
+ iv.iv[2] = rwp->iv;
+
+ if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
+ &iv, rwp->tag,
+ src, dst, SMALL_PAGE_SIZE))
+ panic("gcm failed");
+}
+
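+/*
+ * Bring the backing content for @page_va into the alias page: read-only
+ * pages are copied from the store and checked against their SHA-256
+ * hash, RW pages are decrypted (or zero-filled on first use, while the
+ * IV is still zero), and locked pages are always zero-filled.
+ */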
+static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
+ void *va_alias)
+{
+ size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
+ const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
+
+ switch (area->type) {
+ case AREA_TYPE_RO:
+ {
+ const void *hash = area->u.hashes +
+ idx * TEE_SHA256_HASH_SIZE;
+
+ memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
+ incr_ro_hits();
+
+ if (hash_sha256_check(hash, va_alias,
+ SMALL_PAGE_SIZE) != TEE_SUCCESS) {
+ EMSG("PH 0x%" PRIxVA " failed", page_va);
+ panic();
+ }
+ }
+ break;
+ case AREA_TYPE_RW:
+ FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
+ va_alias, page_va, area->u.rwp[idx].iv);
+ if (!area->u.rwp[idx].iv)
+ memset(va_alias, 0, SMALL_PAGE_SIZE);
+ else if (!decrypt_page(&area->u.rwp[idx], stored_page,
+ va_alias)) {
+ EMSG("PH 0x%" PRIxVA " failed", page_va);
+ panic();
+ }
+ incr_rw_hits();
+ break;
+ case AREA_TYPE_LOCK:
+ FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
+ memset(va_alias, 0, SMALL_PAGE_SIZE);
+ break;
+ default:
+ panic();
+ }
+}
+
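+/*
+ * Write a dirty RW page back: the alias page is encrypted into the
+ * area's backing store under a fresh IV. Pages that are not dirty RW
+ * pages are left untouched here.
+ */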
+static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
+{
+ const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
+ TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+
+ if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
+ size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
+ size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
+ void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
+
+ assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
+ encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
+ stored_page);
+ FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
+ pmem->area->base + idx * SMALL_PAGE_SIZE,
+ pmem->area->u.rwp[idx].iv);
+ }
+}
+
+static void area_get_entry(struct tee_pager_area *area, size_t idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ assert(area->pgt);
+ assert(idx < tee_pager_tbl_info.num_entries);
+ core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
+ idx, pa, attr);
+}
+
+static void area_set_entry(struct tee_pager_area *area, size_t idx,
+ paddr_t pa, uint32_t attr)
+{
+ assert(area->pgt);
+ assert(idx < tee_pager_tbl_info.num_entries);
+ core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
+ idx, pa, attr);
+}
+
+static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
+{
+ return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
+}
+
+static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
+ size_t idx)
+{
+ return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
+}
+
+#ifdef CFG_PAGED_USER_TA
+static void free_area(struct tee_pager_area *area)
+{
+ tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
+ virt_to_phys(area->store)));
+ if (area->type == AREA_TYPE_RW)
+ free(area->u.rwp);
+ free(area);
+}
+
+static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ uint32_t flags;
+ vaddr_t b = base;
+ size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
+
+ if (!utc->areas) {
+ utc->areas = malloc(sizeof(*utc->areas));
+ if (!utc->areas)
+ return false;
+ TAILQ_INIT(utc->areas);
+ }
+
+ flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
+
+ while (s) {
+ size_t s2;
+
+ if (find_area(utc->areas, b))
+ return false;
+
+ s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
+
+ /* Table info will be set when the context is activated. */
+ area = alloc_area(NULL, b, s2, flags, NULL, NULL);
+ if (!area)
+ return false;
+ TAILQ_INSERT_TAIL(utc->areas, area, link);
+ b += s2;
+ s -= s2;
+ }
+
+ return true;
+}
+
+bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
+{
+ struct thread_specific_data *tsd = thread_get_tsd();
+ struct tee_pager_area *area;
+ struct core_mmu_table_info dir_info = { NULL };
+
+ if (&utc->ctx != tsd->ctx) {
+ /*
+		 * Changes are to a utc that isn't active. Just add the
+		 * areas; page tables will be dealt with later.
+ */
+ return pager_add_uta_area(utc, base, size);
+ }
+
+ /*
+ * Assign page tables before adding areas to be able to tell which
+ * are newly added and should be removed in case of failure.
+ */
+ tee_pager_assign_uta_tables(utc);
+ if (!pager_add_uta_area(utc, base, size)) {
+ struct tee_pager_area *next_a;
+
+ /* Remove all added areas */
+ TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
+ if (!area->pgt) {
+ TAILQ_REMOVE(utc->areas, area, link);
+ free_area(area);
+ }
+ }
+ return false;
+ }
+
+ /*
+ * Assign page tables to the new areas and make sure that the page
+ * tables are registered in the upper table.
+ */
+ tee_pager_assign_uta_tables(utc);
+ core_mmu_get_user_pgdir(&dir_info);
+ TAILQ_FOREACH(area, utc->areas, link) {
+ paddr_t pa;
+ size_t idx;
+ uint32_t attr;
+
+ idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
+ core_mmu_get_entry(&dir_info, idx, &pa, &attr);
+
+ /*
+ * Check if the page table already is used, if it is, it's
+ * already registered.
+ */
+ if (area->pgt->num_used_entries) {
+ assert(attr & TEE_MATTR_TABLE);
+ assert(pa == virt_to_phys(area->pgt->tbl));
+ continue;
+ }
+
+ attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
+ pa = virt_to_phys(area->pgt->tbl);
+ assert(pa);
+ /*
+ * Note that the update of the table entry is guaranteed to
+ * be atomic.
+ */
+ core_mmu_set_entry(&dir_info, idx, pa, attr);
+ }
+
+ return true;
+}
+
+static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
+ struct pgt *pgt)
+{
+ assert(pgt);
+ ti->table = pgt->tbl;
+ ti->va_base = pgt->vabase;
+ ti->level = tee_pager_tbl_info.level;
+ ti->shift = tee_pager_tbl_info.shift;
+ ti->num_entries = tee_pager_tbl_info.num_entries;
+}
+
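+/*
+ * Move all resident pages of @area from its current page table to
+ * @new_pgt, rewriting each entry for the new base address and keeping
+ * the used-entry counts of both tables in sync.
+ */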
+static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
+ vaddr_t new_base)
+{
+ uint32_t exceptions = pager_lock();
+
+ /*
+ * If there's no pgt assigned to the old area there's no pages to
+ * deal with either, just update with a new pgt and base.
+ */
+ if (area->pgt) {
+ struct core_mmu_table_info old_ti;
+ struct core_mmu_table_info new_ti;
+ struct tee_pager_pmem *pmem;
+
+ init_tbl_info_from_pgt(&old_ti, area->pgt);
+ init_tbl_info_from_pgt(&new_ti, new_pgt);
+
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ vaddr_t va;
+ paddr_t pa;
+ uint32_t attr;
+
+ if (pmem->area != area)
+ continue;
+ core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
+ core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
+
+ assert(pa == get_pmem_pa(pmem));
+ assert(attr);
+ assert(area->pgt->num_used_entries);
+ area->pgt->num_used_entries--;
+
+ va = core_mmu_idx2va(&old_ti, pmem->pgidx);
+ va = va - area->base + new_base;
+ pmem->pgidx = core_mmu_va2idx(&new_ti, va);
+ core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
+ new_pgt->num_used_entries++;
+ }
+ }
+
+ area->pgt = new_pgt;
+ area->base = new_base;
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(transpose_area);
+
+void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
+ vaddr_t src_base,
+ struct user_ta_ctx *dst_utc,
+ vaddr_t dst_base, struct pgt **dst_pgt,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ struct tee_pager_area *next_a;
+
+ TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
+ vaddr_t new_area_base;
+ size_t new_idx;
+
+ if (!core_is_buffer_inside(area->base, area->size,
+ src_base, size))
+ continue;
+
+ TAILQ_REMOVE(src_utc->areas, area, link);
+
+ new_area_base = dst_base + (area->base - src_base);
+ new_idx = (new_area_base - dst_pgt[0]->vabase) /
+ CORE_MMU_PGDIR_SIZE;
+ assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
+ dst_pgt[new_idx]->vabase);
+ transpose_area(area, dst_pgt[new_idx], new_area_base);
+
+ /*
+ * Assert that this will not cause any conflicts in the new
+ * utc. This should already be guaranteed, but a bug here
+ * could be tricky to find.
+ */
+ assert(!find_area(dst_utc->areas, area->base));
+ TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
+ }
+}
+
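+/*
+ * Removes the area from the list, unmaps and detaches every physical
+ * page belonging to it, then frees the area.
+ */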
+static void rem_area(struct tee_pager_area_head *area_head,
+ struct tee_pager_area *area)
+{
+ struct tee_pager_pmem *pmem;
+ uint32_t exceptions;
+
+ exceptions = pager_lock();
+
+ TAILQ_REMOVE(area_head, area, link);
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->area == area) {
+ area_set_entry(area, pmem->pgidx, 0, 0);
+ pgt_dec_used_entries(area->pgt);
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ }
+ }
+
+ pager_unlock(exceptions);
+ free_area(area);
+}
+KEEP_PAGER(rem_area);
+
+void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size)
+{
+ struct tee_pager_area *area;
+ struct tee_pager_area *next_a;
+ size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
+
+ TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
+ if (core_is_buffer_inside(area->base, area->size, base, s))
+ rem_area(utc->areas, area);
+ }
+}
+
+void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
+{
+ struct tee_pager_area *area;
+
+ if (!utc->areas)
+ return;
+
+ while (true) {
+ area = TAILQ_FIRST(utc->areas);
+ if (!area)
+ break;
+ TAILQ_REMOVE(utc->areas, area, link);
+ free_area(area);
+ }
+
+ free(utc->areas);
+}
+
+bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size, uint32_t flags)
+{
+ bool ret;
+ vaddr_t b = base;
+ size_t s = size;
+ size_t s2;
+ struct tee_pager_area *area = find_area(utc->areas, b);
+ uint32_t exceptions;
+ struct tee_pager_pmem *pmem;
+ paddr_t pa;
+ uint32_t a;
+ uint32_t f;
+
+ f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
+ if (f & TEE_MATTR_UW)
+ f |= TEE_MATTR_PW;
+ f = get_area_mattr(f);
+
+ exceptions = pager_lock();
+
+ while (s) {
+ s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
+ if (!area || area->base != b || area->size != s2) {
+ ret = false;
+ goto out;
+ }
+ b += s2;
+ s -= s2;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->area != area)
+ continue;
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
+ if (a & TEE_MATTR_VALID_BLOCK)
+ assert(pa == get_pmem_pa(pmem));
+ else
+ pa = get_pmem_pa(pmem);
+ if (a == f)
+ continue;
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ if (!(flags & TEE_MATTR_UW))
+ tee_pager_save_page(pmem, a);
+
+ area_set_entry(pmem->area, pmem->pgidx, pa, f);
+
+ if (flags & TEE_MATTR_UX) {
+ void *va = (void *)area_idx2va(pmem->area,
+ pmem->pgidx);
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
+ SMALL_PAGE_SIZE);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va,
+ SMALL_PAGE_SIZE);
+ }
+ }
+
+ area->flags = f;
+ area = TAILQ_NEXT(area, link);
+ }
+
+ ret = true;
+out:
+ pager_unlock(exceptions);
+ return ret;
+}
+KEEP_PAGER(tee_pager_set_uta_area_attr);
+#endif /*CFG_PAGED_USER_TA*/
+
+static bool tee_pager_unhide_page(vaddr_t page_va)
+{
+ struct tee_pager_pmem *pmem;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ paddr_t pa;
+ uint32_t attr;
+
+ if (pmem->pgidx == INVALID_PGIDX)
+ continue;
+
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
+
+ if (!(attr &
+ (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
+ continue;
+
+ if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
+ uint32_t a = get_area_mattr(pmem->area->flags);
+
+ /* page is hidden, show and move to back */
+ if (pa != get_pmem_pa(pmem))
+ panic("unexpected pa");
+
+ /*
+ * If it's not a dirty block, then it should be
+ * read only.
+ */
+ if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
+ a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
+ else
+ FMSG("Unhide %#" PRIxVA, page_va);
+
+ if (page_va == 0x8000a000)
+ FMSG("unhide %#" PRIxVA " a %#" PRIX32,
+ page_va, a);
+ area_set_entry(pmem->area, pmem->pgidx, pa, a);
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+
+ /* TODO only invalidate entry touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ incr_hidden_hits();
+ return true;
+ }
+ }
+
+ return false;
+}
+
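+/*
+ * Temporarily hides up to TEE_PAGER_NHIDE of the oldest mapped pages so
+ * that the next access to them faults; pages that are accessed again are
+ * unhidden and moved to the back of the list, which approximates LRU
+ * ordering when picking a page to evict.
+ */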
+static void tee_pager_hide_pages(void)
+{
+ struct tee_pager_pmem *pmem;
+ size_t n = 0;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ paddr_t pa;
+ uint32_t attr;
+ uint32_t a;
+
+ if (n >= TEE_PAGER_NHIDE)
+ break;
+ n++;
+
+ /* we cannot hide pages when pmem->area is not defined. */
+ if (!pmem->area)
+ continue;
+
+ area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+
+ assert(pa == get_pmem_pa(pmem));
+ if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
+ a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
+ FMSG("Hide %#" PRIxVA,
+ area_idx2va(pmem->area, pmem->pgidx));
+ } else
+ a = TEE_MATTR_HIDDEN_BLOCK;
+ area_set_entry(pmem->area, pmem->pgidx, pa, a);
+ }
+
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+/*
+ * Find a mapped pmem, unmap it and move it back to the pageable pmem list.
+ * Return false if the page was not mapped, and true if it was.
+ */
+static bool tee_pager_release_one_phys(struct tee_pager_area *area,
+ vaddr_t page_va)
+{
+ struct tee_pager_pmem *pmem;
+ unsigned pgidx;
+ paddr_t pa;
+ uint32_t attr;
+
+ pgidx = area_va2idx(area, page_va);
+ area_get_entry(area, pgidx, &pa, &attr);
+
+ FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
+
+ TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
+ if (pmem->area != area || pmem->pgidx != pgidx)
+ continue;
+
+ assert(pa == get_pmem_pa(pmem));
+ area_set_entry(area, pgidx, 0, 0);
+ pgt_dec_used_entries(area->pgt);
+ TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ tee_pager_npages++;
+ set_npages();
+ TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
+ incr_zi_released();
+ return true;
+ }
+
+ return false;
+}
+
+/* Finds the oldest page and unmaps it from its old virtual address */
+static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
+{
+ struct tee_pager_pmem *pmem;
+
+ pmem = TAILQ_FIRST(&tee_pager_pmem_head);
+ if (!pmem) {
+ EMSG("No pmem entries");
+ return NULL;
+ }
+ if (pmem->pgidx != INVALID_PGIDX) {
+ uint32_t a;
+
+ assert(pmem->area && pmem->area->pgt);
+ area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ pgt_dec_used_entries(pmem->area->pgt);
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ tee_pager_save_page(pmem, a);
+ }
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ pmem->pgidx = INVALID_PGIDX;
+ pmem->area = NULL;
+ if (area->type == AREA_TYPE_LOCK) {
+ /* Move page to lock list */
+ if (tee_pager_npages <= 0)
+ panic("running out of pages");
+ tee_pager_npages--;
+ set_npages();
+ TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
+ } else {
+ /* move page to back */
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+ }
+
+ return pmem;
+}
+
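+/*
+ * Returns false if the faulting page isn't mapped and must be paged in by
+ * the caller. Returns true when the abort has been dealt with here:
+ * *handled is true if the access is expected to succeed on retry, false
+ * if a user TA made an illegal access and should be panicked.
+ */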
+static bool pager_update_permissions(struct tee_pager_area *area,
+ struct abort_info *ai, bool *handled)
+{
+ unsigned int pgidx = area_va2idx(area, ai->va);
+ uint32_t attr;
+ paddr_t pa;
+
+ *handled = false;
+
+ area_get_entry(area, pgidx, &pa, &attr);
+
+ /* Not mapped */
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ return false;
+
+ /* Not readable, should not happen */
+ if (abort_is_user_exception(ai)) {
+ if (!(attr & TEE_MATTR_UR))
+ return true;
+ } else {
+ if (!(attr & TEE_MATTR_PR)) {
+ abort_print_error(ai);
+ panic();
+ }
+ }
+
+ switch (core_mmu_get_fault_type(ai->fault_descr)) {
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ if (ai->abort_type == ABORT_TYPE_PREFETCH) {
+ /* Check if attempting to execute from a non-executable page */
+ if (abort_is_user_exception(ai)) {
+ if (!(attr & TEE_MATTR_UX))
+ return true;
+ } else {
+ if (!(attr & TEE_MATTR_PX)) {
+ abort_print_error(ai);
+ panic();
+ }
+ }
+ }
+ /* Since the page is mapped now it's OK */
+ break;
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ /* Check attempting to write to an RO page */
+ if (abort_is_user_exception(ai)) {
+ if (!(area->flags & TEE_MATTR_UW))
+ return true;
+ if (!(attr & TEE_MATTR_UW)) {
+ FMSG("Dirty %p",
+ (void *)(ai->va & ~SMALL_PAGE_MASK));
+ area_set_entry(area, pgidx, pa,
+ get_area_mattr(area->flags));
+ /* TODO only invalidate entry above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
+
+ } else {
+ if (!(area->flags & TEE_MATTR_PW)) {
+ abort_print_error(ai);
+ panic();
+ }
+ if (!(attr & TEE_MATTR_PW)) {
+ FMSG("Dirty %p",
+ (void *)(ai->va & ~SMALL_PAGE_MASK));
+ area_set_entry(area, pgidx, pa,
+ get_area_mattr(area->flags));
+ /* TODO only invalidate entry above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
+ }
+ /* Since the permissions have been updated, it's OK now */
+ break;
+ default:
+ /* Some fault we can't deal with */
+ if (abort_is_user_exception(ai))
+ return true;
+ abort_print_error(ai);
+ panic();
+ }
+ *handled = true;
+ return true;
+}
+
+#ifdef CFG_TEE_CORE_DEBUG
+static void stat_handle_fault(void)
+{
+ static size_t num_faults;
+ static size_t min_npages = SIZE_MAX;
+ static size_t total_min_npages = SIZE_MAX;
+
+ num_faults++;
+ if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
+ DMSG("nfaults %zu npages %zu (min %zu)",
+ num_faults, tee_pager_npages, min_npages);
+ min_npages = tee_pager_npages; /* reset */
+ }
+ if (tee_pager_npages < min_npages)
+ min_npages = tee_pager_npages;
+ if (tee_pager_npages < total_min_npages)
+ total_min_npages = tee_pager_npages;
+}
+#else
+static void stat_handle_fault(void)
+{
+}
+#endif
+
+bool tee_pager_handle_fault(struct abort_info *ai)
+{
+ struct tee_pager_area *area;
+ vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
+ uint32_t exceptions;
+ bool ret;
+
+#ifdef TEE_PAGER_DEBUG_PRINT
+ abort_print(ai);
+#endif
+
+ /*
+ * We're updating pages that can affect several active CPUs at a
+ * time below. We end up here because a thread tries to access some
+ * memory that isn't available. We have to be careful when making
+ * that memory available as other threads may succeed in accessing
+ * that address the moment after we've made it available.
+ *
+ * That means that we can't just map the memory and populate the
+ * page, instead we use the aliased mapping to populate the page
+ * and once everything is ready we map it.
+ */
+ exceptions = pager_lock();
+
+ stat_handle_fault();
+
+ /* check if the access is valid */
+ if (abort_is_user_exception(ai)) {
+ area = find_uta_area(ai->va);
+
+ } else {
+ area = find_area(&tee_pager_area_head, ai->va);
+ if (!area)
+ area = find_uta_area(ai->va);
+ }
+ if (!area || !area->pgt) {
+ ret = false;
+ goto out;
+ }
+
+ if (!tee_pager_unhide_page(page_va)) {
+ struct tee_pager_pmem *pmem = NULL;
+ uint32_t attr;
+
+ /*
+ * The page wasn't hidden, but some other core may have
+ * updated the table entry before we got here or we need
+ * to make a read-only page read-write (dirty).
+ */
+ if (pager_update_permissions(area, ai, &ret)) {
+ /*
+ * Nothing more to do with the abort. The problem
+ * could already have been dealt with by another
+ * core, or, if ret is false, the TA will be panicked.
+ */
+ goto out;
+ }
+
+ pmem = tee_pager_get_page(area);
+ if (!pmem) {
+ abort_print(ai);
+ panic();
+ }
+
+ /* load page code & data */
+ tee_pager_load_page(area, page_va, pmem->va_alias);
+
+ /*
+ * We've updated the page using the aliased mapping and
+ * some cache maintenance is now needed if it's an
+ * executable page.
+ *
+ * Since the d-cache is a Physically-indexed,
+ * physically-tagged (PIPT) cache we can clean the aliased
+ * address instead of the real virtual address.
+ *
+ * The i-cache can also be PIPT, but may be something else
+ * too; to keep it simple we invalidate the entire i-cache.
+ * As a future optimization we may invalidate only the
+ * aliased area if it is a PIPT cache, else the entire cache.
+ */
+ if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
+ /*
+ * Doing these operations to LoUIS (Level of
+ * unification, Inner Shareable) would be enough
+ */
+ cache_maintenance_l1(DCACHE_AREA_CLEAN,
+ pmem->va_alias, SMALL_PAGE_SIZE);
+
+ cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
+ }
+
+ pmem->area = area;
+ pmem->pgidx = area_va2idx(area, ai->va);
+ attr = get_area_mattr(area->flags) &
+ ~(TEE_MATTR_PW | TEE_MATTR_UW);
+ area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
+ pgt_inc_used_entries(area->pgt);
+
+ FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
+ area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
+
+ }
+
+ tee_pager_hide_pages();
+ ret = true;
+out:
+ pager_unlock(exceptions);
+ return ret;
+}
+
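+/*
+ * Adds the physical pages backing [vaddr, vaddr + npages * SMALL_PAGE_SIZE)
+ * to the pager's pool of pageable memory. If unmap is true the pages are
+ * unmapped right away, otherwise they stay mapped and keep backing the
+ * area they belong to until paged out.
+ */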
+void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
+{
+ struct core_mmu_table_info *ti = &tee_pager_tbl_info;
+ size_t n;
+
+ DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
+ vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
+
+ /* setup memory */
+ for (n = 0; n < npages; n++) {
+ struct tee_pager_pmem *pmem;
+ vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
+ unsigned pgidx = core_mmu_va2idx(ti, va);
+ paddr_t pa;
+ uint32_t attr;
+
+ /*
+ * Note that we can only support adding pages in the
+ * valid range of this table info, currently not a problem.
+ */
+ core_mmu_get_entry(ti, pgidx, &pa, &attr);
+
+ /* Ignore unmapped pages/blocks */
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+
+ pmem = malloc(sizeof(struct tee_pager_pmem));
+ if (!pmem)
+ panic("out of mem");
+
+ pmem->va_alias = pager_add_alias_page(pa);
+
+ if (unmap) {
+ pmem->area = NULL;
+ pmem->pgidx = INVALID_PGIDX;
+ core_mmu_set_entry(ti, pgidx, 0, 0);
+ pgt_dec_used_entries(&pager_core_pgt);
+ } else {
+ /*
+ * The page is still mapped, let's assign the area
+ * and update the protection bits accordingly.
+ */
+ pmem->area = find_area(&tee_pager_area_head, va);
+ assert(pmem->area->pgt == &pager_core_pgt);
+ pmem->pgidx = pgidx;
+ assert(pa == get_pmem_pa(pmem));
+ area_set_entry(pmem->area, pgidx, pa,
+ get_area_mattr(pmem->area->flags));
+ }
+
+ tee_pager_npages++;
+ incr_npages_all();
+ set_npages();
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+ }
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+#ifdef CFG_PAGED_USER_TA
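+/* Walks the list of page tables looking for the one covering va. */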
+static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
+{
+ struct pgt *p = pgt;
+
+ while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
+ p = SLIST_NEXT(p, link);
+ return p;
+}
+
+void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
+{
+ struct tee_pager_area *area;
+ struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
+
+ TAILQ_FOREACH(area, utc->areas, link) {
+ if (!area->pgt)
+ area->pgt = find_pgt(pgt, area->base);
+ else
+ assert(area->pgt == find_pgt(pgt, area->base));
+ if (!area->pgt)
+ panic();
+ }
+}
+
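+/*
+ * Saves the page contents back to the backing store, unmaps the page and
+ * releases its translation table entry.
+ */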
+static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
+{
+ uint32_t attr;
+
+ assert(pmem->area && pmem->area->pgt);
+
+ area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
+ area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+ tee_pager_save_page(pmem, attr);
+ assert(pmem->area->pgt->num_used_entries);
+ pmem->area->pgt->num_used_entries--;
+ pmem->pgidx = INVALID_PGIDX;
+ pmem->area = NULL;
+}
+
+void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
+{
+ struct tee_pager_pmem *pmem;
+ struct tee_pager_area *area;
+ uint32_t exceptions = pager_lock();
+
+ if (!pgt->num_used_entries)
+ goto out;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
+ continue;
+ if (pmem->area->pgt == pgt)
+ pager_save_and_release_entry(pmem);
+ }
+ assert(!pgt->num_used_entries);
+
+out:
+ if (is_user_ta_ctx(pgt->ctx)) {
+ TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
+ if (area->pgt == pgt)
+ area->pgt = NULL;
+ }
+ }
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
+#endif /*CFG_PAGED_USER_TA*/
+
+void tee_pager_release_phys(void *addr, size_t size)
+{
+ bool unmapped = false;
+ vaddr_t va = (vaddr_t)addr;
+ vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
+ vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
+ struct tee_pager_area *area;
+ uint32_t exceptions;
+
+ if (!size)
+ return;
+
+ area = find_area(&tee_pager_area_head, begin);
+ if (!area ||
+ area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
+ panic();
+
+ exceptions = pager_lock();
+
+ for (va = begin; va < end; va += SMALL_PAGE_SIZE)
+ unmapped |= tee_pager_release_one_phys(area, va);
+
+ /* Invalidate secure TLB */
+ if (unmapped)
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ pager_unlock(exceptions);
+}
+KEEP_PAGER(tee_pager_release_phys);
+
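+/*
+ * Allocates pageable core memory from the tee_mm_vcore pool. If
+ * TEE_MATTR_LOCKED is set in flags the pages are kept resident once
+ * paged in instead of being eligible for eviction.
+ */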
+void *tee_pager_alloc(size_t size, uint32_t flags)
+{
+ tee_mm_entry_t *mm;
+ uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
+
+ if (!size)
+ return NULL;
+
+ mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
+ if (!mm)
+ return NULL;
+
+ tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ f, NULL, NULL);
+
+ return (void *)tee_mm_get_smem(mm);
+}
diff --git a/core/arch/arm/plat-d02/conf.mk b/core/arch/arm/plat-d02/conf.mk
new file mode 100644
index 0000000..06a573c
--- /dev/null
+++ b/core/arch/arm/plat-d02/conf.mk
@@ -0,0 +1,34 @@
+CFG_NUM_THREADS ?= 16
+CFG_CRYPTO_WITH_CE ?= y
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_SOFTWARE_PRNG ?= n
+# Override default size of emulated TrustZone protected SRAM, 384 kB
+CFG_CORE_TZSRAM_EMUL_SIZE ?= 393216
+# Overrides default in mk/config.mk with 96 kB
+CFG_CORE_HEAP_SIZE ?= 98304
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_HI16XX_UART,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+ifneq ($(CFG_WITH_SOFTWARE_PRNG),y)
+$(call force,CFG_HI16XX_RNG,y)
+endif
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a57
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
diff --git a/core/arch/arm/plat-d02/kern.ld.S b/core/arch/arm/plat-d02/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-d02/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-d02/link.mk b/core/arch/arm/plat-d02/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-d02/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-d02/main.c b/core/arch/arm/plat-d02/main.c
new file mode 100644
index 0000000..95161d1
--- /dev/null
+++ b/core/arch/arm/plat-d02/main.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <drivers/hi16xx_uart.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/tee_pager.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, HI16XX_UART_REG_SIZE);
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ hi16xx_uart_init(console_base(), CONSOLE_UART_CLK_IN_HZ,
+ CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ hi16xx_uart_putc('\r', base);
+ hi16xx_uart_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ hi16xx_uart_flush(console_base());
+}
diff --git a/core/arch/arm/plat-d02/platform_config.h b/core/arch/arm/plat-d02/platform_config.h
new file mode 100644
index 0000000..fffa3d1
--- /dev/null
+++ b/core/arch/arm/plat-d02/platform_config.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /* ARM64 */
+
+/* UART */
+#define PERI_SUB_CTRL_ADDR 0x80000000
+#define CONSOLE_UART_BASE (PERI_SUB_CTRL_ADDR + 0x00300000)
+#define CONSOLE_BAUDRATE 115200
+#define CONSOLE_UART_CLK_IN_HZ 200000000
+
+/* ALG sub-controller */
+#define ALG_SC_BASE 0xD0000000
+#define ALG_SC_REG_SIZE 0xF010
+
+/* RNG */
+#define RNG_BASE 0xD1010000
+#define RNG_REG_SIZE 0x18
+
+/*
+ * HiSilicon D02 memory map
+ *
+ * Note: the physical address ranges below correspond to DRAM which is
+ * non-secure by default. Therefore, the terms TZDRAM and TZSRAM may not
+ * reflect the reality and only indicate areas that "would normally be"
+ * secure DRAM and secure SRAM in a more complete implementation.
+ * The memory map was defined like this for lack of better documentation.
+ * It is good enough for development/testing purposes.
+ *
+ * CFG_WITH_PAGER=n
+ *
+ * 0x7FC0_0000 -
+ * Linux/other | DRAM1
+ * 0x5180_0000 -
+ * TA RAM: 16 MiB |
+ * 0x5080_0000 | TZDRAM
+ * TEE RAM: 4 MiB (CFG_TEE_RAM_VA_SIZE) |
+ * 0x5040_0000 [TZDRAM_BASE, CFG_TEE_LOAD_ADDR] -
+ * Shared memory: 4 MiB | SHMEM
+ * 0x5000_0000 -
+ * Linux/other | DRAM0
+ * 0x0000_0000 [DRAM0_BASE] -
+ *
+ * CFG_WITH_PAGER=y
+ *
+ * 0x7FC0_0000 -
+ * Linux/other | DRAM1
+ * 0x5180_0000 -
+ * TA RAM: 20096 KiB (TZDRAM_SIZE) | TZDRAM
+ * 0x5046_0000 -
+ * TEE RAM: 384 KiB (TZSRAM_SIZE) | TZSRAM
+ * 0x5040_0000 [TZSRAM_BASE, CFG_TEE_LOAD_ADDR] -
+ * Shared memory: 4 MiB | SHMEM
+ * 0x5000_0000 -
+ * Linux/other | DRAM0
+ * 0x0000_0000 [DRAM0_BASE] -
+ */
+
+#define DRAM0_BASE 0x00000000
+#define DRAM0_SIZE 0x50000000
+
+#define DRAM1_BASE 0x51800000
+#define DRAM1_SIZE 0x2E400000
+
+#ifdef CFG_WITH_PAGER
+
+#define TZSRAM_BASE 0x50400000
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE 0x50460000
+#define TZDRAM_SIZE (20096 * 1024)
+
+#define CFG_TEE_RAM_START TZSRAM_BASE
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+#define CFG_TA_RAM_START ROUNDUP(TZDRAM_BASE, CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN(TZDRAM_SIZE, CORE_MMU_DEVICE_SIZE)
+
+#define CFG_TEE_RAM_VA_SIZE (1 * 1024 * 1024)
+
+#else /* CFG_WITH_PAGER */
+
+#define TZDRAM_BASE 0x50400000
+#define TZDRAM_SIZE (20 * 1024 * 1024)
+
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE),\
+ CORE_MMU_DEVICE_SIZE)
+
+#define CFG_TEE_RAM_VA_SIZE (4 * 1024 * 1024)
+
+#endif /* CFG_WITH_PAGER */
+
+#define CFG_SHMEM_START 0x50000000
+#define CFG_SHMEM_SIZE (4 * 1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 16
+
+#define CFG_TEE_LOAD_ADDR 0x50400000
+
+#endif /* PLATFORM_CONFIG_H */
diff --git a/core/arch/arm/plat-d02/sub.mk b/core/arch/arm/plat-d02/sub.mk
new file mode 100644
index 0000000..8ddc2fd
--- /dev/null
+++ b/core/arch/arm/plat-d02/sub.mk
@@ -0,0 +1,2 @@
+global-incdirs-y += .
+srcs-y += main.c
diff --git a/core/arch/arm/plat-hikey/conf.mk b/core/arch/arm/plat-hikey/conf.mk
new file mode 100644
index 0000000..8a8ef2f
--- /dev/null
+++ b/core/arch/arm/plat-hikey/conf.mk
@@ -0,0 +1,42 @@
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a53
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PL011,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+CFG_NUM_THREADS ?= 8
+CFG_CRYPTO_WITH_CE ?= y
+CFG_WITH_STACK_CANARIES ?= y
+
+CFG_PL061 ?= y
+CFG_PL022 ?= y
+CFG_SPI ?= y
+
+ifeq ($(CFG_SPI_TEST),y)
+$(call force,CFG_SPI,y)
+endif
+
+ifeq ($(CFG_SPI),y)
+$(call force,CFG_PL061,y)
+$(call force,CFG_PL022,y)
+endif
+
+ifeq ($(CFG_PL061),y)
+core-platform-cppflags += -DPLAT_PL061_MAX_GPIOS=160
+endif
diff --git a/core/arch/arm/plat-hikey/hikey_peripherals.h b/core/arch/arm/plat-hikey/hikey_peripherals.h
new file mode 100644
index 0000000..2d4500c
--- /dev/null
+++ b/core/arch/arm/plat-hikey/hikey_peripherals.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd and Contributors. All rights reserved.
+ * Copyright (c) 2016, Hisilicon Ltd and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIKEY_PERIPHERALS_H__
+#define __HIKEY_PERIPHERALS_H__
+
+#include <types_ext.h>
+
+#define PMUSSI_BASE 0xF8000000
+#define PERI_BASE 0xF7030000
+#define PMX0_BASE 0xF7010000
+#define PMX1_BASE 0xF7010800
+#define GPIO6_BASE 0xF7022000
+#define SPI_BASE 0xF7106000
+
+#define PMUSSI_REG_SIZE 0x1000
+#define PERI_BASE_REG_SIZE 0x2000
+#define PMX0_REG_SIZE 0x27c
+#define PMX1_REG_SIZE 0x28c
+
+/* register offsets */
+#define PMUSSI_LDO21_REG_ADJ SHIFT_U32(0x86, 2)
+#define PMUSSI_ENA_LDO17_22 SHIFT_U32(0x2F, 2)
+
+#define PERI_SC_PERIPH_RSTDIS3 0x334
+#define PERI_SC_PERIPH_RSTSTAT3 0x338
+#define PERI_SC_PERIPH_CLKEN3 0x230
+#define PERI_SC_PERIPH_CLKSTAT3 0x238
+
+#define PMX0_IOMG104 0x1a0
+#define PMX0_IOMG105 0x1a4
+#define PMX0_IOMG106 0x1a8
+#define PMX0_IOMG107 0x1ac
+
+#define PMX1_IOCG104 0x1b0
+#define PMX1_IOCG105 0x1b4
+#define PMX1_IOCG106 0x1b8
+#define PMX1_IOCG107 0x1bc
+/* end register offsets */
+
+#define PMUSSI_LDO21_REG_VL_MASK 0x7
+#define PMUSSI_LDO21_REG_VL_1V8 0x3
+#define PMUSSI_ENA_LDO21 BIT(4)
+
+#define PERI_RST3_SSP BIT(9)
+#define PERI_CLK3_SSP BIT(9)
+
+#define PINMUX_GPIO 0
+#define PINMUX_SPI 1
+
+#define PINCFG_NOPULL 0
+#define PINCFG_PULLUP 1
+#define PINCFG_PULLDN 2
+
+#define GPIO6_2 50
+#define SPI_CLK_HZ 150000000 /* 150 MHz */
+#define SPI_500_KHZ 500000
+#define SPI_10_KHZ 10000
+
+vaddr_t nsec_periph_base(paddr_t pa);
+
+#ifdef CFG_SPI
+void spi_init(void);
+#ifdef CFG_SPI_TEST
+void spi_test(void);
+#endif /* CFG_SPI_TEST */
+#endif /* CFG_SPI */
+
+#endif /* __HIKEY_PERIPHERALS_H__ */
diff --git a/core/arch/arm/plat-hikey/kern.ld.S b/core/arch/arm/plat-hikey/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-hikey/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-hikey/link.mk b/core/arch/arm/plat-hikey/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-hikey/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-hikey/main.c b/core/arch/arm/plat-hikey/main.c
new file mode 100644
index 0000000..36789ce
--- /dev/null
+++ b/core/arch/arm/plat-hikey/main.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <drivers/pl011.h>
+#ifdef CFG_SPI
+#include <drivers/pl022_spi.h>
+#include <drivers/pl061_gpio.h>
+#endif
+#include <hikey_peripherals.h>
+#include <initcall.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/tee_pager.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, PL011_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, PMUSSI_BASE, PMUSSI_REG_SIZE);
+#ifdef CFG_SPI
+register_phys_mem(MEM_AREA_IO_NSEC, PERI_BASE, PERI_BASE_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, PMX0_BASE, PMX0_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, PMX1_BASE, PMX1_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, GPIO6_BASE, PL061_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, SPI_BASE, PL022_REG_SIZE);
+#endif
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ pl011_init(console_base(), CONSOLE_UART_CLK_IN_HZ, CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ pl011_putc('\r', base);
+ pl011_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ pl011_flush(console_base());
+}
+
+vaddr_t nsec_periph_base(paddr_t pa)
+{
+ if (cpu_mmu_enabled())
+ return (vaddr_t)phys_to_virt(pa, MEM_AREA_IO_NSEC);
+ return (vaddr_t)pa;
+}
+
+#ifdef CFG_SPI
+void spi_init(void)
+{
+ uint32_t shifted_val, read_val;
+ vaddr_t peri_base = nsec_periph_base(PERI_BASE);
+ vaddr_t pmx0_base = nsec_periph_base(PMX0_BASE);
+ vaddr_t pmx1_base = nsec_periph_base(PMX1_BASE);
+
+ DMSG("take SPI0 out of reset\n");
+ shifted_val = PERI_RST3_SSP;
+ /*
+ * no need to read PERI_SC_PERIPH_RSTDIS3 first
+ * as all the bits are processed and cleared after writing
+ */
+ write32(shifted_val, peri_base + PERI_SC_PERIPH_RSTDIS3);
+ DMSG("PERI_SC_PERIPH_RSTDIS3: 0x%x\n",
+ read32(peri_base + PERI_SC_PERIPH_RSTDIS3));
+
+ /*
+ * wait until the requested device is out of reset
+ * and ready to be used
+ */
+ do {
+ read_val = read32(peri_base + PERI_SC_PERIPH_RSTSTAT3);
+ } while (read_val & shifted_val);
+ DMSG("PERI_SC_PERIPH_RSTSTAT3: 0x%x\n", read_val);
+
+ DMSG("enable SPI clock\n");
+ /*
+ * no need to read PERI_SC_PERIPH_CLKEN3 first
+ * as all the bits are processed and cleared after writing
+ */
+ shifted_val = PERI_CLK3_SSP;
+ write32(shifted_val, peri_base + PERI_SC_PERIPH_CLKEN3);
+ DMSG("PERI_SC_PERIPH_CLKEN3: 0x%x\n",
+ read32(peri_base + PERI_SC_PERIPH_CLKEN3));
+
+ DMSG("PERI_SC_PERIPH_CLKSTAT3: 0x%x\n",
+ read32(peri_base + PERI_SC_PERIPH_CLKSTAT3));
+
+ /*
+ * GPIO6_2 can be configured as PINMUX_GPIO, but when configured as
+ * PINMUX_SPI the HW IP controls the chip select pin, so we don't have
+ * to do it manually.
+ * The only concern is that the IP will pulse it between each packet,
+ * which might not work with certain clients. There seems to be no
+ * option to configure it to stay enabled for the total duration of the
+ * transfer.
+ * ref: http://infocenter.arm.com/help/topic/com.arm.doc.ddi0194h/CJACFAFG.html
+ */
+ DMSG("configure gpio6 pins 0-3 as SPI\n");
+ write32(PINMUX_SPI, pmx0_base + PMX0_IOMG104);
+ write32(PINMUX_SPI, pmx0_base + PMX0_IOMG105);
+ write32(PINMUX_SPI, pmx0_base + PMX0_IOMG106);
+ write32(PINMUX_SPI, pmx0_base + PMX0_IOMG107);
+
+ DMSG("configure gpio6 pins 0-3 as nopull\n");
+ write32(PINCFG_NOPULL, pmx1_base + PMX1_IOCG104);
+ write32(PINCFG_NOPULL, pmx1_base + PMX1_IOCG105);
+ write32(PINCFG_NOPULL, pmx1_base + PMX1_IOCG106);
+ write32(PINCFG_NOPULL, pmx1_base + PMX1_IOCG107);
+
+#ifdef CFG_SPI_TEST
+ spi_test();
+#endif
+}
+#endif
+
+static TEE_Result peripherals_init(void)
+{
+ vaddr_t pmussi_base = nsec_periph_base(PMUSSI_BASE);
+
+ DMSG("enable LDO21_1V8 source (pin 35) on LS connector\n");
+ /*
+ * Mezzanine cards usually use this to source level shifters for
+ * UART, GPIO, SPI, I2C, etc., so if it is not enabled, connected
+ * peripherals will not work (during the bootloader stage)
+ * until Linux is booted.
+ */
+ io_mask8(pmussi_base + PMUSSI_LDO21_REG_ADJ, PMUSSI_LDO21_REG_VL_1V8,
+ PMUSSI_LDO21_REG_VL_MASK);
+ write8(PMUSSI_ENA_LDO21, pmussi_base + PMUSSI_ENA_LDO17_22);
+
+#ifdef CFG_SPI
+ spi_init();
+#endif
+ return TEE_SUCCESS;
+}
+
+driver_init(peripherals_init);
diff --git a/core/arch/arm/plat-hikey/platform_config.h b/core/arch/arm/plat-hikey/platform_config.h
new file mode 100644
index 0000000..633759f
--- /dev/null
+++ b/core/arch/arm/plat-hikey/platform_config.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /* ARM64 */
+
+/* PL011 UART */
+#if defined(CFG_CONSOLE_UART) && (CFG_CONSOLE_UART == 0)
+#define CONSOLE_UART_BASE 0xF8015000
+#elif !defined(CFG_CONSOLE_UART) || (CFG_CONSOLE_UART == 3)
+#define CONSOLE_UART_BASE 0xF7113000
+#else
+#error Unknown console UART
+#endif
+
+#define CONSOLE_BAUDRATE 115200
+#define CONSOLE_UART_CLK_IN_HZ 19200000
+
+/*
+ * HiKey memory map
+ *
+ * We use only non-secure DRAM (TZDRAM and TZSRAM are emulated).
+ *
+ * CFG_WITH_PAGER=n
+ *
+ * 0x4000_0000 -
+ * TA RAM: 15 MiB |
+ * 0x3F10_0000 | TZDRAM
+ * TEE RAM: 1 MiB (CFG_TEE_RAM_VA_SIZE) |
+ * 0x3F00_0000 [TZDRAM_BASE, BL32_LOAD_ADDR] -
+ *   Shared memory: 2 MiB                        |
+ * 0x3EE0_0000                                    | DRAM0
+ * Available to Linux |
+ * 0x0000_0000 [DRAM0_BASE] -
+ *
+ * CFG_WITH_PAGER=y
+ *
+ * 0x4000_0000 -
+ * TA RAM: 15 MiB | TZDRAM
+ * 0x3F10_0000 -
+ * Unused
+ * 0x3F03_2000 -
+ * TEE RAM: 200 KiB | TZSRAM
+ * 0x3F00_0000 [TZSRAM_BASE, BL32_LOAD_ADDR] -
+ *   Shared memory: 2 MiB                        |
+ * 0x3EE0_0000                                    | DRAM0
+ * Available to Linux |
+ * 0x0000_0000 [DRAM0_BASE] -
+ */
+
+#define DRAM0_BASE 0x00000000
+#define DRAM0_SIZE 0x3F000000
+
+#ifdef CFG_WITH_PAGER
+
+#define TZSRAM_BASE 0x3F000000
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE 0x3F100000
+#define TZDRAM_SIZE (15 * 1024 * 1024)
+
+#else /* CFG_WITH_PAGER */
+
+#define TZDRAM_BASE 0x3F000000
+#define TZDRAM_SIZE (16 * 1024 * 1024)
+
+#endif /* CFG_WITH_PAGER */
+
+#define CFG_SHMEM_START 0x3EE00000
+#define CFG_SHMEM_SIZE (2 * 1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 8
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#define CFG_TEE_LOAD_ADDR 0x3F000000
+
+#ifdef CFG_WITH_PAGER
+
+#define CFG_TEE_RAM_START TZSRAM_BASE
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+#define CFG_TA_RAM_START ROUNDUP(TZDRAM_BASE, CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN(TZDRAM_SIZE, CORE_MMU_DEVICE_SIZE)
+
+#else /* CFG_WITH_PAGER */
+
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE),\
+ CORE_MMU_DEVICE_SIZE)
+
+#endif /* CFG_WITH_PAGER */
+
+#endif /* PLATFORM_CONFIG_H */
diff --git a/core/arch/arm/plat-hikey/spi_test.c b/core/arch/arm/plat-hikey/spi_test.c
new file mode 100644
index 0000000..11843c0
--- /dev/null
+++ b/core/arch/arm/plat-hikey/spi_test.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <drivers/pl022_spi.h>
+#include <drivers/pl061_gpio.h>
+#include <hikey_peripherals.h>
+#include <io.h>
+#include <kernel/tee_time.h>
+#include <stdint.h>
+#include <trace.h>
+#include <util.h>
+
+#define PL022_STAT 0x00C
+#define PL022_STAT_BSY SHIFT_U32(1, 4)
+
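+/*
+ * Drives the chip select line manually through GPIO6_2. The GPIO is set up
+ * on first use; the callback busy-waits until the SSP is idle before
+ * toggling CS.
+ */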
+static void spi_cs_callback(enum gpio_level value)
+{
+ static bool inited;
+ static struct pl061_data pd;
+ vaddr_t gpio6_base = nsec_periph_base(GPIO6_BASE);
+ vaddr_t spi_base = nsec_periph_base(SPI_BASE);
+
+ if (!inited) {
+ pl061_init(&pd);
+ pl061_register(gpio6_base, 6);
+ pl061_set_mode_control(GPIO6_2, PL061_MC_SW);
+ pd.chip.ops->set_interrupt(GPIO6_2, GPIO_INTERRUPT_DISABLE);
+ pd.chip.ops->set_direction(GPIO6_2, GPIO_DIR_OUT);
+ inited = true;
+ }
+
+ if (read8(spi_base + PL022_STAT) & PL022_STAT_BSY)
+ DMSG("pl022 busy - do NOT set CS!");
+ while (read8(spi_base + PL022_STAT) & PL022_STAT_BSY)
+ ;
+ DMSG("pl022 done - set CS!");
+
+ pd.chip.ops->set_value(GPIO6_2, value);
+}
+
+static void spi_set_cs_mux(uint32_t val)
+{
+ uint32_t data;
+ vaddr_t pmx0_base = nsec_periph_base(PMX0_BASE);
+
+ if (val == PINMUX_SPI) {
+ DMSG("Configure gpio6 pin2 as SPI");
+ write32(PINMUX_SPI, pmx0_base + PMX0_IOMG106);
+ } else {
+ DMSG("Configure gpio6 pin2 as GPIO");
+ write32(PINMUX_GPIO, pmx0_base + PMX0_IOMG106);
+ }
+
+ data = read32(pmx0_base + PMX0_IOMG106);
+ if (data)
+ DMSG("gpio6 pin2 is SPI");
+ else
+ DMSG("gpio6 pin2 is GPIO");
+}
+
+static void spi_test_with_manual_cs_control(void)
+{
+ struct pl022_data pd;
+ vaddr_t spi_base = nsec_periph_base(SPI_BASE);
+ uint8_t tx[3] = {0x01, 0x80, 0x00};
+ uint8_t rx[3] = {0};
+ size_t i, j, len = 3;
+ enum spi_result res;
+
+ spi_set_cs_mux(PINMUX_GPIO);
+
+ DMSG("Set CS callback");
+ pd.cs_control = PL022_CS_CTRL_MANUAL;
+
+ DMSG("spi_base: 0x%" PRIxVA "\n", spi_base);
+ DMSG("Configure SPI");
+ pd.base = spi_base;
+ pd.clk_hz = SPI_CLK_HZ;
+ pd.speed_hz = SPI_10_KHZ;
+ pd.mode = SPI_MODE0;
+ pd.data_size_bits = 8;
+ pd.loopback = true;
+
+ pl022_init(&pd);
+ pd.chip.ops->configure(&pd.chip);
+ pd.chip.ops->start(&pd.chip);
+
+ /*
+ * Pulse CS only once for the whole transmission.
+ * This is the scheme used by the pl022 driver.
+ */
+ spi_cs_callback(GPIO_LEVEL_HIGH);
+ tee_time_busy_wait(2);
+ spi_cs_callback(GPIO_LEVEL_LOW);
+ for (j = 0; j < 10; j++) {
+ DMSG("SPI test loop: %zu", j);
+ res = pd.chip.ops->txrx8(&pd.chip, tx, rx, len);
+ if (res) {
+ EMSG("SPI transceive error %d", res);
+ break;
+ }
+
+ for (i = 0; i < len; i++)
+ DMSG("rx[%zu] = 0x%x", i, rx[i]);
+
+ tee_time_busy_wait(20);
+ }
+ spi_cs_callback(GPIO_LEVEL_HIGH);
+
+ /* Pulse CS once per transfer */
+ spi_cs_callback(GPIO_LEVEL_HIGH);
+ tee_time_busy_wait(2);
+ for (j = 10; j < 20; j++) {
+ DMSG("SPI test loop: %zu", j);
+ spi_cs_callback(GPIO_LEVEL_LOW);
+ res = pd.chip.ops->txrx8(&pd.chip, tx, rx, len);
+ if (res) {
+ EMSG("SPI transceive error %d", res);
+ break;
+ }
+
+ for (i = 0; i < len; i++)
+ DMSG("rx[%zu] = 0x%x", i, rx[i]);
+
+ tee_time_busy_wait(20);
+ spi_cs_callback(GPIO_LEVEL_HIGH);
+ }
+
+ /* Pulse CS once per word/byte */
+ spi_set_cs_mux(PINMUX_SPI);
+ tee_time_busy_wait(2);
+ for (j = 20; j < 30; j++) {
+ DMSG("SPI test loop: %zu", j);
+ res = pd.chip.ops->txrx8(&pd.chip, tx, rx, len);
+ if (res) {
+ EMSG("SPI transceive error %d", res);
+ break;
+ }
+
+ for (i = 0; i < len; i++)
+ DMSG("rx[%zu] = 0x%x", i, rx[i]);
+
+ tee_time_busy_wait(20);
+ }
+
+ pd.chip.ops->end(&pd.chip);
+}
+
+static void spi_test_with_registered_cs_cb(void)
+{
+ struct pl022_data pd;
+ vaddr_t spi_base = nsec_periph_base(SPI_BASE);
+ uint8_t tx[3] = {0x01, 0x80, 0x00};
+ uint8_t rx[3] = {0};
+ size_t i, j, len = 3;
+ enum spi_result res;
+
+ spi_set_cs_mux(PINMUX_GPIO);
+
+ DMSG("Set CS callback");
+ pd.cs_data.cs_cb = spi_cs_callback;
+ pd.cs_control = PL022_CS_CTRL_CB;
+
+ DMSG("spi_base: 0x%" PRIxVA "\n", spi_base);
+ DMSG("Configure SPI");
+ pd.base = spi_base;
+ pd.clk_hz = SPI_CLK_HZ;
+ pd.speed_hz = SPI_10_KHZ;
+ pd.mode = SPI_MODE0;
+ pd.data_size_bits = 8;
+ pd.loopback = true;
+
+ pl022_init(&pd);
+ pd.chip.ops->configure(&pd.chip);
+ pd.chip.ops->start(&pd.chip);
+
+ for (j = 0; j < 20; j++) {
+ DMSG("SPI test loop: %zu", j);
+ res = pd.chip.ops->txrx8(&pd.chip, tx, rx, len);
+ if (res) {
+ EMSG("SPI transceive error %d", res);
+ break;
+ }
+
+ for (i = 0; i < len; i++)
+ DMSG("rx[%zu] = 0x%x", i, rx[i]);
+
+ tee_time_busy_wait(20);
+ }
+
+ pd.chip.ops->end(&pd.chip);
+}
+
+static void spi_test_with_builtin_cs_control(void)
+{
+ struct pl061_data pd061;
+ struct pl022_data pd022;
+ vaddr_t gpio6_base = nsec_periph_base(GPIO6_BASE);
+ vaddr_t spi_base = nsec_periph_base(SPI_BASE);
+ uint8_t tx[3] = {0x01, 0x80, 0x00};
+ uint8_t rx[3] = {0};
+ size_t i, j, len = 3;
+ enum spi_result res;
+
+ spi_set_cs_mux(PINMUX_GPIO);
+
+ DMSG("gpio6_base: 0x%" PRIxVA "\n", gpio6_base);
+ DMSG("Configure GPIO");
+ pl061_init(&pd061);
+ pl061_register(gpio6_base, 6);
+ DMSG("Enable software mode control for chip select");
+ pl061_set_mode_control(GPIO6_2, PL061_MC_SW);
+
+ pd022.cs_data.gpio_data.chip = &pd061.chip;
+ pd022.cs_data.gpio_data.pin_num = GPIO6_2;
+ pd022.cs_control = PL022_CS_CTRL_AUTO_GPIO;
+
+ DMSG("spi_base: 0x%" PRIxVA "\n", spi_base);
+ DMSG("Configure SPI");
+ pd022.base = spi_base;
+ pd022.clk_hz = SPI_CLK_HZ;
+ pd022.speed_hz = SPI_10_KHZ;
+ pd022.mode = SPI_MODE0;
+ pd022.data_size_bits = 8;
+ pd022.loopback = true;
+
+ pl022_init(&pd022);
+ pd022.chip.ops->configure(&pd022.chip);
+ pd022.chip.ops->start(&pd022.chip);
+
+ for (j = 0; j < 20; j++) {
+ DMSG("SPI test loop: %zu", j);
+ res = pd022.chip.ops->txrx8(&pd022.chip, tx, rx, len);
+ if (res) {
+ EMSG("SPI transceive error %d", res);
+ break;
+ }
+
+ for (i = 0; i < len; i++)
+ DMSG("rx[%zu] = 0x%x", i, rx[i]);
+
+ tee_time_busy_wait(20);
+ }
+
+ pd022.chip.ops->end(&pd022.chip);
+}
+
+/*
+ * spi_init() MUST be run before calling this function!
+ *
+ * spi_test runs some loopback tests, so the SPI module will just receive
+ * what is transmitted, i.e. 0x01, 0x80, 0x00.
+ *
+ * In non-loopback mode, the transmitted value will elicit a readback of
+ * the measured value from the ADC chip on the Linksprite 96Boards
+ * Mezzanine card [1], which can be connected to either a sliding
+ * rheostat [2] or photoresistor [3].
+ *
+ * [1] http://linksprite.com/wiki/index.php5?title=Linker_Mezzanine_card_for_96board
+ * [2] http://learn.linksprite.com/96-board/sliding-rheostat
+ * [3] http://learn.linksprite.com/96-board/photoresistor
+ */
+void spi_test(void)
+{
+ spi_test_with_builtin_cs_control();
+ spi_test_with_registered_cs_cb();
+ spi_test_with_manual_cs_control();
+}
diff --git a/core/arch/arm/plat-hikey/sub.mk b/core/arch/arm/plat-hikey/sub.mk
new file mode 100644
index 0000000..5c23b86
--- /dev/null
+++ b/core/arch/arm/plat-hikey/sub.mk
@@ -0,0 +1,3 @@
+global-incdirs-y += .
+srcs-y += main.c
+srcs-$(CFG_SPI_TEST) += spi_test.c
diff --git a/core/arch/arm/plat-imx/a9_plat_init.S b/core/arch/arm/plat-imx/a9_plat_init.S
new file mode 100644
index 0000000..64d03f5
--- /dev/null
+++ b/core/arch/arm/plat-imx/a9_plat_init.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Entry points for the A9 inits, A9 revision specific or not.
+ * It is assumed that no stack is available when these routines are called.
+ * It is assumed that each routine is called with the return address in LR
+ * and with ARM registers R0, R1, R2, R3 being scratchable.
+ */
+
+#include <arm32.h>
+#include <arm32_macros.S>
+#include <arm32_macros_cortex_a9.S>
+#include <asm.S>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * Cortex A9 early configuration
+ *
+ * Use registers R0-R3.
+ * No stack usage.
+ * LR holds the return address.
+ * Trap CPU in case of error.
+ */
+FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
+
+ /*
+ * Disallow NSec to mask FIQ [bit4: FW=0]
+ * Allow NSec to manage Imprecise Abort [bit5: AW=1]
+ * Imprecise Abort trapped to Abort Mode [bit3: EA=0]
+ * In Sec world, FIQ trapped to FIQ Mode [bit2: FIQ=0]
+ * IRQ always trapped to IRQ Mode [bit1: IRQ=0]
+ * Secure World [bit0: NS=0]
+ */
+ mov r0, #SCR_AW
+ write_scr r0
+
+ /*
+ * Mandated HW config loaded
+ *
+ * SCTLR = 0x00004000
+ * - Round-Robin replac. for icache, btac, i/duTLB (bit14: RoundRobin)
+ *
+ * ACTRL = 0x00000041
+ * - core always in full SMP (FW bit0=1, SMP bit6=1)
+ * - L2 write full line of zero disabled (bit3=0)
+ * (keep WFLZ low. Will be set once outer L2 is ready)
+ *
+ * NSACR = 0x00020C00
+ * - NSec cannot change ACTRL.SMP (NS_SMP bit18=0)
+ * - Nsec can lockdown TLB (TL bit17=1)
+ * - NSec cannot access PLE (PLE bit16=0)
+ * - NSec can use SIMD/VFP (CP10/CP11) (bit15:14=2b00, bit11:10=2b11)
+ *
+ * PCR = 0x00000001
+ * - no change latency, enable clk gating
+ */
+ movw r0, #0x4000
+ movt r0, #0x0000
+ write_sctlr r0
+
+ movw r0, #0x0041
+ movt r0, #0x0000
+ write_actlr r0
+
+ movw r0, #0x0C00
+ movt r0, #0x0002
+ write_nsacr r0
+
+ movw r0, #0x0000
+ movt r0, #0x0001
+ write_pcr r0
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC plat_cpu_reset_early
diff --git a/core/arch/arm/plat-imx/conf.mk b/core/arch/arm/plat-imx/conf.mk
new file mode 100644
index 0000000..785736a
--- /dev/null
+++ b/core/arch/arm/plat-imx/conf.mk
@@ -0,0 +1,34 @@
+PLATFORM_FLAVOR ?= mx6ulevk
+
+ifeq ($(PLATFORM_FLAVOR),mx6ulevk)
+arm32-platform-cpuarch := cortex-a7
+endif
+ifeq ($(PLATFORM_FLAVOR),$(filter $(PLATFORM_FLAVOR),mx6qsabrelite mx6qsabresd))
+arm32-platform-cpuarch := cortex-a9
+endif
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_IMX_UART,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_WITH_SOFTWARE_PRNG,y)
+ifeq ($(PLATFORM_FLAVOR),mx6ulevk)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+endif
+ifeq ($(PLATFORM_FLAVOR),$(filter $(PLATFORM_FLAVOR),mx6qsabrelite mx6qsabresd))
+$(call force,CFG_PL310,y)
+$(call force,CFG_PL310_LOCKED,y)
+$(call force,CFG_SECURE_TIME_SOURCE_REE,y)
+
+CFG_BOOT_SYNC_CPU ?= y
+CFG_BOOT_SECONDARY_REQUEST ?= y
+endif
+
+ta-targets = ta_arm32
+
+CFG_CRYPTO_SIZE_OPTIMIZATION ?= n
+CFG_WITH_STACK_CANARIES ?= y
diff --git a/core/arch/arm/plat-imx/imx6ul.c b/core/arch/arm/plat-imx/imx6ul.c
new file mode 100644
index 0000000..795c750
--- /dev/null
+++ b/core/arch/arm/plat-imx/imx6ul.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm32.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <platform_config.h>
+#include <stdint.h>
+
+static void init_csu(void)
+{
+ uintptr_t addr;
+
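+	/*
+	 * Each 32-bit CSU_CSL register holds the settings for two peripheral
+	 * groups, one per half-word: CSU_ACCESS_ALL (0x00FF00FF) grants full
+	 * access to both halves and CSU_SETTING_LOCK (0x01000100) sets the
+	 * lock bit of each half.
+	 */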
+ /* first grant all peripherals */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(CSU_ACCESS_ALL, addr);
+
+ /* lock the settings */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(read32(addr) | CSU_SETTING_LOCK, addr);
+}
+
+/* MMU not enabled now */
+void plat_cpu_reset_late(void)
+{
+ init_csu();
+}
diff --git a/core/arch/arm/plat-imx/imx_pl310.c b/core/arch/arm/plat-imx/imx_pl310.c
new file mode 100644
index 0000000..fcad225
--- /dev/null
+++ b/core/arch/arm/plat-imx/imx_pl310.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm32.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <platform_config.h>
+#include <stdint.h>
+
+void arm_cl2_config(vaddr_t pl310_base)
+{
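+	/*
+	 * The latency and auxiliary control registers below can only safely
+	 * be programmed while the cache controller is disabled, so the
+	 * controller is switched off first and re-enabled later through
+	 * arm_cl2_enable().
+	 */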
+ /* Disable PL310 */
+ write32(0, pl310_base + PL310_CTRL);
+
+ write32(PL310_TAG_RAM_CTRL_INIT, pl310_base + PL310_TAG_RAM_CTRL);
+ write32(PL310_DATA_RAM_CTRL_INIT, pl310_base + PL310_DATA_RAM_CTRL);
+ write32(PL310_AUX_CTRL_INIT, pl310_base + PL310_AUX_CTRL);
+ write32(PL310_PREFETCH_CTRL_INIT, pl310_base + PL310_PREFETCH_CTRL);
+ write32(PL310_POWER_CTRL_INIT, pl310_base + PL310_POWER_CTRL);
+
+ /* invalidate all cache ways */
+ arm_cl2_invbyway(pl310_base);
+}
+
+void arm_cl2_enable(vaddr_t pl310_base)
+{
+ uint32_t val;
+
+ /* Enable PL310 ctrl -> only set lsb bit */
+ write32(1, pl310_base + PL310_CTRL);
+
+	/*
+	 * If full line of zeros is enabled in L2 (AUX_CTRL bit 0), enable
+	 * it in L1 as well (ACTLR bit 3, WFLZ)
+	 */
+ val = read32(pl310_base + PL310_AUX_CTRL);
+ if (val & 1)
+ write_actlr(read_actlr() | (1 << 3));
+}
diff --git a/core/arch/arm/plat-imx/kern.ld.S b/core/arch/arm/plat-imx/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-imx/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-imx/link.mk b/core/arch/arm/plat-imx/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-imx/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-imx/main.c b/core/arch/arm/plat-imx/main.c
new file mode 100644
index 0000000..edfbc37
--- /dev/null
+++ b/core/arch/arm/plat-imx/main.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm32.h>
+#include <console.h>
+#include <drivers/gic.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+
+#if defined(PLATFORM_FLAVOR_mx6qsabrelite) || \
+ defined(PLATFORM_FLAVOR_mx6qsabresd)
+#include <kernel/tz_ssvce_pl310.h>
+#endif
+
+static void main_fiq(void);
+static struct gic_data gic_data;
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GIC_BASE, CORE_MMU_DEVICE_SIZE);
+
+#if defined(PLATFORM_FLAVOR_mx6qsabrelite) || \
+ defined(PLATFORM_FLAVOR_mx6qsabresd)
+register_phys_mem(MEM_AREA_IO_SEC, PL310_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, SRC_BASE, CORE_MMU_DEVICE_SIZE);
+#endif
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+#if defined(PLATFORM_FLAVOR_mx6qsabrelite) || \
+ defined(PLATFORM_FLAVOR_mx6qsabresd)
+void plat_cpu_reset_late(void)
+{
+ uintptr_t addr;
+
+ if (!get_core_pos()) {
+ /* primary core */
+#if defined(CFG_BOOT_SYNC_CPU)
+ /* set secondary entry address and release core */
+ write32(CFG_TEE_LOAD_ADDR, SRC_BASE + SRC_GPR1 + 8);
+ write32(CFG_TEE_LOAD_ADDR, SRC_BASE + SRC_GPR1 + 16);
+ write32(CFG_TEE_LOAD_ADDR, SRC_BASE + SRC_GPR1 + 24);
+
+ write32(SRC_SCR_CPU_ENABLE_ALL, SRC_BASE + SRC_SCR);
+#endif
+
+ /* SCU config */
+ write32(SCU_INV_CTRL_INIT, SCU_BASE + SCU_INV_SEC);
+ write32(SCU_SAC_CTRL_INIT, SCU_BASE + SCU_SAC);
+ write32(SCU_NSAC_CTRL_INIT, SCU_BASE + SCU_NSAC);
+
+ /* SCU enable */
+ write32(read32(SCU_BASE + SCU_CTRL) | 0x1,
+ SCU_BASE + SCU_CTRL);
+
+ /* configure imx6 CSU */
+
+ /* first grant all peripherals */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(CSU_ACCESS_ALL, addr);
+
+ /* lock the settings */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(read32(addr) | CSU_SETTING_LOCK, addr);
+ }
+}
+#endif
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE,
+ MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ vaddr_t base = console_base();
+
+ imx_uart_init(base);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ /* If \n, also do \r */
+ if (ch == '\n')
+ imx_uart_putc('\r', base);
+ imx_uart_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ vaddr_t base = console_base();
+
+ imx_uart_flush_tx_fifo(base);
+}
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ /* Initialize GIC */
+ gic_init(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+}
+
+#if defined(PLATFORM_FLAVOR_mx6qsabrelite) || \
+ defined(PLATFORM_FLAVOR_mx6qsabresd)
+vaddr_t pl310_base(void)
+{
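+	/*
+	 * __early_bss: like the __data annotation in psci.c, presumably so
+	 * the cached address survives being referenced before the regular
+	 * .bss section is cleared.
+	 */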
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(PL310_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return PL310_BASE;
+}
+
+void main_secondary_init_gic(void)
+{
+ gic_cpu_init(&gic_data);
+}
+#endif
diff --git a/core/arch/arm/plat-imx/platform_config.h b/core/arch/arm/plat-imx/platform_config.h
new file mode 100644
index 0000000..8e55ee8
--- /dev/null
+++ b/core/arch/arm/plat-imx/platform_config.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#define STACK_ALIGNMENT 64
+
+/* For i.MX 6UltraLite EVK board */
+
+#if defined(PLATFORM_FLAVOR_mx6ulevk)
+
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform mx6ulevk"
+#endif
+#ifdef CFG_WITH_LPAE
+#error "LPAE not supported for now"
+#endif
+
+#define GIC_BASE 0xA00000
+#define GIC_SIZE 0x8000
+#define GICC_OFFSET 0x2000
+#define GICD_OFFSET 0x1000
+#define UART0_BASE 0x2020000
+#define UART1_BASE 0x21E8000
+#define UART2_BASE 0x21EC000
+
+#define AHB1_BASE 0x02000000
+#define AHB1_SIZE 0x100000
+#define AHB2_BASE 0x02100000
+#define AHB2_SIZE 0x100000
+#define AHB3_BASE 0x02200000
+#define AHB3_SIZE 0x100000
+
+#define AIPS_TZ1_BASE_ADDR 0x02000000
+#define AIPS1_OFF_BASE_ADDR (AIPS_TZ1_BASE_ADDR + 0x80000)
+
+#define DRAM0_BASE 0x80000000
+#define DRAM0_SIZE 0x20000000
+
+#define CFG_TEE_CORE_NB_CORE 1
+
+#define DDR_PHYS_START DRAM0_BASE
+#define DDR_SIZE DRAM0_SIZE
+
+#define CFG_DDR_START DDR_PHYS_START
+#define CFG_DDR_SIZE DDR_SIZE
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_SHMEM_START (TZDRAM_BASE - 0x100000)
+#define CFG_SHMEM_SIZE 0x100000
+
+/* Location of trusted dram on imx */
+#define TZDRAM_BASE (0x9c100000)
+#define TZDRAM_SIZE (0x03000000)
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+/*
+ * Everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
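+
+/*
+ * With the values above this resolves to (assuming the usual 1 MByte
+ * CORE_MMU_DEVICE_SIZE, LPAE being unsupported on this flavor):
+ *   TEE_RAM: 0x9C100000..0x9C1FFFFF
+ *   TA_RAM:  0x9C200000..0x9F0FFFFF
+ *   SHM:     0x9C000000..0x9C0FFFFF (non-secure, just below TZDRAM)
+ */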
+
+#define CONSOLE_UART_BASE (UART0_BASE)
+
+/* Central Security Unit register values */
+#define CSU_BASE 0x021C0000
+#define CSU_CSL_START 0x0
+#define CSU_CSL_END 0xA0
+#define CSU_ACCESS_ALL 0x00FF00FF
+#define CSU_SETTING_LOCK 0x01000100
+
+/* For i.MX6 Quad SABRE Lite and Smart Device board */
+
+#elif defined(PLATFORM_FLAVOR_mx6qsabrelite) || \
+ defined(PLATFORM_FLAVOR_mx6qsabresd)
+
+#define SCU_BASE 0x00A00000
+#define PL310_BASE 0x00A02000
+#define SRC_BASE 0x020D8000
+#define SRC_SCR 0x000
+#define SRC_GPR1 0x020
+#define SRC_SCR_CPU_ENABLE_ALL SHIFT_U32(0x7, 22)
+#define SRC_SCR_CORE1_RST_OFFSET 14
+#define SRC_SCR_CORE1_ENABLE_OFFSET 22
+#define GIC_BASE 0x00A00000
+#define GICC_OFFSET 0x100
+#define GICD_OFFSET 0x1000
+#define GIC_CPU_BASE (GIC_BASE + GICC_OFFSET)
+#define GIC_DIST_BASE (GIC_BASE + GICD_OFFSET)
+#define UART1_BASE 0x02020000
+#define UART2_BASE 0x021E8000
+
+/* Central Security Unit register values */
+#define CSU_BASE 0x021C0000
+#define CSU_CSL_START 0x0
+#define CSU_CSL_END 0xA0
+#define CSU_CSL5 0x14
+#define CSU_CSL16 0x40
+#define CSU_ACCESS_ALL 0x00FF00FF
+#define CSU_SETTING_LOCK 0x01000100
+
+#if defined(PLATFORM_FLAVOR_mx6qsabrelite)
+#define CONSOLE_UART_BASE UART2_BASE
+#endif
+#if defined(PLATFORM_FLAVOR_mx6qsabresd)
+#define CONSOLE_UART_BASE UART1_BASE
+#endif
+#define DRAM0_BASE 0x10000000
+#define DRAM0_SIZE 0x40000000
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 4
+
+#define DDR_PHYS_START DRAM0_BASE
+#define DDR_SIZE DRAM0_SIZE
+
+#define CFG_DDR_START DDR_PHYS_START
+#define CFG_DDR_SIZE DDR_SIZE
+
+/*
+ * PL310 TAG RAM Control Register
+ *
+ * bit[10:8]:1 - 2 cycle of write accesses latency
+ * bit[6:4]:1 - 2 cycle of read accesses latency
+ * bit[2:0]:1 - 2 cycle of setup latency
+ */
+#ifndef PL310_TAG_RAM_CTRL_INIT
+#define PL310_TAG_RAM_CTRL_INIT 0x00000111
+#endif
+
+/*
+ * PL310 DATA RAM Control Register
+ *
+ * bit[10:8]:2 - 3 cycle of write accesses latency
+ * bit[6:4]:2 - 3 cycle of read accesses latency
+ * bit[2:0]:2 - 3 cycle of setup latency
+ */
+#ifndef PL310_DATA_RAM_CTRL_INIT
+#define PL310_DATA_RAM_CTRL_INIT 0x00000222
+#endif
+
+/*
+ * PL310 Auxiliary Control Register
+ *
+ * I/Dcache prefetch enabled (bit29:28=2b11)
+ * NS can access interrupts (bit27=1)
+ * NS can lock down cache lines (bit26=1)
+ * Pseudo-random replacement policy (bit25=0)
+ * Force write allocated (default)
+ * Shared attribute internally ignored (bit22=1, bit13=0)
+ * Parity disabled (bit21=0)
+ * Event monitor disabled (bit20=0)
+ * Platform flavor specific way config:
+ * - 64kB way size (bit19:17=3b011)
+ * - 16-way associativity (bit16=1)
+ * Store buffer device limitation enabled (bit11=1)
+ * Cacheable accesses have high prio (bit10=0)
+ * Full Line Zero (FLZ) disabled (bit0=0)
+ */
+#ifndef PL310_AUX_CTRL_INIT
+#define PL310_AUX_CTRL_INIT 0x3C470800
+#endif
+
+/*
+ * PL310 Prefetch Control Register
+ *
+ * Double linefill disabled (bit30=0)
+ * I/D prefetch enabled (bit29:28=2b11)
+ * Prefetch drop enabled (bit24=1)
+ * Incr double linefill disable (bit23=0)
+ * Prefetch offset = 7 (bit4:0)
+ */
+#define PL310_PREFETCH_CTRL_INIT 0x31000007
+
+/*
+ * PL310 Power Register
+ *
+ * Dynamic clock gating enabled
+ * Standby mode enabled
+ */
+#define PL310_POWER_CTRL_INIT 0x00000003
+
+/*
+ * SCU Invalidate Register
+ *
+ * Invalidate all SCU tag ways for all CPUs
+ */
+#define SCU_INV_CTRL_INIT 0xFFFFFFFF
+
+/*
+ * SCU Access Register
+ * - all CPUs may access the SCU in secure state
+ */
+#define SCU_SAC_CTRL_INIT 0x0000000F
+
+/*
+ * SCU NonSecure Access Register
+ * - all CPUs may access the SCU, private and global timers in non-secure state
+ */
+#define SCU_NSAC_CTRL_INIT 0x00000FFF
+
+/* define the memory areas */
+
+#ifdef CFG_WITH_PAGER
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +---------------------------------------+ <- CFG_CORE_TZSRAM_EMUL_START
+ * | TEE private highly | TEE_RAM | ^
+ * | secure memory | | | CFG_CORE_TZSRAM_EMUL_SIZE
+ * +---------------------------------------+ v
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TA_RAM | ^
+ * | external memory | | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : default 256kByte
+ * TA_RAM : all that is left in the DDR TEE reserved area
+ * PUB_RAM : default 1MByte (CFG_PUB_RAM_SIZE)
+ */
+
+/* emulated SRAM, at start of secure DDR */
+
+#define CFG_CORE_TZSRAM_EMUL_START 0x4E000000
+
+#define TZSRAM_BASE CFG_CORE_TZSRAM_EMUL_START
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+/* Location of trusted dram */
+
+#define CFG_DDR_TEETZ_RESERVED_START 0x4E100000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x01F00000
+
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+
+#define TZDRAM_BASE (CFG_DDR_TEETZ_RESERVED_START)
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_TA_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_SIZE TZDRAM_SIZE
+
+#define CFG_SHMEM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
+
+#define CFG_TEE_RAM_START TZSRAM_BASE
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR TZSRAM_BASE
+#endif
+
+#else /* CFG_WITH_PAGER */
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TEE_RAM | ^
+ * | external memory +------------------+ |
+ * | | TA_RAM | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : default 1MByte
+ * PUB_RAM : default 1MByte (CFG_PUB_RAM_SIZE)
+ * TA_RAM : all that is left
+ */
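+
+/*
+ * With the default values below this corresponds to:
+ *   TEE_RAM: 0x4E000000..0x4E0FFFFF
+ *   TA_RAM:  0x4E100000..0x4FEFFFFF
+ *   SHM:     0x4FF00000..0x4FFFFFFF (non-secure)
+ */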
+
+#define CFG_DDR_TEETZ_RESERVED_START 0x4E000000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x02000000
+
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_PH_SIZE (1 * 1024 * 1024)
+
+#define TZDRAM_BASE (CFG_DDR_TEETZ_RESERVED_START)
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_TA_RAM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ CFG_TEE_RAM_PH_SIZE)
+#define CFG_TA_RAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_TEE_RAM_PH_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_SHMEM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
+
+#define CFG_TEE_RAM_START TZDRAM_BASE
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR TZDRAM_BASE
+#endif
+
+#endif /* CFG_WITH_PAGER */
+
+#else
+#error "Unknown platform flavor"
+#endif /* defined(PLATFORM_FLAVOR_mx6ulevk) */
+
+#ifdef CFG_PL310
+/*
+ * PL310 TAG RAM Control Register
+ *
+ * bit[10:8]:1 - 2 cycle of write accesses latency
+ * bit[6:4]:1 - 2 cycle of read accesses latency
+ * bit[2:0]:1 - 2 cycle of setup latency
+ */
+#define PL310_TAG_RAM_CTRL_INIT 0x00000111
+
+/*
+ * DATA RAM Control Register
+ *
+ * bit[10:8]:2 - 3 cycle of write accesses latency
+ * bit[6:4]:2 - 3 cycle of read accesses latency
+ * bit[2:0]:2 - 3 cycle of setup latency
+ */
+#define PL310_DATA_RAM_CTRL_INIT 0x00000222
+
+/*
+ * Auxiliary Control Register
+ *
+ * I/Dcache prefetch enabled (bit29:28=2b11)
+ * NS can access interrupts (bit27=1)
+ * NS can lock down cache lines (bit26=1)
+ * Pseudo-random replacement policy (bit25=0)
+ * Force write allocated (default)
+ * Shared attribute internally ignored (bit22=1, bit13=0)
+ * Parity disabled (bit21=0)
+ * Event monitor disabled (bit20=0)
+ * 64kB ways, 16-way associativity (bit19:17=3b011 bit16=1)
+ * Store buffer device limitation enabled (bit11=1)
+ * Cacheable accesses have high prio (bit10=0)
+ * Full Line Zero (FLZ) disabled (bit0=0)
+ */
+#define PL310_AUX_CTRL_INIT 0x3C470800
+
+/*
+ * Prefetch Control Register
+ *
+ * Double linefill disabled (bit30=0)
+ * I/D prefetch enabled (bit29:28=2b11)
+ * Prefetch drop enabled (bit24=1)
+ * Incr double linefill disable (bit23=0)
+ * Prefetch offset = 7 (bit4:0)
+ */
+#define PL310_PREFETCH_CTRL_INIT 0x31000007
+
+/*
+ * Power Register = 0x00000003
+ *
+ * Dynamic clock gating enabled
+ * Standby mode enabled
+ */
+#define PL310_POWER_CTRL_INIT 0x00000003
+
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-imx/psci.c b/core/arch/arm/plat-imx/psci.c
new file mode 100644
index 0000000..065555b
--- /dev/null
+++ b/core/arch/arm/plat-imx/psci.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <sm/psci.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+
+static vaddr_t src_base(void)
+{
+ static void *va __data; /* in case it's used before .bss is cleared */
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(SRC_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return SRC_BASE;
+}
+
+int psci_cpu_on(uint32_t core_idx, uint32_t entry,
+ uint32_t context_id __attribute__((unused)))
+{
+ uint32_t val;
+ vaddr_t va = src_base();
+
+ if ((core_idx == 0) || (core_idx >= CFG_TEE_CORE_NB_CORE))
+ return PSCI_RET_INVALID_PARAMETERS;
+
+ /* set secondary cores' NS entry addresses */
+ ns_entry_addrs[core_idx] = entry;
+
+ /* boot secondary cores from OP-TEE load address */
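+	/*
+	 * The entry registers are 8 bytes apart starting at SRC_GPR1 + 8,
+	 * i.e. SRC_GPR3/5/7 hold the entry addresses for cores 1-3.
+	 */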
+ write32((uint32_t)CFG_TEE_LOAD_ADDR, va + SRC_GPR1 + core_idx * 8);
+
+ /* release secondary core */
+ val = read32(va + SRC_SCR);
+ val |= BIT32(SRC_SCR_CORE1_ENABLE_OFFSET + (core_idx - 1));
+ val |= BIT32(SRC_SCR_CORE1_RST_OFFSET + (core_idx - 1));
+ write32(val, va + SRC_SCR);
+
+ return PSCI_RET_SUCCESS;
+}
diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk
new file mode 100644
index 0000000..d0a2f51
--- /dev/null
+++ b/core/arch/arm/plat-imx/sub.mk
@@ -0,0 +1,9 @@
+global-incdirs-y += .
+srcs-y += main.c
+
+srcs-$(CFG_PL310) += imx_pl310.c
+srcs-$(CFG_PSCI_ARM32) += psci.c
+
+srcs-$(PLATFORM_FLAVOR_mx6qsabrelite) += a9_plat_init.S
+srcs-$(PLATFORM_FLAVOR_mx6qsabresd) += a9_plat_init.S
+srcs-$(PLATFORM_FLAVOR_mx6ulevk) += imx6ul.c
diff --git a/core/arch/arm/plat-ls/conf.mk b/core/arch/arm/plat-ls/conf.mk
new file mode 100644
index 0000000..e7385fd
--- /dev/null
+++ b/core/arch/arm/plat-ls/conf.mk
@@ -0,0 +1,20 @@
+PLATFORM_FLAVOR ?= ls1021atwr
+
+arm32-platform-cpuarch := cortex-a7
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_16550_UART,y)
+$(call force,CFG_PM_STUBS,y)
+
+ta-targets = ta_arm32
+
+CFG_BOOT_SYNC_CPU ?= y
+CFG_BOOT_SECONDARY_REQUEST ?= y
+CFG_CRYPTO_SIZE_OPTIMIZATION ?= n
+CFG_WITH_STACK_CANARIES ?= y
diff --git a/core/arch/arm/plat-ls/kern.ld.S b/core/arch/arm/plat-ls/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-ls/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-ls/link.mk b/core/arch/arm/plat-ls/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-ls/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-ls/ls_core_pos.S b/core/arch/arm/plat-ls/ls_core_pos.S
new file mode 100644
index 0000000..21cfb4b
--- /dev/null
+++ b/core/arch/arm/plat-ls/ls_core_pos.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+/* Layerscape platform specific function to calculate core position. */
+FUNC get_core_pos , :
+UNWIND( .fnstart)
+ read_mpidr r0
+ /* Calculate CorePos = CoreId */
+ and r0, r0, #MPIDR_CPU_MASK
+ bx lr
+UNWIND( .fnend)
+END_FUNC get_core_pos
diff --git a/core/arch/arm/plat-ls/main.c b/core/arch/arm/plat-ls/main.c
new file mode 100644
index 0000000..23ac0c6
--- /dev/null
+++ b/core/arch/arm/plat-ls/main.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <arm32.h>
+#include <console.h>
+#include <drivers/gic.h>
+#include <drivers/ns16550.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/thread.h>
+#include <kernel/tz_ssvce_def.h>
+#include <mm/core_memprot.h>
+#include <sm/optee_smc.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+static struct gic_data gic_data;
+
+register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GIC_BASE, CORE_MMU_DEVICE_SIZE);
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+void plat_cpu_reset_late(void)
+{
+ static uint32_t cntfrq __early_bss;
+ vaddr_t addr;
+
+ if (!get_core_pos()) {
+ /* read cnt freq */
+ cntfrq = read_cntfrq();
+
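+	/*
+	 * The DCFG and CSU register values below are byte-swapped with
+	 * __compiler_bswap32(), presumably because these register blocks
+	 * are big-endian on this SoC while the core runs little-endian.
+	 */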
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+ /* set secondary entry address */
+ write32(__compiler_bswap32(CFG_TEE_LOAD_ADDR),
+ DCFG_BASE + DCFG_SCRATCHRW1);
+
+ /* release secondary cores */
+ write32(__compiler_bswap32(0x1 << 1), /* cpu1 */
+ DCFG_BASE + DCFG_CCSR_BRR);
+ dsb();
+ sev();
+#endif
+
+ /* configure CSU */
+
+ /* first grant all peripherals */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(__compiler_bswap32(CSU_ACCESS_ALL), addr);
+
+		/* restrict key peripherals from NS */
+ write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
+ CSU_BASE + CSU_CSL30);
+ write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
+ CSU_BASE + CSU_CSL37);
+
+ /* lock the settings */
+ for (addr = CSU_BASE + CSU_CSL_START;
+ addr != CSU_BASE + CSU_CSL_END;
+ addr += 4)
+ write32(read32(addr) |
+ __compiler_bswap32(CSU_SETTING_LOCK),
+ addr);
+ } else {
+ /* program the cntfrq, the cntfrq is banked for each core */
+ write_cntfrq(cntfrq);
+ }
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ /*
+	 * Do nothing; the UART driver is shared with the normal world and
+	 * everything needed for UART initialization is done in the bootloader.
+ */
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ ns16550_putc('\r', base);
+ ns16550_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ ns16550_flush(console_base());
+}
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ /* Initialize GIC */
+ gic_init(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+}
+
+void main_secondary_init_gic(void)
+{
+ gic_cpu_init(&gic_data);
+}
diff --git a/core/arch/arm/plat-ls/plat_init.S b/core/arch/arm/plat-ls/plat_init.S
new file mode 100644
index 0000000..81ba7d7
--- /dev/null
+++ b/core/arch/arm/plat-ls/plat_init.S
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Entry points for the platform early inits.
+ * It is assumed that no stack is available when these routines are called.
+ * It is assumed that each routine is called with the return address in LR
+ * and with ARM registers R0, R1, R2 and R3 being scratchable.
+ */
+
+#include <arm32.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * platform early configuration
+ *
+ * Use scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Trap CPU in case of error.
+ */
+FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
+
+ /*
+ * Disallow NSec to mask FIQ [bit4: FW=0]
+ * Allow NSec to manage Imprecise Abort [bit5: AW=1]
+ * Imprecise Abort trapped to Abort Mode [bit3: EA=0]
+ * In Sec world, FIQ trapped to FIQ Mode [bit2: FIQ=0]
+ * IRQ always trapped to IRQ Mode [bit1: IRQ=0]
+ * Secure World [bit0: NS=0]
+ */
+ mov r0, #SCR_AW
+ write_scr r0 /* write Secure Configuration Register */
+
+ /*
+ * Mandated HW config loaded
+ *
+ * SCTLR = 0x00000000
+ *
+ * ACTRL = 0x00000040
+ * - core always in full SMP (SMP bit6=1)
+ *
+ * NSACR = 0x00020C00
+ * - NSec cannot change ACTRL.SMP (NS_SMP bit18=0)
+ * - NSec can use SIMD/VFP (CP10/CP11) (bit15:14=2b00, bit11:10=2b11)
+ */
+ movw r0, #0x0000
+ movt r0, #0x0000
+ write_sctlr r0
+
+ movw r0, #0x0040
+ movt r0, #0x0000
+ write_actlr r0
+
+ movw r0, #0x0C00
+ movt r0, #0x0000
+ write_nsacr r0
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC plat_cpu_reset_early
diff --git a/core/arch/arm/plat-ls/platform_config.h b/core/arch/arm/plat-ls/platform_config.h
new file mode 100644
index 0000000..7f0adc6
--- /dev/null
+++ b/core/arch/arm/plat-ls/platform_config.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#define STACK_ALIGNMENT 64
+
+#define GIC_BASE 0x01400000
+#define GICC_OFFSET 0x2000
+#define GICD_OFFSET 0x1000
+
+#define DCFG_BASE 0x01EE0000
+#define DCFG_CCSR_BRR 0xE4
+#define DCFG_SCRATCHRW1 0x200
+
+#define CSU_BASE 0x01510000
+#define CSU_CSL_START 0x0
+#define CSU_CSL_END 0xE8
+#define CSU_CSL30 0x78
+#define CSU_CSL37 0x94
+
+/* Central Security Unit register values */
+#define CSU_ACCESS_ALL 0x00FF00FF
+#define CSU_ACCESS_SEC_ONLY 0x003F003F
+#define CSU_SETTING_LOCK 0x01000100
+
+/* DUART 1 */
+#define UART0_BASE 0x021C0500
+/* DUART 2 */
+#define UART1_BASE 0x021D0500
+/* LPUART 1 */
+#define UART2_BASE 0x02950000
+/* LPUART 2 */
+#define UART3_BASE 0x02960000
+
+
+/* console uart define */
+#define CONSOLE_UART_BASE UART0_BASE
+
+#define DRAM0_BASE 0x80000000
+#if defined(PLATFORM_FLAVOR_ls1021aqds)
+#define DRAM0_SIZE 0x80000000
+#endif
+
+#if defined(PLATFORM_FLAVOR_ls1021atwr)
+#define DRAM0_SIZE 0x40000000
+#endif
+
+/* Location of trusted dram on layerscape */
+
+#if defined(PLATFORM_FLAVOR_ls1021atwr)
+#define CFG_DDR_TEETZ_RESERVED_START 0xBC000000
+#endif
+
+#if defined(PLATFORM_FLAVOR_ls1021aqds)
+#define CFG_DDR_TEETZ_RESERVED_START 0xFC000000
+#endif
+
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x03F00000
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#define DDR_PHYS_START DRAM0_BASE
+#define DDR_SIZE DRAM0_SIZE
+
+#define CFG_DDR_START DDR_PHYS_START
+#define CFG_DDR_SIZE DDR_SIZE
+
+#ifndef CFG_DDR_TEETZ_RESERVED_START
+#error "TEETZ reserved DDR start address undef: CFG_DDR_TEETZ_RESERVED_START"
+#endif
+#ifndef CFG_DDR_TEETZ_RESERVED_SIZE
+#error "TEETZ reserved DDR size undefined: CFG_DDR_TEETZ_RESERVED_SIZE"
+#endif
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +-----------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEETZ private RAM | TEE_RAM | ^
+ * | +--------------------+ |
+ * | | TA_RAM | |
+ * +-----------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | | teecore alloc | |
+ * | TEE/TZ and NSec | PUB_RAM --------| |
+ * | shared memory | NSec alloc | |
+ * +-----------------------------------------+ v
+ *
+ * TEE_RAM : 1MByte
+ * PUB_RAM : 1MByte
+ * TA_RAM : all that is left (at least 2MByte!)
+ */
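+
+/*
+ * For the ls1021atwr flavor the definitions below resolve to:
+ *   TEE_RAM: 0xBC000000..0xBC0FFFFF
+ *   TA_RAM:  0xBC100000..0xBFDFFFFF
+ *   SHM:     0xBFE00000..0xBFEFFFFF (non-secure)
+ */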
+
+/* define the several memory area sizes */
+#if (CFG_DDR_TEETZ_RESERVED_SIZE < (4 * 1024 * 1024))
+#error "Invalid CFG_DDR_TEETZ_RESERVED_SIZE: at least 4MB expected"
+#endif
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_PH_SIZE (1 * 1024 * 1024)
+#define CFG_TA_RAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_TEE_RAM_PH_SIZE - CFG_PUB_RAM_SIZE)
+
+/* define the secure/unsecure memory areas */
+#define TZDRAM_BASE (CFG_DDR_TEETZ_RESERVED_START)
+#define TZDRAM_SIZE (CFG_TEE_RAM_PH_SIZE + CFG_TA_RAM_SIZE)
+
+#define CFG_SHMEM_START (TZDRAM_BASE + TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
+
+/* define the memory areas (TEE_RAM must start at the reserved DDR start address) */
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START (CFG_TEE_RAM_START + \
+ CFG_TEE_RAM_PH_SIZE)
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-ls/sub.mk b/core/arch/arm/plat-ls/sub.mk
new file mode 100644
index 0000000..8bee406
--- /dev/null
+++ b/core/arch/arm/plat-ls/sub.mk
@@ -0,0 +1,4 @@
+global-incdirs-y += .
+srcs-y += main.c
+srcs-y += ls_core_pos.S
+srcs-y += plat_init.S
diff --git a/core/arch/arm/plat-mediatek/conf.mk b/core/arch/arm/plat-mediatek/conf.mk
new file mode 100644
index 0000000..81642b6
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/conf.mk
@@ -0,0 +1,25 @@
+PLATFORM_FLAVOR ?= mt8173
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a15
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_8250_UART,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+CFG_WITH_STACK_CANARIES ?= y
diff --git a/core/arch/arm/plat-mediatek/kern.ld.S b/core/arch/arm/plat-mediatek/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-mediatek/link.mk b/core/arch/arm/plat-mediatek/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-mediatek/main.c b/core/arch/arm/plat-mediatek/main.c
new file mode 100644
index 0000000..7780591
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/main.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <drivers/serial8250_uart.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ serial8250_uart_init(console_base(), CONSOLE_UART_CLK_IN_HZ,
+ CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ serial8250_uart_putc('\r', base);
+ serial8250_uart_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ serial8250_uart_flush_tx_fifo(console_base());
+}
diff --git a/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S b/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S
new file mode 100644
index 0000000..227deb3
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+FUNC get_core_pos , :
+UNWIND( .fnstart)
+ read_mpidr r0
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ /*
+	 * The number of cores per cluster is 2, so we should have the
+	 * following mapping:
+ * MPIDR core_pos
+ * 0x0000 -> 0
+ * 0x0001 -> 1
+ * 0x0100 -> 2
+ * 0x0101 -> 3
+ */
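+	/*
+	 * The cluster id (Aff1) sits in MPIDR bits [15:8]; shifting it right
+	 * by 7 multiplies it by 2 (the number of cores per cluster) before
+	 * the cpu id is added.
+	 */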
+ add r0, r1, r0, LSR #7
+ bx lr
+UNWIND( .fnend)
+END_FUNC get_core_pos
+
diff --git a/core/arch/arm/plat-mediatek/mt8173_core_pos_a64.S b/core/arch/arm/plat-mediatek/mt8173_core_pos_a64.S
new file mode 100644
index 0000000..a1c3b3e
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/mt8173_core_pos_a64.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+
+FUNC get_core_pos , :
+ mrs x0, mpidr_el1
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ /*
+	 * The number of cores per cluster is 2, so we should have the
+	 * following mapping:
+
+ * MPIDR core_pos
+ * 0x0000 -> 0
+ * 0x0001 -> 1
+ * 0x0100 -> 2
+ * 0x0101 -> 3
+ */
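+	/* Shifting Aff1 (MPIDR bits [15:8]) right by 7 gives cluster_id * 2 */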
+ add x0, x1, x0, LSR #7
+ ret
+END_FUNC get_core_pos
+
diff --git a/core/arch/arm/plat-mediatek/platform_config.h b/core/arch/arm/plat-mediatek/platform_config.h
new file mode 100644
index 0000000..7fe2f93
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/platform_config.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /*ARM64*/
+
+#if defined(PLATFORM_FLAVOR_mt8173)
+
+#define GIC_BASE 0x10220000
+#define GICC_OFFSET 0x2000
+#define GICD_OFFSET 0x1000
+
+#define UART0_BASE 0x11002000
+#define UART1_BASE 0x11003000
+#define UART2_BASE 0x11004000
+#define UART3_BASE 0x11005000
+
+#define CONSOLE_UART_BASE UART0_BASE
+#define CONSOLE_BAUDRATE 921600
+#define CONSOLE_UART_CLK_IN_HZ 26000000
+
+#define DRAM0_BASE 0x40000000
+#define DRAM0_SIZE 0x80000000
+
+/* Location of trusted dram */
+#define TZDRAM_BASE 0xBE000000
+#define TZDRAM_SIZE 0x02000000
+
+#define CFG_TEE_CORE_NB_CORE 4
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_SHMEM_START (TZDRAM_BASE - 0x200000)
+#define CFG_SHMEM_SIZE 0x200000
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+/*
+ * Everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#define DEVICE0_PA_BASE ROUNDDOWN(CONSOLE_UART_BASE, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_VA_BASE DEVICE0_PA_BASE
+#define DEVICE0_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE0_TYPE MEM_AREA_IO_NSEC
+
+#define DEVICE1_PA_BASE ROUNDDOWN(GIC_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_VA_BASE DEVICE1_PA_BASE
+#define DEVICE1_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE1_TYPE MEM_AREA_IO_SEC
+
+#ifdef CFG_WITH_LPAE
+#define MAX_XLAT_TABLES 5
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-mediatek/sub.mk b/core/arch/arm/plat-mediatek/sub.mk
new file mode 100644
index 0000000..95a8571
--- /dev/null
+++ b/core/arch/arm/plat-mediatek/sub.mk
@@ -0,0 +1,6 @@
+global-incdirs-y += .
+srcs-y += main.c
+ifeq ($(PLATFORM_FLAVOR),mt8173)
+srcs-$(CFG_ARM32_core) += mt8173_core_pos_a32.S
+srcs-$(CFG_ARM64_core) += mt8173_core_pos_a64.S
+endif
diff --git a/core/arch/arm/plat-rcar/conf.mk b/core/arch/arm/plat-rcar/conf.mk
new file mode 100644
index 0000000..caf26c8
--- /dev/null
+++ b/core/arch/arm/plat-rcar/conf.mk
@@ -0,0 +1,27 @@
+PLATFORM_FLAVOR ?= h3
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a57
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+$(call force,CFG_SCIF,y)
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+ifeq ($(CFG_ARM32_core),y)
+ta-targets = ta_arm32
+endif
+
+CFG_WITH_STACK_CANARIES ?= y
diff --git a/core/arch/arm/plat-rcar/kern.ld.S b/core/arch/arm/plat-rcar/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-rcar/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-rcar/link.mk b/core/arch/arm/plat-rcar/link.mk
new file mode 100644
index 0000000..763298f
--- /dev/null
+++ b/core/arch/arm/plat-rcar/link.mk
@@ -0,0 +1,7 @@
+include core/arch/arm/kernel/link.mk
+
+all: $(link-out-dir)/tee.srec
+cleanfiles += $(link-out-dir)/tee.srec
+$(link-out-dir)/tee.srec: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(OBJCOPYcore) -O srec $< $@
diff --git a/core/arch/arm/plat-rcar/main.c b/core/arch/arm/plat-rcar/main.c
new file mode 100644
index 0000000..8f9482e
--- /dev/null
+++ b/core/arch/arm/plat-rcar/main.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, GlobalLogic
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+#include <drivers/scif.h>
+#include <drivers/gic.h>
+
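+/* Statically map the console UART and GIC registers as secure I/O regions */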
+register_phys_mem(MEM_AREA_IO_SEC, CONSOLE_UART_BASE, SCIF_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GICD_BASE, GIC_DIST_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GICC_BASE, GIC_DIST_REG_SIZE);
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ scif_uart_init(console_base());
+}
+
+void console_putc(int ch)
+{
+ if (ch == '\n')
+ scif_uart_putc('\r', console_base());
+ scif_uart_putc(ch, console_base());
+}
+
+void console_flush(void)
+{
+ scif_uart_flush(console_base());
+}
diff --git a/core/arch/arm/plat-rcar/platform_config.h b/core/arch/arm/plat-rcar/platform_config.h
new file mode 100644
index 0000000..287689d
--- /dev/null
+++ b/core/arch/arm/plat-rcar/platform_config.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, GlobalLogic
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /*ARM64*/
+
+#define GIC_BASE 0xF1000000
+#define GICC_BASE 0xF1020000
+#define GICD_BASE 0xF1010000
+
+#define CONSOLE_UART_BASE 0xE6E88000
+
+#define DRAM0_BASE 0x44000000
+#define DRAM0_SIZE 0x04000000
+
+/* Location of trusted dram */
+#define TZDRAM_BASE 0x44000000
+#define TZDRAM_SIZE 0x03E00000
+
+#define CFG_TEE_CORE_NB_CORE 8
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_SHMEM_START (TZDRAM_BASE + TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE 0x100000
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+/*
+ * Everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START (TZDRAM_BASE + 0x00100000)
+#define CFG_TA_RAM_START ROUNDUP((CFG_TEE_RAM_START + \
+ CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-rcar/sub.mk b/core/arch/arm/plat-rcar/sub.mk
new file mode 100644
index 0000000..8ddc2fd
--- /dev/null
+++ b/core/arch/arm/plat-rcar/sub.mk
@@ -0,0 +1,2 @@
+global-incdirs-y += .
+srcs-y += main.c
diff --git a/core/arch/arm/plat-rpi3/conf.mk b/core/arch/arm/plat-rpi3/conf.mk
new file mode 100644
index 0000000..567680a
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/conf.mk
@@ -0,0 +1,39 @@
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a53
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_8250_UART,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+CFG_NUM_THREADS ?= 4
+CFG_CRYPTO_WITH_CE ?= n
+CFG_WITH_STACK_CANARIES ?= y
+
+CFG_TEE_CORE_EMBED_INTERNAL_TESTS ?= y
+CFG_TEE_FS_KEY_MANAGER_TEST ?= y
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
+
+arm32-platform-cflags += -Wno-error=cast-align
+arm64-platform-cflags += -Wno-error=cast-align
+
+$(call force,CFG_CRYPTO_SHA256_ARM32_CE,n)
+$(call force,CFG_CRYPTO_SHA256_ARM64_CE,n)
+$(call force,CFG_CRYPTO_SHA1_ARM32_CE,n)
+$(call force,CFG_CRYPTO_SHA1_ARM64_CE,n)
+$(call force,CFG_CRYPTO_AES_ARM64_CE,n)
diff --git a/core/arch/arm/plat-rpi3/kern.ld.S b/core/arch/arm/plat-rpi3/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-rpi3/link.mk b/core/arch/arm/plat-rpi3/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-rpi3/main.c b/core/arch/arm/plat-rpi3/main.c
new file mode 100644
index 0000000..9270e19
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/main.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016, Sequitur Labs Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <drivers/serial8250_uart.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_pager.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static vaddr_t console_base(void)
+{
+ static vaddr_t va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = (vaddr_t)phys_to_virt(CONSOLE_UART_BASE,
+ MEM_AREA_IO_NSEC);
+ return va;
+ }
+
+ return CONSOLE_UART_BASE;
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ serial8250_uart_putc('\r', base);
+ serial8250_uart_putc(ch, base);
+}
+
+void console_init(void)
+{
+ serial8250_uart_init(console_base(), CONSOLE_UART_CLK_IN_HZ,
+ CONSOLE_BAUDRATE);
+}
+
+void console_flush(void)
+{
+ serial8250_uart_flush_tx_fifo(console_base());
+}
diff --git a/core/arch/arm/plat-rpi3/platform_config.h b/core/arch/arm/plat-rpi3/platform_config.h
new file mode 100644
index 0000000..ac53e26
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/platform_config.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, Sequitur Labs Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /* ARM64 */
+
+/* 16550 UART */
+#define CONSOLE_UART_BASE 0x3f215040 /* UART0 */
+#define CONSOLE_BAUDRATE 115200
+#define CONSOLE_UART_CLK_IN_HZ 19200000
+
+/*
+ * RPi memory map
+ *
+ * No secure memory on RPi...
+ *
+ *
+ * Available to Linux <above>
+ * 0x0a00_0000
+ * TA RAM: 16 MiB |
+ * 0x0842_0000 | TZDRAM
+ * TEE RAM: 4 MiB (CFG_TEE_RAM_VA_SIZE) |
+ * 0x0840_0000 [ARM Trusted Firmware ] -
+ * 0x0840_0000 [TZDRAM_BASE, BL32_LOAD_ADDR] -
+ * Shared memory: 4 MiB |
+ * 0x0800_0000 | DRAM0
+ * Available to Linux |
+ * 0x0000_0000 [DRAM0_BASE] -
+ *
+ */
+
+#define DRAM0_BASE 0x00000000
+#define DRAM0_SIZE 0x40000000
+
+/* Below ARM-TF */
+#define CFG_SHMEM_START (0x08000000)
+#define CFG_SHMEM_SIZE (4 * 1024 * 1024)
+
+#define TZDRAM_BASE (CFG_SHMEM_START + CFG_SHMEM_SIZE)
+#define TZDRAM_SIZE (32 * 1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 4
+
+#define CFG_TEE_RAM_VA_SIZE (4 * 1024 * 1024)
+
+#define CFG_TEE_LOAD_ADDR (TZDRAM_BASE + 0x20000)
+
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#define CFG_TA_RAM_SIZE		(16 * 1024 * 1024)
+
+#define DEVICE0_BASE ROUNDDOWN(CONSOLE_UART_BASE, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_PA_BASE DEVICE0_BASE
+#define DEVICE0_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE0_TYPE MEM_AREA_IO_NSEC
+
+#endif /* PLATFORM_CONFIG_H */
diff --git a/core/arch/arm/plat-rpi3/sub.mk b/core/arch/arm/plat-rpi3/sub.mk
new file mode 100644
index 0000000..8ddc2fd
--- /dev/null
+++ b/core/arch/arm/plat-rpi3/sub.mk
@@ -0,0 +1,2 @@
+global-incdirs-y += .
+srcs-y += main.c
diff --git a/core/arch/arm/plat-sprd/conf.mk b/core/arch/arm/plat-sprd/conf.mk
new file mode 100644
index 0000000..137214f
--- /dev/null
+++ b/core/arch/arm/plat-sprd/conf.mk
@@ -0,0 +1,28 @@
+PLATFORM_FLAVOR ?= sc9860
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a15
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_SPRD_UART,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+
+CFG_WITH_STACK_CANARIES ?= y
+# Overrides default in mk/config.mk with 128 kB
+CFG_CORE_HEAP_SIZE ?= 131072
diff --git a/core/arch/arm/plat-sprd/console.c b/core/arch/arm/plat-sprd/console.c
new file mode 100644
index 0000000..3263d69
--- /dev/null
+++ b/core/arch/arm/plat-sprd/console.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, Spreadtrum Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <console.h>
+#include <drivers/sprd_uart.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+
+static vaddr_t console_base(void)
+{
+ static vaddr_t base;
+
+ if (cpu_mmu_enabled())
+ base = (vaddr_t)phys_to_virt(CONSOLE_UART_BASE,
+ MEM_AREA_IO_SEC);
+ else
+ base = CONSOLE_UART_BASE;
+
+ return base;
+}
+
+/* Do nothing in this function */
+void console_init(void)
+{
+}
+
+void console_putc(int ch)
+{
+ sprd_uart_putc(console_base(), (unsigned char)(ch & 0xff));
+}
+
+void console_flush(void)
+{
+ sprd_uart_flush(console_base());
+}
diff --git a/core/arch/arm/plat-sprd/kern.ld.S b/core/arch/arm/plat-sprd/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-sprd/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-sprd/link.mk b/core/arch/arm/plat-sprd/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-sprd/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-sprd/main.c b/core/arch/arm/plat-sprd/main.c
new file mode 100644
index 0000000..bf3a62d
--- /dev/null
+++ b/core/arch/arm/plat-sprd/main.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016, Spreadtrum Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <drivers/gic.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <trace.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+};
+
+static struct gic_data gic_data;
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
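+/* Map the GIC CPU and distributor interfaces and register the interrupt chip */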
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ gic_init_base_addr(&gic_data, gicc_base, gicd_base);
+
+ itr_init(&gic_data.chip);
+}
+
+static void main_fiq(void)
+{
+ gic_it_handle(&gic_data);
+}
diff --git a/core/arch/arm/plat-sprd/platform_config.h b/core/arch/arm/plat-sprd/platform_config.h
new file mode 100644
index 0000000..b30583e
--- /dev/null
+++ b/core/arch/arm/plat-sprd/platform_config.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016, Spreadtrum Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /*ARM64*/
+
+#if defined(PLATFORM_FLAVOR_sc9860)
+
+#define GIC_BASE 0x12000000
+#define UART0_BASE 0x70000000
+#define UART1_BASE 0x70100000
+#define UART2_BASE 0x70200000
+#define UART3_BASE 0x70300000
+
+#define CONSOLE_UART_BASE UART1_BASE
+
+#define DRAM0_BASE 0x80000000
+#define DRAM0_SIZE 0x20000000
+
+#define TZDRAM_BASE 0x8f600000
+#define TZDRAM_SIZE (0x02000000 - CFG_SHMEM_SIZE)
+
+#define CFG_TEE_CORE_NB_CORE 8
+
+#define CFG_SHMEM_START (TZDRAM_BASE + TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE 0x200000
+
+#define GICC_OFFSET 0x2000
+#define GICD_OFFSET 0x1000
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+/*
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#define DEVICE0_PA_BASE ROUNDDOWN(CONSOLE_UART_BASE, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_VA_BASE DEVICE0_PA_BASE
+#define DEVICE0_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE0_TYPE MEM_AREA_IO_NSEC
+
+#define DEVICE1_PA_BASE ROUNDDOWN(GIC_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_VA_BASE DEVICE1_PA_BASE
+#define DEVICE1_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE1_TYPE MEM_AREA_IO_SEC
+
+#define DEVICE2_PA_BASE ROUNDDOWN(GIC_BASE + GICD_OFFSET, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_VA_BASE DEVICE2_PA_BASE
+#define DEVICE2_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE2_TYPE MEM_AREA_IO_SEC
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-sprd/sub.mk b/core/arch/arm/plat-sprd/sub.mk
new file mode 100644
index 0000000..3a8214b
--- /dev/null
+++ b/core/arch/arm/plat-sprd/sub.mk
@@ -0,0 +1,3 @@
+global-incdirs-y += .
+srcs-y += main.c
+srcs-y += console.c
diff --git a/core/arch/arm/plat-stm/.gitignore b/core/arch/arm/plat-stm/.gitignore
new file mode 100644
index 0000000..49b7bb9
--- /dev/null
+++ b/core/arch/arm/plat-stm/.gitignore
@@ -0,0 +1 @@
+System.map
diff --git a/core/arch/arm/plat-stm/asc.S b/core/arch/arm/plat-stm/asc.S
new file mode 100644
index 0000000..3f2b6aa
--- /dev/null
+++ b/core/arch/arm/plat-stm/asc.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+#include <asm.S>
+#include <kernel/unwind.h>
+
+#define ASC_BAUDRATE 0x00
+#define ASC_TXBUFFER 0x04
+#define ASC_RXBUFFER 0x08
+#define ASC_CONTROL 0x0c
+#define ASC_INTENABLE 0x10
+#define ASC_STATUS 0x14
+#define ASC_GUARDTIME 0x18
+#define ASC_TIMEOUT 0x1c
+#define ASC_TXRESET 0x20
+#define ASC_RXRESET 0x24
+#define ASC_RETRIES 0x28
+
+.section .text.asc
+
+
+/*
+ * void __asc_flush(vaddr_t base)
+ *
+ * Clobbers r0-r3
+ */
+FUNC __asc_flush , :
+UNWIND( .fnstart)
+
+ ADD r3, r0, #ASC_STATUS
+
+flush_wait:
+ LDR r1, [r3]
+ ANDS r1, r1, #0x02 /* AND TX FIFO EMPTY flag */
+ BEQ flush_wait /* ANDS should have set Z bit if zero */
+
+ LDR r0, =0
+ BX lr
+UNWIND( .fnend)
+END_FUNC __asc_flush
+
+/*
+ * int __asc_xmit_char(char p, vaddr_t base) - Transmit a single character.
+ *
+ * R0 is the 1-byte character to be transmitted
+ * R1 is the base address of the UART
+ * Clobbers r0-r3
+ */
+FUNC __asc_xmit_char , :
+UNWIND( .fnstart)
+
+ ADD r2, r1, #ASC_TXBUFFER
+ ADD r3, r1, #ASC_STATUS
+
+ /* Output byte */
+
+ /* Spin until TX FIFO ready */
+__asc_char_crwait:
+ LDR r1, [r3]
+ ANDS r1, r1, #0x04 /* AND TX FIFO HALF EMPTY flag */
+ BEQ __asc_char_crwait /* ANDS should have set Z bit if zero */
+
+ MOVS r1, r0
+ LDR r0, =0xFF
+ AND r1, r1, r0
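+	/* Z flag from MOVS above: skip the transmit for a NUL character */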
+ BEQ __asc_char_exit
+ CMP r1, #0xa /* r1 == \n (line feed) ? */
+ BNE __asc_char_notlf
+
+	/* Transmit an extra carriage return character for each line feed */
+ LDR r1, =0x0d
+ STR r1, [r2]
+
+ LDR r1, =0x0a /* replace line feed */
+
+__asc_char_notlf:
+ /* Transmit character */
+ STR r1, [r2]
+
+__asc_char_exit:
+ LDR r0, =0
+ BX lr
+UNWIND( .fnend)
+END_FUNC __asc_xmit_char
diff --git a/core/arch/arm/plat-stm/asc.h b/core/arch/arm/plat-stm/asc.h
new file mode 100644
index 0000000..bbf574c
--- /dev/null
+++ b/core/arch/arm/plat-stm/asc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ASC_H
+#define ASC_H
+
+#include <types_ext.h>
+
+extern int __asc_xmit_char(const char p, vaddr_t base);
+extern void __asc_flush(vaddr_t base);
+
+#endif
diff --git a/core/arch/arm/plat-stm/conf.mk b/core/arch/arm/plat-stm/conf.mk
new file mode 100644
index 0000000..4afe256
--- /dev/null
+++ b/core/arch/arm/plat-stm/conf.mk
@@ -0,0 +1,30 @@
+PLATFORM_FLAVOR ?= b2260
+
+arm32-platform-cpuarch := cortex-a9
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_SECURE_TIME_SOURCE_REE,y)
+$(call force,CFG_PL310,y)
+$(call force,CFG_CACHE_API,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_WITH_LPAE,n)
+$(call force,CFG_GIC,y)
+
+ta-targets = ta_arm32
+
+CFG_WITH_PAGER ?= n
+CFG_BOOT_SYNC_CPU ?= y
+CFG_TEE_CORE_EMBED_INTERNAL_TESTS ?= y
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
+CFG_WITH_SOFTWARE_PRNG ?= n
+
+ifeq ($(PLATFORM_FLAVOR),b2260)
+CFG_PL310_LOCKED ?= y
+else
+CFG_PL310_LOCKED ?= n
+endif
diff --git a/core/arch/arm/plat-stm/kern.ld.S b/core/arch/arm/plat-stm/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-stm/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-stm/link.mk b/core/arch/arm/plat-stm/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-stm/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-stm/main.c b/core/arch/arm/plat-stm/main.c
new file mode 100644
index 0000000..e569e07
--- /dev/null
+++ b/core/arch/arm/plat-stm/main.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2014-2016, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm32.h>
+#include <asc.h>
+#include <console.h>
+#include <drivers/gic.h>
+#include <drivers/pl011.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+#include <trace.h>
+#include <util.h>
+
+register_phys_mem(MEM_AREA_IO_SEC, CPU_IOMEM_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, RNG_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_NSEC, UART_CONSOLE_BASE, CORE_MMU_DEVICE_SIZE);
+
+static struct gic_data gic_data;
+static void main_fiq(void);
+
+#if defined(PLATFORM_FLAVOR_b2260)
+#define stm_tee_entry_std tee_entry_std
+static bool ns_resources_ready(void)
+{
+ return true;
+}
+#else
+/* Some nonsecure resources might not be ready (e.g. the UART) */
+static int boot_is_completed __early_bss;
+static bool ns_resources_ready(void)
+{
+ return !!boot_is_completed;
+}
+static void stm_tee_entry_std(struct thread_smc_args *smc_args)
+{
+ boot_is_completed = 1;
+ tee_entry_std(smc_args);
+}
+#endif
+
+static const struct thread_handlers handlers = {
+ .std_smc = stm_tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(UART_CONSOLE_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return UART_CONSOLE_BASE;
+}
+
+void console_init(void)
+{
+}
+
+void console_putc(int ch)
+{
+ if (ns_resources_ready()) {
+ if (ch == '\n')
+ __asc_xmit_char('\r', console_base());
+ __asc_xmit_char((char)ch, console_base());
+ }
+}
+
+void console_flush(void)
+{
+ if (ns_resources_ready())
+ __asc_flush(console_base());
+}
+
+vaddr_t pl310_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(PL310_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return PL310_BASE;
+}
+
+void arm_cl2_config(vaddr_t pl310)
+{
+ /* pl310 off */
+ write32(0, pl310 + PL310_CTRL);
+
+ /* config PL310 */
+ write32(PL310_TAG_RAM_CTRL_INIT, pl310 + PL310_TAG_RAM_CTRL);
+ write32(PL310_DATA_RAM_CTRL_INIT, pl310 + PL310_DATA_RAM_CTRL);
+ write32(PL310_AUX_CTRL_INIT, pl310 + PL310_AUX_CTRL);
+ write32(PL310_PREFETCH_CTRL_INIT, pl310 + PL310_PREFETCH_CTRL);
+ write32(PL310_POWER_CTRL_INIT, pl310 + PL310_POWER_CTRL);
+
+ /* invalidate all pl310 cache ways */
+ arm_cl2_invbyway(pl310);
+}
+
+void plat_cpu_reset_late(void)
+{
+ int i;
+
+ assert(!cpu_mmu_enabled());
+
+ /* Allow NSec to Imprecise abort */
+ write_scr(SCR_AW);
+
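+	/* Only the primary core configures the SCU, address filtering and GIC below */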
+ if (get_core_pos())
+ return;
+
+ write32(SCU_SAC_INIT, SCU_BASE + SCU_SAC);
+ write32(SCU_NSAC_INIT, SCU_BASE + SCU_NSAC);
+ write32(CPU_PORT_FILT_END, SCU_BASE + SCU_FILT_EA);
+ write32(CPU_PORT_FILT_START, SCU_BASE + SCU_FILT_SA);
+ write32(SCU_CTRL_INIT, SCU_BASE + SCU_CTRL);
+
+ write32(CPU_PORT_FILT_END, pl310_base() + PL310_ADDR_FILT_END);
+ write32(CPU_PORT_FILT_START | PL310_CTRL_ENABLE_BIT,
+ pl310_base() + PL310_ADDR_FILT_START);
+
+	/* TODO: gic_init() scan fails; pre-initialize all SPIs as nonsecure */
+ for (i = 0; i < (31 * 4); i += 4)
+ write32(0xFFFFFFFF, GIC_DIST_BASE + GIC_DIST_ISR1 + i);
+}
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_CPU_BASE, MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_DIST_BASE, MEM_AREA_IO_SEC);
+
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ gic_init(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+}
+
+void main_secondary_init_gic(void)
+{
+ gic_cpu_init(&gic_data);
+}
+
+static void main_fiq(void)
+{
+ gic_it_handle(&gic_data);
+}
diff --git a/core/arch/arm/plat-stm/platform_config.h b/core/arch/arm/plat-stm/platform_config.h
new file mode 100644
index 0000000..407a412
--- /dev/null
+++ b/core/arch/arm/plat-stm/platform_config.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Below are platform/SoC settings specific to stm platform flavors */
+
+#if defined(PLATFORM_FLAVOR_b2260)
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#ifndef CFG_DDR_START
+#define CFG_DDR_START 0x40000000
+#define CFG_DDR_SIZE 0x40000000
+#endif
+#ifndef CFG_DDR_TEETZ_RESERVED_START
+#define CFG_DDR_TEETZ_RESERVED_START 0x7E000000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x01E00000
+#endif
+#ifndef CFG_CORE_TZSRAM_EMUL_START
+#define CFG_CORE_TZSRAM_EMUL_START 0x7FE00000
+#endif
+
+#define CPU_IOMEM_BASE 0x08760000
+#define CPU_PORT_FILT_START 0x40000000
+#define CPU_PORT_FILT_END 0xC0000000
+#define STXHxxx_LPM_PERIPH_BASE 0x09700000
+#define RNG_BASE 0x08A89000
+
+#define ASC_NUM 21
+#define UART_CONSOLE_BASE ST_ASC21_REGS_BASE
+
+#elif defined(PLATFORM_FLAVOR_cannes)
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#ifndef CFG_DDR_START
+#define CFG_DDR_START 0x40000000
+#define CFG_DDR_SIZE 0x80000000
+#endif
+#ifndef CFG_DDR_TEETZ_RESERVED_START
+#define CFG_DDR_TEETZ_RESERVED_START 0x93a00000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x01000000
+#endif
+#ifndef CFG_CORE_TZSRAM_EMUL_START
+#define CFG_CORE_TZSRAM_EMUL_START 0x94a00000
+#endif
+
+#define CPU_IOMEM_BASE 0x08760000
+#define CPU_PORT_FILT_START 0x40000000
+#define CPU_PORT_FILT_END 0xC0000000
+#define STXHxxx_LPM_PERIPH_BASE 0x09400000
+#define RNG_BASE 0x08A89000
+
+#define ASC_NUM 20
+#define UART_CONSOLE_BASE ST_ASC20_REGS_BASE
+
+#elif defined(PLATFORM_FLAVOR_orly2)
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#ifndef CFG_DDR_START
+#define CFG_DDR_START 0x40000000
+#define CFG_DDR_SIZE 0x40000000
+#define CFG_DDR1_START 0x80000000
+#define CFG_DDR1_SIZE 0x40000000
+#endif
+#ifndef CFG_DDR_TEETZ_RESERVED_START
+#define CFG_DDR_TEETZ_RESERVED_START 0x8F000000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x00800000
+#endif
+
+#define CPU_IOMEM_BASE 0xFFFE0000
+#define CPU_PORT_FILT_START 0x40000000
+#define CPU_PORT_FILT_END 0x80000000
+#define STXHxxx_LPM_PERIPH_BASE 0xFE400000
+#define RNG_BASE 0xFEE80000
+
+#define ASC_NUM 21
+#define UART_CONSOLE_BASE ST_ASC21_REGS_BASE
+
+#else /* defined(PLATFORM_FLAVOR_xxx) */
+
+#error "Unknown platform flavor"
+
+#endif /* defined(PLATFORM_FLAVOR_xxx) */
+
+/* Below are settings common to stm platform flavors */
+
+/*
+ * CP15 Secure ConTroL Register (SCTLR)
+ *
+ * - Round-Robin replacement for icache, btac, i/duTLB (bit14: RoundRobin)
+ */
+#define CPU_SCTLR_INIT 0x00004000
+
+/*
+ * CP15 Auxiliary ConTroL Register (ACTLR)
+ *
+ * - core always in full SMP (FW bit0=1, SMP bit6=1)
+ * - L2 write full line of zero disabled (bit3=0)
+ * (keep WFLZ low. Will be set once outer L2 is ready)
+ */
+
+#define CPU_ACTLR_INIT 0x00000041
+
+/*
+ * CP15 NonSecure Access Control Register (NSACR)
+ *
+ * - NSec cannot change ACTRL.SMP (NS_SMP bit18=0)
+ * - Nsec can lockdown TLB (TL bit17=1)
+ * - NSec cannot access PLE (PLE bit16=0)
+ * - NSec can use SIMD/VFP (CP10/CP11) (bit15:14=2b00, bit11:10=2b11)
+ */
+#define CPU_NSACR_INIT 0x00020C00
+
+/*
+ * CP15 Power Control Register (PCR)
+ *
+ * - no change latency, enable clk gating
+ */
+#define CPU_PCR_INIT 0x00000001
+
+
+/*
+ * SCU Secure Access Control / NonSecure Access Control
+ *
+ * SAC: Both secure CPUs can access the SCU (bit[3:0]).
+ * NSAC: Both nonsecure CPUs can access the SCU (bit[3:0]), private timers (bit[7:4])
+ * and global timers (bit[11:8]).
+ */
+#if !defined(SCU_SAC_INIT) || !defined(SCU_NSAC_INIT)
+#define SCU_CPUS_MASK (SHIFT_U32(1, CFG_TEE_CORE_NB_CORE) - 1)
+
+#define SCU_SAC_INIT SCU_CPUS_MASK
+#define SCU_NSAC_INIT (SHIFT_U32(SCU_CPUS_MASK, SCU_NSAC_SCU_SHIFT) | \
+ SHIFT_U32(SCU_CPUS_MASK, SCU_NSAC_PTIMER_SHIFT) | \
+ SHIFT_U32(SCU_CPUS_MASK, SCU_NSAC_GTIMER_SHIFT))
+#endif
+
+/*
+ * PL310 TAG RAM Control Register
+ *
+ * bit[10:8]:1 - 2 cycle of write accesses latency
+ * bit[6:4]:1 - 2 cycle of read accesses latency
+ * bit[2:0]:1 - 2 cycle of setup latency
+ */
+#ifndef PL310_TAG_RAM_CTRL_INIT
+#define PL310_TAG_RAM_CTRL_INIT 0x00000111
+#endif
+
+/*
+ * PL310 DATA RAM Control Register
+ *
+ * bit[10:8]:2 - 3 cycle of write accesses latency
+ * bit[6:4]:2 - 3 cycle of read accesses latency
+ * bit[2:0]:2 - 3 cycle of setup latency
+ */
+#ifndef PL310_DATA_RAM_CTRL_INIT
+#define PL310_DATA_RAM_CTRL_INIT 0x00000222
+#endif
+
+/*
+ * PL310 Auxiliary Control Register
+ *
+ * I/Dcache prefetch enabled (bit29:28=2b11)
+ * NS can access interrupts (bit27=1)
+ * NS can lock down cache lines (bit26=1)
+ * Pseudo-random replacement policy (bit25=0)
+ * Force write allocated (default)
+ * Shared attribute internally ignored (bit22=1, bit13=0)
+ * Parity disabled (bit21=0)
+ * Event monitor disabled (bit20=0)
+ * Platform flavor specific way config:
+ * - way size (bit19:17)
+ * - way associativity (bit16)
+ * Store buffer device limitation enabled (bit11=1)
+ * Cacheable accesses have high prio (bit10=0)
+ * Full Line Zero (FLZ) disabled (bit0=0)
+ */
+#ifndef PL310_AUX_CTRL_INIT
+#define PL310_AUX_CTRL_INIT 0x3C480800
+#endif
+
+/*
+ * PL310 Prefetch Control Register
+ *
+ * Double linefill disabled (bit30=0)
+ * I/D prefetch enabled (bit29:28=2b11)
+ * Prefetch drop enabled (bit24=1)
+ * Incr double linefill disable (bit23=0)
+ * Prefetch offset = 7 (bit4:0)
+ */
+#define PL310_PREFETCH_CTRL_INIT 0x31000007
+
+/*
+ * PL310 Power Register
+ *
+ * Dynamic clock gating enabled
+ * Standby mode enabled
+ */
+#define PL310_POWER_CTRL_INIT 0x00000003
+
+/*
+ * SCU Control Register : CTRL = 0x00000065
+ * - ic standby enable=1
+ * - scu standby enable=1
+ * - scu enable=1
+ */
+#define SCU_CTRL_INIT 0x00000065
+
+/*
+ * TEE RAM layout without CFG_WITH_PAGER:
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TEE_RAM | ^
+ * | external memory +------------------+ |
+ * | | TA_RAM | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : default 1MByte
+ * PUB_RAM : default 2MByte
+ * TA_RAM  : all that is left
+ *
+ * ----------------------------------------------------------------------------
+ * TEE RAM layout with CFG_WITH_PAGER=y:
+ *
+ * +---------------------------------------+ <- CFG_CORE_TZSRAM_EMUL_START
+ * | TEE private highly | TEE_RAM | ^
+ * | secure memory | | | CFG_CORE_TZSRAM_EMUL_SIZE
+ * +---------------------------------------+ v
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TA_RAM | ^
+ * | external memory | | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : default 256kByte
+ * TA_RAM  : all that is left in the DDR TEE reserved area
+ * PUB_RAM : default 2MByte
+ */
+
+/* By default, locate the shared memory at the end of the TEE reserved DDR */
+#ifndef CFG_SHMEM_SIZE
+#define CFG_SHMEM_SIZE (2 * 1024 * 1024)
+#endif
+
+#ifndef CFG_SHMEM_START
+#define CFG_SHMEM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_SHMEM_SIZE)
+#endif
+
+#if defined(CFG_WITH_PAGER)
+
+#define TZSRAM_BASE CFG_CORE_TZSRAM_EMUL_START
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE CFG_DDR_TEETZ_RESERVED_START
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - CFG_SHMEM_SIZE)
+
+#define CFG_TEE_RAM_START TZSRAM_BASE
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+
+#define CFG_TA_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_SIZE TZDRAM_SIZE
+
+#else /* CFG_WITH_PAGER */
+
+#define TZDRAM_BASE CFG_DDR_TEETZ_RESERVED_START
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - CFG_SHMEM_SIZE)
+
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#ifndef CFG_TEE_RAM_PH_SIZE
+#define CFG_TEE_RAM_PH_SIZE (1 * 1024 * 1024)
+#endif
+
+#define CFG_TA_RAM_START (TZDRAM_BASE + CFG_TEE_RAM_PH_SIZE)
+#define CFG_TA_RAM_SIZE (TZDRAM_SIZE - CFG_TEE_RAM_PH_SIZE)
+
+#endif /* !CFG_WITH_PAGER */
+
+/* External DDR dies */
+#define DRAM0_BASE CFG_DDR_START
+#define DRAM0_SIZE CFG_DDR_SIZE
+#ifdef CFG_DDR1_START
+#define DRAM1_BASE CFG_DDR1_START
+#define DRAM1_SIZE CFG_DDR1_SIZE
+#endif
+
+#ifndef CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+#endif
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+#define PL310_BASE (CPU_IOMEM_BASE + 0x2000)
+#define GIC_DIST_BASE (CPU_IOMEM_BASE + 0x1000)
+#define SCU_BASE (CPU_IOMEM_BASE + 0x0000)
+#define GIC_CPU_BASE (CPU_IOMEM_BASE + 0x0100)
+#define ST_ASC20_REGS_BASE (STXHxxx_LPM_PERIPH_BASE + 0x00130000)
+#define ST_ASC21_REGS_BASE (STXHxxx_LPM_PERIPH_BASE + 0x00131000)
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 32
+
+#endif /* PLATFORM_CONFIG_H */
diff --git a/core/arch/arm/plat-stm/rng_support.c b/core/arch/arm/plat-stm/rng_support.c
new file mode 100644
index 0000000..25b3893
--- /dev/null
+++ b/core/arch/arm/plat-stm/rng_support.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2014-2016, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <io.h>
+#include <kernel/panic.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <trace.h>
+
+#include "rng_support.h"
+
+/* Address of the register to read in the RNG IP */
+#define RNG_VAL_OFFSET 0x24
+#define RNG_STATUS_OFFSET 0x20
+
+#define RNG_STATUS_ERR0 BIT32(0)
+#define RNG_STATUS_ERR1 BIT32(1)
+#define RNG_STATUS_FULL BIT32(5)
+
+static vaddr_t rng_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(RNG_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return RNG_BASE;
+}
+
+static inline int hwrng_waithost_fifo_full(void)
+{
+ uint32_t status;
+
+ do {
+ status = read32(rng_base() + RNG_STATUS_OFFSET);
+ } while (!(status & RNG_STATUS_FULL));
+
+ if (status & (RNG_STATUS_ERR0 | RNG_STATUS_ERR1))
+ return 1;
+
+ return 0;
+}
+
+uint8_t hw_get_random_byte(void)
+{
+ /*
+ * Only the HW RNG IP is used to generate the value through the
+ * HOST interface.
+ *
+ * @see the document rng_fspec_revG_120720.pdf for details
+ *
+ * - HOST FIFO size = 8x8b (64b)
+ * - LSB (16b) of the RNG_VAL register allows to read 16b
+	 *  - bit5 of the RNG_STATUS register allows to know if the HOST
+	 *    FIFO is full or not.
+	 *  - bit1,0 of the RNG_STATUS register allow to know if the
+	 *    data are valid.
+ *
+ * Main principle:
+	 * For performance reasons, a local SW fifo is used to store the
+	 * content of the HOST FIFO (max size = 8 bytes). When a random
+	 * value is expected, this SW fifo is used to return a stored value.
+	 * When the local SW fifo is empty, it is refilled from the HOST FIFO
+	 * according to the following sequence:
+ *
+ * - wait HOST FIFO full
+ * o Indicates that max 8-bytes (64b) are available
+	 *      o This is mandatory to guarantee that valid data are
+	 *        available. No STATUS bit is provided to indicate that
+	 *        the HOST FIFO is empty.
+ * - check STATUS bits
+ * - update the local SW fifo with the HOST FIFO
+ *
+	 * This avoids waiting at each iteration for a valid random value to
+	 * become available. _LOCAL_FIFO_SIZE indicates the size of the local
+	 * SW fifo.
+ *
+ */
+
+
+#define _LOCAL_FIFO_SIZE 8 /* min 2, 4, 6, max 8 */
+
+ static uint8_t lfifo[_LOCAL_FIFO_SIZE]; /* local fifo */
+ static int pos = -1;
+
+ static int nbcall; /* debug purpose - 0 is the initial value*/
+
+ volatile uint32_t tmpval[_LOCAL_FIFO_SIZE/2];
+ uint8_t value;
+ int i;
+
+ nbcall++;
+
+ /* Retrieve data from local fifo */
+ if (pos >= 0) {
+ pos++;
+ value = lfifo[pos];
+ if (pos == (_LOCAL_FIFO_SIZE - 1))
+ pos = -1;
+ return value;
+ }
+
+ if (hwrng_waithost_fifo_full())
+ return 0;
+
+	/* Read the FIFO according to the number of expected elements */
+ for (i = 0; i < _LOCAL_FIFO_SIZE / 2; i++)
+ tmpval[i] = read32(rng_base() + RNG_VAL_OFFSET) & 0xFFFF;
+
+ /* Update the local SW fifo for next request */
+ pos = 0;
+ for (i = 0; i < _LOCAL_FIFO_SIZE / 2; i++) {
+ lfifo[pos] = tmpval[i] & 0xFF;
+ pos++;
+ lfifo[pos] = (tmpval[i] >> 8) & 0xFF;
+ pos++;
+ };
+
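+	/* Return lfifo[0] now; subsequent calls consume lfifo[1..] until empty */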
+ pos = 0;
+ return lfifo[pos];
+}
diff --git a/core/arch/arm/plat-stm/sub.mk b/core/arch/arm/plat-stm/sub.mk
new file mode 100644
index 0000000..d16bb72
--- /dev/null
+++ b/core/arch/arm/plat-stm/sub.mk
@@ -0,0 +1,6 @@
+global-incdirs-y += .
+
+srcs-y += rng_support.c
+srcs-y += asc.S
+srcs-y += tz_a9init.S
+srcs-y += main.c
diff --git a/core/arch/arm/plat-stm/tz_a9init.S b/core/arch/arm/plat-stm/tz_a9init.S
new file mode 100644
index 0000000..aee7dbe
--- /dev/null
+++ b/core/arch/arm/plat-stm/tz_a9init.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014-2016, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm32.h>
+#include <arm32_macros.S>
+#include <arm32_macros_cortex_a9.S>
+#include <asm.S>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * void arm_cl2_enable(vaddr_t pl310_base) - Memory Cache Level2 Enable Function
+ *
+ * If FLZ is enabled in the PL310, also enable FLZW in the A9 core
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Trap CPU in case of error.
+ * TODO: to be moved to PL310 code (tz_svce_pl310.S ?)
+ */
+FUNC arm_cl2_enable , :
+UNWIND( .fnstart)
+
+ /* Enable PL310 ctrl -> only set lsb bit */
+ mov r1, #0x1
+ str r1, [r0, #PL310_CTRL]
+
+	/* if FLZ is enabled in the L2, enable FLZW in the L1 */
+ ldr r1, [r0, #PL310_AUX_CTRL]
+ tst r1, #(1 << 0) /* test AUX_CTRL[FLZ] */
+ read_actlr r0
+ orrne r0, r0, #(1 << 3) /* enable ACTLR[FLZW] */
+ write_actlr r0
+
+ mov pc, lr
+
+UNWIND( .fnend)
+END_FUNC arm_cl2_enable
+
+/*
+ * Cortex-A9 early configuration
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Trap CPU in case of error.
+ */
+FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
+
+ movw r0, #(CPU_SCTLR_INIT & 0xFFFF)
+ movt r0, #((CPU_SCTLR_INIT >> 16) & 0xFFFF)
+ write_sctlr r0
+
+ movw r0, #(CPU_ACTLR_INIT & 0xFFFF)
+ movt r0, #((CPU_ACTLR_INIT >> 16) & 0xFFFF)
+ write_actlr r0
+
+ movw r0, #(CPU_NSACR_INIT & 0xFFFF)
+ movt r0, #((CPU_NSACR_INIT >> 16) & 0xFFFF)
+ write_nsacr r0
+
+ movw r0, #(CPU_PCR_INIT & 0xFFFF)
+ movt r0, #((CPU_PCR_INIT >> 16) & 0xFFFF)
+ write_pcr r0
+
+ mov pc, lr
+
+UNWIND( .fnend)
+END_FUNC plat_cpu_reset_early
+
diff --git a/core/arch/arm/plat-sunxi/conf.mk b/core/arch/arm/plat-sunxi/conf.mk
new file mode 100644
index 0000000..b2a9dc1
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/conf.mk
@@ -0,0 +1,17 @@
+arm32-platform-cpuarch := cortex-a15
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_SUNXI_UART,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_GIC,y)
+
+ta-targets = ta_arm32
+
+CFG_CRYPTO_SIZE_OPTIMIZATION ?= n
+CFG_NUM_THREADS ?= 4
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
diff --git a/core/arch/arm/plat-sunxi/console.c b/core/arch/arm/plat-sunxi/console.c
new file mode 100644
index 0000000..b985316
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/console.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+#include <drivers/sunxi_uart.h>
+#include <mm/core_memprot.h>
+#include <console.h>
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+
+void console_init(void)
+{
+ sunxi_uart_init(console_base());
+}
+
+void console_putc(int ch)
+{
+ sunxi_uart_putc(ch, console_base());
+}
+
+void console_flush(void)
+{
+ sunxi_uart_flush(console_base());
+}
diff --git a/core/arch/arm/plat-sunxi/entry.S b/core/arch/arm/plat-sunxi/entry.S
new file mode 100644
index 0000000..bff2b8b
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/entry.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+
+.section .text.boot
+.align 5
+FUNC _start , :
+ b reset
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b . /* FIQ */
+END_FUNC _start
+
+LOCAL_FUNC reset , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ write_sctlr r0
+
+ ldr r0, =_start
+ write_vbar r0
+
+ mov r4, r1
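+	/*
+	 * Pick this core's temporary stack: the stack pointer is loaded
+	 * from the stack_tmp area at offset
+	 * (core position + 1) * stack_tmp_stride.
+	 */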
+ bl get_core_pos
+ add r0, r0, #1
+ ldr r2, =stack_tmp_stride
+ ldr r1, [r2]
+ mul r2, r0, r1
+ ldr r1, =stack_tmp
+ ldr sp, [r1, r2]
+
+ /* NSACR configuration */
+ read_nsacr r1
+ orr r1, r1, #NSACR_CP10
+ orr r1, r1, #NSACR_CP11
+ orr r1, r1, #NSACR_NS_SMP
+ write_nsacr r1
+
+ /* Enable SMP bit */
+ read_actlr r0
+ orr r0, r0, #ACTLR_SMP
+ write_actlr r0
+
+ bl core_init_mmu_map
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ /* init BSS section */
+ ldr r0, =__bss_start
+ ldr r2, =__bss_end
+ sub r2, r2, r0
+ ldr r1, =0
+ bl memset
+
+ /* r4: the return address of normal world */
+ mov r0, r4
+ bl main_init
+
+ mov r1, #0
+ mov r2, #0
+ mov r3, #0
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should never return */
+UNWIND( .fnend)
+END_FUNC reset
+
diff --git a/core/arch/arm/plat-sunxi/head.c b/core/arch/arm/plat-sunxi/head.c
new file mode 100644
index 0000000..838dbf4
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/head.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Header for OP-TEE, used by the secure bootloader.
+ **/
+
+#include <platform_config.h>
+
+/******************************************************************************/
+/* the control information stored in the file head */
+/******************************************************************************/
+struct spare_boot_ctrl_head {
+	unsigned int jump_instruction;	/* one instruction jumping to the real code */
+ unsigned char magic[8]; /* ="optee" */
+ unsigned int check_sum; /* generated by PC */
+	unsigned int align_size;	/* align size in bytes */
+	unsigned int length;		/* the total size of the file */
+ unsigned int optee_length; /* the size of optee */
+ unsigned char version[8]; /* optee version */
+ unsigned char platform[8]; /* platform information */
+	int reserved[1];		/* stamp space, 16-byte aligned */
+};
+
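+/*
+ * The jump_instruction field below is an ARM unconditional branch (B):
+ * 0xEA000000 is the always-condition branch opcode and the low 24 bits hold
+ * a signed word offset relative to PC + 8. Encoding the header size in words
+ * minus 2 therefore makes execution skip over this header to the code that
+ * immediately follows it when the image is entered at its first word.
+ */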
+const struct spare_boot_ctrl_head tee_spare_head
+ __attribute__ ((section(".text.head"))) = {
+ (0xEA000000 | (((sizeof(struct spare_boot_ctrl_head) + sizeof(int) - 1) / sizeof(int) - 2) & 0x00FFFFFF)),
+ "optee",
+ 0,
+ 0,
+ 0,
+ 0,
+ "2.0",
+ "optee",
+ {TZDRAM_BASE}
+};
diff --git a/core/arch/arm/plat-sunxi/kern.ld.S b/core/arch/arm/plat-sunxi/kern.ld.S
new file mode 100644
index 0000000..600ed09
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/kern.ld.S
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2008-2010 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <platform_config.h>
+
+OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
+OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)
+
+ENTRY(_start)
+SECTIONS
+{
+ . = TEE_RAM_START;
+
+ /* text/read-only data */
+ .text : {
+ __text_start = .;
+ KEEP(*(.text.head))
+ KEEP(*(.text.boot.vectab1))
+ KEEP(*(.text.boot.vectab2))
+ KEEP(*(.text.boot))
+ *(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
+ __text_end = .;
+
+ . = ALIGN(4);
+ __initcall_start = .;
+ *(.initcall1)
+ *(.initcall2)
+ *(.initcall3)
+ *(.initcall4)
+ __initcall_end = .;
+ }
+
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x9090
+ .plt : { *(.plt) }
+
+ /* .ARM.exidx is sorted, so has to go in its own output section. */
+ __exidx_start = .;
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ __exidx_end = .;
+
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
+ .rodata : ALIGN(4) {
+ __rodata_start = .;
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+
+ /*
+ * 8 to avoid unwanted padding between __start_ta_head_section
+ * and the first structure in ta_head_section, in 64-bit
+ * builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+
+ . = ALIGN(4);
+ __rodata_end = .;
+ }
+
+
+ .data : ALIGN(4) {
+ /* writable data */
+ __data_start_rom = .;
+		/* in single-segment binaries, the ROM data address coincides with the RAM data address */
+ __data_start = .;
+ *(.data .data.* .gnu.linkonce.d.*)
+ }
+
+ .ctors : ALIGN(4) {
+ __ctor_list = .;
+ *(.ctors)
+ __ctor_end = .;
+ }
+ .dtors : ALIGN(4) {
+ __dtor_list = .;
+ *(.dtors)
+ __dtor_end = .;
+ }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+
+ __data_end = .;
+
+	/* uninitialized data (in the same segment as writable data) */
+ .bss : ALIGN(4) {
+ KEEP(*(.bss.prebss.*))
+ . = ALIGN(4);
+ __bss_start = .;
+ *(.bss .bss.*)
+ *(.gnu.linkonce.b.*)
+ *(COMMON)
+ . = ALIGN(4);
+ __bss_end = .;
+ }
+
+ /*
+ * Uninitialized data that shouldn't be zero initialized at
+ * runtime.
+ *
+ * L1 mmu table requires 16 KiB alignment
+ */
+ .nozi : ALIGN(16 * 1024) {
+ __nozi_pad_end = .;
+ KEEP(*(.nozi .nozi.*))
+ }
+
+ teecore_heap_start = .;
+ . += 0x40000 /*256KiB*/;
+ teecore_heap_end = .;
+
+ _end = .;
+
+ . = TEE_RAM_START + TEE_RAM_SIZE;
+ _end_of_ram = .;
+
+ /* Strip unnecessary stuff */
+ /DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/core/arch/arm/plat-sunxi/link.mk b/core/arch/arm/plat-sunxi/link.mk
new file mode 100644
index 0000000..2e289e6
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/link.mk
@@ -0,0 +1,54 @@
+link-out-dir = $(out-dir)/core
+
+link-script = $(platform-dir)/kern.ld.S
+link-script-pp = $(link-out-dir)/kern.ld
+link-script-dep = $(link-out-dir)/.kern.ld.d
+
+AWK = awk
+
+all: $(link-out-dir)/tee.elf $(link-out-dir)/tee.dmp $(link-out-dir)/tee.bin
+all: $(link-out-dir)/tee.symb_sizes
+cleanfiles += $(link-out-dir)/tee.elf $(link-out-dir)/tee.dmp $(link-out-dir)/tee.map
+cleanfiles += $(link-out-dir)/tee.bin
+cleanfiles += $(link-out-dir)/tee.symb_sizes
+cleanfiles += $(link-script-pp) $(link-script-dep)
+
+link-ldflags = $(LDFLAGS)
+link-ldflags += -T $(link-script-pp) -Map=$(link-out-dir)/tee.map
+link-ldflags += --sort-section=alignment
+
+link-ldadd = $(LDADD)
+link-ldadd += $(addprefix -L,$(libdirs))
+link-ldadd += $(addprefix -l,$(libnames))
+ldargs-tee.elf := $(link-ldflags) $(objs) $(link-ldadd) $(libgcccore)
+
+link-script-cppflags := \
+ $(filter-out $(CPPFLAGS_REMOVE) $(cppflags-remove), \
+ $(nostdinccore) $(CPPFLAGS) \
+ $(addprefix -I,$(incdirscore)) $(cppflagscore))
+
+
+-include $(link-script-dep)
+
+$(link-script-pp): $(link-script) $(conf-file)
+ @$(cmd-echo-silent) ' CPP $@'
+ @mkdir -p $(dir $@)
+ $(q)$(CPPcore) -Wp,-P,-MT,$@,-MD,$(link-script-dep) \
+ $(link-script-cppflags) $< > $@
+
+
+$(link-out-dir)/tee.elf: $(objs) $(libdeps) $(link-script-pp)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-tee.elf) -o $@
+
+$(link-out-dir)/tee.dmp: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJDUMP $@'
+ $(q)$(OBJDUMPcore) -l -x -d $< > $@
+
+$(link-out-dir)/tee.bin: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary $< $@
+
+$(link-out-dir)/tee.symb_sizes: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) --print-size --reverse-sort --size-sort $< > $@
diff --git a/core/arch/arm/plat-sunxi/main.c b/core/arch/arm/plat-sunxi/main.c
new file mode 100644
index 0000000..3954d9d
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/main.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+
+#include <sm/sm.h>
+#include <sm/tee_mon.h>
+#include <sm/optee_smc.h>
+#include <optee_msg.h>
+
+#include <arm.h>
+#include <kernel/thread.h>
+#include <kernel/time_source.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/misc.h>
+#include <mm/tee_mmu.h>
+#include <mm/core_mmu.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+#include <platform.h>
+#include <util.h>
+#include <trace.h>
+#include <malloc.h>
+
+/* teecore heap address/size is defined in scatter file */
+extern unsigned char teecore_heap_start;
+extern unsigned char teecore_heap_end;
+
+static void main_fiq(void);
+static void main_tee_entry_std(struct thread_smc_args *args);
+static void main_tee_entry_fast(struct thread_smc_args *args);
+
+static const struct thread_handlers handlers = {
+ .std_smc = main_tee_entry_std,
+ .fast_smc = main_tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+void main_init(uint32_t nsec_entry); /* called from assembly only */
+void main_init(uint32_t nsec_entry)
+{
+ struct sm_nsec_ctx *nsec_ctx;
+ size_t pos = get_core_pos();
+
+ /*
+	 * Mask IRQ and FIQ before switching to the thread vector, as the
+	 * thread handler requires IRQ and FIQ to be masked while executing
+	 * with the temporary stack. The thread subsystem also asserts that
+	 * IRQ is blocked when using most of its functions.
+ */
+ thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ);
+
+ if (pos == 0) {
+ thread_init_primary(&handlers);
+
+ /* initialize platform */
+ platform_init();
+ }
+
+ thread_init_per_cpu();
+
+ /* Initialize secure monitor */
+ nsec_ctx = sm_get_nsec_ctx();
+ nsec_ctx->mon_lr = nsec_entry;
+ nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
+
+ if (pos == 0) {
+ unsigned long a, s;
+ /* core malloc pool init */
+#ifdef CFG_TEE_MALLOC_START
+ a = CFG_TEE_MALLOC_START;
+ s = CFG_TEE_MALLOC_SIZE;
+#else
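+		/*
+		 * Round the heap start up to the next 64 KiB boundary and
+		 * the heap end down to a 64 KiB boundary, then derive the
+		 * pool size from the difference.
+		 */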
+ a = (unsigned long)&teecore_heap_start;
+ s = (unsigned long)&teecore_heap_end;
+ a = ((a + 1) & ~0x0FFFF) + 0x10000; /* 64kB aligned */
+ s = s & ~0x0FFFF; /* 64kB aligned */
+ s = s - a;
+#endif
+ malloc_add_pool((void *)a, s);
+
+ teecore_init_ta_ram();
+
+ if (init_teecore() != TEE_SUCCESS) {
+ panic();
+ }
+ }
+
+	IMSG("OP-TEE initialization finished\n");
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+static void main_tee_entry_fast(struct thread_smc_args *args)
+{
+ /* TODO move to main_init() */
+ if (init_teecore() != TEE_SUCCESS)
+ panic();
+
+ /* SiP Service Call Count */
+ if (args->a0 == OPTEE_SMC_SIP_SUNXI_CALLS_COUNT) {
+ args->a0 = 1;
+ return;
+ }
+
+ /* SiP Service Call UID */
+ if (args->a0 == OPTEE_SMC_SIP_SUNXI_CALLS_UID) {
+ args->a0 = OPTEE_SMC_SIP_SUNXI_UID_R0;
+ args->a1 = OPTEE_SMC_SIP_SUNXI_UID_R1;
+ args->a2 = OPTEE_SMC_SIP_SUNXI_UID_R2;
+ args->a3 = OPTEE_SMC_SIP_SUNXI_UID_R3;
+ return;
+ }
+
+ /* SiP Service Calls */
+ if (args->a0 == OPTEE_SMC_OPTEE_FAST_CALL_SIP_SUNXI) {
+ platform_smc_handle(args);
+ return;
+ }
+
+ tee_entry_fast(args);
+}
+
+
+
+static void main_tee_entry_std(struct thread_smc_args *args)
+{
+ /* TODO move to main_init() */
+ if (init_teecore() != TEE_SUCCESS)
+ panic();
+
+ tee_entry_std(args);
+}
+
+/* main_tee_entry_fast() supports 3 platform-specific functions */
+void tee_entry_get_api_call_count(struct thread_smc_args *args)
+{
+ args->a0 = tee_entry_generic_get_api_call_count() + 3;
+}
diff --git a/core/arch/arm/plat-sunxi/platform.c b/core/arch/arm/plat-sunxi/platform.c
new file mode 100644
index 0000000..e46541a
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/platform.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+
+#include <sm/sm.h>
+#include <sm/tee_mon.h>
+#include <sm/optee_smc.h>
+#include <optee_msg.h>
+
+#include <arm.h>
+#include <kernel/thread.h>
+#include <kernel/time_source.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <mm/tee_pager.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+
+#include <drivers/gic.h>
+#include <drivers/sunxi_uart.h>
+
+#include <trace.h>
+#include <io.h>
+#include <assert.h>
+#include <util.h>
+#include <platform.h>
+#include <console.h>
+
+void sunxi_secondary_entry(void);
+
+uint32_t sunxi_secondary_ns_entry;
+
+struct gic_data gic_data;
+
+static int platform_smp_init(void)
+{
+ vaddr_t base = (vaddr_t)phys_to_virt(PRCM_BASE, MEM_AREA_IO_SEC);
+
+ assert(base);
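+	/*
+	 * Publish the secondary entry point in the PRCM soft-entry
+	 * register; secondary CPUs are expected to start executing from
+	 * the address stored there.
+	 */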
+ write32((uint32_t)sunxi_secondary_entry,
+ base + PRCM_CPU_SOFT_ENTRY_REG);
+
+ return 0;
+}
+
+void platform_init(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+ vaddr_t cci400_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+ cci400_base = (vaddr_t)phys_to_virt(CCI400_BASE, MEM_AREA_IO_SEC);
+ if (!gicc_base || !gicd_base || !cci400_base)
+ panic();
+
+ /*
+	 * The GIC configuration is initialized in the secure bootloader;
+	 * only the GIC base addresses are initialized here, for debugging.
+ */
+ gic_init_base_addr(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+
+ /* platform smp initialize */
+ platform_smp_init();
+
+ /* enable non-secure access cci-400 registers */
+ write32(0x1, cci400_base + CCI400_SECURE_ACCESS_REG);
+
+ /* Initialize uart */
+ console_init();
+
+ return ;
+}
+
+/**
+ * Handle platform-specific SMC commands.
+ */
+uint32_t platform_smc_handle(struct thread_smc_args *smc_args)
+{
+ uint32_t ret = TEE_SUCCESS;
+ switch (smc_args->a1) {
+ case OPTEE_SMC_SIP_SUNXI_SET_SMP_BOOTENTRY:
+ sunxi_secondary_ns_entry = smc_args->a2;
+
+ /* in order to sync with secondary up cpu */
+ cache_maintenance_l1(DCACHE_AREA_CLEAN,
+ (void *)(&sunxi_secondary_ns_entry),
+ sizeof(uint32_t));
+ break;
+ default:
+ ret = OPTEE_SMC_RETURN_EBADCMD;
+ break;
+ }
+ smc_args->a0 = ret;
+ return ret;
+}
+
diff --git a/core/arch/arm/plat-sunxi/platform.h b/core/arch/arm/plat-sunxi/platform.h
new file mode 100644
index 0000000..c6db14b
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/platform.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_H
+#define PLATFORM_H
+
+/*
+ * Function specified by SMC Calling convention.
+ *
+ * SiP Service Calls
+ *
+ * Call register usage:
+ * r0 SMC Function ID, OPTEE_SMC_FUNCID_SIP_SUNXI
+ * r1 OPTEE_SMC_SIP_SUNXI_SET_SMP_BOOTENTRY set smp bootup entry
+ */
+#define OPTEE_SMC_SIP_SUNXI_SET_SMP_BOOTENTRY (0xFFFF0000)
+
+#define OPTEE_SMC_FUNCID_SIP_SUNXI 0x8000
+#define OPTEE_SMC_OPTEE_FAST_CALL_SIP_SUNXI \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_SIP, \
+ OPTEE_SMC_FUNCID_SIP_SUNXI)
+
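+/*
+ * OPTEE_SMC_CALL_VAL() builds the 32-bit SMC function ID as defined by the
+ * ARM SMC Calling Convention: bit 31 marks a fast call, bit 30 selects the
+ * 32- or 64-bit calling convention, bits 29:24 identify the owning entity
+ * (SiP services here) and bits 15:0 carry the function number.
+ */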
+
+/*
+ * Function specified by SMC Calling convention.
+ *
+ * SiP Service Call Count
+ *
+ * This call returns a 32-bit count of the available
+ * Service Calls. A return value of zero means no
+ * services are available.
+ */
+#define OPTEE_SMC_FUNCID_SIP_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_SIP_SUNXI_CALLS_COUNT \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_SIP, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * SiP Service Call UID
+ *
+ * Return the UID of the SiP Service Calls implementation.
+ *
+ */
+#define OPTEE_SMC_SIP_SUNXI_UID_R0 0xa5d5c51b
+#define OPTEE_SMC_SIP_SUNXI_UID_R1 0x8d6c0002
+#define OPTEE_SMC_SIP_SUNXI_UID_R2 0x6f8611e4
+#define OPTEE_SMC_SIP_SUNXI_UID_R3 0x12b7e560
+#define OPTEE_SMC_FUNCID_SIP_SUNXI_CALLS_UID 0xFF01
+#define OPTEE_SMC_SIP_SUNXI_CALLS_UID \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_SIP, \
+ OPTEE_SMC_FUNCID_SIP_SUNXI_CALLS_UID)
+
+void platform_init(void);
+uint32_t platform_smc_handle(struct thread_smc_args *smc_args);
+
+#endif /*PLATFORM_H*/
diff --git a/core/arch/arm/plat-sunxi/platform_config.h b/core/arch/arm/plat-sunxi/platform_config.h
new file mode 100644
index 0000000..8060d9b
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/platform_config.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#define STACK_ALIGNMENT 8
+
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform sunxi"
+#endif
+#ifdef CFG_WITH_LPAE
+#error "LPAE not supported for platform sunxi"
+#endif
+
+#define GIC_BASE 0x01c40000
+#define GICC_OFFSET 0x2000
+#define GICD_OFFSET 0x1000
+#define UART0_BASE 0x07000000
+#define UART1_BASE 0x07000400
+#define UART2_BASE 0x07000800
+#define UART3_BASE 0x07000c00
+#define CCI400_BASE 0x01c90000
+#define SMC_BASE 0x01c0b000
+#define PRCM_BASE 0x08001400
+
+/* CCI-400 register defines */
+#define CCI400_SECURE_ACCESS_REG (0x8)
+
+/* PRCM register defines */
+#define PRCM_CPU_SOFT_ENTRY_REG (0x164)
+
+/* console uart define */
+#define CONSOLE_UART_BASE UART0_BASE
+
+#define DRAM0_BASE 0x20000000
+#define DRAM0_SIZE 0x80000000
+
+/* Location of trusted dram on sunxi */
+#define TZDRAM_BASE 0x9C000000
+#define TZDRAM_SIZE 0x04000000
+
+#define CFG_TEE_CORE_NB_CORE 8
+
+#define DDR_PHYS_START DRAM0_BASE
+#define DDR_SIZE DRAM0_SIZE
+
+#define CFG_DDR_START DDR_PHYS_START
+#define CFG_DDR_SIZE DDR_SIZE
+
+#define CFG_DDR_TEETZ_RESERVED_START TZDRAM_BASE
+#define CFG_DDR_TEETZ_RESERVED_SIZE TZDRAM_SIZE
+
+#define TEE_RAM_START (TZDRAM_BASE)
+#define TEE_RAM_SIZE (1 * 1024 * 1024)
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +-----------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEETZ private RAM | TEE_RAM | ^
+ * | +--------------------+ |
+ * | | TA_RAM | |
+ * +-----------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | | teecore alloc | |
+ * | TEE/TZ and NSec | PUB_RAM --------| |
+ * | shared memory | NSec alloc | |
+ * +-----------------------------------------+ v
+ *
+ * TEE_RAM : 1MByte
+ * PUB_RAM : 1MByte
+ * TA_RAM  : all that is left (at least 2 MByte)
+ */
+
+/* define the several memory area sizes */
+#if (CFG_DDR_TEETZ_RESERVED_SIZE < (4 * 1024 * 1024))
+#error "Invalid CFG_DDR_TEETZ_RESERVED_SIZE: at least 4MB expected"
+#endif
+
+#define CFG_TEE_RAM_PH_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_SIZE CFG_TEE_RAM_PH_SIZE
+#define CFG_TA_RAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_TEE_RAM_SIZE - CFG_SHMEM_SIZE)
+
+/* define the secure/non-secure memory areas */
+#define CFG_DDR_ARMTZ_ONLY_START (CFG_DDR_TEETZ_RESERVED_START)
+#define CFG_DDR_ARMTZ_ONLY_SIZE (CFG_TEE_RAM_SIZE + CFG_TA_RAM_SIZE)
+
+#define CFG_DDR_ARM_ARMTZ_START \
+ (CFG_DDR_ARMTZ_ONLY_START + CFG_DDR_ARMTZ_ONLY_SIZE)
+#define CFG_DDR_ARM_ARMTZ_SIZE (CFG_PUB_RAM_SIZE)
+
+/* define the memory areas (TEE_RAM must start at the reserved DDR start address) */
+#define CFG_TEE_RAM_START (CFG_DDR_ARMTZ_ONLY_START)
+#define CFG_TA_RAM_START (CFG_TEE_RAM_START + CFG_TEE_RAM_SIZE)
+#define CFG_PUB_RAM_START (CFG_TA_RAM_START + CFG_TA_RAM_SIZE)
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_SHMEM_START (DDR_PHYS_START + 0x1000000)
+#define CFG_SHMEM_SIZE 0x100000
+
+#define CFG_TEE_LOAD_ADDR TEE_RAM_START
+
+/* AHB0 devices */
+#define DEVICE0_PA_BASE ROUNDDOWN(0x01400000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_VA_BASE DEVICE0_PA_BASE
+#define DEVICE0_SIZE ROUNDUP(0x00900000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_TYPE MEM_AREA_IO_SEC
+
+/* AHB1 devices */
+#define DEVICE1_PA_BASE ROUNDDOWN(0x00800000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_VA_BASE DEVICE1_PA_BASE
+#define DEVICE1_SIZE ROUNDUP(0x00300000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_TYPE MEM_AREA_IO_SEC
+
+/* AHB2 devices */
+#define DEVICE2_PA_BASE ROUNDDOWN(0x03000000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_VA_BASE DEVICE2_PA_BASE
+#define DEVICE2_SIZE ROUNDUP(0x01000000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_TYPE MEM_AREA_IO_SEC
+
+/* AHBS devices */
+#define DEVICE3_PA_BASE ROUNDDOWN(0x06000000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE3_VA_BASE DEVICE3_PA_BASE
+#define DEVICE3_SIZE ROUNDUP(0x02200000, CORE_MMU_DEVICE_SIZE)
+#define DEVICE3_TYPE MEM_AREA_IO_SEC
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-sunxi/rng_support.c b/core/arch/arm/plat-sunxi/rng_support.c
new file mode 100644
index 0000000..434b104
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/rng_support.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdlib.h>
+#include <rng_support.h>
+#include <trace.h>
+
+/* Weak software fallback (linear congruential generator), not a hardware RNG */
+uint8_t hw_get_random_byte(void)
+{
+ static uint32_t lcg_state;
+ static uint32_t nb_soft = 9876543;
+#define MAX_SOFT_RNG 1024
+ static const uint32_t a = 1664525;
+ static const uint32_t c = 1013904223;
+
+ nb_soft = (nb_soft + 1) % MAX_SOFT_RNG;
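+	/*
+	 * One step of a linear congruential generator using the classic
+	 * "Numerical Recipes" constants; only the top byte of the state is
+	 * returned since the low-order bits of an LCG are of poor quality.
+	 */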
+ lcg_state = (a * lcg_state + c);
+ return (uint8_t) (lcg_state >> 24);
+}
diff --git a/core/arch/arm/plat-sunxi/smp_boot.S b/core/arch/arm/plat-sunxi/smp_boot.S
new file mode 100644
index 0000000..9e79842
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/smp_boot.S
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+
+
+FUNC smp_init_vector , :
+ b . /* Reset */
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b . /* FIQ */
+END_FUNC smp_init_vector
+
+FUNC sunxi_secondary_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* secondary CPUs internal initialization */
+ read_sctlr r0
+ orr r0, r0, #SCTLR_A
+ write_sctlr r0
+
+ /* install smp initialization vector */
+ ldr r0, =smp_init_vector
+ write_vbar r0
+
+ /* Setup tmp stack */
+ bl get_core_pos
+ add r0, r0, #1
+ ldr r2, =stack_tmp_stride
+ ldr r1, [r2]
+ mul r2, r0, r1
+ ldr r1, =stack_tmp
+ ldr sp, [r1, r2]
+
+ /* NSACR configuration */
+ read_nsacr r1
+ orr r1, r1, #NSACR_CP10
+ orr r1, r1, #NSACR_CP11
+ orr r1, r1, #NSACR_NS_SMP
+ write_nsacr r1
+ mcr p15, 0, r1, c1, c1, 2
+
+ /* Enable SMP bit */
+ read_actlr r0
+ orr r0, r0, #ACTLR_SMP
+ write_actlr r0
+
+ /* fixup some platform limits */
+ bl sunxi_secondary_fixup
+
+ /* initialize gic cpu interface */
+ ldr r0, =gic_data
+ bl gic_cpu_init
+
+ /* secure env initialization */
+ bl core_init_mmu_regs
+ bl cpu_mmu_enable
+ bl cpu_mmu_enable_icache
+ bl cpu_mmu_enable_dcache
+
+ /* Initialize thread handling and secure monitor */
+ ldr r0, =sunxi_secondary_ns_entry
+ ldr r0, [r0]
+ bl main_init
+
+ mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+ smc #0
+ b . /* SMC should not return */
+UNWIND( .fnend)
+END_FUNC sunxi_secondary_entry
diff --git a/core/arch/arm/plat-sunxi/smp_fixup.S b/core/arch/arm/plat-sunxi/smp_fixup.S
new file mode 100644
index 0000000..bf533b4
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/smp_fixup.S
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014, Allwinner Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm.S>
+#include <kernel/unwind.h>
+
+#define SLAVE_SNOOPCTL_OFFSET 0
+#define SNOOPCTL_SNOOP_ENABLE (1 << 0)
+#define SNOOPCTL_DVM_ENABLE (1 << 1)
+
+#define CCI_STATUS_OFFSET 0xc
+#define STATUS_CHANGE_PENDING (1 << 0)
+
+#define CCI_SLAVE_OFFSET(n) (0x1000 + 0x1000 * (n))
+
+#define SUNXI_CCI_PHYS_BASE 0x01c90000
+#define SUNXI_CCI_SLAVE_A7 3
+#define SUNXI_CCI_SLAVE_A15 4
+#define SUNXI_CCI_A15_OFFSET CCI_SLAVE_OFFSET(SUNXI_CCI_SLAVE_A15)
+#define SUNXI_CCI_A7_OFFSET CCI_SLAVE_OFFSET(SUNXI_CCI_SLAVE_A7)
+
+#define SUNXI_CCU_PHYS_BASE (0x06000000)
+#define SUNXI_CCU_C0_CFG_OFFSET (0x54)
+#define SUNXI_CCU_C1_CFG_OFFSET (0x58)
+
+FUNC sunxi_secondary_fixup , :
+UNWIND( .fnstart)
+ mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
+ ubfx r0, r0, #8, #4 /* cluster */
+
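+	/*
+	 * Raise the A15 ATB/AXI clock dividers in two stages (ATB /2 and
+	 * AXI /3 first, then ATB /4 and AXI /4), synchronising after each
+	 * write so the new ratios take effect before the next change.
+	 */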
+ ldr r3, =SUNXI_CCU_PHYS_BASE + SUNXI_CCU_C0_CFG_OFFSET
+ cmp r0, #0 /* A7 cluster? */
+ addne r3, r3, #SUNXI_CCU_C1_CFG_OFFSET - SUNXI_CCU_C0_CFG_OFFSET
+ ldr r1, [r3]
+ bic r1, r1, #(0x3<<8) /* a15 atb div */
+ orr r1, r1, #(0x1<<8) /* div = 2 */
+	bic	r1, r1, #(0x7<<0)		/* a15 axi div */
+ orr r1, r1, #(0x2<<0) /* div = value + 1 */
+ str r1, [r3] /* set atb div to 2, axi div to 3 */
+ dsb /* Synchronise side-effects of axi config */
+ ldr r1, [r3]
+ bic r1, r1, #(0x3<<8) /* a15 atb div */
+ orr r1, r1, #(0x2<<8) /* div = 4 */
+	bic	r1, r1, #(0x7<<0)		/* a15 axi div */
+ orr r1, r1, #(0x3<<0) /* div = value + 1 */
+ str r1, [r3] /* set atb div to 4, axi div to 4 */
+ dsb /* Synchronise side-effects of axi config */
+
+ /* Enable CCI snoops. */
+ ldr r3, =SUNXI_CCI_PHYS_BASE + SUNXI_CCI_A7_OFFSET
+ cmp r0, #0 /* A7 cluster? */
+ addne r3, r3, #SUNXI_CCI_A15_OFFSET - SUNXI_CCI_A7_OFFSET
+
+ @ r3 now points to the correct CCI slave register block
+ ldr r1, [r3, #SLAVE_SNOOPCTL_OFFSET]
+ orr r1, r1, #SNOOPCTL_SNOOP_ENABLE
+ orr r1, r1, #SNOOPCTL_DVM_ENABLE
+ str r1, [r3, #SLAVE_SNOOPCTL_OFFSET] /* enable CCI snoops */
+
+ /* Wait for snoop control change to complete */
+ ldr r3, =SUNXI_CCI_PHYS_BASE
+1:
+ ldr r1, [r3, #CCI_STATUS_OFFSET]
+ tst r1, #STATUS_CHANGE_PENDING
+ bne 1b
+ dsb /* Synchronise side-effects of enabling CCI */
+
+ cmp r0, #1 /* A15 cluster ? */
+ bne 2f
+
+ /* a80 platform-specific Cortex-A15 setup */
+ mrc p15, 1, r1, c15, c0, 4 /* ACTLR2 */
+ orr r1, r1, #(0x1<<31) /* Enable CPU regional clock gates */
+ mcr p15, 1, r1, c15, c0, 4
+
+ mrc p15, 1, r1, c15, c0, 0 /* L2ACTLR */
+ orr r1, r1, #(0x1<<26) /* Enables L2, GIC, and Timer regional clock gates */
+ mcr p15, 1, r1, c15, c0, 0
+
+ mrc p15, 1, r1, c15, c0, 0 /* L2ACTLR */
+ orr r1, r1, #(0x1<<3) /* Disables clean/evict from being pushed to external */
+ mcr p15, 1, r1, c15, c0, 0
+
+ mrc p15, 1, r1, c9, c0, 2
+ bic r1, r1, #(0x7<<0) /* L2 data ram latency */
+ orr r1, r1, #(0x3<<0)
+ mcr p15, 1, r1, c9, c0, 2
+
+2:
+	/* a80 platform-specific setup done. */
+ bx lr
+UNWIND( .fnend)
+END_FUNC sunxi_secondary_fixup
diff --git a/core/arch/arm/plat-sunxi/sub.mk b/core/arch/arm/plat-sunxi/sub.mk
new file mode 100644
index 0000000..7c98a65
--- /dev/null
+++ b/core/arch/arm/plat-sunxi/sub.mk
@@ -0,0 +1,9 @@
+global-incdirs-y += .
+srcs-y += entry.S
+srcs-y += main.c
+srcs-y += rng_support.c
+srcs-y += platform.c
+srcs-y += smp_boot.S
+srcs-y += smp_fixup.S
+srcs-y += head.c
+srcs-y += console.c
diff --git a/core/arch/arm/plat-ti/conf.mk b/core/arch/arm/plat-ti/conf.mk
new file mode 100644
index 0000000..64e499f
--- /dev/null
+++ b/core/arch/arm/plat-ti/conf.mk
@@ -0,0 +1,24 @@
+PLATFORM_FLAVOR ?= dra7xx
+
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
+CFG_WITH_SOFTWARE_PRNG ?= n
+
+$(call force,CFG_8250_UART,y)
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_GIC,y)
+ifneq ($(CFG_WITH_SOFTWARE_PRNG),y)
+$(call force,CFG_DRA7_RNG,y)
+endif
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a15
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+ta-targets = ta_arm32
diff --git a/core/arch/arm/plat-ti/console.c b/core/arch/arm/plat-ti/console.c
new file mode 100644
index 0000000..48f0f65
--- /dev/null
+++ b/core/arch/arm/plat-ti/console.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <drivers/serial8250_uart.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+
+register_phys_mem(MEM_AREA_IO_NSEC,
+ CONSOLE_UART_BASE,
+ SERIAL8250_UART_REG_SIZE);
+
+static vaddr_t console_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ serial8250_uart_init(console_base(), CONSOLE_UART_CLK_IN_HZ,
+ CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ serial8250_uart_putc('\r', base);
+ serial8250_uart_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ serial8250_uart_flush_tx_fifo(console_base());
+}
diff --git a/core/arch/arm/plat-ti/kern.ld.S b/core/arch/arm/plat-ti/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-ti/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-ti/link.mk b/core/arch/arm/plat-ti/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-ti/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-ti/main.c b/core/arch/arm/plat-ti/main.c
new file mode 100644
index 0000000..c811862
--- /dev/null
+++ b/core/arch/arm/plat-ti/main.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <drivers/gic.h>
+#include <arm.h>
+#include <kernel/generic_boot.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <trace.h>
+#include <kernel/misc.h>
+#include <kernel/mutex.h>
+#include <kernel/tee_time.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <tee/entry_std.h>
+#include <tee/entry_fast.h>
+#include <console.h>
+#include <sm/sm.h>
+
+static struct gic_data gic_data;
+
+register_phys_mem(MEM_AREA_IO_SEC, GICC_BASE, GICC_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GICD_BASE, GICD_SIZE);
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GICC_BASE, MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GICD_BASE, MEM_AREA_IO_SEC);
+
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ gic_init(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+}
+
+void main_secondary_init_gic(void)
+{
+ gic_cpu_init(&gic_data);
+}
+
+static void main_fiq(void)
+{
+ gic_it_handle(&gic_data);
+}
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+struct plat_nsec_ctx {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t svc_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t abt_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+ uint32_t und_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+ uint32_t irq_spsr;
+ uint32_t fiq_sp;
+ uint32_t fiq_lr;
+ uint32_t fiq_spsr;
+ uint32_t fiq_rx[5];
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
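+/*
+ * Populate the secure monitor's non-secure context from the plat_nsec_ctx
+ * structure found at nsec_entry, so that the first return to the normal
+ * world resumes with the banked registers, monitor link register and SPSR
+ * saved there.
+ */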
+void init_sec_mon(unsigned long nsec_entry)
+{
+ struct plat_nsec_ctx *plat_ctx;
+ struct sm_nsec_ctx *nsec_ctx;
+
+ plat_ctx = phys_to_virt(nsec_entry, MEM_AREA_IO_SEC);
+ if (!plat_ctx)
+ panic();
+
+ /* Invalidate cache to fetch data from external memory */
+ cache_maintenance_l1(DCACHE_AREA_INVALIDATE,
+ plat_ctx, sizeof(*plat_ctx));
+
+ /* Initialize secure monitor */
+ nsec_ctx = sm_get_nsec_ctx();
+
+ nsec_ctx->mode_regs.usr_sp = plat_ctx->usr_sp;
+ nsec_ctx->mode_regs.usr_lr = plat_ctx->usr_lr;
+ nsec_ctx->mode_regs.irq_spsr = plat_ctx->irq_spsr;
+ nsec_ctx->mode_regs.irq_sp = plat_ctx->irq_sp;
+ nsec_ctx->mode_regs.irq_lr = plat_ctx->irq_lr;
+ nsec_ctx->mode_regs.svc_spsr = plat_ctx->svc_spsr;
+ nsec_ctx->mode_regs.svc_sp = plat_ctx->svc_sp;
+ nsec_ctx->mode_regs.svc_lr = plat_ctx->svc_lr;
+ nsec_ctx->mode_regs.abt_spsr = plat_ctx->abt_spsr;
+ nsec_ctx->mode_regs.abt_sp = plat_ctx->abt_sp;
+ nsec_ctx->mode_regs.abt_lr = plat_ctx->abt_lr;
+ nsec_ctx->mode_regs.und_spsr = plat_ctx->und_spsr;
+ nsec_ctx->mode_regs.und_sp = plat_ctx->und_sp;
+ nsec_ctx->mode_regs.und_lr = plat_ctx->und_lr;
+ nsec_ctx->mon_lr = plat_ctx->mon_lr;
+ nsec_ctx->mon_spsr = plat_ctx->mon_spsr;
+}
diff --git a/core/arch/arm/plat-ti/platform_config.h b/core/arch/arm/plat-ti/platform_config.h
new file mode 100644
index 0000000..c1aaee9
--- /dev/null
+++ b/core/arch/arm/plat-ti/platform_config.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#if defined(PLATFORM_FLAVOR_dra7xx)
+
+#define DRAM0_BASE 0xbe000000
+#define DRAM0_SIZE 0x02000000
+
+#ifdef CFG_WITH_PAGER
+#error Pager not supported on this platform
+#endif /*CFG_WITH_PAGER*/
+
+/* Location of protected DDR on the DRA7xx platform */
+#define TZDRAM_BASE 0xbe000000
+#define TZDRAM_SIZE 0x01c00000
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+/* UART1 */
+#define CONSOLE_UART_BASE 0x4806A000
+#define CONSOLE_UART_CLK_IN_HZ 48000000
+#define UART_BAUDRATE 115200
+
+#define GIC_BASE 0x48210000
+#define GICC_OFFSET 0x2000
+#define GICC_SIZE 0x1000
+#define GICD_OFFSET 0x1000
+#define GICD_SIZE 0x1000
+#define GICC_BASE (GIC_BASE + GICC_OFFSET)
+#define GICD_BASE (GIC_BASE + GICD_OFFSET)
+
+#define SECRAM_BASE 0x40200000
+
+/* RNG */
+#define RNG_BASE 0x48090000
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+/* Full GlobalPlatform test suite requires CFG_SHMEM_SIZE to be at least 2MB */
+#define CFG_SHMEM_START (DRAM0_BASE + TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE 0x400000
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR (CFG_TEE_RAM_START + 0x100)
+#endif
+
+/*
+ * Assumes that either TZSRAM isn't large enough or TZSRAM doesn't exist,
+ * everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+#define DEVICE2_PA_BASE ROUNDDOWN(SECRAM_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_VA_BASE DEVICE2_PA_BASE
+#define DEVICE2_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE2_TYPE MEM_AREA_IO_SEC
+
+#ifndef UART_BAUDRATE
+#define UART_BAUDRATE 115200
+#endif
+#ifndef CONSOLE_BAUDRATE
+#define CONSOLE_BAUDRATE UART_BAUDRATE
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-ti/sub.mk b/core/arch/arm/plat-ti/sub.mk
new file mode 100644
index 0000000..3a8214b
--- /dev/null
+++ b/core/arch/arm/plat-ti/sub.mk
@@ -0,0 +1,3 @@
+global-incdirs-y += .
+srcs-y += main.c
+srcs-y += console.c
diff --git a/core/arch/arm/plat-vexpress/conf.mk b/core/arch/arm/plat-vexpress/conf.mk
new file mode 100644
index 0000000..5d7d8c1
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/conf.mk
@@ -0,0 +1,71 @@
+PLATFORM_FLAVOR ?= qemu_virt
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a15
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+ifeq ($(PLATFORM_FLAVOR),fvp)
+platform-flavor-armv8 := 1
+platform-debugger-arm := 1
+endif
+ifeq ($(PLATFORM_FLAVOR),juno)
+platform-flavor-armv8 := 1
+platform-debugger-arm := 1
+endif
+ifeq ($(PLATFORM_FLAVOR),qemu_armv8a)
+platform-flavor-armv8 := 1
+$(call force,CFG_DT,y)
+endif
+
+
+ifeq ($(platform-debugger-arm),1)
+# ARM debugger needs this
+platform-cflags-debug-info = -gdwarf-2
+platform-aflags-debug-info = -gdwarf-2
+endif
+
+ifeq ($(platform-flavor-armv8),1)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+endif
+
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PL011,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+CFG_TEE_FS_KEY_MANAGER_TEST ?= y
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
+
+ifeq ($(PLATFORM_FLAVOR),juno)
+CFG_CRYPTO_WITH_CE ?= y
+endif
+
+ifeq ($(PLATFORM_FLAVOR),qemu_virt)
+ifeq ($(CFG_CORE_SANITIZE_KADDRESS),y)
+# CFG_ASAN_SHADOW_OFFSET is calculated as:
+# (&__asan_shadow_start - (CFG_TEE_RAM_START / 8))
+# This is unfortunately not currently possible to do in make, so we have to
+# calculate it offline. There are asserts in
+# core/arch/arm/kernel/generic_boot.c to check that we got it right.
+CFG_ASAN_SHADOW_OFFSET=0xc4e38e0
+endif
+$(call force,CFG_DT,y)
+# SE API is only supported by QEMU Virt platform
+CFG_SE_API ?= y
+CFG_SE_API_SELF_TEST ?= y
+CFG_PCSC_PASSTHRU_READER_DRV ?= y
+endif
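The shadow-offset comment above translates directly into C. A minimal sketch follows; both input values are placeholders for illustration only (the real &__asan_shadow_start comes from the linked image and CFG_TEE_RAM_START from the platform memory map), chosen here so the printed value matches the 0xc4e38e0 configured above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t shadow_start = 0x0e0e38e0;	/* assumed &__asan_shadow_start */
	uintptr_t tee_ram_start = 0x0e000000;	/* assumed CFG_TEE_RAM_START */

	/* An address A maps to the shadow byte at (A / 8) + offset */
	printf("CFG_ASAN_SHADOW_OFFSET=0x%lx\n",
	       (unsigned long)(shadow_start - tee_ram_start / 8));
	return 0;
}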
diff --git a/core/arch/arm/plat-vexpress/juno_core_pos_a32.S b/core/arch/arm/plat-vexpress/juno_core_pos_a32.S
new file mode 100644
index 0000000..a75a65d
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/juno_core_pos_a32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+/* For Juno, number the two A57s as 4 to 5 and the A53s as 0 to 3 */
+FUNC get_core_pos , :
+UNWIND( .fnstart)
+ read_mpidr r0
+ /* Calculate CorePos = ((ClusterId ^ 1) * 4) + CoreId */
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ eor r0, r0, #(1 << MPIDR_CLUSTER_SHIFT)
+ add r0, r1, r0, LSR #6
+ bx lr
+UNWIND( .fnend)
+END_FUNC get_core_pos
+
diff --git a/core/arch/arm/plat-vexpress/juno_core_pos_a64.S b/core/arch/arm/plat-vexpress/juno_core_pos_a64.S
new file mode 100644
index 0000000..47bb85d
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/juno_core_pos_a64.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+
+/* For Juno, number the two A57s as 4 to 5 and the A53s as 0 to 3 */
+FUNC get_core_pos , :
+ mrs x0, mpidr_el1
+ /* Calculate CorePos = ((ClusterId ^ 1) * 4) + CoreId */
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ eor x0, x0, #(1 << MPIDR_CLUSTER_SHIFT)
+ add x0, x1, x0, LSR #6
+ ret
+END_FUNC get_core_pos
+
+
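The two assembly routines above implement the same CorePos mapping. As a rough C rendering of the MPIDR decoding (the 0xff field mask and the 8-bit Aff1 shift are assumptions about the MPIDR_* constants in arm.h, stated here rather than quoted from it):

#include <stdint.h>

/* Juno: cluster 1 (A53s) -> positions 0-3, cluster 0 (A57s) -> positions 4-5 */
static unsigned int juno_core_pos(uint32_t mpidr)
{
	uint32_t core = mpidr & 0xff;		/* Aff0: CPU id within cluster */
	uint32_t cluster = (mpidr >> 8) & 0xff;	/* Aff1: cluster id */

	/* CorePos = ((ClusterId ^ 1) * 4) + CoreId, as in the comment above */
	return (cluster ^ 1) * 4 + core;
}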
diff --git a/core/arch/arm/plat-vexpress/kern.ld.S b/core/arch/arm/plat-vexpress/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-vexpress/link.mk b/core/arch/arm/plat-vexpress/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-vexpress/main.c b/core/arch/arm/plat-vexpress/main.c
new file mode 100644
index 0000000..85ed9ee
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/main.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <stdint.h>
+#include <string.h>
+
+#include <drivers/gic.h>
+#include <drivers/pl011.h>
+#include <drivers/tzc400.h>
+
+#include <arm.h>
+#include <kernel/generic_boot.h>
+#include <kernel/pm_stubs.h>
+#include <trace.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/tee_time.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <console.h>
+#include <keep.h>
+#include <initcall.h>
+
+static void main_fiq(void);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+#else
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+#endif
+};
+
+static struct gic_data gic_data;
+
+register_phys_mem(MEM_AREA_IO_SEC, CONSOLE_UART_BASE, PL011_REG_SIZE);
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+#ifdef GIC_BASE
+
+register_phys_mem(MEM_AREA_IO_SEC, GICD_BASE, GIC_DIST_REG_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GICC_BASE, GIC_DIST_REG_SIZE);
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+ if (!gicc_base || !gicd_base)
+ panic();
+
+#if defined(PLATFORM_FLAVOR_fvp) || defined(PLATFORM_FLAVOR_juno) || \
+ defined(PLATFORM_FLAVOR_qemu_armv8a)
+ /* On ARMv8, GIC configuration is initialized in ARM-TF */
+ gic_init_base_addr(&gic_data, gicc_base, gicd_base);
+#else
+ /* Initialize GIC */
+ gic_init(&gic_data, gicc_base, gicd_base);
+#endif
+ itr_init(&gic_data.chip);
+}
+#endif
+
+static void main_fiq(void)
+{
+ gic_it_handle(&gic_data);
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ pl011_init(console_base(), CONSOLE_UART_CLK_IN_HZ, CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ vaddr_t base = console_base();
+
+ if (ch == '\n')
+ pl011_putc('\r', base);
+ pl011_putc(ch, base);
+}
+
+void console_flush(void)
+{
+ pl011_flush(console_base());
+}
+
+#ifdef IT_CONSOLE_UART
+static enum itr_return console_itr_cb(struct itr_handler *h __unused)
+{
+ paddr_t uart_base = console_base();
+
+ while (pl011_have_rx_data(uart_base)) {
+ int ch __maybe_unused = pl011_getchar(uart_base);
+
+ DMSG("cpu %zu: got 0x%x", get_core_pos(), ch);
+ }
+ return ITRR_HANDLED;
+}
+
+static struct itr_handler console_itr = {
+ .it = IT_CONSOLE_UART,
+ .flags = ITRF_TRIGGER_LEVEL,
+ .handler = console_itr_cb,
+};
+KEEP_PAGER(console_itr);
+
+static TEE_Result init_console_itr(void)
+{
+ itr_add(&console_itr);
+ itr_enable(IT_CONSOLE_UART);
+ return TEE_SUCCESS;
+}
+driver_init(init_console_itr);
+#endif
+
+#ifdef CFG_TZC400
+register_phys_mem(MEM_AREA_IO_SEC, TZC400_BASE, TZC400_REG_SIZE);
+
+static TEE_Result init_tzc400(void)
+{
+ void *va;
+
+ DMSG("Initializing TZC400");
+
+ va = phys_to_virt(TZC400_BASE, MEM_AREA_IO_SEC);
+ if (!va) {
+ EMSG("TZC400 not mapped");
+ panic();
+ }
+
+ tzc_init((vaddr_t)va);
+ tzc_dump_state();
+
+ return TEE_SUCCESS;
+}
+
+service_init(init_tzc400);
+#endif /*CFG_TZC400*/
diff --git a/core/arch/arm/plat-vexpress/platform_config.h b/core/arch/arm/plat-vexpress/platform_config.h
new file mode 100644
index 0000000..bd006ca
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/platform_config.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#include <stdint.h>
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef ARM64
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for ARM64"
+#endif
+#endif /*ARM64*/
+
+#if defined(PLATFORM_FLAVOR_fvp)
+
+#define GIC_BASE 0x2c000000
+#define UART0_BASE 0x1c090000
+#define UART1_BASE 0x1c0a0000
+#define UART2_BASE 0x1c0b0000
+#define UART3_BASE 0x1c0c0000
+#define TZC400_BASE 0x2a4a0000
+
+#define IT_UART1 38
+
+#define CONSOLE_UART_BASE UART1_BASE
+#define IT_CONSOLE_UART IT_UART1
+
+#elif defined(PLATFORM_FLAVOR_juno)
+
+#define GIC_BASE 0x2c010000
+
+/* FPGA UART0 */
+#define UART0_BASE 0x1c090000
+/* FPGA UART1 */
+#define UART1_BASE 0x1c0a0000
+/* SoC UART0 */
+#define UART2_BASE 0x7ff80000
+/* SoC UART1 */
+#define UART3_BASE 0x7ff70000
+
+
+#define UART0_CLK_IN_HZ 24000000
+#define UART1_CLK_IN_HZ 24000000
+#define UART2_CLK_IN_HZ 7273800
+#define UART3_CLK_IN_HZ 7273800
+
+
+#define IT_UART3 116
+
+#define CONSOLE_UART_BASE UART3_BASE
+#define IT_CONSOLE_UART IT_UART3
+#define CONSOLE_UART_CLK_IN_HZ UART3_CLK_IN_HZ
+
+#elif defined(PLATFORM_FLAVOR_qemu_virt)
+
+#define GIC_BASE 0x08000000
+#define UART0_BASE 0x09000000
+#define UART1_BASE 0x09040000
+#define PCSC_BASE 0x09100000
+
+#define IT_UART1 40
+#define IT_PCSC 37
+
+#define CONSOLE_UART_BASE UART1_BASE
+#define IT_CONSOLE_UART IT_UART1
+
+#elif defined(PLATFORM_FLAVOR_qemu_armv8a)
+
+#define UART0_BASE 0x09000000
+#define UART1_BASE 0x09040000
+
+#define CONSOLE_UART_BASE UART1_BASE
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+#if defined(PLATFORM_FLAVOR_fvp)
+/*
+ * FVP specifics.
+ */
+
+#define DRAM0_BASE 0x80000000
+#define DRAM0_SIZE 0x80000000
+
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE (0x06000000)
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE (TZSRAM_BASE + CFG_TEE_RAM_VA_SIZE)
+#define TZDRAM_SIZE (0x02000000 - CFG_TEE_RAM_VA_SIZE)
+
+#else /*CFG_WITH_PAGER*/
+
+/* Location of trusted dram on the base fvp */
+#define TZDRAM_BASE 0x06000000
+#define TZDRAM_SIZE 0x02000000
+
+#endif /*CFG_WITH_PAGER*/
+
+#define CFG_TEE_CORE_NB_CORE 8
+
+#define CFG_SHMEM_START (DRAM0_BASE + 0x3000000)
+#define CFG_SHMEM_SIZE 0x200000
+
+#define GICC_OFFSET 0x0
+#define GICD_OFFSET 0x3000000
+
+#elif defined(PLATFORM_FLAVOR_juno)
+/*
+ * Juno specifics.
+ */
+
+#define DRAM0_BASE 0x80000000
+#define DRAM0_SIZE 0x7F000000
+
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE 0xFF000000
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE (TZSRAM_BASE + CFG_TEE_RAM_VA_SIZE)
+#define TZDRAM_SIZE (0x00E00000 - CFG_TEE_RAM_VA_SIZE)
+
+#else /*CFG_WITH_PAGER*/
+/*
+ * The last part of DRAM is reserved as secure DRAM. Note that the last 2MiB
+ * of DRAM0 is used by the SCP for DDR retraining.
+ */
+#define TZDRAM_BASE 0xFF000000
+/*
+ * Should be
+ * #define TZDRAM_SIZE 0x00FF8000
+ * but is smaller due to SECTION_SIZE alignment. This can be fixed once
+ * OP-TEE OS is mapped using small pages instead.
+ */
+#define TZDRAM_SIZE 0x00E00000
+#endif /*CFG_WITH_PAGER*/
+
+#define CFG_TEE_CORE_NB_CORE 6
+
+#define CFG_SHMEM_START (DRAM0_BASE + DRAM0_SIZE - CFG_SHMEM_SIZE)
+#define CFG_SHMEM_SIZE 0x200000
+
+#define GICC_OFFSET 0x1f000
+#define GICD_OFFSET 0
+
+#elif defined(PLATFORM_FLAVOR_qemu_virt)
+/*
+ * QEMU virt specifics.
+ */
+
+#define DRAM0_BASE UINTPTR_C(0x40000000)
+#define DRAM0_SIZE (UINTPTR_C(0x42100000) - CFG_SHMEM_SIZE)
+
+#define DRAM0_TEERES_BASE (DRAM0_BASE + DRAM0_SIZE)
+#define DRAM0_TEERES_SIZE CFG_SHMEM_SIZE
+
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE 0x0e000000
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE (TZSRAM_BASE + TZSRAM_SIZE)
+#define TZDRAM_SIZE (0x01000000 - TZSRAM_SIZE)
+
+#else /* CFG_WITH_PAGER */
+
+#define TZDRAM_BASE 0x0e000000
+#define TZDRAM_SIZE 0x01000000
+
+#endif /* CFG_WITH_PAGER */
+
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#define CFG_SHMEM_START (DRAM0_TEERES_BASE + \
+ (DRAM0_TEERES_SIZE - CFG_SHMEM_SIZE))
+#define CFG_SHMEM_SIZE 0x200000
+
+#define GICD_OFFSET 0
+#define GICC_OFFSET 0x10000
+
+
+#elif defined(PLATFORM_FLAVOR_qemu_armv8a)
+
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform vexpress-qemu_armv8a"
+#endif
+
+#define DRAM0_BASE UINTPTR_C(0x40000000)
+#define DRAM0_SIZE (UINTPTR_C(0x40000000) - CFG_SHMEM_SIZE)
+
+#define DRAM0_TEERES_BASE (DRAM0_BASE + DRAM0_SIZE)
+#define DRAM0_TEERES_SIZE CFG_SHMEM_SIZE
+
+#define TZDRAM_BASE 0x0e100000
+#define TZDRAM_SIZE 0x00f00000
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#define CFG_SHMEM_START (DRAM0_TEERES_BASE + \
+ (DRAM0_TEERES_SIZE - CFG_SHMEM_SIZE))
+#define CFG_SHMEM_SIZE 0x200000
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+#ifdef CFG_WITH_PAGER
+/*
+ * Have TZSRAM either as real physical SRAM or emulated by reserving an area
+ * somewhere else.
+ *
+ * +------------------+
+ * | TZSRAM | TEE_RAM |
+ * +--------+---------+
+ * | TZDRAM | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+#define CFG_TEE_RAM_START TZSRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP(TZDRAM_BASE, CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN(TZDRAM_SIZE, CORE_MMU_DEVICE_SIZE)
+#else
+/*
+ * Assumes that either TZSRAM isn't large enough or TZSRAM doesn't exist;
+ * everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#endif
+
+#ifdef GIC_BASE
+#define GICD_BASE (GIC_BASE + GICD_OFFSET)
+#define GICC_BASE (GIC_BASE + GICC_OFFSET)
+#endif
+
+#ifndef UART_BAUDRATE
+#define UART_BAUDRATE 115200
+#endif
+#ifndef CONSOLE_BAUDRATE
+#define CONSOLE_BAUDRATE UART_BAUDRATE
+#endif
+
+/* For virtual platforms where there isn't a clock */
+#ifndef CONSOLE_UART_CLK_IN_HZ
+#define CONSOLE_UART_CLK_IN_HZ 1
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
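To see how the pieces above compose for one flavor, here is a small standalone sketch that plugs in the qemu_armv8a numbers (values copied from the definitions above; nothing beyond them is assumed). It shows that the non-secure shared memory ends up as the top 2 MiB of the 1 GiB DRAM starting at 0x40000000:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t shmem_size = 0x200000;			/* CFG_SHMEM_SIZE */
	uint64_t dram0_base = 0x40000000;		/* DRAM0_BASE */
	uint64_t dram0_size = 0x40000000 - shmem_size;	/* DRAM0_SIZE */
	uint64_t teeres_base = dram0_base + dram0_size;	/* DRAM0_TEERES_BASE */
	uint64_t shmem_start = teeres_base + (shmem_size - shmem_size);

	/* Prints SHMEM 0x7fe00000..0x80000000 */
	printf("SHMEM 0x%llx..0x%llx\n", (unsigned long long)shmem_start,
	       (unsigned long long)(shmem_start + shmem_size));
	return 0;
}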
diff --git a/core/arch/arm/plat-vexpress/sub.mk b/core/arch/arm/plat-vexpress/sub.mk
new file mode 100644
index 0000000..086f539
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/sub.mk
@@ -0,0 +1,7 @@
+global-incdirs-y += .
+srcs-y += main.c
+ifeq ($(PLATFORM_FLAVOR_juno),y)
+srcs-$(CFG_ARM32_core) += juno_core_pos_a32.S
+srcs-$(CFG_ARM64_core) += juno_core_pos_a64.S
+endif
+srcs-y += vendor_props.c
diff --git a/core/arch/arm/plat-vexpress/vendor_props.c b/core/arch/arm/plat-vexpress/vendor_props.c
new file mode 100644
index 0000000..2f4d10d
--- /dev/null
+++ b/core/arch/arm/plat-vexpress/vendor_props.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, Linaro Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <tee/tee_svc.h>
+#include <user_ta_header.h>
+#include <util.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/tee_common_otp.h>
+#include <tee/tee_cryp_utl.h>
+
+/*
+ * The data to hash is 48 bytes made up of:
+ * - 16 bytes: the UUID of the calling TA.
+ * - 32 bytes: the hardware device ID
+ * The resulting endorsement seed is 32 bytes.
+ *
+ * The output buffer is the "binary" struct defined in
+ * the "prop_value" union and therefore comprises:
+ * - 4 bytes: the size of the binary value data (32)
+ * - 32 bytes: the binary value data (endorsement seed)
+ *
+ * Note that this code assumes an endorsement seed
+ * size == device ID size for convenience.
+ */
+static TEE_Result get_prop_endorsement(struct tee_ta_session *sess,
+ void *buf, size_t *blen)
+{
+ TEE_Result res;
+ uint32_t ta_endorsement_seed_size = 32;
+ uint8_t data[sizeof(TEE_UUID) + ta_endorsement_seed_size];
+ uint32_t bin[1 + ta_endorsement_seed_size / sizeof(uint32_t)];
+ uint32_t *bin_len = (uint32_t *)bin;
+ uint8_t *bin_val = (uint8_t *)(&bin[1]);
+
+ if (*blen < sizeof(bin)) {
+ *blen = sizeof(bin);
+ return TEE_ERROR_SHORT_BUFFER;
+ }
+ *blen = sizeof(bin);
+
+ memcpy(data, &sess->ctx->uuid, sizeof(TEE_UUID));
+
+ if (tee_otp_get_die_id(&data[sizeof(TEE_UUID)],
+ ta_endorsement_seed_size))
+ return TEE_ERROR_BAD_STATE;
+
+ res = tee_hash_createdigest(TEE_ALG_SHA256, data, sizeof(data),
+ bin_val, ta_endorsement_seed_size);
+ if (res != TEE_SUCCESS)
+ return TEE_ERROR_BAD_STATE;
+
+ *bin_len = ta_endorsement_seed_size;
+
+ return tee_svc_copy_to_user((void *)buf, bin, sizeof(bin));
+}
+
+static const struct tee_props vendor_propset_array_tee[] = {
+ {
+ .name = "com.microsoft.ta.endorsementSeed",
+ .prop_type = USER_TA_PROP_TYPE_BINARY_BLOCK,
+ .get_prop_func = get_prop_endorsement
+ },
+};
+
+const struct tee_vendor_props vendor_props_tee = {
+ .props = vendor_propset_array_tee,
+ .len = ARRAY_SIZE(vendor_propset_array_tee),
+};
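From the TA side this property is read like any other implementation property. A minimal sketch using the GlobalPlatform Internal Core API; it assumes the vendor property set is exposed through TEE_PROPSET_TEE_IMPLEMENTATION, which is how this propset is wired into the TEE property lookup:

#include <tee_internal_api.h>

static TEE_Result read_endorsement_seed(uint8_t seed[32])
{
	uint32_t len = 32;

	/* The binary value is the 32-byte per-TA endorsement seed */
	return TEE_GetPropertyAsBinaryBlock(TEE_PROPSET_TEE_IMPLEMENTATION,
					    "com.microsoft.ta.endorsementSeed",
					    seed, &len);
}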
diff --git a/core/arch/arm/plat-zynq7k/conf.mk b/core/arch/arm/plat-zynq7k/conf.mk
new file mode 100644
index 0000000..f457d8d
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/conf.mk
@@ -0,0 +1,24 @@
+PLATFORM_FLAVOR ?= zc702
+
+arm32-platform-cpuarch := cortex-a9
+
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+core_arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_ARM32_core,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_CDNS_UART,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_WITH_SOFTWARE_PRNG,y)
+$(call force,CFG_PL310,y)
+$(call force,CFG_PL310_LOCKED,y)
+$(call force,CFG_SECURE_TIME_SOURCE_REE,y)
+
+ta-targets = ta_arm32
+
+CFG_BOOT_SYNC_CPU ?= y
+CFG_BOOT_SECONDARY_REQUEST ?= y
+CFG_CRYPTO_SIZE_OPTIMIZATION ?= n
+CFG_WITH_STACK_CANARIES ?= y
diff --git a/core/arch/arm/plat-zynq7k/kern.ld.S b/core/arch/arm/plat-zynq7k/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-zynq7k/link.mk b/core/arch/arm/plat-zynq7k/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-zynq7k/main.c b/core/arch/arm/plat-zynq7k/main.c
new file mode 100644
index 0000000..74bc1ce
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/main.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm32.h>
+#include <console.h>
+#include <drivers/cdns_uart.h>
+#include <drivers/gic.h>
+#include <io.h>
+#include <kernel/generic_boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <platform_smc.h>
+#include <stdint.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+
+static void main_fiq(void);
+static void platform_tee_entry_fast(struct thread_smc_args *args);
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = platform_tee_entry_fast,
+ .fiq = main_fiq,
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+};
+
+static struct gic_data gic_data;
+
+register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, GIC_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, PL310_BASE, CORE_MMU_DEVICE_SIZE);
+register_phys_mem(MEM_AREA_IO_SEC, SLCR_BASE, CORE_MMU_DEVICE_SIZE);
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+void plat_cpu_reset_late(void)
+{
+ if (!get_core_pos()) {
+ /* primary core */
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+ /* set secondary entry address and release core */
+ write32(CFG_TEE_LOAD_ADDR, SECONDARY_ENTRY_DROP);
+ dsb();
+ sev();
+#endif
+
+ /* SCU config */
+ write32(SCU_INV_CTRL_INIT, SCU_BASE + SCU_INV_SEC);
+ write32(SCU_SAC_CTRL_INIT, SCU_BASE + SCU_SAC);
+ write32(SCU_NSAC_CTRL_INIT, SCU_BASE + SCU_NSAC);
+
+ /* SCU enable */
+ write32(read32(SCU_BASE + SCU_CTRL) | 0x1,
+ SCU_BASE + SCU_CTRL);
+
+ /* NS Access control */
+ write32(ACCESS_BITS_ALL, SECURITY2_SDIO0);
+ write32(ACCESS_BITS_ALL, SECURITY3_SDIO1);
+ write32(ACCESS_BITS_ALL, SECURITY4_QSPI);
+ write32(ACCESS_BITS_ALL, SECURITY6_APB_SLAVES);
+
+ write32(SLCR_UNLOCK_MAGIC, SLCR_UNLOCK);
+
+ write32(ACCESS_BITS_ALL, SLCR_TZ_DDR_RAM);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_DMA_NS);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_DMA_IRQ_NS);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_DMA_PERIPH_NS);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_GEM);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_SDIO);
+ write32(ACCESS_BITS_ALL, SLCR_TZ_USB);
+
+ write32(SLCR_LOCK_MAGIC, SLCR_LOCK);
+ }
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE,
+ MEM_AREA_IO_NSEC);
+ return (vaddr_t)va;
+ }
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+}
+
+void console_putc(int ch)
+{
+ if (ch == '\n')
+ cdns_uart_putc('\r', console_base());
+ cdns_uart_putc(ch, console_base());
+}
+
+void console_flush(void)
+{
+ cdns_uart_flush(console_base());
+}
+
+vaddr_t pl310_base(void)
+{
+ static void *va __early_bss;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(PL310_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+ return PL310_BASE;
+}
+
+void arm_cl2_config(vaddr_t pl310_base)
+{
+ /* Disable PL310 */
+ write32(0, pl310_base + PL310_CTRL);
+
+ /*
+ * Xilinx AR#54190 recommends setting L2C RAM in SLCR
+ * to 0x00020202 for proper cache operations.
+ */
+ write32(SLCR_L2C_RAM_VALUE, SLCR_L2C_RAM);
+
+ write32(PL310_TAG_RAM_CTRL_INIT, pl310_base + PL310_TAG_RAM_CTRL);
+ write32(PL310_DATA_RAM_CTRL_INIT, pl310_base + PL310_DATA_RAM_CTRL);
+ write32(PL310_AUX_CTRL_INIT, pl310_base + PL310_AUX_CTRL);
+ write32(PL310_PREFETCH_CTRL_INIT, pl310_base + PL310_PREFETCH_CTRL);
+ write32(PL310_POWER_CTRL_INIT, pl310_base + PL310_POWER_CTRL);
+
+ /* invalidate all cache ways */
+ arm_cl2_invbyway(pl310_base);
+}
+
+void arm_cl2_enable(vaddr_t pl310_base)
+{
+ uint32_t val;
+
+ /* Enable PL310 ctrl -> only set lsb bit */
+ write32(1, pl310_base + PL310_CTRL);
+
+	/* if L2 FLZW is enabled, also enable it in L1 */
+ val = read32(pl310_base + PL310_AUX_CTRL);
+ if (val & 1)
+ write_actlr(read_actlr() | (1 << 3));
+}
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+
+ if (!gicc_base || !gicd_base)
+ panic();
+
+ /* Initialize GIC */
+ gic_init(&gic_data, gicc_base, gicd_base);
+ itr_init(&gic_data.chip);
+}
+
+void main_secondary_init_gic(void)
+{
+ gic_cpu_init(&gic_data);
+}
+
+static vaddr_t slcr_access_range[] = {
+ 0x004, 0x008, /* lock, unlock */
+ 0x100, 0x1FF, /* PLL */
+ 0x200, 0x2FF, /* Reset */
+ 0xA00, 0xAFF /* L2C */
+};
+
+static uint32_t write_slcr(uint32_t addr, uint32_t val)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(slcr_access_range); i += 2) {
+ if (addr >= slcr_access_range[i] &&
+ addr <= slcr_access_range[i+1]) {
+ static vaddr_t va __early_bss;
+
+ if (!va)
+ va = (vaddr_t)phys_to_virt(SLCR_BASE,
+ MEM_AREA_IO_SEC);
+ write32(val, va + addr);
+ return OPTEE_SMC_RETURN_OK;
+ }
+ }
+ return OPTEE_SMC_RETURN_EBADADDR;
+}
+
+static uint32_t read_slcr(uint32_t addr, uint32_t *val)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(slcr_access_range); i += 2) {
+ if (addr >= slcr_access_range[i] &&
+ addr <= slcr_access_range[i+1]) {
+ static vaddr_t va __early_bss;
+
+ if (!va)
+ va = (vaddr_t)phys_to_virt(SLCR_BASE,
+ MEM_AREA_IO_SEC);
+ *val = read32(va + addr);
+ return OPTEE_SMC_RETURN_OK;
+ }
+ }
+ return OPTEE_SMC_RETURN_EBADADDR;
+}
+
+static void platform_tee_entry_fast(struct thread_smc_args *args)
+{
+ switch (args->a0) {
+ case ZYNQ7K_SMC_SLCR_WRITE:
+ args->a0 = write_slcr(args->a1, args->a2);
+ break;
+ case ZYNQ7K_SMC_SLCR_READ:
+ args->a0 = read_slcr(args->a1, &args->a2);
+ break;
+ default:
+ tee_entry_fast(args);
+ break;
+ }
+}
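The SLCR whitelist above is easiest to see with concrete offsets. Below is a standalone sketch of the same range check; the offsets are relative to SLCR_BASE, 0xA1C is the L2C_RAM register used in arm_cl2_config(), and 0x500 is just an arbitrary offset outside every window:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t ranges[] = {
	0x004, 0x008,	/* lock, unlock */
	0x100, 0x1FF,	/* PLL */
	0x200, 0x2FF,	/* Reset */
	0xA00, 0xAFF,	/* L2C */
};

static bool slcr_offset_allowed(uint32_t offset)
{
	size_t i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i += 2)
		if (offset >= ranges[i] && offset <= ranges[i + 1])
			return true;
	return false;
}

int main(void)
{
	printf("0xA1C allowed: %d\n", slcr_offset_allowed(0xA1C)); /* 1 */
	printf("0x500 allowed: %d\n", slcr_offset_allowed(0x500)); /* 0 */
	return 0;
}

Offsets outside the windows are refused, which is what makes the fast SMC return OPTEE_SMC_RETURN_EBADADDR in the handlers above.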
diff --git a/core/arch/arm/plat-zynq7k/plat_init.S b/core/arch/arm/plat-zynq7k/plat_init.S
new file mode 100644
index 0000000..6d99a30
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/plat_init.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Entry points for the A9 inits, A9 revision specific or not.
+ * It is assumed that no stack is available when these routines are called.
+ * It is assumed that each routine is called with the return address in LR
+ * and with ARM registers R0, R1, R2 and R3 being scratchable.
+ */
+
+#include <arm32.h>
+#include <arm32_macros.S>
+#include <arm32_macros_cortex_a9.S>
+#include <asm.S>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+#define ZYNQ_SLCR_L2C_RAM 0xF8000A1C
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * Cortex A9 early configuration
+ *
+ * Use registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Trap CPU in case of error.
+ */
+FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
+
+ /*
+ * Disallow NSec to mask FIQ [bit4: FW=0]
+ * Allow NSec to manage Imprecise Abort [bit5: AW=1]
+ * Imprecise Abort trapped to Abort Mode [bit3: EA=0]
+ * In Sec world, FIQ trapped to FIQ Mode [bit2: FIQ=0]
+ * IRQ always trapped to IRQ Mode [bit1: IRQ=0]
+ * Secure World [bit0: NS=0]
+ */
+ mov r0, #SCR_AW
+ write_scr r0 /* write Secure Configuration Register */
+
+ /*
+ * Mandated HW config loaded
+ *
+ * SCTLR = 0x00004000
+ * - Round-Robin replac. for icache, btac, i/duTLB (bit14: RoundRobin)
+ *
+ * ACTRL = 0x00000041
+ * - core always in full SMP (FW bit0=1, SMP bit6=1)
+ * - L2 write full line of zero disabled (bit3=0)
+ * (keep WFLZ low. Will be set once outer L2 is ready)
+ *
+ * NSACR = 0x00020C00
+ * - NSec cannot change ACTRL.SMP (NS_SMP bit18=0)
+ * - Nsec can lockdown TLB (TL bit17=1)
+ * - NSec cannot access PLE (PLE bit16=0)
+ * - NSec can use SIMD/VFP (CP10/CP11) (bit15:14=2b00, bit11:10=2b11)
+ *
+ * PCR = 0x00000001
+ * - no change latency, enable clk gating
+ */
+ movw r0, #0x4000
+ movt r0, #0x0000
+ write_sctlr r0
+
+ movw r0, #0x0041
+ movt r0, #0x0000
+ write_actlr r0
+
+ movw r0, #0x0FFF
+ movt r0, #0x0002
+ write_nsacr r0
+
+ movw r0, #0x0000
+ movt r0, #0x0001
+ write_pcr r0
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC plat_cpu_reset_early
diff --git a/core/arch/arm/plat-zynq7k/platform_config.h b/core/arch/arm/plat-zynq7k/platform_config.h
new file mode 100644
index 0000000..c970048
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/platform_config.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2016, Wind River Systems.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+#define STACK_ALIGNMENT 64
+
+/* For Zynq7000 board */
+
+#define SCU_BASE 0xF8F00000
+#define PL310_BASE 0xF8F02000
+#define GIC_BASE 0xF8F00000
+#define GICC_OFFSET 0x100
+#define GICD_OFFSET 0x1000
+#define GIC_CPU_BASE (GIC_BASE + GICC_OFFSET)
+#define GIC_DIST_BASE (GIC_BASE + GICD_OFFSET)
+
+#define SLCR_BASE 0xF8000000
+#define SLCR_LOCK 0xF8000004
+#define SLCR_UNLOCK 0xF8000008
+#define SLCR_TZ_DDR_RAM 0xF8000430
+#define SLCR_TZ_DMA_NS 0xF8000440
+#define SLCR_TZ_DMA_IRQ_NS 0xF8000444
+#define SLCR_TZ_DMA_PERIPH_NS 0xF8000448
+#define SLCR_TZ_GEM 0xF8000450
+#define SLCR_TZ_SDIO 0xF8000454
+#define SLCR_TZ_USB 0xF8000458
+#define SLCR_L2C_RAM 0xF8000A1C
+
+#define SLCR_LOCK_MAGIC 0x0000767B
+#define SLCR_UNLOCK_MAGIC 0x0000DF0D
+
+#define SECURITY2_SDIO0 0xE0200008
+#define SECURITY3_SDIO1 0xE020000C
+#define SECURITY4_QSPI 0xE0200010
+#define SECURITY6_APB_SLAVES 0xE0200018
+
+#define UART0_BASE 0xE0000000
+#define UART1_BASE 0xE0001000
+
+#define CONSOLE_UART_BASE UART1_BASE
+
+#define DRAM0_BASE 0x00100000
+#define DRAM0_SIZE 0x3FF00000
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#define CFG_TEE_CORE_NB_CORE 2
+
+#define DDR_PHYS_START DRAM0_BASE
+#define DDR_SIZE DRAM0_SIZE
+
+#define CFG_DDR_START DDR_PHYS_START
+#define CFG_DDR_SIZE DDR_SIZE
+
+/*
+ * PL310 TAG RAM Control Register
+ *
+ * bit[10:8]:1 - 2 cycles of write access latency
+ * bit[6:4]:1 - 2 cycles of read access latency
+ * bit[2:0]:1 - 2 cycles of setup latency
+ */
+#ifndef PL310_TAG_RAM_CTRL_INIT
+#define PL310_TAG_RAM_CTRL_INIT 0x00000111
+#endif
+
+/*
+ * PL310 DATA RAM Control Register
+ *
+ * bit[10:8]:2 - 3 cycles of write access latency
+ * bit[6:4]:2 - 3 cycles of read access latency
+ * bit[2:0]:2 - 3 cycles of setup latency
+ */
+#ifndef PL310_DATA_RAM_CTRL_INIT
+#define PL310_DATA_RAM_CTRL_INIT 0x00000222
+#endif
+
+/*
+ * PL310 Auxiliary Control Register
+ *
+ * I/Dcache prefetch enabled (bit29:28=2b11)
+ * NS can access interrupts (bit27=1)
+ * NS can lockdown cache lines (bit26=1)
+ * Pseudo-random replacement policy (bit25=0)
+ * Force write allocated (default)
+ * Shared attribute internally ignored (bit22=1, bit13=0)
+ * Parity disabled (bit21=0)
+ * Event monitor disabled (bit20=0)
+ * Platform flavor specific way config:
+ * - 64kb way size (bit19:17=3b011)
+ * - 8-way associativity (bit16=0)
+ * Store buffer device limitation enabled (bit11=1)
+ * Cacheable accesses have high prio (bit10=0)
+ * Full Line Zero (FLZ) disabled (bit0=0)
+ */
+#ifndef PL310_AUX_CTRL_INIT
+#define PL310_AUX_CTRL_INIT 0x3C460800
+#endif
+
+/*
+ * PL310 Prefetch Control Register
+ *
+ * Double linefill disabled (bit30=0)
+ * I/D prefetch enabled (bit29:28=2b11)
+ * Prefetch drop enabled (bit24=1)
+ * Incr double linefill disable (bit23=0)
+ * Prefetch offset = 7 (bit4:0)
+ */
+#define PL310_PREFETCH_CTRL_INIT 0x31000007
+
+/*
+ * PL310 Power Register
+ *
+ * Dynamic clock gating enabled
+ * Standby mode enabled
+ */
+#define PL310_POWER_CTRL_INIT 0x00000003
+
+/*
+ * SCU Invalidate Register
+ *
+ * Invalidate all ways for all CPUs
+ */
+#define SCU_INV_CTRL_INIT 0xFFFFFFFF
+
+/*
+ * SCU Access Register
+ * - both secure CPUs can access the SCU
+ */
+#define SCU_SAC_CTRL_INIT 0x0000000F
+
+/*
+ * SCU NonSecure Access Register
+ * - both non-secure CPUs can access the SCU, private and global timers
+ */
+#define SCU_NSAC_CTRL_INIT 0x00000FFF
+
+/* all bits enabled in access control register */
+#define ACCESS_BITS_ALL 0xFFFFFFFF
+
+/* recommended value for setting the L2C_RAM register */
+#define SLCR_L2C_RAM_VALUE 0x00020202
+
+/* place in OCRAM to write secondary entry to */
+#define SECONDARY_ENTRY_DROP 0xFFFFFFF0
+
+/* define the memory areas */
+
+#ifdef CFG_WITH_PAGER
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +---------------------------------------+ <- CFG_CORE_TZSRAM_EMUL_START
+ * | TEE private highly | TEE_RAM | ^
+ * | secure memory | | | CFG_CORE_TZSRAM_EMUL_SIZE
+ * +---------------------------------------+ v
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TA_RAM | ^
+ * | external memory | | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : default 256kByte
+ * TA_RAM : all that is left in the DDR TEE reserved area
+ * PUB_RAM : default 1MByte (CFG_PUB_RAM_SIZE)
+ */
+
+/* emulated SRAM, 256K at start of secure DDR */
+
+#define CFG_CORE_TZSRAM_EMUL_START 0x3E000000
+
+#define TZSRAM_BASE CFG_CORE_TZSRAM_EMUL_START
+#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE
+
+/* Location of trusted dram */
+
+#define CFG_DDR_TEETZ_RESERVED_START 0x3E100000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x01F00000
+
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+
+#define TZDRAM_BASE (CFG_DDR_TEETZ_RESERVED_START)
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_SHMEM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
+
+#define CFG_TA_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_SIZE TZDRAM_SIZE
+
+#define CFG_TEE_RAM_START TZSRAM_BASE
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR TZSRAM_BASE
+#endif
+
+#else /* CFG_WITH_PAGER */
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +---------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEE private secure | TEE_RAM | ^
+ * | external memory +------------------+ |
+ * | | TA_RAM | |
+ * +---------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | Non secure | SHM | |
+ * | shared memory | | |
+ * +---------------------------------------+ v
+ *
+ * TEE_RAM : 1MByte
+ * PUB_RAM : 1MByte
+ * TA_RAM : all that is left (at least 2MByte!)
+ */
+
+#define CFG_DDR_TEETZ_RESERVED_START 0x3E000000
+#define CFG_DDR_TEETZ_RESERVED_SIZE 0x02000000
+
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_PH_SIZE (1 * 1024 * 1024)
+
+#define TZDRAM_BASE (CFG_DDR_TEETZ_RESERVED_START)
+#define TZDRAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_TA_RAM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ CFG_TEE_RAM_PH_SIZE)
+#define CFG_TA_RAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_TEE_RAM_PH_SIZE - \
+ CFG_PUB_RAM_SIZE)
+
+#define CFG_SHMEM_START (CFG_DDR_TEETZ_RESERVED_START + \
+ TZDRAM_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
+
+#define CFG_TEE_RAM_START TZDRAM_BASE
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR TZDRAM_BASE
+#endif
+
+#endif /* CFG_WITH_PAGER */
+
+#endif /*PLATFORM_CONFIG_H*/
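A quick numeric check of the non-pager layout above, evaluating the macros with the values from this file (nothing else is assumed). The resulting end address 0x40000000 coincides with DRAM0_BASE + DRAM0_SIZE, so the shared memory really is the last MiB of DRAM0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t resv_start = 0x3E000000, resv_size = 0x02000000;
	uint32_t pub_ram = 0x00100000, tee_ram = 0x00100000;
	uint32_t tzdram_size = resv_size - pub_ram;		/* 0x01F00000 */
	uint32_t ta_ram_start = resv_start + tee_ram;		/* 0x3E100000 */
	uint32_t ta_ram_size = resv_size - tee_ram - pub_ram;	/* 0x01E00000 */
	uint32_t shmem_start = resv_start + tzdram_size;	/* 0x3FF00000 */

	printf("TA_RAM 0x%08x size 0x%08x\n", ta_ram_start, ta_ram_size);
	printf("SHM    0x%08x..0x%08x\n", shmem_start, shmem_start + pub_ram);
	return 0;
}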
diff --git a/core/arch/arm/plat-zynq7k/platform_smc.h b/core/arch/arm/plat-zynq7k/platform_smc.h
new file mode 100644
index 0000000..ac6bc33
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/platform_smc.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, Wind River System
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef PLATFORM_SMC_H
+#define PLATFORM_SMC_H
+
+#include <sm/optee_smc.h>
+
+/*
+ * Read SLCR (System Level Control Register)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, ZYNQ7K_SMC_SLCR_READ
+ * a1 Register offset
+ * a2-7 Not used
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Value read back
+ * a2-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADADDR on invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADADDR
+ * a1 Undefined value
+ * a2-3 Not used
+ * a4-7 Preserved
+ */
+#define ZYNQ7K_SMC_FUNCID_SLCR_READ 0x100
+#define ZYNQ7K_SMC_SLCR_READ \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_OEM, ZYNQ7K_SMC_FUNCID_SLCR_READ)
+
+/*
+ * Write SLCR (System Level Control Register)
+ *
+ * Call register usage:
+ * a0	SMC Function ID, ZYNQ7K_SMC_SLCR_WRITE
+ * a1 Register offset
+ * a2 Value to write
+ * a3-7 Not used
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADADDR on invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADADDR
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define ZYNQ7K_SMC_FUNCID_SLCR_WRITE 0x101
+#define ZYNQ7K_SMC_SLCR_WRITE \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_OEM, ZYNQ7K_SMC_FUNCID_SLCR_WRITE)
+
+#endif /* PLATFORM_SMC_H */
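For reference, the identifiers above are composed by OPTEE_SMC_CALL_VAL according to the ARM SMC Calling Convention: fast-call bit set, 32-bit convention, OEM owner, function number 0x100 or 0x101. A small sketch of that composition follows; the OEM owner number 3 comes from the SMCCC and is stated here as an assumption about optee_smc.h rather than quoted from it:

#include <stdint.h>
#include <stdio.h>

#define SMC_FAST_CALL	0x80000000u
#define SMC_OWNER_OEM	3u	/* assumed OPTEE_SMC_OWNER_OEM */

static uint32_t smc_call_val(uint32_t owner, uint32_t funcid)
{
	/* SMC32 calling convention: bit 30 stays clear */
	return SMC_FAST_CALL | (owner << 24) | funcid;
}

int main(void)
{
	printf("SLCR_READ  = 0x%08x\n", smc_call_val(SMC_OWNER_OEM, 0x100));
	printf("SLCR_WRITE = 0x%08x\n", smc_call_val(SMC_OWNER_OEM, 0x101));
	return 0;
}

With those assumptions the read and write calls resolve to 0x83000100 and 0x83000101, which is what the normal world passes in a0 when issuing the SMC.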
diff --git a/core/arch/arm/plat-zynq7k/sub.mk b/core/arch/arm/plat-zynq7k/sub.mk
new file mode 100644
index 0000000..652c084
--- /dev/null
+++ b/core/arch/arm/plat-zynq7k/sub.mk
@@ -0,0 +1,3 @@
+global-incdirs-y += .
+srcs-y += main.c
+srcs-y += plat_init.S
diff --git a/core/arch/arm/plat-zynqmp/conf.mk b/core/arch/arm/plat-zynqmp/conf.mk
new file mode 100644
index 0000000..67570bc
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/conf.mk
@@ -0,0 +1,29 @@
+PLATFORM_FLAVOR ?= zcu102
+
+# 32-bit flags
+arm32-platform-cpuarch := cortex-a53
+arm32-platform-cflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mcpu=$(arm32-platform-cpuarch)
+arm32-platform-aflags += -mfpu=neon
+
+$(call force,CFG_CDNS_UART,y)
+$(call force,CFG_GENERIC_BOOT,y)
+$(call force,CFG_GIC,y)
+$(call force,CFG_HWSUPP_MEM_PERM_PXN,y)
+$(call force,CFG_PM_STUBS,y)
+$(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
+$(call force,CFG_WITH_ARM_TRUSTED_FW,y)
+
+ta-targets = ta_arm32
+
+ifeq ($(CFG_ARM64_core),y)
+$(call force,CFG_WITH_LPAE,y)
+ta-targets += ta_arm64
+else
+$(call force,CFG_ARM32_core,y)
+endif
+
+CFG_TEE_FS_KEY_MANAGER_TEST ?= y
+CFG_WITH_STACK_CANARIES ?= y
+CFG_WITH_STATS ?= y
+CFG_CRYPTO_WITH_CE ?= y
diff --git a/core/arch/arm/plat-zynqmp/kern.ld.S b/core/arch/arm/plat-zynqmp/kern.ld.S
new file mode 100644
index 0000000..8d794ee
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/kern.ld.S
@@ -0,0 +1 @@
+#include "../kernel/kern.ld.S"
diff --git a/core/arch/arm/plat-zynqmp/link.mk b/core/arch/arm/plat-zynqmp/link.mk
new file mode 100644
index 0000000..448ab89
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/link.mk
@@ -0,0 +1 @@
+include core/arch/arm/kernel/link.mk
diff --git a/core/arch/arm/plat-zynqmp/main.c b/core/arch/arm/plat-zynqmp/main.c
new file mode 100644
index 0000000..31b8475
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/main.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016, Xilinx Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <stdint.h>
+#include <string.h>
+
+#include <drivers/gic.h>
+#include <drivers/cdns_uart.h>
+
+#include <arm.h>
+#include <console.h>
+#include <kernel/generic_boot.h>
+#include <kernel/pm_stubs.h>
+#include <kernel/misc.h>
+#include <kernel/tee_time.h>
+#include <mm/core_memprot.h>
+#include <tee/entry_fast.h>
+#include <tee/entry_std.h>
+#include <trace.h>
+
+static void main_fiq(void);
+static struct gic_data gic_data;
+
+static const struct thread_handlers handlers = {
+ .std_smc = tee_entry_std,
+ .fast_smc = tee_entry_fast,
+ .fiq = main_fiq,
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+ .cpu_on = cpu_on_handler,
+ .cpu_off = pm_do_nothing,
+ .cpu_suspend = pm_do_nothing,
+ .cpu_resume = pm_do_nothing,
+ .system_off = pm_do_nothing,
+ .system_reset = pm_do_nothing,
+#else
+ .cpu_on = pm_panic,
+ .cpu_off = pm_panic,
+ .cpu_suspend = pm_panic,
+ .cpu_resume = pm_panic,
+ .system_off = pm_panic,
+ .system_reset = pm_panic,
+#endif
+};
+
+const struct thread_handlers *generic_boot_get_handlers(void)
+{
+ return &handlers;
+}
+
+void main_init_gic(void)
+{
+ vaddr_t gicc_base, gicd_base;
+
+ gicc_base = (vaddr_t)phys_to_virt(GIC_BASE + GICC_OFFSET,
+ MEM_AREA_IO_SEC);
+ gicd_base = (vaddr_t)phys_to_virt(GIC_BASE + GICD_OFFSET,
+ MEM_AREA_IO_SEC);
+ /* On ARMv8, GIC configuration is initialized in ARM-TF */
+ gic_init_base_addr(&gic_data, gicc_base, gicd_base);
+}
+
+static void main_fiq(void)
+{
+ gic_it_handle(&gic_data);
+}
+
+static vaddr_t console_base(void)
+{
+ static void *va;
+
+ if (cpu_mmu_enabled()) {
+ if (!va)
+ va = phys_to_virt(CONSOLE_UART_BASE, MEM_AREA_IO_SEC);
+ return (vaddr_t)va;
+ }
+
+ return CONSOLE_UART_BASE;
+}
+
+void console_init(void)
+{
+ cdns_uart_init(console_base(), CONSOLE_UART_CLK_IN_HZ,
+ CONSOLE_BAUDRATE);
+}
+
+void console_putc(int ch)
+{
+ if (ch == '\n')
+ cdns_uart_putc('\r', console_base());
+ cdns_uart_putc(ch, console_base());
+}
+
+void console_flush(void)
+{
+ cdns_uart_flush(console_base());
+}
diff --git a/core/arch/arm/plat-zynqmp/platform_config.h b/core/arch/arm/plat-zynqmp/platform_config.h
new file mode 100644
index 0000000..91c0f82
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/platform_config.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016, Xilinx Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PLATFORM_CONFIG_H
+#define PLATFORM_CONFIG_H
+
+/* Make stacks aligned to data cache line length */
+#define STACK_ALIGNMENT 64
+
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for zynqmp"
+#endif
+
+#if defined(PLATFORM_FLAVOR_zc1751_dc1) || \
+ defined(PLATFORM_FLAVOR_zc1751_dc2) || \
+ defined(PLATFORM_FLAVOR_zcu102)
+
+#define GIC_BASE 0xF9010000
+#define UART0_BASE 0xFF000000
+#define UART1_BASE 0xFF001000
+
+#define IT_UART0 53
+#define IT_UART1 54
+
+#define UART0_CLK_IN_HZ 100000000
+#define UART1_CLK_IN_HZ 100000000
+#define CONSOLE_UART_BASE UART0_BASE
+#define IT_CONSOLE_UART IT_UART0
+#define CONSOLE_UART_CLK_IN_HZ UART0_CLK_IN_HZ
+
+#define DRAM0_BASE 0
+#define DRAM0_SIZE 0x80000000
+
+/* Location of trusted dram */
+#define TZDRAM_BASE 0x60000000
+#define TZDRAM_SIZE 0x10000000
+
+#define CFG_SHMEM_START 0x70000000
+#define CFG_SHMEM_SIZE 0x10000000
+
+#define GICD_OFFSET 0
+#define GICC_OFFSET 0x20000
+
+#else
+#error "Unknown platform flavor"
+#endif
+
+#define CFG_TEE_CORE_NB_CORE 4
+
+#define CFG_TEE_RAM_VA_SIZE (1024 * 1024)
+
+#ifndef CFG_TEE_LOAD_ADDR
+#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
+#endif
+
+/*
+ * Assumes that either TZSRAM isn't large enough or TZSRAM doesn't exist;
+ * everything is in TZDRAM.
+ * +------------------+
+ * | | TEE_RAM |
+ * + TZDRAM +---------+
+ * | | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
+#define CFG_TEE_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+
+
+#define DEVICE0_PA_BASE ROUNDDOWN(CONSOLE_UART_BASE, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_VA_BASE DEVICE0_PA_BASE
+#define DEVICE0_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE0_TYPE MEM_AREA_IO_SEC
+
+#define DEVICE1_PA_BASE ROUNDDOWN(GIC_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_VA_BASE DEVICE1_PA_BASE
+#define DEVICE1_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE1_TYPE MEM_AREA_IO_SEC
+
+#define DEVICE2_PA_BASE ROUNDDOWN(GIC_BASE + GICD_OFFSET, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_VA_BASE DEVICE2_PA_BASE
+#define DEVICE2_SIZE CORE_MMU_DEVICE_SIZE
+#define DEVICE2_TYPE MEM_AREA_IO_SEC
+
+#ifndef UART_BAUDRATE
+#define UART_BAUDRATE 115200
+#endif
+#ifndef CONSOLE_BAUDRATE
+#define CONSOLE_BAUDRATE UART_BAUDRATE
+#endif
+
+/* For virtual platforms where there isn't a clock */
+#ifndef CONSOLE_UART_CLK_IN_HZ
+#define CONSOLE_UART_CLK_IN_HZ 1
+#endif
+
+#endif /*PLATFORM_CONFIG_H*/
diff --git a/core/arch/arm/plat-zynqmp/sub.mk b/core/arch/arm/plat-zynqmp/sub.mk
new file mode 100644
index 0000000..8ddc2fd
--- /dev/null
+++ b/core/arch/arm/plat-zynqmp/sub.mk
@@ -0,0 +1,2 @@
+global-incdirs-y += .
+srcs-y += main.c
diff --git a/core/arch/arm/pta/core_self_tests.c b/core/arch/arm/pta/core_self_tests.c
new file mode 100644
index 0000000..639b8a6
--- /dev/null
+++ b/core/arch/arm/pta/core_self_tests.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <malloc.h>
+#include <stdbool.h>
+#include <trace.h>
+#include "core_self_tests.h"
+
+/*
+ * Define the LOG() macro to enable or disable the self-test traces, e.g.:
+ *
+ * #define LOG DMSG_RAW
+ * #define LOG(...)
+ */
+#define LOG(...)
+
+static int self_test_division(void);
+static int self_test_malloc(void);
+
+/* exported entry points for some basic test */
+TEE_Result core_self_tests(uint32_t nParamTypes __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+ if (self_test_division() || self_test_malloc()) {
+ EMSG("some self_test_xxx failed! you should enable local LOG");
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+
+/* test division support. resulting trace shall be manually checked */
+static int self_test_division(void)
+{
+ signed a, b, c, d;
+ bool r;
+ int ret = 0;
+
+ LOG("");
+ LOG("division tests (division and modulo):");
+ /* get some unpredictable values to defeat compiler optimizations: */
+ /* => use the stack address */
+
+ LOG("- test with unsigned small integers:");
+ a = (signed)((unsigned)(vaddr_t)&a & 0xFFFFF);
+ b = (signed)((unsigned)(vaddr_t)&b & 0x00FFF) + 1;
+ c = a / b;
+ d = a % b;
+ r = ((b * c + d) == a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with signed small integers, negative numerator:");
+ a = (signed)(vaddr_t)&a;
+ b = (signed)((unsigned)(vaddr_t)&b & 0x00FFF) - 1;
+ c = a / b;
+ d = a % b;
+ r = ((b * c + d) == a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %d / %d = %d = 0x%x)",
+ (unsigned)a, (unsigned)b, (signed)a, (signed)b, (signed)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %d %% %d = %d = 0x%x)", (unsigned)a,
+ (unsigned)b, (signed)a, (signed)b, (signed)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with signed small integers, negative denominator:");
+ a = (signed)((unsigned)(vaddr_t)&a & 0xFFFFF);
+ b = -(signed)((unsigned)(vaddr_t)&b & 0x00FFF) + 1;
+ c = a / b;
+ d = a % b;
+
+ LOG("- test with unsigned integers, big numerator (> 0x80000000):");
+ a = (signed)(vaddr_t)&a;
+ b = (signed)((unsigned)(vaddr_t)&b & 0x00FFF) + 1;
+ c = (signed)((unsigned)a / (unsigned)b);
+ d = (signed)((unsigned)a % (unsigned)b);
+ r = (((unsigned)b * (unsigned)c + (unsigned)d) == (unsigned)a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with unsigned integers, big num. & denom. (> 0x80000000):");
+ a = (signed)(vaddr_t)&a;
+ b = (signed)((unsigned)(vaddr_t)&a - 1);
+ c = (signed)((unsigned)a / (unsigned)b);
+ d = (signed)((unsigned)a % (unsigned)b);
+ r = (((unsigned)b * (unsigned)c + (unsigned)d) == (unsigned)a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ return ret;
+}
+
+/* test malloc support. resulting trace shall be manually checked */
+static int self_test_malloc(void)
+{
+ char *p1 = NULL, *p2 = NULL;
+ int *p3 = NULL, *p4 = NULL;
+ bool r;
+ int ret = 0;
+
+ LOG("malloc tests (malloc, free, calloc, realloc, memalign):");
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ /* test malloc */
+ p1 = malloc(1024);
+ LOG("- p1 = malloc(1024)");
+ p2 = malloc(1024);
+ LOG("- p2 = malloc(1024)");
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ r = (p1 && p2 && malloc_buffer_is_within_alloced(p1, 1024) &&
+ !malloc_buffer_is_within_alloced(p1 + 25, 1000) &&
+ !malloc_buffer_is_within_alloced(p1 - 25, 500) &&
+ malloc_buffer_overlaps_heap(p1 - 25, 500));
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+
+ /* test realloc */
+ p1 = realloc(p1, 3 * 1024);
+ LOG("- p1 = realloc(p1, 3*1024)");
+ LOG("- free p2");
+ free(p2);
+ p2 = malloc(1024);
+ LOG("- p2 = malloc(1024)");
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ r = (p1 && p2);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p1, p2");
+ free(p1);
+ free(p2);
+ p1 = NULL;
+ p2 = NULL;
+
+ /* test calloc */
+ p3 = calloc(4, 1024);
+ p4 = calloc(0x100, 1024 * 1024);
+ LOG("- p3 = calloc(4, 1024)");
+ LOG("- p4 = calloc(0x100, 1024*1024) too big: should fail!");
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ r = (p3 && !p4);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p3, p4");
+ free(p3);
+ free(p4);
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test memalign */
+ p3 = memalign(0x1000, 1024);
+ LOG("- p3 = memalign(%d, 1024)", 0x1000);
+ p1 = malloc(1024);
+ LOG("- p1 = malloc(1024)");
+ p4 = memalign(0x100, 512);
+ LOG("- p4 = memalign(%d, 512)", 0x100);
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ r = (p1 && p3 && p4 &&
+ !((vaddr_t)p3 % 0x1000) && !((vaddr_t)p4 % 0x100));
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p1, p3, p4");
+ free(p1);
+ free(p3);
+ free(p4);
+ p1 = NULL;
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test memalign with invalid alignments */
+ p3 = memalign(100, 1024);
+ LOG("- p3 = memalign(%d, 1024)", 100);
+ p4 = memalign(0, 1024);
+ LOG("- p4 = memalign(%d, 1024)", 0);
+ LOG(" p1=%p p2=%p p3=%p p4=%p",
+ (void *)p1, (void *)p2, (void *)p3, (void *)p4);
+ r = (!p3 && !p4);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p3, p4");
+ free(p3);
+ free(p4);
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test free(NULL) */
+ LOG("- free NULL");
+ free(NULL);
+ LOG("");
+ LOG("malloc test done");
+
+ return ret;
+}
diff --git a/core/arch/arm/pta/core_self_tests.h b/core/arch/arm/pta/core_self_tests.h
new file mode 100644
index 0000000..ed98669
--- /dev/null
+++ b/core/arch/arm/pta/core_self_tests.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_SELF_TESTS_H
+#define CORE_SELF_TESTS_H
+
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+
+/* basic run-time tests */
+TEE_Result core_self_tests(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS]);
+
+#endif /*CORE_SELF_TESTS_H*/
diff --git a/core/arch/arm/pta/gprof.c b/core/arch/arm/pta/gprof.c
new file mode 100644
index 0000000..20ff366
--- /dev/null
+++ b/core/arch/arm/pta/gprof.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/misc.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/user_ta.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mmu.h>
+#include <optee_msg_supplicant.h>
+#include <pta_gprof.h>
+#include <stdlib.h>
+#include <string.h>
+
+static TEE_Result gprof_send_rpc(TEE_UUID *uuid, void *buf, size_t len,
+ uint32_t *id)
+{
+ struct optee_msg_param params[3];
+ TEE_Result res = TEE_ERROR_GENERIC;
+ uint64_t c = 0;
+ paddr_t pa;
+ char *va;
+
+ thread_rpc_alloc_payload(sizeof(*uuid) + len, &pa, &c);
+ if (!pa)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ va = phys_to_virt(pa, MEM_AREA_NSEC_SHM);
+ if (!va)
+ goto exit;
+
+ memcpy(va, uuid, sizeof(*uuid));
+ memcpy(va + sizeof(*uuid), buf, len);
+
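+ /*
+ * RPC argument layout: params[0] carries the caller-provided id
+ * (value in/out), params[1] references the TA UUID and params[2] the
+ * profiling data, both pointing into the shared buffer filled above.
+ */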
+ memset(params, 0, sizeof(params));
+ params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INOUT;
+ params[0].u.value.a = *id;
+
+ params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ params[1].u.tmem.buf_ptr = pa;
+ params[1].u.tmem.size = sizeof(*uuid);
+ params[1].u.tmem.shm_ref = c;
+
+ params[2].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ params[2].u.tmem.buf_ptr = pa + sizeof(*uuid);
+ params[2].u.tmem.size = len;
+ params[2].u.tmem.shm_ref = c;
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_GPROF, 3, params);
+ if (res != TEE_SUCCESS)
+ goto exit;
+
+ *id = (uint32_t)params[0].u.value.a;
+exit:
+ thread_rpc_free_payload(c);
+ return res;
+}
+
+static TEE_Result gprof_send(struct tee_ta_session *s,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INOUT,
+ TEE_PARAM_TYPE_MEMREF_INPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+
+ if (exp_pt != param_types)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ return gprof_send_rpc(&s->ctx->uuid, params[1].memref.buffer,
+ params[1].memref.size, &params[0].value.a);
+}
+
+static TEE_Result gprof_start_pc_sampling(struct tee_ta_session *s,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
+ TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+ struct sample_buf *sbuf;
+ uint32_t offset;
+ uint32_t scale;
+ TEE_Result res;
+ uint32_t len;
+ uaddr_t buf;
+
+ if (exp_pt != param_types)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ buf = (uaddr_t)params[0].memref.buffer;
+ len = params[0].memref.size;
+ offset = params[1].value.a;
+ scale = params[1].value.b;
+
+ res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx),
+ TEE_MEMORY_ACCESS_WRITE |
+ TEE_MEMORY_ACCESS_ANY_OWNER,
+ buf, len);
+ if (res != TEE_SUCCESS)
+ return res;
+ sbuf = calloc(1, sizeof(*sbuf));
+ if (!sbuf)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ sbuf->samples = (uint16_t *)buf;
+ sbuf->nsamples = len / sizeof(*sbuf->samples);
+ sbuf->offset = offset;
+ sbuf->scale = scale;
+ sbuf->freq = read_cntfrq();
+ sbuf->enabled = true;
+ s->sbuf = sbuf;
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result gprof_stop_pc_sampling(struct tee_ta_session *s,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+ struct sample_buf *sbuf;
+ uint32_t rate;
+
+ if (exp_pt != param_types)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ sbuf = s->sbuf;
+ if (!sbuf)
+ return TEE_ERROR_BAD_STATE;
+ assert(sbuf->samples);
+
+ /* Stop sampling */
+ if (sbuf->enabled)
+ sbuf->enabled = false;
+
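+ /*
+ * Average sampling rate reported back to the TA: sample count divided
+ * by user-mode execution time, where usr is assumed to be expressed in
+ * cntfrq ticks (hence the multiplication by freq).
+ */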
+ rate = ((uint64_t)sbuf->count * sbuf->freq) / sbuf->usr;
+ params[0].value.a = rate;
+
+ DMSG("TA sampling stats: sample count=%" PRIu32 " user time=%" PRIu64
+ " cntfrq=%" PRIu32 " rate=%" PRIu32, sbuf->count, sbuf->usr,
+ sbuf->freq, rate);
+
+ free(sbuf);
+ s->sbuf = NULL;
+
+ return TEE_SUCCESS;
+}
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result open_session(uint32_t param_types __unused,
+ TEE_Param params[TEE_NUM_PARAMS] __unused,
+ void **sess_ctx __unused)
+{
+ struct tee_ta_session *s;
+
+ /* Check that we're called from a user TA */
+ s = tee_ta_get_calling_session();
+ if (!s)
+ return TEE_ERROR_ACCESS_DENIED;
+ if (is_pseudo_ta_ctx(s->ctx))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result invoke_command(void *sess_ctx __unused, uint32_t cmd_id,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ struct tee_ta_session *s = tee_ta_get_calling_session();
+
+ switch (cmd_id) {
+ case PTA_GPROF_SEND:
+ return gprof_send(s, param_types, params);
+ case PTA_GPROF_START_PC_SAMPLING:
+ return gprof_start_pc_sampling(s, param_types, params);
+ case PTA_GPROF_STOP_PC_SAMPLING:
+ return gprof_stop_pc_sampling(s, param_types, params);
+ default:
+ break;
+ }
+ return TEE_ERROR_NOT_IMPLEMENTED;
+}
+
+pseudo_ta_register(.uuid = PTA_GPROF_UUID, .name = "gprof",
+ .flags = PTA_DEFAULT_FLAGS,
+ .open_session_entry_point = open_session,
+ .invoke_command_entry_point = invoke_command);
diff --git a/core/arch/arm/pta/interrupt_tests.c b/core/arch/arm/pta/interrupt_tests.c
new file mode 100644
index 0000000..bc307a8
--- /dev/null
+++ b/core/arch/arm/pta/interrupt_tests.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <assert.h>
+#include <keep.h>
+#include <kernel/interrupt.h>
+#include <kernel/misc.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/tee_time.h>
+#include <kernel/thread.h>
+#include <platform_config.h>
+#include <string.h>
+#include <tee/tee_cryp_provider.h>
+#include <trace.h>
+
+#define TA_NAME "interrupt_tests.ta"
+
+#define INTERRUPT_TESTS_UUID \
+ { 0x48d58475, 0x3d5e, 0x4202, \
+ { 0xa7, 0x75, 0x97, 0x85, 0xd2, 0x0f, 0x78, 0xae } }
+
+#define CMD_INTERRUPT_TESTS 0
+
+#define SGI_NUM 16
+#define PPI_NUM 32
+
+#ifndef TEST_SGI_ID
+#define TEST_SGI_ID 11
+#endif
+#ifndef TEST_PPI_ID
+#define TEST_PPI_ID 29
+#endif
+#ifndef TEST_SPI_ID
+#define TEST_SPI_ID 61
+#endif
+#ifndef TEST_TIMES
+#define TEST_TIMES 3
+#endif
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static size_t test_sgi_value[CFG_TEE_CORE_NB_CORE];
+static size_t test_spi_value[CFG_TEE_CORE_NB_CORE];
+static size_t test_ppi_value[CFG_TEE_CORE_NB_CORE];
+static size_t expect_sgi_value[CFG_TEE_CORE_NB_CORE];
+static size_t expect_spi_value[CFG_TEE_CORE_NB_CORE];
+static size_t expect_ppi_value[CFG_TEE_CORE_NB_CORE];
+
+static enum itr_return __maybe_unused ihandler_ok(struct itr_handler *handler)
+{
+ size_t core_num = get_core_pos();
+
+ assert(core_num < CFG_TEE_CORE_NB_CORE);
+
+ if (handler->it < SGI_NUM)
+ test_sgi_value[core_num]++;
+ else if (handler->it < PPI_NUM)
+ test_ppi_value[core_num]++;
+ else
+ test_spi_value[core_num]++;
+
+ return ITRR_HANDLED;
+}
+KEEP_PAGER(ihandler_ok);
+
+struct itr_handler sgi_handler = {
+ .it = TEST_SGI_ID,
+ .handler = ihandler_ok,
+};
+
+struct itr_handler spi_handler = {
+ .it = TEST_SPI_ID,
+ .handler = ihandler_ok,
+};
+
+struct itr_handler ppi_handler = {
+ .it = TEST_PPI_ID,
+ .handler = ihandler_ok,
+};
+
+static TEE_Result test_sgi(void)
+{
+ TEE_Result res;
+ uint8_t i;
+ uint8_t j;
+ uint8_t num;
+ uint8_t cpu_mask;
+
+ itr_add(&sgi_handler);
+ itr_enable(TEST_SGI_ID);
+
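+ /*
+ * First pass: target the SGI at every core at once, so each per-core
+ * counter is expected to advance by exactly one.
+ */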
+ for (i = 0; i < CFG_TEE_CORE_NB_CORE; i++)
+ expect_sgi_value[i]++;
+ itr_raise_sgi(TEST_SGI_ID,
+ (uint8_t)(SHIFT_U32(1, CFG_TEE_CORE_NB_CORE) - 1));
+ tee_time_wait(200);
+ if (memcmp(test_sgi_value, expect_sgi_value, sizeof(test_sgi_value)))
+ return TEE_ERROR_GENERIC;
+
+ for (i = 0; i < TEST_TIMES; i++) {
+ res = crypto_ops.prng.read(&num, 1);
+ if (res != TEE_SUCCESS)
+ return TEE_ERROR_GENERIC;
+ num = num % CFG_TEE_CORE_NB_CORE;
+ cpu_mask = 0x0;
+ for (j = 0; j < num; j++) {
+ expect_sgi_value[j]++;
+ cpu_mask |= (0x1 << j);
+ }
+ itr_raise_sgi(TEST_SGI_ID, cpu_mask);
+ tee_time_wait(200);
+ if (memcmp(test_sgi_value, expect_sgi_value,
+ sizeof(test_sgi_value)))
+ return TEE_ERROR_GENERIC;
+ }
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_spi(void)
+{
+ TEE_Result res;
+ uint8_t i;
+ uint8_t num;
+
+ itr_add(&spi_handler);
+ itr_enable(TEST_SPI_ID);
+
+ for (i = 0; i < TEST_TIMES; i++) {
+ res = crypto_ops.prng.read(&num, 1);
+ if (res != TEE_SUCCESS)
+ return TEE_ERROR_GENERIC;
+ num = num % CFG_TEE_CORE_NB_CORE;
+ expect_spi_value[num]++;
+ itr_set_affinity(TEST_SPI_ID, 0x1 << num);
+ itr_raise_pi(TEST_SPI_ID);
+ tee_time_wait(200);
+ if (memcmp(test_spi_value, expect_spi_value,
+ sizeof(test_spi_value)))
+ return TEE_ERROR_GENERIC;
+ }
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_ppi(void)
+{
+ uint32_t exceptions;
+
+ itr_add(&ppi_handler);
+ itr_enable(TEST_PPI_ID);
+
+ exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+ expect_ppi_value[get_core_pos()]++;
+ itr_raise_pi(TEST_PPI_ID);
+ thread_unmask_exceptions(exceptions);
+ tee_time_wait(200);
+ if (memcmp(test_ppi_value, expect_ppi_value, sizeof(test_ppi_value)))
+ return TEE_ERROR_GENERIC;
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result interrupt_tests(uint32_t nParamTypes __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+ TEE_Result res;
+
+ assert(crypto_ops.prng.read);
+
+ res = test_sgi();
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = test_spi();
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = test_ppi();
+ if (res != TEE_SUCCESS)
+ return res;
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result invoke_command(void *psess __unused,
+ uint32_t cmd, uint32_t ptypes,
+ TEE_Param params[4])
+{
+ TEE_Result res;
+ uint8_t i;
+
+ switch (cmd) {
+ case CMD_INTERRUPT_TESTS:
+ res = interrupt_tests(ptypes, params);
+ DMSG("test value: sgi spi ppi");
+ for (i = 0; i < CFG_TEE_CORE_NB_CORE; i++)
+ DMSG("------------[%zu] [%zu] [%zu]",
+ test_sgi_value[i], test_spi_value[i],
+ test_ppi_value[i]);
+ DMSG("expc value: sgi spi ppi");
+ for (i = 0; i < CFG_TEE_CORE_NB_CORE; i++)
+ DMSG("------------[%zu] [%zu] [%zu]",
+ expect_sgi_value[i], expect_spi_value[i],
+ expect_ppi_value[i]);
+ return res;
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = INTERRUPT_TESTS_UUID, .name = TA_NAME,
+ .flags = PTA_DEFAULT_FLAGS,
+ .invoke_command_entry_point = invoke_command);
diff --git a/core/arch/arm/pta/pta_self_tests.c b/core/arch/arm/pta/pta_self_tests.c
new file mode 100644
index 0000000..6472356
--- /dev/null
+++ b/core/arch/arm/pta/pta_self_tests.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <types_ext.h>
+#include <kernel/pseudo_ta.h>
+#include <trace.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include "core_self_tests.h"
+
+#define TA_NAME "sta_self_tests.ta"
+
+#define STA_SELF_TEST_UUID \
+ { 0xd96a5b40, 0xc3e5, 0x21e3, \
+ { 0x87, 0x94, 0x10, 0x02, 0xa5, 0xd5, 0xc6, 0x1b } }
+
+#define CMD_TRACE 0
+#define CMD_PARAMS 1
+#define CMD_SELF_TESTS 2
+
+static TEE_Result test_trace(uint32_t param_types __unused,
+ TEE_Param params[TEE_NUM_PARAMS] __unused)
+{
+ IMSG("pseudo TA \"%s\" says \"Hello world !\"", TA_NAME);
+
+ return TEE_SUCCESS;
+}
+
+/*
+ * Supported tests on parameters
+ * (I, J, K, L refer to param index)
+ *
+ * Case 1: command parameters type are: 1 in/out value, 3 empty.
+ * => process outI.a = inI.a + inI.b
+ * Case 2: command parameters type are: 3 input value, 1 output value
+ * => process = outI.a = inJ.a + inK.a + inL.a
+ * Case 3: command parameters type are: 1 in/out memref, 3 empty.
+ * => process = outI[0] = sum(inI[0..len-1])
+ */
+static TEE_Result test_entry_params(uint32_t type, TEE_Param p[TEE_NUM_PARAMS])
+{
+ size_t i;
+ uint8_t d8, *in;
+
+ /* case 1a: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[0].value.a = p[0].value.a + p[0].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1b: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[1].value.a = p[1].value.a + p[1].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1c: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[2].value.a = p[2].value.a + p[2].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1d: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INOUT)) {
+ p[3].value.a = p[3].value.a + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+
+ /* case 2a: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[0].value.a = p[1].value.a + p[2].value.a + p[3].value.a;
+ p[0].value.b = p[1].value.b + p[2].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 2b: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[1].value.a = p[0].value.a + p[2].value.a + p[3].value.a;
+ p[1].value.b = p[0].value.b + p[2].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 2c: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[2].value.a = p[0].value.a + p[1].value.a + p[3].value.a;
+ p[2].value.b = p[0].value.b + p[1].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 2d: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_OUTPUT)) {
+ p[3].value.a = p[0].value.a + p[1].value.a + p[2].value.a;
+ p[3].value.b = p[0].value.b + p[1].value.b + p[2].value.b;
+ return TEE_SUCCESS;
+ }
+
+ DMSG("expect memref params: %p/%" PRIu32 " - %p/%" PRIu32 "zu - %p/%" PRIu32 "zu - %p/%" PRIu32 "zu",
+ p[0].memref.buffer, p[0].memref.size,
+ p[1].memref.buffer, p[1].memref.size,
+ p[2].memref.buffer, p[2].memref.size,
+ p[3].memref.buffer, p[3].memref.size);
+
+ /* case 3a: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[0].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[0].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[0].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3b: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[1].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[1].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[1].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3c: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[2].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[2].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[2].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3d: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_MEMREF_INOUT)) {
+ in = (uint8_t *)p[3].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[3].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[3].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+
+ EMSG("unexpected parameters");
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result create_ta(void)
+{
+ DMSG("create entry point for pseudo TA \"%s\"", TA_NAME);
+ return TEE_SUCCESS;
+}
+
+static void destroy_ta(void)
+{
+ DMSG("destroy entry point for pseudo ta \"%s\"", TA_NAME);
+}
+
+static TEE_Result open_session(uint32_t nParamTypes __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused,
+ void **ppSessionContext __unused)
+{
+ DMSG("open entry point for pseudo ta \"%s\"", TA_NAME);
+ return TEE_SUCCESS;
+}
+
+static void close_session(void *pSessionContext __unused)
+{
+ DMSG("close entry point for pseudo ta \"%s\"", TA_NAME);
+}
+
+static TEE_Result invoke_command(void *pSessionContext __unused,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS])
+{
+ DMSG("command entry point for pseudo ta \"%s\"", TA_NAME);
+
+ switch (nCommandID) {
+ case CMD_TRACE:
+ return test_trace(nParamTypes, pParams);
+ case CMD_PARAMS:
+ return test_entry_params(nParamTypes, pParams);
+ case CMD_SELF_TESTS:
+ return core_self_tests(nParamTypes, pParams);
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = STA_SELF_TEST_UUID, .name = TA_NAME,
+ .flags = PTA_DEFAULT_FLAGS,
+ .create_entry_point = create_ta,
+ .destroy_entry_point = destroy_ta,
+ .open_session_entry_point = open_session,
+ .close_session_entry_point = close_session,
+ .invoke_command_entry_point = invoke_command);
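As an illustration of the "case 2a" handling above, a normal-world client could exercise CMD_PARAMS roughly as follows. This is a minimal sketch using the GlobalPlatform Client API; the TEEC session (sess), the res/orig variables and all error handling are assumed to exist, and the input values are arbitrary:

	TEEC_Operation op = { 0 };
	uint32_t orig = 0;

	op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_OUTPUT, TEEC_VALUE_INPUT,
					 TEEC_VALUE_INPUT, TEEC_VALUE_INPUT);
	op.params[1].value.a = 1;
	op.params[2].value.a = 2;
	op.params[3].value.a = 3;
	/* The PTA sums the three inputs: op.params[0].value.a becomes 6 */
	res = TEEC_InvokeCommand(&sess, CMD_PARAMS, &op, &orig);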
diff --git a/core/arch/arm/pta/se_api_self_tests.c b/core/arch/arm/pta/se_api_self_tests.c
new file mode 100644
index 0000000..f6f5ec4
--- /dev/null
+++ b/core/arch/arm/pta/se_api_self_tests.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <compiler.h>
+#include <kernel/pseudo_ta.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <trace.h>
+
+#include <tee/se/manager.h>
+#include <tee/se/reader.h>
+#include <tee/se/session.h>
+#include <tee/se/iso7816.h>
+#include <tee/se/aid.h>
+#include <tee/se/apdu.h>
+#include <tee/se/channel.h>
+#include <tee/se/util.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "aid_priv.h"
+#include "apdu_priv.h"
+#include "reader_priv.h"
+
+
+#define TA_NAME "se_api_self_tests.ta"
+
+#define MAX_READERS 10
+
+#define CMD_SELF_TESTS 0
+
+#define SE_API_SELF_TEST_UUID \
+ { 0xAEB79790, 0x6F03, 0x11E4, \
+ { 0x98, 0x03, 0x08, 0x00, 0x20, 0x0C, 0x9A, 0x66 } }
+
+#define ASSERT(expr) \
+ do { \
+ if (!(expr)) { \
+ EMSG("assertion '%s' failed at %s:%d (func '%s')", \
+ #expr, __FILE__, __LINE__, __func__); \
+ return TEE_ERROR_GENERIC; \
+ } \
+ } while (0)
+
+#define CHECK(ret) \
+ do { \
+ if (ret != TEE_SUCCESS) \
+ return ret; \
+ } while (0)
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result test_reader(struct tee_se_reader_proxy **handle)
+{
+ TEE_Result ret;
+ uint8_t cmd[] = { ISO7816_CLA, MANAGE_CHANNEL_CMD,
+ OPEN_CHANNEL, OPEN_NEXT_AVAILABLE };
+ uint8_t resp[3];
+ size_t resp_size = sizeof(resp);
+ const int expected_channel_id = 1;
+
+ DMSG("entry");
+ /* transmit should fail since no one attached to the reader */
+ ret = tee_se_reader_transmit(handle[0], cmd, sizeof(cmd),
+ resp, &resp_size);
+ ASSERT(ret == TEE_ERROR_BAD_STATE);
+
+ ret = tee_se_reader_attach(handle[0]);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_reader_attach(handle[0]);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* referenced by 2 owners */
+ ASSERT(2 == tee_se_reader_get_refcnt(handle[0]));
+
+ ret = tee_se_reader_transmit(handle[0], cmd, sizeof(cmd),
+ resp, &resp_size);
+ ASSERT(ret == TEE_SUCCESS);
+ ASSERT(resp[0] == expected_channel_id &&
+ resp[1] == CMD_OK_SW1 && resp[2] == CMD_OK_SW2);
+
+ tee_se_reader_detach(handle[0]);
+
+ ASSERT(1 == tee_se_reader_get_refcnt(handle[0]));
+
+ tee_se_reader_detach(handle[0]);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_aid(struct tee_se_reader_proxy **proxies)
+{
+ struct tee_se_session *s = NULL;
+ struct tee_se_channel *b = NULL, *l = NULL;
+ struct tee_se_aid *aid = NULL;
+ TEE_Result ret;
+
+ DMSG("entry");
+ ret = tee_se_aid_create("D0000CAFE00001", &aid);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_reader_open_session(proxies[0], &s);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_session_open_basic_channel(s, aid, &b);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_session_open_logical_channel(s, aid, &l);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ASSERT(tee_se_aid_get_refcnt(aid) == 3);
+
+ tee_se_session_close_channel(s, b);
+ tee_se_session_close_channel(s, l);
+
+ ASSERT(tee_se_aid_get_refcnt(aid) == 1);
+
+ tee_se_session_close(s);
+ tee_se_aid_release(aid);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_session(struct tee_se_reader_proxy **proxies)
+{
+ struct tee_se_channel *c1 = NULL, *c2 = NULL;
+ struct tee_se_session *s1 = NULL, *s2 = NULL;
+ TEE_Result ret;
+
+ DMSG("entry");
+ ret = tee_se_reader_open_session(proxies[0], &s1);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* should succeed: multiple sessions opened by different users */
+ ret = tee_se_reader_open_session(proxies[0], &s2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* open basic channel on s1 (should succeed) */
+ ret = tee_se_session_open_basic_channel(s1, NULL, &c1);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* open basic channel on s2
+ * (should fail, basic channel is locked by s1)
+ */
+ ret = tee_se_session_open_basic_channel(s2, NULL, &c2);
+ ASSERT(ret == TEE_ERROR_NOT_SUPPORTED);
+ ASSERT(c2 == NULL);
+
+ /* close basic channel on s1 */
+ tee_se_session_close_channel(s1, c1);
+ c1 = NULL;
+
+ /* open basic channel on s2 (this time it should succeed) */
+ ret = tee_se_session_open_basic_channel(s2, NULL, &c2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* close basic channel on s2 */
+ tee_se_session_close_channel(s2, c2);
+ c2 = NULL;
+
+ /* open logical channels on s1 and s2 (both should succeed) */
+ ret = tee_se_session_open_logical_channel(s1, NULL, &c1);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = tee_se_session_open_logical_channel(s2, NULL, &c2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* clean up */
+ tee_se_session_close_channel(s1, c1);
+ tee_se_session_close_channel(s2, c2);
+
+ tee_se_session_close(s1);
+ tee_se_session_close(s2);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_select_resp(struct tee_se_reader_proxy **proxies)
+{
+ struct tee_se_aid *aid = NULL;
+ struct tee_se_session *s = NULL;
+ struct tee_se_channel *c = NULL;
+ struct resp_apdu *resp;
+ TEE_Result ret;
+
+ DMSG("entry");
+ ret = tee_se_aid_create("D0000CAFE00001", &aid);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_reader_open_session(proxies[0], &s);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_session_open_logical_channel(s, aid, &c);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_channel_get_select_response(c, &resp);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ASSERT((resp_apdu_get_sw1(resp) == CMD_OK_SW1) &&
+ (resp_apdu_get_sw2(resp) == CMD_OK_SW2));
+
+ /*
+ * the resp apdu should be owned by the channel,
+ * and the channel should be its only owner
+ */
+ ASSERT(apdu_get_refcnt(to_apdu_base(resp)) == 1);
+
+ /* increase the reference counter of resp apdu */
+ apdu_acquire(to_apdu_base(resp));
+
+ /* clean up */
+ tee_se_session_close_channel(s, c);
+
+ /* channel should release resp apdu when closed */
+ ASSERT(apdu_get_refcnt(to_apdu_base(resp)) == 1);
+ apdu_release(to_apdu_base(resp));
+
+ tee_se_session_close(s);
+ tee_se_aid_release(aid);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+/*
+ * The JAVA Card Simulator (jcardsim.jar) built-in applet(s):
+ *
+ * AID                                  | Type
+ * -------------------------------------+----------------------
+ * D0000CAFE00001                       | MultiSelectable
+ * (default selected on basic channel)  |
+ * -------------------------------------+----------------------
+ * D0000CAFE00002                       | Non-MultiSelectable
+ * -------------------------------------+----------------------
+ *
+ */
+static TEE_Result test_logical_channel(struct tee_se_reader_proxy **proxies)
+{
+ struct tee_se_channel *channel[MAX_LOGICAL_CHANNEL] = { NULL };
+ struct tee_se_aid *aid = NULL;
+ struct tee_se_session *s = NULL;
+ TEE_Result ret;
+ int i;
+
+ DMSG("entry");
+ ret = tee_se_reader_open_session(proxies[0], &s);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /*
+ * Test opening logical channels based on the AID selected on the basic
+ * channel (D0000CAFE00001 is selected on the basic channel by default;
+ * these calls should succeed since D0000CAFE00001 is MultiSelectable,
+ * and upon open each logical channel should select D0000CAFE00001).
+ */
+ for (i = 1; i < MAX_LOGICAL_CHANNEL; i++) {
+ ret = tee_se_session_open_logical_channel(s, NULL, &channel[i]);
+ ASSERT(ret == TEE_SUCCESS);
+ }
+
+ /*
+ * should fail on next open
+ * (exceeds maximum logical channel number)
+ */
+ ret = tee_se_session_open_logical_channel(s, NULL, &channel[0]);
+ ASSERT(ret == TEE_ERROR_NOT_SUPPORTED);
+
+ /* close 3 channels */
+ for (i = 1; i < 4; i++) {
+ tee_se_session_close_channel(s, channel[i]);
+ channel[i] = NULL;
+ }
+
+ /* re-open 3 channels (should succeed) */
+ for (i = 1; i < 4; i++) {
+ ret = tee_se_session_open_logical_channel(s, NULL, &channel[i]);
+ ASSERT(ret == TEE_SUCCESS);
+ }
+
+ /* logical channel 1 selects D0000CAFE00002 (should succeed) */
+ tee_se_aid_create("D0000CAFE00002", &aid);
+ ret = tee_se_channel_select(channel[1], aid);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* logical channel 2 select D0000CAFE00002
+ * (should fail since D0000CAFE00002 is not MultiSelectable)
+ */
+ ret = tee_se_channel_select(channel[2], aid);
+ ASSERT(ret == TEE_ERROR_NOT_SUPPORTED);
+
+ /* clean up */
+ for (i = 1; i < MAX_LOGICAL_CHANNEL; i++)
+ tee_se_session_close_channel(s, channel[i]);
+ tee_se_session_close(s);
+ tee_se_aid_release(aid);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result verify_result(struct resp_apdu *apdu, const char *data)
+{
+ size_t str_length = strlen(data);
+ size_t byte_length = strlen(data) / 2;
+ uint8_t *resp_data = resp_apdu_get_data(apdu);
+ size_t resp_len = resp_apdu_get_data_len(apdu);
+ uint8_t bytes[byte_length];
+ size_t i = 0;
+
+ ASSERT(resp_len == byte_length);
+
+ hex_decode(data, str_length, bytes);
+ while (i < resp_len) {
+ ASSERT(bytes[i] == resp_data[i]);
+ i++;
+ }
+ return TEE_SUCCESS;
+}
+
+static TEE_Result test_transmit(struct tee_se_reader_proxy **proxies)
+{
+ struct tee_se_channel *c1 = NULL, *c2 = NULL;
+ struct tee_se_session *s1 = NULL, *s2 = NULL;
+ struct tee_se_aid *full_aid = NULL, *partial_aid = NULL;
+ struct cmd_apdu *cmd;
+ struct resp_apdu *resp;
+ size_t tx_buf_len = 0, rx_buf_len = 7;
+ TEE_Result ret;
+
+ DMSG("entry");
+ ret = tee_se_aid_create("D0000CAFE00001", &full_aid);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_aid_create("D0000CAFE0000", &partial_aid);
+ ASSERT(ret == TEE_SUCCESS);
+
+ cmd = alloc_cmd_apdu(ISO7816_CLA, 0xFF, 0x0, 0x0,
+ tx_buf_len, rx_buf_len, NULL);
+ ASSERT(cmd);
+ resp = alloc_resp_apdu(rx_buf_len);
+ ASSERT(resp);
+
+ ret = tee_se_reader_open_session(proxies[0], &s1);
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_reader_open_session(proxies[0], &s2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* open logical channel on s1 (given full aid) */
+ ret = tee_se_session_open_logical_channel(s1, full_aid, &c1);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* should route to D0000CAFE00001 */
+ ret = tee_se_channel_transmit(c1, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* select next should fail (full aid given) */
+ ret = tee_se_channel_select_next(c1);
+ ASSERT(ret == TEE_ERROR_ITEM_NOT_FOUND);
+
+ /* open logical channel on s2 (given partial aid) */
+ ret = tee_se_session_open_logical_channel(s2, partial_aid, &c2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* should route to D0000CAFE00001 */
+ ret = tee_se_channel_transmit(c2, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = verify_result(resp, "D0000CAFE00001");
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* select next should succeed (selects D0000CAFE00002) */
+ ret = tee_se_channel_select_next(c2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* should route to D0000CAFE00002 */
+ ret = tee_se_channel_transmit(c2, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = verify_result(resp, "D0000CAFE00002");
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* select next should succeed (selects D0000CAFE00001) */
+ ret = tee_se_channel_select_next(c2);
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* should route to D0000CAFE00001 */
+ ret = tee_se_channel_transmit(c2, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = verify_result(resp, "D0000CAFE00001");
+ ASSERT(ret == TEE_SUCCESS);
+
+ /*
+ * test routing to the same applet back-to-back from different channels
+ * (both should succeed)
+ */
+ ret = tee_se_channel_transmit(c1, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = verify_result(resp, "D0000CAFE00001");
+ ASSERT(ret == TEE_SUCCESS);
+
+ ret = tee_se_channel_transmit(c2, cmd, resp);
+ ASSERT(ret == TEE_SUCCESS);
+ ret = verify_result(resp, "D0000CAFE00001");
+ ASSERT(ret == TEE_SUCCESS);
+
+ /* clean up */
+ tee_se_session_close_channel(s1, c1);
+ tee_se_session_close_channel(s2, c2);
+
+ tee_se_session_close(s1);
+ tee_se_session_close(s2);
+
+ tee_se_aid_release(full_aid);
+ tee_se_aid_release(partial_aid);
+ DMSG("exit");
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result se_api_self_tests(uint32_t nParamTypes __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+ size_t size = MAX_READERS;
+ TEE_Result ret;
+ struct tee_se_reader_proxy **proxies =
+ malloc(sizeof(void *) * MAX_READERS);
+
+ if (!proxies)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ tee_se_manager_get_readers(proxies, &size);
+
+ ret = test_aid(proxies);
+ CHECK(ret);
+
+ ret = test_select_resp(proxies);
+ CHECK(ret);
+
+ ret = test_session(proxies);
+ CHECK(ret);
+
+ ret = test_logical_channel(proxies);
+ CHECK(ret);
+
+ ret = test_transmit(proxies);
+ CHECK(ret);
+
+ ret = test_reader(proxies);
+ CHECK(ret);
+
+ free(proxies);
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result invoke_command(void *pSessionContext __unused,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS])
+{
+ DMSG("command entry point for static ta \"%s\"", TA_NAME);
+
+ switch (nCommandID) {
+ case CMD_SELF_TESTS:
+ return se_api_self_tests(nParamTypes, pParams);
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = SE_API_SELF_TEST_UUID, .name = TA_NAME,
+ .flags = PTA_DEFAULT_FLAGS,
+ .invoke_command_entry_point = invoke_command);
diff --git a/core/arch/arm/pta/stats.c b/core/arch/arm/pta/stats.c
new file mode 100644
index 0000000..408c284
--- /dev/null
+++ b/core/arch/arm/pta/stats.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <stdio.h>
+#include <trace.h>
+#include <kernel/pseudo_ta.h>
+#include <mm/tee_pager.h>
+#include <mm/tee_mm.h>
+#include <string.h>
+#include <string_ext.h>
+#include <malloc.h>
+
+#define TA_NAME "stats.ta"
+
+#define STATS_UUID \
+ { 0xd96a5b40, 0xe2c7, 0xb1af, \
+ { 0x87, 0x94, 0x10, 0x02, 0xa5, 0xd5, 0xc6, 0x1b } }
+
+#define STATS_CMD_PAGER_STATS 0
+#define STATS_CMD_ALLOC_STATS 1
+
+#define STATS_NB_POOLS 3
+
+static TEE_Result get_alloc_stats(uint32_t type, TEE_Param p[TEE_NUM_PARAMS])
+{
+ struct malloc_stats *stats;
+ uint32_t size_to_retrieve;
+ uint32_t pool_id;
+ uint32_t i;
+
+ /*
+ * p[0].value.a = pool id (from 0 to n)
+ * - 0 means all the pools to be retrieved
+ * - 1..n means pool id
+ * p[0].value.b = 0 if no reset of the stats
+ * p[1].memref.buffer = output buffer to struct malloc_stats
+ */
+ if (TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_MEMREF_OUTPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE) != type) {
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ pool_id = p[0].value.a;
+ if (pool_id > STATS_NB_POOLS)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ size_to_retrieve = sizeof(struct malloc_stats);
+ if (!pool_id)
+ size_to_retrieve *= STATS_NB_POOLS;
+
+ if (p[1].memref.size < size_to_retrieve) {
+ p[1].memref.size = size_to_retrieve;
+ return TEE_ERROR_SHORT_BUFFER;
+ }
+ p[1].memref.size = size_to_retrieve;
+ stats = p[1].memref.buffer;
+
+ for (i = 1; i <= STATS_NB_POOLS; i++) {
+ if ((pool_id) && (i != pool_id))
+ continue;
+
+ switch (i) {
+ case 1:
+ malloc_get_stats(stats);
+ strlcpy(stats->desc, "Heap", sizeof(stats->desc));
+ if (p[0].value.b)
+ malloc_reset_stats();
+ break;
+
+ case 2:
+ EMSG("public DDR not managed by secure side anymore");
+ break;
+
+ case 3:
+ tee_mm_get_pool_stats(&tee_mm_sec_ddr, stats,
+ !!p[0].value.b);
+ strlcpy(stats->desc, "Secure DDR", sizeof(stats->desc));
+ break;
+
+ default:
+ EMSG("Wrong pool id");
+ break;
+ }
+
+ stats++;
+ }
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result get_pager_stats(uint32_t type, TEE_Param p[TEE_NUM_PARAMS])
+{
+ struct tee_pager_stats stats;
+
+ if (TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
+ TEE_PARAM_TYPE_VALUE_OUTPUT,
+ TEE_PARAM_TYPE_VALUE_OUTPUT,
+ TEE_PARAM_TYPE_NONE) != type) {
+ EMSG("expect 3 output values as argument");
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ tee_pager_get_stats(&stats);
+ p[0].value.a = stats.npages;
+ p[0].value.b = stats.npages_all;
+ p[1].value.a = stats.ro_hits;
+ p[1].value.b = stats.rw_hits;
+ p[2].value.a = stats.hidden_hits;
+ p[2].value.b = stats.zi_released;
+
+ return TEE_SUCCESS;
+}
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result invoke_command(void *psess __unused,
+ uint32_t cmd, uint32_t ptypes,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ switch (cmd) {
+ case STATS_CMD_PAGER_STATS:
+ return get_pager_stats(ptypes, params);
+ case STATS_CMD_ALLOC_STATS:
+ return get_alloc_stats(ptypes, params);
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = STATS_UUID, .name = TA_NAME,
+ .flags = PTA_DEFAULT_FLAGS,
+ .invoke_command_entry_point = invoke_command);
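For completeness, STATS_CMD_PAGER_STATS is consumed from the normal world with three value-output parameters, along the following lines (a sketch only; the TEEC session to STATS_UUID, the res variable and error handling are assumed):

	TEEC_Operation op = { 0 };
	uint32_t orig = 0;

	op.paramTypes = TEEC_PARAM_TYPES(TEEC_VALUE_OUTPUT, TEEC_VALUE_OUTPUT,
					 TEEC_VALUE_OUTPUT, TEEC_NONE);
	res = TEEC_InvokeCommand(&sess, STATS_CMD_PAGER_STATS, &op, &orig);
	/*
	 * op.params[0].value.a/b: npages / npages_all
	 * op.params[1].value.a/b: ro_hits / rw_hits
	 * op.params[2].value.a/b: hidden_hits / zi_released
	 */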
diff --git a/core/arch/arm/pta/sub.mk b/core/arch/arm/pta/sub.mk
new file mode 100644
index 0000000..3d961d4
--- /dev/null
+++ b/core/arch/arm/pta/sub.mk
@@ -0,0 +1,14 @@
+srcs-$(CFG_TEE_CORE_EMBED_INTERNAL_TESTS) += pta_self_tests.c
+srcs-$(CFG_TEE_CORE_EMBED_INTERNAL_TESTS) += core_self_tests.c
+srcs-$(CFG_TEE_CORE_EMBED_INTERNAL_TESTS) += interrupt_tests.c
+srcs-$(CFG_WITH_STATS) += stats.c
+srcs-$(CFG_TA_GPROF_SUPPORT) += gprof.c
+
+ifeq ($(CFG_SE_API),y)
+srcs-$(CFG_SE_API_SELF_TEST) += se_api_self_tests.c
+cppflags-se_api_self_tests.c-y += -Icore/tee/se
+endif
+
+ifeq ($(CFG_WITH_USER_TA),y)
+srcs-$(CFG_TEE_FS_KEY_MANAGER_TEST) += tee_fs_key_manager_tests.c
+endif
diff --git a/core/arch/arm/pta/tee_fs_key_manager_tests.c b/core/arch/arm/pta/tee_fs_key_manager_tests.c
new file mode 100644
index 0000000..f9dc714
--- /dev/null
+++ b/core/arch/arm/pta/tee_fs_key_manager_tests.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/pseudo_ta.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tee/tee_fs_key_manager.h>
+#include <trace.h>
+
+#define TA_NAME "tee_fs_key_manager_tests.ta"
+
+#define CMD_SELF_TESTS 0
+
+#define ENC_FS_KEY_MANAGER_TEST_UUID \
+ { 0x17E5E280, 0xD12E, 0x11E4, \
+ { 0xA4, 0x1A, 0x00, 0x02, 0xA5, 0xD5, 0xC5, 0x1B } }
+
+#define DUMP_BUF_MAX 256
+
+static uint8_t test_data[] = {
+ 0x00, 0x6E, 0x04, 0x57, 0x08, 0xFB, 0x71, 0x96,
+ 0x00, 0x2E, 0x55, 0x3D, 0x02, 0xC3, 0xA6, 0x92,
+ 0x00, 0xC3, 0xEF, 0x8A, 0xB2, 0x34, 0x53, 0xE6,
+ 0x00, 0x74, 0x9C, 0xD6, 0x36, 0xE7, 0xA8, 0x00
+};
+
+static char *print_buf(char *buf, size_t *remain_size, const char *fmt, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+
+static char *print_buf(char *buf, size_t *remain_size, const char *fmt, ...)
+{
+ va_list ap;
+ size_t len;
+
+ va_start(ap, fmt);
+ len = vsnprintf(buf, *remain_size, fmt, ap);
+ buf += len;
+ *remain_size -= len;
+ va_end(ap);
+ return buf;
+}
+
+static void dump_hex(char *buf, size_t *remain_size, uint8_t *input_buf,
+ size_t input_size)
+{
+ size_t i;
+
+ for (i = 0; i < input_size; i++)
+ buf = print_buf(buf, remain_size, "%02X ", input_buf[i]);
+}
+
+static void print_hex(uint8_t *input_buf, size_t input_size)
+{
+ char buf[DUMP_BUF_MAX];
+ size_t remain = sizeof(buf);
+
+ dump_hex(buf, &remain, input_buf, input_size);
+ DMSG("%s", buf);
+}
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result test_file_decrypt_with_invalid_content(void)
+{
+ TEE_Result res = TEE_SUCCESS;
+ size_t header_size;
+ size_t encrypt_data_out_size;
+ uint8_t *encrypt_data_out = NULL;
+ size_t decrypt_data_out_size;
+ uint8_t *decrypt_data_out = NULL;
+ uint8_t tmp_byte;
+ uint8_t encrypted_fek[TEE_FS_KM_FEK_SIZE];
+
+ DMSG("Start");
+
+ /* data encryption */
+ header_size = tee_fs_get_header_size(META_FILE);
+
+ encrypt_data_out_size = header_size + sizeof(test_data);
+ encrypt_data_out = malloc(encrypt_data_out_size);
+ if (!encrypt_data_out) {
+ EMSG("malloc for encrypt data buffer failed");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ res = tee_fs_encrypt_file(META_FILE,
+ test_data, sizeof(test_data),
+ encrypt_data_out, &encrypt_data_out_size,
+ encrypted_fek);
+ if (res != TEE_SUCCESS) {
+ EMSG("file encryption failed");
+ goto exit;
+ }
+
+ /* data decryption */
+ decrypt_data_out_size = sizeof(test_data);
+ decrypt_data_out = malloc(decrypt_data_out_size);
+ if (!decrypt_data_out) {
+ EMSG("malloc for decrypt data buffer failed");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ /* case1: data decryption with modified encrypted_key */
+ tmp_byte = *(encrypt_data_out + 4);
+ *(encrypt_data_out + 4) = ~tmp_byte;
+
+ DMSG("case1: decryption with modified encrypted FEK");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res == TEE_ERROR_MAC_INVALID) {
+ DMSG("case1: passed, return code=%x", res);
+ } else {
+ EMSG("case1: failed, return code=%x", res);
+ res = TEE_ERROR_GENERIC;
+ goto exit;
+ }
+
+ *(encrypt_data_out + 4) = tmp_byte;
+
+ /* case2: data decryption with modified iv */
+ tmp_byte = *(encrypt_data_out + 20);
+ *(encrypt_data_out + 20) = ~tmp_byte;
+
+ DMSG("case2: decryption with modified IV");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res == TEE_ERROR_MAC_INVALID) {
+ DMSG("case2: passed, return code=%x", res);
+ } else {
+ EMSG("case2: failed, return code=%x", res);
+ res = TEE_ERROR_GENERIC;
+ goto exit;
+ }
+
+ *(encrypt_data_out + 20) = tmp_byte;
+
+ /* case3: data decryption with modified cipher text */
+ tmp_byte = *(encrypt_data_out + encrypt_data_out_size - 5);
+ *(encrypt_data_out + encrypt_data_out_size - 5) = ~tmp_byte;
+
+ DMSG("case3: decryption with modified cipher text");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res == TEE_ERROR_MAC_INVALID) {
+ DMSG("case3: passed, return code=%x", res);
+ } else {
+ EMSG("case3: failed, return code=%x", res);
+ res = TEE_ERROR_GENERIC;
+ goto exit;
+ }
+
+ *(encrypt_data_out + encrypt_data_out_size - 5) = tmp_byte;
+
+ /* case4: data decryption with shorter cipher text length */
+ DMSG("case4: decryption with shorter cipher text length");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size - 1,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res == TEE_ERROR_MAC_INVALID) {
+ DMSG("case4: passed, return code=%x", res);
+ } else {
+ EMSG("case4: failed, return code=%x", res);
+ res = TEE_ERROR_GENERIC;
+ goto exit;
+ }
+
+ /* case5: data decryption with shorter plain text buffer */
+ decrypt_data_out_size = sizeof(test_data) - 1;
+
+ DMSG("case5: decryption with shorter plain text buffer");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res == TEE_ERROR_SHORT_BUFFER) {
+ DMSG("case5: passed, return code=%x", res);
+ } else {
+ EMSG("case5: failed, return code=%x", res);
+ res = TEE_ERROR_GENERIC;
+ goto exit;
+ }
+
+ /* restore to the allocated buffer size */
+ decrypt_data_out_size = sizeof(test_data);
+
+ /* data decryption with correct encrypted data */
+ DMSG("good path test - decryption with correct data");
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res != TEE_SUCCESS) {
+ EMSG("failed to decrypt data, return code=%x", res);
+ goto exit;
+ }
+
+ /* data comparison */
+ if (memcmp(test_data, decrypt_data_out, sizeof(test_data)) != 0) {
+ EMSG("decrypted data does not match test data");
+ res = TEE_ERROR_GENERIC;
+ } else {
+ DMSG("good path test - passed");
+ }
+
+exit:
+ if (encrypt_data_out != NULL)
+ free(encrypt_data_out);
+
+ if (decrypt_data_out != NULL)
+ free(decrypt_data_out);
+
+ DMSG("Finish");
+
+ return res;
+}
+
+static TEE_Result test_file_decrypt_success(void)
+{
+ TEE_Result res = TEE_SUCCESS;
+ size_t header_size;
+ size_t encrypt_data_out_size;
+ uint8_t *encrypt_data_out = NULL;
+ size_t decrypt_data_out_size;
+ uint8_t *decrypt_data_out = NULL;
+ uint8_t encrypted_fek[TEE_FS_KM_FEK_SIZE];
+
+ DMSG("Start");
+
+ res = tee_fs_generate_fek(encrypted_fek, TEE_FS_KM_FEK_SIZE);
+ if (res != TEE_SUCCESS)
+ goto exit;
+
+ /* data encryption */
+ header_size = tee_fs_get_header_size(META_FILE);
+
+ encrypt_data_out_size = header_size + sizeof(test_data);
+ encrypt_data_out = malloc(encrypt_data_out_size);
+ if (!encrypt_data_out) {
+ EMSG("malloc for encrypt data buffer failed");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ res = tee_fs_encrypt_file(META_FILE,
+ test_data, sizeof(test_data),
+ encrypt_data_out, &encrypt_data_out_size,
+ encrypted_fek);
+ if (res != TEE_SUCCESS) {
+ EMSG("file encryption failed");
+ goto exit;
+ }
+
+
+ /* data decryption */
+ decrypt_data_out_size = sizeof(test_data);
+ decrypt_data_out = malloc(decrypt_data_out_size);
+ if (!decrypt_data_out) {
+ EMSG("malloc for decrypt data buffer failed");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ res = tee_fs_decrypt_file(META_FILE,
+ encrypt_data_out, encrypt_data_out_size,
+ decrypt_data_out, &decrypt_data_out_size,
+ encrypted_fek);
+ if (res != TEE_SUCCESS)
+ goto exit;
+
+ /* data comparison */
+ if (memcmp(test_data, decrypt_data_out, sizeof(test_data)) != 0) {
+ EMSG("Data compare failed");
+ res = TEE_ERROR_GENERIC;
+ }
+
+exit:
+ /* dump data for debug */
+ if (res != TEE_SUCCESS)
+ DMSG("return code = %x", res);
+ else {
+ DMSG("Test Data (%zu bytes)", sizeof(test_data));
+ print_hex(test_data, sizeof(test_data));
+ DMSG("Encrypted Data (%zu bytes)", encrypt_data_out_size);
+ print_hex(encrypt_data_out, encrypt_data_out_size);
+ DMSG("Decrypted Data (%zu bytes)", decrypt_data_out_size);
+ print_hex(decrypt_data_out, decrypt_data_out_size);
+ }
+
+ if (encrypt_data_out != NULL)
+ free(encrypt_data_out);
+
+ if (decrypt_data_out != NULL)
+ free(decrypt_data_out);
+
+ DMSG("Finish");
+
+ return res;
+}
+
+static TEE_Result self_tests(
+ uint32_t nParamTypes __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+ TEE_Result res;
+
+ res = test_file_decrypt_success();
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = test_file_decrypt_with_invalid_content();
+ if (res != TEE_SUCCESS)
+ return res;
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result invoke_command(void *pSessionContext __unused,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS])
+{
+ DMSG("command entry point for pseudo TA \"%s\"", TA_NAME);
+
+ switch (nCommandID) {
+ case CMD_SELF_TESTS:
+ return self_tests(nParamTypes, pParams);
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = ENC_FS_KEY_MANAGER_TEST_UUID, .name = TA_NAME,
+ .flags = PTA_DEFAULT_FLAGS,
+ .invoke_command_entry_point = invoke_command);
diff --git a/core/arch/arm/sm/psci.c b/core/arch/arm/sm/psci.c
new file mode 100644
index 0000000..b2bd645
--- /dev/null
+++ b/core/arch/arm/sm/psci.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <console.h>
+#include <kernel/generic_boot.h>
+#include <kernel/thread.h>
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <sm/psci.h>
+#include <sm/sm.h>
+#include <trace.h>
+
+__weak uint32_t psci_version(void)
+{
+ return PSCI_VERSION_0_2;
+}
+
+__weak int psci_cpu_suspend(uint32_t power_state __unused,
+ uintptr_t entry __unused,
+ uint32_t context_id __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_cpu_off(void)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_cpu_on(uint32_t cpu_id __unused, uint32_t entry __unused,
+ uint32_t context_id __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_affinity_info(uint32_t affinity __unused,
+ uint32_t lowest_affinity_level __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_migrate(uint32_t cpu_id __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_migrate_info_type(void)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_migrate_info_up_cpu(void)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak void psci_system_off(void)
+{
+}
+
+__weak void psci_system_reset(void)
+{
+}
+
+__weak int psci_features(uint32_t psci_fid __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_node_hw_state(uint32_t cpu_id __unused,
+ uint32_t power_level __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_stat_residency(uint32_t cpu_id __unused,
+ uint32_t power_state __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+__weak int psci_stat_count(uint32_t cpu_id __unused,
+ uint32_t power_state __unused)
+{
+ return PSCI_RET_NOT_SUPPORTED;
+}
+
+void tee_psci_handler(struct thread_smc_args *args)
+{
+ uint32_t smc_fid = args->a0;
+ uint32_t a1 = args->a1;
+ uint32_t a2 = args->a2;
+ uint32_t a3 = args->a3;
+
+ switch (smc_fid) {
+ case PSCI_VERSION:
+ args->a0 = psci_version();
+ break;
+ case PSCI_CPU_SUSPEND:
+ args->a0 = psci_cpu_suspend(a1, a2, a3);
+ break;
+ case PSCI_CPU_OFF:
+ args->a0 = psci_cpu_off();
+ break;
+ case PSCI_CPU_ON:
+ args->a0 = psci_cpu_on(a1, a2, a3);
+ break;
+ case PSCI_AFFINITY_INFO:
+ args->a0 = psci_affinity_info(a1, a2);
+ break;
+ case PSCI_MIGRATE:
+ args->a0 = psci_migrate(a1);
+ break;
+ case PSCI_MIGRATE_INFO_TYPE:
+ args->a0 = psci_migrate_info_type();
+ break;
+ case PSCI_MIGRATE_INFO_UP_CPU:
+ args->a0 = psci_migrate_info_up_cpu();
+ break;
+ case PSCI_SYSTEM_OFF:
+ psci_system_off();
+ while (1)
+ ;
+ break;
+ case PSCI_SYSTEM_RESET:
+ psci_system_reset();
+ while (1)
+ ;
+ break;
+ case PSCI_PSCI_FEATURES:
+ args->a0 = psci_features(a1);
+ break;
+ case PSCI_NODE_HW_STATE:
+ args->a0 = psci_node_hw_state(a1, a2);
+ break;
+ default:
+ args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
+ break;
+ }
+}
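+
+/*
+ * Platform code is expected to override the __weak defaults above when it
+ * actually supports PSCI. A minimal sketch of such an override follows
+ * (illustrative only; plat_set_ns_entry() and plat_cpu_power_on() are
+ * hypothetical platform helpers, and the PSCI_RET_* code and core count
+ * config are assumed to come from sm/psci.h and platform_config.h):
+ *
+ *	int psci_cpu_on(uint32_t cpu_id, uint32_t entry, uint32_t context_id)
+ *	{
+ *		if (cpu_id >= CFG_TEE_CORE_NB_CORE)
+ *			return PSCI_RET_INVALID_PARAMETERS;
+ *
+ *		plat_set_ns_entry(cpu_id, entry, context_id);
+ *		return plat_cpu_power_on(cpu_id);
+ *	}
+ */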
diff --git a/core/arch/arm/sm/sm.c b/core/arch/arm/sm/sm.c
new file mode 100644
index 0000000..4a0c0f6
--- /dev/null
+++ b/core/arch/arm/sm/sm.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm.h>
+#include <compiler.h>
+#include <kernel/misc.h>
+#include <platform_config.h>
+#include <sm/optee_smc.h>
+#include <sm/sm.h>
+#include <sm/std_smc.h>
+#include <string.h>
+#include "sm_private.h"
+
+bool sm_from_nsec(struct sm_ctx *ctx)
+{
+ uint32_t *nsec_r0 = (uint32_t *)(&ctx->nsec.r0);
+
+#ifdef CFG_PSCI_ARM32
+ if (OPTEE_SMC_OWNER_NUM(*nsec_r0) == OPTEE_SMC_OWNER_STANDARD) {
+ smc_std_handler((struct thread_smc_args *)nsec_r0);
+ return false; /* Return to non-secure state */
+ }
+#endif
+
+ sm_save_modes_regs(&ctx->nsec.mode_regs);
+ sm_restore_modes_regs(&ctx->sec.mode_regs);
+
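+ /* Copy the SMC arguments (r0-r7) from the non-secure context */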
+ memcpy(&ctx->sec.r0, nsec_r0, sizeof(uint32_t) * 8);
+ if (OPTEE_SMC_IS_FAST_CALL(ctx->sec.r0))
+ ctx->sec.mon_lr = (uint32_t)&thread_vector_table.fast_smc_entry;
+ else
+ ctx->sec.mon_lr = (uint32_t)&thread_vector_table.std_smc_entry;
+ return true; /* Return to secure state */
+}
diff --git a/core/arch/arm/sm/sm_a32.S b/core/arch/arm/sm/sm_a32.S
new file mode 100644
index 0000000..9c6becd
--- /dev/null
+++ b/core/arch/arm/sm/sm_a32.S
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <asm-defines.h>
+
+ .section .text.sm_asm
+
+FUNC sm_save_modes_regs , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* User mode registers have to be saved from system mode */
+ cps #CPSR_MODE_SYS
+ stm r0!, {sp, lr}
+
+ cps #CPSR_MODE_IRQ
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_FIQ
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_SVC
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_ABT
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_UND
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_MON
+ bx lr
+UNWIND( .fnend)
+END_FUNC sm_save_modes_regs
+
+/* Restores the mode specific registers */
+FUNC sm_restore_modes_regs , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* User mode registers have to be restored from system mode */
+ cps #CPSR_MODE_SYS
+ ldm r0!, {sp, lr}
+
+ cps #CPSR_MODE_IRQ
+ ldm r0!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #CPSR_MODE_FIQ
+ ldm r0!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #CPSR_MODE_SVC
+ ldm r0!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #CPSR_MODE_ABT
+ ldm r0!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #CPSR_MODE_UND
+ ldm r0!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #CPSR_MODE_MON
+ bx lr
+UNWIND( .fnend)
+END_FUNC sm_restore_modes_regs
+
+/*
+ * stack_tmp is used as stack, the top of the stack is reserved to hold
+ * struct sm_ctx, everything below is for normal stack usage. As several
+ * different CPU modes are using the same stack it's important that switch
+ * of CPU mode isn't done until one mode is done. This means FIQ, IRQ and
+ * Async abort has to be masked while using stack_tmp.
+ */
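+/*
+ * Rough picture of what the code below assumes (a sketch only; the
+ * authoritative layout is struct sm_ctx in sm/sm.h and the SM_CTX_* and
+ * SM_*_CTX_* offsets generated by asm-defines.c):
+ *
+ *	high addresses	+------------------+ <- top of stack_tmp
+ *			| sm_ctx.nsec      |
+ *			+------------------+ <- monitor sp while in secure world
+ *			| sm_ctx.sec       |
+ *			+------------------+
+ *	low addresses	| normal stack use |
+ */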
+LOCAL_FUNC sm_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ srsdb sp!, #CPSR_MODE_MON
+ push {r0-r7}
+
+ clrex /* Clear the exclusive monitor */
+
+ /* Find out if we're doing a secure or non-secure entry */
+ read_scr r1
+ tst r1, #SCR_NS
+ bne .smc_from_nsec
+
+ /*
+ * As we're coming from secure world (NS bit cleared) the stack
+ * pointer points to sm_ctx.sec.r0 at this stage. After the
+ * instruction below the stack pointer points to sm_ctx.
+ */
+ sub sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
+
+ /* Save secure context */
+ add r0, sp, #SM_CTX_SEC
+ bl sm_save_modes_regs
+
+ /*
+ * On FIQ exit we're restoring the non-secure context unchanged, on
+ * all other exits we're shifting r1-r4 from secure context into
+ * r0-r3 in non-secure context.
+ */
+ add r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
+ ldm r8, {r0-r4}
+ mov_imm r9, TEESMC_OPTEED_RETURN_FIQ_DONE
+ cmp r0, r9
+ addne r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
+ stmne r8, {r1-r4}
+
+ /* Restore non-secure context */
+ add r0, sp, #SM_CTX_NSEC
+ bl sm_restore_modes_regs
+
+.sm_ret_to_nsec:
+ /*
+ * Return to non-secure world
+ */
+ add r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
+ ldm r0, {r8-r12}
+
+ /* Update SCR */
+ read_scr r0
+ orr r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
+ write_scr r0
+
+ add sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
+ b .sm_exit
+
+.smc_from_nsec:
+ /*
+ * As we're coming from non-secure world (NS bit set) the stack
+ * pointer points to sm_ctx.nsec.r0 at this stage. After the
+ * instruction below the stack pointer points to sm_ctx.
+ */
+ sub sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
+
+ bic r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
+ write_scr r1
+
+ add r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
+ stm r0, {r8-r12}
+
+ mov r0, sp
+ bl sm_from_nsec
+ cmp r0, #0
+ beq .sm_ret_to_nsec
+
+ /*
+ * Continue into secure world
+ */
+ add sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
+
+.sm_exit:
+ pop {r0-r7}
+ rfefd sp!
+UNWIND( .fnend)
+END_FUNC sm_smc_entry
+
+/*
+ * FIQ handling
+ *
+ * Saves CPU context in the same way as sm_smc_entry() above. The CPU
+ * context will later be restored by sm_smc_entry() when handling a return
+ * from FIQ.
+ */
+LOCAL_FUNC sm_fiq_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ /* sp points just past struct sm_sec_ctx */
+ srsdb sp!, #CPSR_MODE_MON
+ push {r0-r7}
+
+ clrex /* Clear the exclusive monitor */
+
+ /*
+ * As we're coming from non-secure world the stack pointer points
+ * to sm_ctx.nsec.r0 at this stage. After the instruction below the
+ * stack pointer points to sm_ctx.
+ */
+ sub sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
+
+ /* Update SCR */
+ read_scr r1
+ bic r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
+ write_scr r1
+
+ /* Save non-secure context */
+ add r0, sp, #SM_CTX_NSEC
+ bl sm_save_modes_regs
+ stm r0!, {r8-r12}
+
+ /* Set FIQ entry */
+ ldr r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
+ str r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
+
+ /* Restore secure context */
+ add r0, sp, #SM_CTX_SEC
+ bl sm_restore_modes_regs
+
+ add sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)
+
+ rfefd sp!
+UNWIND( .fnend)
+END_FUNC sm_fiq_entry
+
+ .align 5
+LOCAL_FUNC sm_vect_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ b . /* Reset */
+ b . /* Undefined instruction */
+ b sm_smc_entry /* Secure monitor call */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b sm_fiq_entry /* FIQ */
+UNWIND( .fnend)
+END_FUNC sm_vect_table
+
+/* void sm_init(vaddr_t stack_pointer); */
+FUNC sm_init , :
+UNWIND( .fnstart)
+ /* Set monitor stack */
+ mrs r1, cpsr
+ cps #CPSR_MODE_MON
+ /* Point just beyond sm_ctx.sec */
+ sub sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)
+ msr cpsr, r1
+
+ /* Set monitor vector (MVBAR) */
+ ldr r0, =sm_vect_table
+ write_mvbar r0
+
+ bx lr
+END_FUNC sm_init
+
+
+/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
+FUNC sm_get_nsec_ctx , :
+ mrs r1, cpsr
+ cps #CPSR_MODE_MON
+ mov r0, sp
+ msr cpsr, r1
+
+ /*
+ * As we're in secure mode mon_sp points just beyond sm_ctx.sec
+ * which is sm_ctx.nsec
+ */
+ bx lr
+END_FUNC sm_get_nsec_ctx
diff --git a/core/arch/arm/sm/sm_private.h b/core/arch/arm/sm/sm_private.h
new file mode 100644
index 0000000..0b41bec
--- /dev/null
+++ b/core/arch/arm/sm/sm_private.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SM_PRIVATE_H
+#define SM_PRIVATE_H
+
+/* Returns true if returning to sec, false if returning to nsec */
+bool sm_from_nsec(struct sm_ctx *ctx);
+
+void sm_save_modes_regs(struct sm_mode_regs *regs);
+void sm_restore_modes_regs(struct sm_mode_regs *regs);
+
+#endif /*SM_PRIVATE_H*/
+
diff --git a/core/arch/arm/sm/std_smc.c b/core/arch/arm/sm/std_smc.c
new file mode 100644
index 0000000..5e5bb81
--- /dev/null
+++ b/core/arch/arm/sm/std_smc.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <sm/psci.h>
+#include <sm/sm.h>
+#include <sm/std_smc.h>
+#include <tee/uuid.h>
+#include <trace.h>
+
+static const TEE_UUID uuid = {
+ 0x5f8b97df, 0x2d0d, 0x4ad2,
+ {0x98, 0xd2, 0x74, 0xf4, 0x38, 0x27, 0x98, 0xbb},
+};
+
+void smc_std_handler(struct thread_smc_args *args)
+{
+ uint32_t smc_fid = args->a0;
+
+ if (is_psci_fid(smc_fid)) {
+ tee_psci_handler(args);
+ return;
+ }
+
+ switch (smc_fid) {
+ case ARM_STD_SVC_CALL_COUNT:
+ /* PSCI is the only STD service implemented */
+ args->a0 = PSCI_NUM_CALLS;
+ break;
+ case ARM_STD_SVC_UID:
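+ /*
+ * Standard service UID query: return the UUID packed into
+ * a0-a3 as four 32-bit words.
+ */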
+ args->a0 = uuid.timeLow;
+ args->a1 = (uuid.timeHiAndVersion << 16) | uuid.timeMid;
+ args->a2 = (uuid.clockSeqAndNode[3] << 24) |
+ (uuid.clockSeqAndNode[2] << 16) |
+ (uuid.clockSeqAndNode[1] << 8) |
+ uuid.clockSeqAndNode[0];
+ args->a3 = (uuid.clockSeqAndNode[7] << 24) |
+ (uuid.clockSeqAndNode[6] << 16) |
+ (uuid.clockSeqAndNode[5] << 8) |
+ uuid.clockSeqAndNode[4];
+ break;
+ case ARM_STD_SVC_VERSION:
+ args->a0 = STD_SVC_VERSION_MAJOR;
+ args->a1 = STD_SVC_VERSION_MINOR;
+ break;
+ default:
+ args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
+ break;
+ }
+}
diff --git a/core/arch/arm/sm/sub.mk b/core/arch/arm/sm/sub.mk
new file mode 100644
index 0000000..4e28e29
--- /dev/null
+++ b/core/arch/arm/sm/sub.mk
@@ -0,0 +1,3 @@
+srcs-y += sm_a32.S
+srcs-y += sm.c
+srcs-$(CFG_PSCI_ARM32) += std_smc.c psci.c
diff --git a/core/arch/arm/tee/arch_svc.c b/core/arch/arm/tee/arch_svc.c
new file mode 100644
index 0000000..8a89ce9
--- /dev/null
+++ b/core/arch/arm/tee/arch_svc.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <kernel/misc.h>
+#include <kernel/thread.h>
+#include <kernel/trace_ta.h>
+#include <tee/tee_svc.h>
+#include <tee/arch_svc.h>
+#include <tee/tee_svc_cryp.h>
+#include <tee/tee_svc_storage.h>
+#include <tee/se/svc.h>
+#include <tee_syscall_numbers.h>
+#include <trace.h>
+#include <util.h>
+
+#include "arch_svc_private.h"
+#include "svc_cache.h"
+
+#if (TRACE_LEVEL == TRACE_FLOW) && defined(CFG_TEE_CORE_TA_TRACE)
+#define TRACE_SYSCALLS
+#endif
+
+struct syscall_entry {
+ syscall_t fn;
+#ifdef TRACE_SYSCALLS
+ const char *name;
+#endif
+};
+
+#ifdef TRACE_SYSCALLS
+#define SYSCALL_ENTRY(_fn) { .fn = (syscall_t)_fn, .name = #_fn }
+#else
+#define SYSCALL_ENTRY(_fn) { .fn = (syscall_t)_fn }
+#endif
+
+/*
+ * This array is ordered according to the SYSCALL ids TEE_SCN_xxx
+ */
+static const struct syscall_entry tee_svc_syscall_table[] = {
+ SYSCALL_ENTRY(syscall_sys_return),
+ SYSCALL_ENTRY(syscall_log),
+ SYSCALL_ENTRY(syscall_panic),
+ SYSCALL_ENTRY(syscall_get_property),
+ SYSCALL_ENTRY(syscall_get_property_name_to_index),
+ SYSCALL_ENTRY(syscall_open_ta_session),
+ SYSCALL_ENTRY(syscall_close_ta_session),
+ SYSCALL_ENTRY(syscall_invoke_ta_command),
+ SYSCALL_ENTRY(syscall_check_access_rights),
+ SYSCALL_ENTRY(syscall_get_cancellation_flag),
+ SYSCALL_ENTRY(syscall_unmask_cancellation),
+ SYSCALL_ENTRY(syscall_mask_cancellation),
+ SYSCALL_ENTRY(syscall_wait),
+ SYSCALL_ENTRY(syscall_get_time),
+ SYSCALL_ENTRY(syscall_set_ta_time),
+ SYSCALL_ENTRY(syscall_cryp_state_alloc),
+ SYSCALL_ENTRY(syscall_cryp_state_copy),
+ SYSCALL_ENTRY(syscall_cryp_state_free),
+ SYSCALL_ENTRY(syscall_hash_init),
+ SYSCALL_ENTRY(syscall_hash_update),
+ SYSCALL_ENTRY(syscall_hash_final),
+ SYSCALL_ENTRY(syscall_cipher_init),
+ SYSCALL_ENTRY(syscall_cipher_update),
+ SYSCALL_ENTRY(syscall_cipher_final),
+ SYSCALL_ENTRY(syscall_cryp_obj_get_info),
+ SYSCALL_ENTRY(syscall_cryp_obj_restrict_usage),
+ SYSCALL_ENTRY(syscall_cryp_obj_get_attr),
+ SYSCALL_ENTRY(syscall_cryp_obj_alloc),
+ SYSCALL_ENTRY(syscall_cryp_obj_close),
+ SYSCALL_ENTRY(syscall_cryp_obj_reset),
+ SYSCALL_ENTRY(syscall_cryp_obj_populate),
+ SYSCALL_ENTRY(syscall_cryp_obj_copy),
+ SYSCALL_ENTRY(syscall_cryp_derive_key),
+ SYSCALL_ENTRY(syscall_cryp_random_number_generate),
+ SYSCALL_ENTRY(syscall_authenc_init),
+ SYSCALL_ENTRY(syscall_authenc_update_aad),
+ SYSCALL_ENTRY(syscall_authenc_update_payload),
+ SYSCALL_ENTRY(syscall_authenc_enc_final),
+ SYSCALL_ENTRY(syscall_authenc_dec_final),
+ SYSCALL_ENTRY(syscall_asymm_operate),
+ SYSCALL_ENTRY(syscall_asymm_verify),
+ SYSCALL_ENTRY(syscall_storage_obj_open),
+ SYSCALL_ENTRY(syscall_storage_obj_create),
+ SYSCALL_ENTRY(syscall_storage_obj_del),
+ SYSCALL_ENTRY(syscall_storage_obj_rename),
+ SYSCALL_ENTRY(syscall_storage_alloc_enum),
+ SYSCALL_ENTRY(syscall_storage_free_enum),
+ SYSCALL_ENTRY(syscall_storage_reset_enum),
+ SYSCALL_ENTRY(syscall_storage_start_enum),
+ SYSCALL_ENTRY(syscall_storage_next_enum),
+ SYSCALL_ENTRY(syscall_storage_obj_read),
+ SYSCALL_ENTRY(syscall_storage_obj_write),
+ SYSCALL_ENTRY(syscall_storage_obj_trunc),
+ SYSCALL_ENTRY(syscall_storage_obj_seek),
+ SYSCALL_ENTRY(syscall_obj_generate_key),
+ SYSCALL_ENTRY(syscall_se_service_open),
+ SYSCALL_ENTRY(syscall_se_service_close),
+ SYSCALL_ENTRY(syscall_se_service_get_readers),
+ SYSCALL_ENTRY(syscall_se_reader_get_prop),
+ SYSCALL_ENTRY(syscall_se_reader_get_name),
+ SYSCALL_ENTRY(syscall_se_reader_open_session),
+ SYSCALL_ENTRY(syscall_se_reader_close_sessions),
+ SYSCALL_ENTRY(syscall_se_session_is_closed),
+ SYSCALL_ENTRY(syscall_se_session_get_atr),
+ SYSCALL_ENTRY(syscall_se_session_open_channel),
+ SYSCALL_ENTRY(syscall_se_session_close),
+ SYSCALL_ENTRY(syscall_se_channel_select_next),
+ SYSCALL_ENTRY(syscall_se_channel_get_select_resp),
+ SYSCALL_ENTRY(syscall_se_channel_transmit),
+ SYSCALL_ENTRY(syscall_se_channel_close),
+ SYSCALL_ENTRY(syscall_cache_operation),
+};
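+
+/*
+ * tee_svc_handler() below dispatches by indexing this table directly with
+ * the syscall number coming from the TA, so entries must stay in
+ * TEE_SCN_xxx order; the COMPILE_TIME_ASSERT() in tee_svc_handler() keeps
+ * the table length in sync with TEE_SCN_MAX when new syscalls are added.
+ */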
+
+#ifdef TRACE_SYSCALLS
+static void trace_syscall(size_t num)
+{
+ if (num == TEE_SCN_RETURN || num > TEE_SCN_MAX)
+ return;
+ FMSG("syscall #%zu (%s)", num, tee_svc_syscall_table[num].name);
+}
+#else
+static void trace_syscall(size_t num __unused)
+{
+}
+#endif
+
+#ifdef ARM32
+static void get_scn_max_args(struct thread_svc_regs *regs, size_t *scn,
+ size_t *max_args)
+{
+ *scn = regs->r7;
+ *max_args = regs->r6;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void get_scn_max_args(struct thread_svc_regs *regs, size_t *scn,
+ size_t *max_args)
+{
+ if (((regs->spsr >> SPSR_MODE_RW_SHIFT) & SPSR_MODE_RW_MASK) ==
+ SPSR_MODE_RW_32) {
+ *scn = regs->x7;
+ *max_args = regs->x6;
+ } else {
+ *scn = regs->x8;
+ *max_args = 0;
+ }
+}
+#endif /*ARM64*/
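+
+/*
+ * Syscall ABI assumed by the helpers above and by tee_svc_do_call():
+ * 32-bit TAs pass the syscall number in r7 and the count of extra stack
+ * arguments in r6, with r5/x5 pointing at them (see arch_svc_a32.S and
+ * arch_svc_a64.S), while 64-bit TAs pass the syscall number in x8 and all
+ * arguments in registers.
+ */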
+
+#ifdef ARM32
+static void set_svc_retval(struct thread_svc_regs *regs, uint32_t ret_val)
+{
+ regs->r0 = ret_val;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_svc_retval(struct thread_svc_regs *regs, uint64_t ret_val)
+{
+ regs->x0 = ret_val;
+}
+#endif /*ARM64*/
+
+void tee_svc_handler(struct thread_svc_regs *regs)
+{
+ size_t scn;
+ size_t max_args;
+ syscall_t scf;
+
+ COMPILE_TIME_ASSERT(ARRAY_SIZE(tee_svc_syscall_table) ==
+ (TEE_SCN_MAX + 1));
+
+ thread_user_save_vfp();
+
+ /* TA has just entered kernel mode */
+ tee_ta_update_session_utime_suspend();
+
+ /* Restore IRQs, which are disabled on exception entry */
+ thread_restore_irq();
+
+ get_scn_max_args(regs, &scn, &max_args);
+
+ trace_syscall(scn);
+
+ if (max_args > TEE_SVC_MAX_ARGS) {
+ DMSG("Too many arguments for SCN %zu (%zu)", scn, max_args);
+ set_svc_retval(regs, TEE_ERROR_GENERIC);
+ return;
+ }
+
+ if (scn > TEE_SCN_MAX)
+ scf = syscall_not_supported;
+ else
+ scf = tee_svc_syscall_table[scn].fn;
+
+ set_svc_retval(regs, tee_svc_do_call(regs, scf));
+
+ if (scn != TEE_SCN_RETURN) {
+ /* We're about to switch back to user mode */
+ tee_ta_update_session_utime_resume();
+ }
+}
+
+#ifdef ARM32
+uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
+ uint32_t panic_code, struct thread_svc_regs *regs)
+{
+ if (panic) {
+ TAMSG("TA panicked with code 0x%x usr_sp 0x%x usr_lr 0x%x",
+ panic_code, read_mode_sp(CPSR_MODE_SYS),
+ read_mode_lr(CPSR_MODE_SYS));
+ }
+ regs->r1 = panic;
+ regs->r2 = panic_code;
+ regs->lr = (uintptr_t)thread_unwind_user_mode;
+ regs->spsr = read_cpsr();
+ return ret;
+}
+#endif /*ARM32*/
+#ifdef ARM64
+uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
+ uint32_t panic_code, struct thread_svc_regs *regs)
+{
+ if (panic) {
+ TAMSG("TA panicked with code 0x%x usr_sp 0x%" PRIx64 " usr_lr 0x%" PRIx64,
+ panic_code, regs->x13, regs->x14);
+ }
+ regs->x1 = panic;
+ regs->x2 = panic_code;
+ regs->elr = (uintptr_t)thread_unwind_user_mode;
+ regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
+ regs->spsr |= read_daif();
+ /*
+ * regs is the value of the stack pointer before calling the SVC
+ * handler. Adding one struct thread_svc_regs matches the space
+ * reserved at the beginning of el0_sync_svc(). This prepares the
+ * stack for returning to thread_unwind_user_mode instead of doing
+ * a normal exception return.
+ */
+ regs->sp_el0 = (uint64_t)(regs + 1);
+ return ret;
+}
+#endif /*ARM64*/
diff --git a/core/arch/arm/tee/arch_svc_a32.S b/core/arch/arm/tee/arch_svc_a32.S
new file mode 100644
index 0000000..d9c725c
--- /dev/null
+++ b/core/arch/arm/tee/arch_svc_a32.S
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "tee_syscall_numbers.h"
+#include "trace_levels.h"
+#include <asm.S>
+#include <arm.h>
+#include <tee_api_defines.h>
+#include <kernel/thread.h>
+#include <kernel/unwind.h>
+#include <asm-defines.h>
+
+ .section .text.arch_svc_asm
+
+/*
+ * uint32_t tee_svc_do_call(struct thread_svc_regs *regs, tee_svc_func func);
+ *
+ * Called from tee_svc_handler()
+ */
+FUNC tee_svc_do_call , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+ push {r5-r9, lr}
+ mov r7, sp
+ mov r8, r0
+ mov r9, r1
+ ldr r5, [r8, #THREAD_SVC_REG_R5]
+ ldr r6, [r8, #THREAD_SVC_REG_R6]
+
+ /*
+ * Copy eventual arguments passed on the user stack.
+ *
+ * r5 holds the address of the first word
+ * r6 holds the number of words
+ *
+ * tee_svc_handler(), which calls this function, has already
+ * checked that we don't copy too much data.
+ */
+ cmp r6, #0
+ beq .Lno_args
+ sub sp, sp, r6, lsl #2
+ bic sp, sp, #7 /* make sure it's a multiple of 8 */
+ mov r0, sp
+ mov r1, r5
+ mov r2, r6, lsl #2
+ ldr lr, =tee_svc_copy_from_user
+ blx lr
+
+ /* If copy failed return the error */
+ cmp r0, #0
+ bne .Lret
+
+.Lno_args:
+ /* Load arguments to function */
+ add lr, r8, #THREAD_SVC_REG_R0
+ ldm lr, {r0-r3}
+ blx r9
+.Lret:
+ mov sp, r7
+ pop {r5-r9, pc}
+UNWIND( .fnend)
+END_FUNC tee_svc_do_call
+
+/*
+ * User space sees this function as:
+ * void syscall_sys_return(uint32_t ret) __noreturn;
+ *
+ * But internally the function depends on being called from
+ * tee_svc_do_call() with pointer to the struct thread_svc_regs saved by
+ * thread_svc_handler() in r8. The argument ret is already in r0 so we
+ * don't touch that and let it propagate as return value of the called
+ * thread_unwind_user_mode().
+ */
+FUNC syscall_sys_return , :
+UNWIND( .fnstart)
+ mov r1, #0 /* panic = false */
+ mov r2, #0 /* panic_code = 0 */
+ mov r3, r8
+ b tee_svc_sys_return_helper
+UNWIND( .fnend)
+END_FUNC syscall_sys_return
+
+/*
+ * User space sees this function as:
+ * void syscall_panic(uint32_t code) __noreturn;
+ *
+ * But internally the function depends on being called from
+ * tee_svc_do_call() with pointer to the struct thread_svc_regs saved by
+ * thread_svc_handler() in r8.
+ */
+FUNC syscall_panic , :
+UNWIND( .fnstart)
+ mov r1, #1 /* panic = true */
+ mov r2, r0 /* panic_code = code */
+ mov r3, r8
+ ldr r0, =TEE_ERROR_TARGET_DEAD
+ b tee_svc_sys_return_helper
+UNWIND( .fnend)
+END_FUNC syscall_panic
diff --git a/core/arch/arm/tee/arch_svc_a64.S b/core/arch/arm/tee/arch_svc_a64.S
new file mode 100644
index 0000000..f76c2eb
--- /dev/null
+++ b/core/arch/arm/tee/arch_svc_a64.S
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "tee_syscall_numbers.h"
+#include "trace_levels.h"
+#include <asm.S>
+#include <arm64_macros.S>
+#include <arm64.h>
+#include <tee_api_defines.h>
+#include <kernel/thread.h>
+#include <asm-defines.h>
+
+ .section .text.arch_svc_asm
+
+#if 0
+struct sc_rec {
+ uint64_t x0;
+ uint64_t x1;
+ uint64_t x19;
+ uint64_t x30;
+}
+#endif
+#define SC_REC_X0 (8 * 0)
+#define SC_REC_X1 (8 * 1)
+#define SC_REC_X19 (8 * 2)
+#define SC_REC_X30 (8 * 3)
+#define SC_REC_SIZE (SC_REC_X30 + 8)
+
+/*
+ * uint32_t tee_svc_do_call(struct thread_svc_regs *regs, tee_svc_func func);
+ *
+ * Called from tee_svc_handler()
+ */
+FUNC tee_svc_do_call , :
+ sub sp, sp, #SC_REC_SIZE
+ stp x0, x1, [sp, #SC_REC_X0]
+ stp x19, x30, [sp, #SC_REC_X19]
+ mov x19, sp
+
+ ldr x2, [x0, #THREAD_SVC_REG_SPSR]
+ tst x2, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)
+ b.eq .Lcall_a64
+
+ ldp x5, x6, [x0, #THREAD_SVC_REG_X5]
+ cmp x6, #0
+ b.eq .Lno_args_a32
+
+ /*
+ * Calculate required space on stack to copy Aarch32 arguments
+ * and to transform them into Aarch64 arguments.
+ * x6 = nargs_on_stack
+ * n64 = (nargs_on_stack - 4) * 8
+ * n32 = nargs_on_stack * 4
+ * sp -= ROUNDUP(MAX(n32, n64), 16)
+ *
+ */
+ /* n64 = (nargs_on_stack - 4) * 8 */
+ sub x1, x6, #0x4
+ lsl x1, x1, #3
+ /* n32 = nargs_on_stack * 4 */
+ lsl x0, x6, #2
+ /* sp -= ROUNDUP(MAX(n32, n64), 16) */
+ cmp x1, x0
+ csel x0, x1, x0, ge
+ add x0, x0, #0xf
+ and x0, x0, #0xfffffffffffffff0
+ sub sp, sp, x0
+
+ /*
+ * Find location on stack where to copy the Aarch32 arguments
+ * and do the copy.
+ * tee_svc_copy_from_user(sp, x5, nargs_on_stack * 4)
+ */
+ mov x0, sp
+ mov x1, x5
+ add x2, xzr, x6, lsl #2
+ bl tee_svc_copy_from_user
+ /* If copy failed return the error */
+ cmp x0, #0
+ bne .Lret
+
+ /*
+ * Load arguments into w4..w7, we're loading junk into unused
+ * registers, but it's quicker than trying to figure out how
+ * many registers to load into.
+ */
+ /* x0 = nargs_on_stack */
+ ldr x0, [x19, #SC_REC_X0]
+ ldr x0, [x0, #THREAD_SVC_REG_X6]
+ load_wregs sp, 0, 4, 7
+
+ /*
+ * Convert remaining Aarch32 parameters passed on stack as Aarch64
+ * parameters on stack.
+ *
+ * nargs_on_stack is initialized in x0 above
+ * n64 = (nargs_on_stack - 4) * 8
+ * if n64 <= 0 goto .Lno_args_a32
+ * x0 = x2 = x19 - n64
+ * x1 points to next argument
+ * while (x2 != x19) {
+ * w3 = *x1
+ * x1 += 4
+ * *x2 = x3
+ * x2 += 8
+ * }
+ * sp = x0
+ */
+ /* n64 = (nargs_on_stack - 4) * 8 */
+ subs x2, x0, #0x4
+ b.le .Lno_args_a32
+ lsl x2, x2, #3
+ mov x0, x2
+
+.Lcpy_to_stack:
+ ldr w3, [x1], #4
+ str x3, [x2], #8
+ cmp x2, x19
+ b.ne .Lcpy_to_stack
+ mov sp, x0
+
+
+.Lno_args_a32: /* Load the first 4 arguments to function */
+ ldr x9, [x19, #SC_REC_X0]
+ load_xregs x9, THREAD_SVC_REG_X0, 0, 3
+ mov w0, w0
+ mov w1, w1
+ mov w2, w2
+ mov w3, w3
+
+ /* Call the svc function */
+ ldr x16, [x19, #SC_REC_X1]
+ blr x16
+ b .Lret
+
+.Lcall_a64: /* Load the first 8 arguments to function */
+ ldr x9, [x19, #SC_REC_X0]
+ load_xregs x9, THREAD_SVC_REG_X0, 0, 8
+
+ /* Call the svc function */
+ ldr x16, [x19, #SC_REC_X1]
+ blr x16
+
+.Lret:
+ mov sp, x19
+ ldp x19, x30, [sp, #SC_REC_X19]
+ add sp, sp, #SC_REC_SIZE
+ ret
+END_FUNC tee_svc_do_call
+
+/*
+ * User space sees this function as:
+ * void syscall_sys_return(uint32_t ret) __noreturn;
+ *
+ * But internally the function depends on being called from
+ * tee_svc_do_call() with the pointer to the struct thread_svc_regs
+ * reachable via x19 (see SC_REC_X0 above). The argument ret is already
+ * in x0 so we don't touch that and let it propagate as the return value
+ * of the called thread_unwind_user_mode().
+ */
+FUNC syscall_sys_return , :
+ mov x1, #0 /* panic = false */
+ mov x2, #0 /* panic_code = 0 */
+ ldr x3, [x19, #SC_REC_X0]
+ b tee_svc_sys_return_helper
+END_FUNC syscall_sys_return
+
+/*
+ * User space sees this function as:
+ * void syscall_panic(uint32_t code) __noreturn;
+ *
+ * But internally the function depends on being called from
+ * tee_svc_do_call() with the pointer to the struct thread_svc_regs
+ * reachable via x19 (see SC_REC_X0 above).
+ */
+FUNC syscall_panic , :
+ mov x1, #1 /* panic = true */
+ mov x2, x0 /* code */
+ ldr w0, =TEE_ERROR_TARGET_DEAD
+ ldr x3, [x19, #SC_REC_X0]
+ b tee_svc_sys_return_helper
+END_FUNC syscall_panic
diff --git a/core/arch/arm/tee/arch_svc_private.h b/core/arch/arm/tee/arch_svc_private.h
new file mode 100644
index 0000000..7b2ea94
--- /dev/null
+++ b/core/arch/arm/tee/arch_svc_private.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ARCH_SVC_PRIVATE_H
+#define ARCH_SVC_PRIVATE_H
+
+#include <tee_api_types.h>
+
+/* void argument but in reality it can be any number of arguments */
+typedef TEE_Result (*syscall_t)(void);
+
+/* Helper function for tee_svc_handler() */
+uint32_t tee_svc_do_call(struct thread_svc_regs *regs, syscall_t func);
+
+#endif /*ARCH_SVC_PRIVATE_H*/
diff --git a/core/arch/arm/tee/entry_fast.c b/core/arch/arm/tee/entry_fast.c
new file mode 100644
index 0000000..0e80dc8
--- /dev/null
+++ b/core/arch/arm/tee/entry_fast.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tee/entry_fast.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <kernel/generic_boot.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/misc.h>
+#include <mm/core_mmu.h>
+
+static void tee_entry_get_shm_config(struct thread_smc_args *args)
+{
+ args->a0 = OPTEE_SMC_RETURN_OK;
+ args->a1 = default_nsec_shm_paddr;
+ args->a2 = default_nsec_shm_size;
+ /* Should this be TEESMC cache attributes instead? */
+ args->a3 = core_mmu_is_shm_cached();
+}
+
+static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
+{
+ TEE_Result ret;
+#ifdef ARM32
+ paddr_t pa = 0;
+
+ switch (args->a1) {
+ case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
+ ret = tee_get_l2cc_mutex(&pa);
+ reg_pair_from_64(pa, &args->a2, &args->a3);
+ break;
+ case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
+ pa = reg_pair_to_64(args->a2, args->a3);
+ ret = tee_set_l2cc_mutex(&pa);
+ break;
+ case OPTEE_SMC_L2CC_MUTEX_ENABLE:
+ ret = tee_enable_l2cc_mutex();
+ break;
+ case OPTEE_SMC_L2CC_MUTEX_DISABLE:
+ ret = tee_disable_l2cc_mutex();
+ break;
+ default:
+ args->a0 = OPTEE_SMC_RETURN_EBADCMD;
+ return;
+ }
+#else
+ ret = TEE_ERROR_NOT_SUPPORTED;
+#endif
+ if (ret == TEE_ERROR_NOT_SUPPORTED)
+ args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
+ else if (ret)
+ args->a0 = OPTEE_SMC_RETURN_EBADADDR;
+ else
+ args->a0 = OPTEE_SMC_RETURN_OK;
+}
+
+static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
+{
+ if (args->a1) {
+ /*
+ * Either an unknown capability or
+ * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; in either case we can't
+ * deal with it.
+ *
+ * The memory mapping of shared memory is defined as normal
+ * shared memory for SMP systems and normal memory for UP
+ * systems. Currently we map all memory as shared in secure
+ * world.
+ */
+ args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
+ return;
+ }
+
+ args->a0 = OPTEE_SMC_RETURN_OK;
+ args->a1 = OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
+}
+
+static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
+{
+ uint64_t cookie;
+
+ if (!thread_disable_prealloc_rpc_cache(&cookie)) {
+ args->a0 = OPTEE_SMC_RETURN_EBUSY;
+ return;
+ }
+
+ if (!cookie) {
+ args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
+ return;
+ }
+
+ args->a0 = OPTEE_SMC_RETURN_OK;
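+ /* Return the 64-bit cookie split across a1 (high word) and a2 (low word) */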
+ args->a1 = cookie >> 32;
+ args->a2 = cookie;
+}
+
+static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
+{
+ if (thread_enable_prealloc_rpc_cache())
+ args->a0 = OPTEE_SMC_RETURN_OK;
+ else
+ args->a0 = OPTEE_SMC_RETURN_EBUSY;
+}
+
+static void tee_entry_boot_secondary(struct thread_smc_args *args)
+{
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+ if (!generic_boot_core_release(args->a1, (paddr_t)(args->a3)))
+ args->a0 = OPTEE_SMC_RETURN_OK;
+ else
+ args->a0 = OPTEE_SMC_RETURN_EBADCMD;
+#else
+ args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
+#endif
+}
+
+void tee_entry_fast(struct thread_smc_args *args)
+{
+ switch (args->a0) {
+
+ /* Generic functions */
+ case OPTEE_SMC_CALLS_COUNT:
+ tee_entry_get_api_call_count(args);
+ break;
+ case OPTEE_SMC_CALLS_UID:
+ tee_entry_get_api_uuid(args);
+ break;
+ case OPTEE_SMC_CALLS_REVISION:
+ tee_entry_get_api_revision(args);
+ break;
+ case OPTEE_SMC_CALL_GET_OS_UUID:
+ tee_entry_get_os_uuid(args);
+ break;
+ case OPTEE_SMC_CALL_GET_OS_REVISION:
+ tee_entry_get_os_revision(args);
+ break;
+
+ /* OP-TEE specific SMC functions */
+ case OPTEE_SMC_GET_SHM_CONFIG:
+ tee_entry_get_shm_config(args);
+ break;
+ case OPTEE_SMC_L2CC_MUTEX:
+ tee_entry_fastcall_l2cc_mutex(args);
+ break;
+ case OPTEE_SMC_EXCHANGE_CAPABILITIES:
+ tee_entry_exchange_capabilities(args);
+ break;
+ case OPTEE_SMC_DISABLE_SHM_CACHE:
+ tee_entry_disable_shm_cache(args);
+ break;
+ case OPTEE_SMC_ENABLE_SHM_CACHE:
+ tee_entry_enable_shm_cache(args);
+ break;
+ case OPTEE_SMC_BOOT_SECONDARY:
+ tee_entry_boot_secondary(args);
+ break;
+
+ default:
+ args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
+ break;
+ }
+}
+
+size_t tee_entry_generic_get_api_call_count(void)
+{
+ /*
+ * All the different calls handled in this file. If the specific
+ * target has additional calls it will call this function and
+ * add the number of calls the target has added.
+ */
+ return 9;
+}
+
+void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
+{
+ args->a0 = tee_entry_generic_get_api_call_count();
+}
+
+void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
+{
+ args->a0 = OPTEE_MSG_UID_0;
+ args->a1 = OPTEE_MSG_UID_1;
+ args->a2 = OPTEE_MSG_UID_2;
+ args->a3 = OPTEE_MSG_UID_3;
+}
+
+void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
+{
+ args->a0 = OPTEE_MSG_REVISION_MAJOR;
+ args->a1 = OPTEE_MSG_REVISION_MINOR;
+}
+
+void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
+{
+ args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
+ args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
+ args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
+ args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
+}
+
+void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
+{
+ args->a0 = CFG_OPTEE_REVISION_MAJOR;
+ args->a1 = CFG_OPTEE_REVISION_MINOR;
+}
diff --git a/core/arch/arm/tee/entry_std.c b/core/arch/arm/tee/entry_std.c
new file mode 100644
index 0000000..29c3b74
--- /dev/null
+++ b/core/arch/arm/tee/entry_std.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include <initcall.h>
+#include <kernel/panic.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <string.h>
+#include <tee/entry_std.h>
+#include <tee/tee_cryp_utl.h>
+#include <tee/uuid.h>
+#include <util.h>
+
+#define SHM_CACHE_ATTRS \
+ (uint32_t)(core_mmu_is_shm_cached() ? OPTEE_SMC_SHM_CACHED : 0)
+
+/* Sessions opened from normal world */
+static struct tee_ta_session_head tee_open_sessions =
+TAILQ_HEAD_INITIALIZER(tee_open_sessions);
+
+static struct mobj *shm_mobj;
+
+static TEE_Result set_mem_param(const struct optee_msg_param *param,
+ struct param_mem *mem)
+{
+ paddr_t b;
+ size_t sz;
+ size_t tsz;
+
+ if (mobj_get_pa(shm_mobj, 0, 0, &b) != TEE_SUCCESS)
+ panic("Failed to get PA of shared memory MOBJ");
+
+ sz = shm_mobj->size;
+ tsz = param->u.tmem.size;
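+ /*
+ * A zero-sized buffer with a non-NULL pointer must still lie inside
+ * the shared memory range, so check at least one byte.
+ */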
+ if (param->u.tmem.buf_ptr && !tsz)
+ tsz++;
+ if (!core_is_buffer_inside(param->u.tmem.buf_ptr, tsz, b, sz))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ mem->mobj = shm_mobj;
+ mem->offs = param->u.tmem.buf_ptr - b;
+ mem->size = param->u.tmem.size;
+ return TEE_SUCCESS;
+}
+
+static TEE_Result copy_in_params(const struct optee_msg_param *params,
+ uint32_t num_params, struct tee_ta_param *ta_param)
+{
+ TEE_Result res;
+ size_t n;
+ uint8_t pt[TEE_NUM_PARAMS];
+
+ if (num_params > TEE_NUM_PARAMS)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(ta_param, 0, sizeof(*ta_param));
+
+ for (n = 0; n < num_params; n++) {
+ uint32_t attr;
+
+ if (params[n].attr & OPTEE_MSG_ATTR_META)
+ return TEE_ERROR_BAD_PARAMETERS;
+ if (params[n].attr & OPTEE_MSG_ATTR_FRAGMENT)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ attr = params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ pt[n] = TEE_PARAM_TYPE_NONE;
+ memset(&ta_param->u[n], 0, sizeof(ta_param->u[n]));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ pt[n] = TEE_PARAM_TYPE_VALUE_INPUT + attr -
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ ta_param->u[n].val.a = params[n].u.value.a;
+ ta_param->u[n].val.b = params[n].u.value.b;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr -
+ OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ res = set_mem_param(params + n, &ta_param->u[n].mem);
+ if (res != TEE_SUCCESS)
+ return res;
+ break;
+ default:
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+ }
+
+ ta_param->types = TEE_PARAM_TYPES(pt[0], pt[1], pt[2], pt[3]);
+
+ return TEE_SUCCESS;
+}
+
+static void copy_out_param(struct tee_ta_param *ta_param, uint32_t num_params,
+ struct optee_msg_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ switch (TEE_PARAM_TYPE_GET(ta_param->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ params[n].u.tmem.size = ta_param->u[n].mem.size;
+ break;
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ params[n].u.value.a = ta_param->u[n].val.a;
+ params[n].u.value.b = ta_param->u[n].val.b;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * Extracts the two mandatory meta parameters that prefix an open session
+ * request: params[0] carries the TA UUID and params[1] the client identity.
+ *
+ * Returns
+ * TEE_ERROR_BAD_PARAMETERS : meta parameters missing or malformed
+ * TEE_SUCCESS              : parameters found and OK
+ */
+static TEE_Result get_open_session_meta(size_t num_params,
+ struct optee_msg_param *params,
+ size_t *num_meta, TEE_UUID *uuid,
+ TEE_Identity *clnt_id)
+{
+ const uint32_t req_attr = OPTEE_MSG_ATTR_META |
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+
+ if (num_params < 2)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ if (params[0].attr != req_attr || params[1].attr != req_attr)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ tee_uuid_from_octets(uuid, (void *)&params[0].u.value);
+ clnt_id->login = params[1].u.value.c;
+ switch (clnt_id->login) {
+ case TEE_LOGIN_PUBLIC:
+ memset(&clnt_id->uuid, 0, sizeof(clnt_id->uuid));
+ break;
+ case TEE_LOGIN_USER:
+ case TEE_LOGIN_GROUP:
+ case TEE_LOGIN_APPLICATION:
+ case TEE_LOGIN_APPLICATION_USER:
+ case TEE_LOGIN_APPLICATION_GROUP:
+ tee_uuid_from_octets(&clnt_id->uuid,
+ (void *)&params[1].u.value);
+ break;
+ default:
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ *num_meta = 2;
+ return TEE_SUCCESS;
+}
+
+static void entry_open_session(struct thread_smc_args *smc_args,
+ struct optee_msg_arg *arg, uint32_t num_params)
+{
+ TEE_Result res;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+ TEE_ErrorOrigin err_orig = TEE_ORIGIN_TEE;
+ struct tee_ta_session *s = NULL;
+ TEE_Identity clnt_id;
+ TEE_UUID uuid;
+ struct tee_ta_param param;
+ size_t num_meta;
+
+ res = get_open_session_meta(num_params, params, &num_meta, &uuid,
+ &clnt_id);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = copy_in_params(params + num_meta, num_params - num_meta, &param);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = tee_ta_open_session(&err_orig, &s, &tee_open_sessions, &uuid,
+ &clnt_id, TEE_TIMEOUT_INFINITE, &param);
+ if (res != TEE_SUCCESS)
+ s = NULL;
+ copy_out_param(&param, num_params - num_meta, params + num_meta);
+
+	/*
+	 * Open/close session requests arrive at unpredictable times, so
+	 * use this as a source of jitter entropy for the PRNG.
+	 */
+ plat_prng_add_jitter_entropy();
+
+out:
+ if (s)
+ arg->session = (vaddr_t)s;
+ else
+ arg->session = 0;
+ arg->ret = res;
+ arg->ret_origin = err_orig;
+ smc_args->a0 = OPTEE_SMC_RETURN_OK;
+}
+
+static void entry_close_session(struct thread_smc_args *smc_args,
+ struct optee_msg_arg *arg, uint32_t num_params)
+{
+ TEE_Result res;
+ struct tee_ta_session *s;
+
+ if (num_params) {
+ res = TEE_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ plat_prng_add_jitter_entropy();
+
+ s = (struct tee_ta_session *)(vaddr_t)arg->session;
+ res = tee_ta_close_session(s, &tee_open_sessions, NSAPP_IDENTITY);
+out:
+ arg->ret = res;
+ arg->ret_origin = TEE_ORIGIN_TEE;
+ smc_args->a0 = OPTEE_SMC_RETURN_OK;
+}
+
+static void entry_invoke_command(struct thread_smc_args *smc_args,
+ struct optee_msg_arg *arg, uint32_t num_params)
+{
+ TEE_Result res;
+ struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+ TEE_ErrorOrigin err_orig = TEE_ORIGIN_TEE;
+ struct tee_ta_session *s;
+ struct tee_ta_param param;
+
+ res = copy_in_params(params, num_params, &param);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ s = tee_ta_get_session(arg->session, true, &tee_open_sessions);
+ if (!s) {
+ res = TEE_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ res = tee_ta_invoke_command(&err_orig, s, NSAPP_IDENTITY,
+ TEE_TIMEOUT_INFINITE, arg->func, &param);
+
+ tee_ta_put_session(s);
+
+ copy_out_param(&param, num_params, params);
+
+out:
+ arg->ret = res;
+ arg->ret_origin = err_orig;
+ smc_args->a0 = OPTEE_SMC_RETURN_OK;
+}
+
+static void entry_cancel(struct thread_smc_args *smc_args,
+ struct optee_msg_arg *arg, uint32_t num_params)
+{
+ TEE_Result res;
+ TEE_ErrorOrigin err_orig = TEE_ORIGIN_TEE;
+ struct tee_ta_session *s;
+
+ if (num_params) {
+ res = TEE_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ s = tee_ta_get_session(arg->session, false, &tee_open_sessions);
+ if (!s) {
+ res = TEE_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ res = tee_ta_cancel_command(&err_orig, s, NSAPP_IDENTITY);
+ tee_ta_put_session(s);
+
+out:
+ arg->ret = res;
+ arg->ret_origin = err_orig;
+ smc_args->a0 = OPTEE_SMC_RETURN_OK;
+}
+
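+/*
+ * Standard call entry point: validates that the argument struct supplied
+ * by normal world lives in non-secure shared memory, then dispatches on
+ * the requested command.
+ */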
+void tee_entry_std(struct thread_smc_args *smc_args)
+{
+ paddr_t parg;
+ struct optee_msg_arg *arg = NULL; /* fix gcc warning */
+ uint32_t num_params;
+
+ if (smc_args->a0 != OPTEE_SMC_CALL_WITH_ARG) {
+ EMSG("Unknown SMC 0x%" PRIx64, (uint64_t)smc_args->a0);
+ DMSG("Expected 0x%x\n", OPTEE_SMC_CALL_WITH_ARG);
+ smc_args->a0 = OPTEE_SMC_RETURN_EBADCMD;
+ return;
+ }
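+	/*
+	 * The 64-bit physical address of the argument struct is passed
+	 * split across a1 (high 32 bits) and a2 (low 32 bits).
+	 */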
+ parg = (uint64_t)smc_args->a1 << 32 | smc_args->a2;
+ if (!tee_pbuf_is_non_sec(parg, sizeof(struct optee_msg_arg)) ||
+ !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
+ !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) {
+ EMSG("Bad arg address 0x%" PRIxPA, parg);
+ smc_args->a0 = OPTEE_SMC_RETURN_EBADADDR;
+ return;
+ }
+
+ num_params = arg->num_params;
+ if (!tee_pbuf_is_non_sec(parg, OPTEE_MSG_GET_ARG_SIZE(num_params))) {
+ EMSG("Bad arg address 0x%" PRIxPA, parg);
+ smc_args->a0 = OPTEE_SMC_RETURN_EBADADDR;
+ return;
+ }
+
+ thread_set_irq(true); /* Enable IRQ for STD calls */
+ switch (arg->cmd) {
+ case OPTEE_MSG_CMD_OPEN_SESSION:
+ entry_open_session(smc_args, arg, num_params);
+ break;
+ case OPTEE_MSG_CMD_CLOSE_SESSION:
+ entry_close_session(smc_args, arg, num_params);
+ break;
+ case OPTEE_MSG_CMD_INVOKE_COMMAND:
+ entry_invoke_command(smc_args, arg, num_params);
+ break;
+ case OPTEE_MSG_CMD_CANCEL:
+ entry_cancel(smc_args, arg, num_params);
+ break;
+ default:
+ EMSG("Unknown cmd 0x%x\n", arg->cmd);
+ smc_args->a0 = OPTEE_SMC_RETURN_EBADCMD;
+ }
+}
+
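+/*
+ * Register the memory objects backing non-secure shared memory and
+ * secure TA RAM; both are used later when resolving memory references.
+ */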
+static TEE_Result default_mobj_init(void)
+{
+ shm_mobj = mobj_phys_alloc(default_nsec_shm_paddr,
+ default_nsec_shm_size, SHM_CACHE_ATTRS,
+ CORE_MEM_NSEC_SHM);
+ if (!shm_mobj)
+ panic("Failed to register shared memory");
+
+ mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
+ tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
+ SHM_CACHE_ATTRS, CORE_MEM_TA_RAM);
+ if (!mobj_sec_ddr)
+ panic("Failed to register secure ta ram");
+
+ return TEE_SUCCESS;
+}
+
+driver_init_late(default_mobj_init);
diff --git a/core/arch/arm/tee/init.c b/core/arch/arm/tee/init.c
new file mode 100644
index 0000000..66d2a2b
--- /dev/null
+++ b/core/arch/arm/tee/init.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <initcall.h>
+#include <malloc.h> /* required for inits */
+
+#include <sm/tee_mon.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_memprot.h>
+#include <trace.h>
+#include <kernel/time_source.h>
+#include <kernel/generic_boot.h>
+#include <mm/tee_mmu.h>
+#include <tee/tee_fs.h>
+#include <tee/tee_cryp_provider.h>
+#include <tee/tee_svc.h>
+#include <platform_config.h>
+
+
+#define TEE_MON_MAX_NUM_ARGS 8
+
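+/*
+ * Walk the initcall table emitted between __initcall_start and
+ * __initcall_end and run every registered initcall; failures are only
+ * logged.
+ */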
+static void call_initcalls(void)
+{
+ initcall_t *call;
+
+ for (call = &__initcall_start; call < &__initcall_end; call++) {
+ TEE_Result ret;
+ ret = (*call)();
+ if (ret != TEE_SUCCESS) {
+ EMSG("Initial call 0x%08" PRIxVA " failed",
+ (vaddr_t)call);
+ }
+ }
+}
+
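+/* Bring up the core TEE services; only the first invocation does any work */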
+TEE_Result init_teecore(void)
+{
+ static int is_first = 1;
+
+	/* (DEBUG) initialization is deferred to the first TEE service call, once the UART is set up */
+ if (!is_first)
+ return TEE_SUCCESS;
+ is_first = 0;
+
+#ifdef CFG_WITH_USER_TA
+ tee_svc_uref_base = CFG_TEE_LOAD_ADDR;
+#endif
+
+ /* init support for future mapping of TAs */
+ teecore_init_pub_ram();
+
+ /* time initialization */
+ time_source_init();
+
+	/* call the pre-defined initcall routines */
+ call_initcalls();
+
+ IMSG("Initialized");
+ return TEE_SUCCESS;
+}
diff --git a/core/arch/arm/tee/pta_socket.c b/core/arch/arm/tee/pta_socket.c
new file mode 100644
index 0000000..d696773
--- /dev/null
+++ b/core/arch/arm/tee/pta_socket.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/pseudo_ta.h>
+#include <optee_msg.h>
+#include <optee_msg_supplicant.h>
+#include <pta_socket.h>
+#include <string.h>
+#include <tee/tee_fs_rpc.h>
+
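+/*
+ * The instance id lets the normal world supplicant tie sockets to the TA
+ * instance that opened them (see pta_socket_close_session() below).
+ */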
+static uint32_t get_instance_id(struct tee_ta_session *sess)
+{
+ return sess->ctx->ops->get_instance_id(sess->ctx);
+}
+
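+/*
+ * Open a socket via an OPTEE_MSG_RPC_CMD_SOCKET request: value params
+ * carry the instance id, port, protocol and IP version, a tmem param
+ * carries the server address, and the resulting socket handle is
+ * returned in the last value param.
+ */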
+static TEE_Result socket_open(struct tee_ta_session *sess, uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ TEE_Result res;
+ paddr_t pa;
+ uint64_t cookie;
+ void *va;
+ struct optee_msg_param msg_params[4];
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_MEMREF_INPUT,
+ TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_VALUE_OUTPUT);
+
+ if (exp_pt != param_types) {
+ DMSG("got param_types 0x%x, expected 0x%x",
+ param_types, exp_pt);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ va = tee_fs_rpc_cache_alloc(params[1].memref.size, &pa, &cookie);
+ if (!va)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_OPEN;
+ msg_params[0].u.value.b = get_instance_id(sess);
+
+ msg_params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[1].u.value.a = params[0].value.b; /* server port number */
+ msg_params[1].u.value.b = params[2].value.a; /* protocol */
+ msg_params[1].u.value.c = params[0].value.a; /* ip version */
+
+ /* server address */
+ msg_params[2].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ msg_params[2].u.tmem.buf_ptr = pa;
+ msg_params[2].u.tmem.size = params[1].memref.size;
+ msg_params[2].u.tmem.shm_ref = cookie;
+ memcpy(va, params[1].memref.buffer, params[1].memref.size);
+
+ /* socket handle */
+ msg_params[3].attr = OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT;
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 4, msg_params);
+
+ if (res == TEE_SUCCESS)
+ params[3].value.a = msg_params[3].u.value.a;
+
+ return res;
+}
+
+static TEE_Result socket_close(struct tee_ta_session *sess,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ struct optee_msg_param msg_params[1];
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+
+ if (exp_pt != param_types) {
+ DMSG("got param_types 0x%x, expected 0x%x",
+ param_types, exp_pt);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_CLOSE;
+ msg_params[0].u.value.b = get_instance_id(sess);
+ msg_params[0].u.value.c = params[0].value.a;
+
+ return thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 1, msg_params);
+}
+
+static TEE_Result socket_send(struct tee_ta_session *sess, uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ TEE_Result res;
+ paddr_t pa;
+ uint64_t cookie;
+ void *va;
+ struct optee_msg_param msg_params[3];
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_MEMREF_INPUT,
+ TEE_PARAM_TYPE_VALUE_OUTPUT,
+ TEE_PARAM_TYPE_NONE);
+
+ if (exp_pt != param_types) {
+ DMSG("got param_types 0x%x, expected 0x%x",
+ param_types, exp_pt);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ va = tee_fs_rpc_cache_alloc(params[1].memref.size, &pa, &cookie);
+ if (!va)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_SEND;
+ msg_params[0].u.value.b = get_instance_id(sess);
+ msg_params[0].u.value.c = params[0].value.a; /* handle */
+
+ /* buffer */
+ msg_params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ msg_params[1].u.tmem.buf_ptr = pa;
+ msg_params[1].u.tmem.size = params[1].memref.size;
+ msg_params[1].u.tmem.shm_ref = cookie;
+ memcpy(va, params[1].memref.buffer, params[1].memref.size);
+
+	msg_params[2].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INOUT;
+	msg_params[2].u.value.a = params[0].value.b; /* timeout */
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 3, msg_params);
+ params[2].value.a = msg_params[2].u.value.b; /* transmitted bytes */
+ return res;
+}
+
+static TEE_Result socket_recv(struct tee_ta_session *sess, uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ TEE_Result res;
+ paddr_t pa;
+ uint64_t cookie;
+ void *va;
+ struct optee_msg_param msg_params[3];
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_MEMREF_OUTPUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+
+ if (exp_pt != param_types) {
+ DMSG("got param_types 0x%x, expected 0x%x",
+ param_types, exp_pt);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ va = tee_fs_rpc_cache_alloc(params[1].memref.size, &pa, &cookie);
+ if (!va)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_RECV;
+ msg_params[0].u.value.b = get_instance_id(sess);
+ msg_params[0].u.value.c = params[0].value.a; /* handle */
+
+ /* buffer */
+ msg_params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ msg_params[1].u.tmem.buf_ptr = pa;
+ msg_params[1].u.tmem.size = params[1].memref.size;
+ msg_params[1].u.tmem.shm_ref = cookie;
+
+	msg_params[2].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+	msg_params[2].u.value.a = params[0].value.b; /* timeout */
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 3, msg_params);
+	/* Never copy back more than the caller's buffer can hold */
+	if (msg_params[1].u.tmem.size &&
+	    msg_params[1].u.tmem.size <= params[1].memref.size)
+		memcpy(params[1].memref.buffer, va, msg_params[1].u.tmem.size);
+	params[1].memref.size = msg_params[1].u.tmem.size;
+ return res;
+}
+
+static TEE_Result socket_ioctl(struct tee_ta_session *sess,
+ uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS])
+{
+ TEE_Result res;
+ paddr_t pa;
+ uint64_t cookie;
+ void *va;
+ struct optee_msg_param msg_params[3];
+ uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+ TEE_PARAM_TYPE_MEMREF_INOUT,
+ TEE_PARAM_TYPE_NONE,
+ TEE_PARAM_TYPE_NONE);
+
+ if (exp_pt != param_types) {
+ DMSG("got param_types 0x%x, expected 0x%x",
+ param_types, exp_pt);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ va = tee_fs_rpc_cache_alloc(params[1].memref.size, &pa, &cookie);
+ if (!va)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_IOCTL;
+ msg_params[0].u.value.b = get_instance_id(sess);
+ msg_params[0].u.value.c = params[0].value.a; /* handle */
+
+ /* buffer */
+ msg_params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INOUT;
+ msg_params[1].u.tmem.buf_ptr = pa;
+ msg_params[1].u.tmem.size = params[1].memref.size;
+ msg_params[1].u.tmem.shm_ref = cookie;
+ memcpy(va, params[1].memref.buffer, params[1].memref.size);
+
+ msg_params[2].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[2].u.value.a = params[0].value.b; /* ioctl command */
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 3, msg_params);
+ if (msg_params[1].u.tmem.size <= params[1].memref.size)
+ memcpy(params[1].memref.buffer, va, msg_params[1].u.tmem.size);
+ params[1].memref.size = msg_params[1].u.tmem.size;
+ return res;
+}
+
+typedef TEE_Result (*ta_func)(struct tee_ta_session *sess, uint32_t param_types,
+ TEE_Param params[TEE_NUM_PARAMS]);
+
+static const ta_func ta_funcs[] = {
+ [PTA_SOCKET_OPEN] = socket_open,
+ [PTA_SOCKET_CLOSE] = socket_close,
+ [PTA_SOCKET_SEND] = socket_send,
+ [PTA_SOCKET_RECV] = socket_recv,
+ [PTA_SOCKET_IOCTL] = socket_ioctl,
+};
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result pta_socket_open_session(uint32_t param_types __unused,
+ TEE_Param pParams[TEE_NUM_PARAMS] __unused,
+ void **sess_ctx __unused)
+{
+ struct tee_ta_session *s;
+
+ /* Check that we're called from a TA */
+ s = tee_ta_get_calling_session();
+ if (!s)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ *sess_ctx = s;
+
+ return TEE_SUCCESS;
+}
+
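+/*
+ * Ask the supplicant to close every socket opened by this TA instance
+ * when the session towards the socket PTA goes away.
+ */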
+static void pta_socket_close_session(void *sess_ctx)
+{
+ TEE_Result res;
+ struct optee_msg_param msg_params[1];
+
+ memset(msg_params, 0, sizeof(msg_params));
+
+ msg_params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_params[0].u.value.a = OPTEE_MRC_SOCKET_CLOSE_ALL;
+ msg_params[0].u.value.b = get_instance_id(sess_ctx);
+
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SOCKET, 1, msg_params);
+ if (res != TEE_SUCCESS)
+ DMSG("OPTEE_MRC_SOCKET_CLOSE_ALL failed: %#" PRIx32, res);
+}
+
+static TEE_Result pta_socket_invoke_command(void *sess_ctx, uint32_t cmd_id,
+ uint32_t param_types, TEE_Param params[TEE_NUM_PARAMS])
+{
+ if (cmd_id < ARRAY_SIZE(ta_funcs) && ta_funcs[cmd_id])
+ return ta_funcs[cmd_id](sess_ctx, param_types, params);
+
+ return TEE_ERROR_NOT_IMPLEMENTED;
+}
+
+pseudo_ta_register(.uuid = PTA_SOCKET_UUID, .name = "socket",
+ .flags = PTA_DEFAULT_FLAGS,
+ .open_session_entry_point = pta_socket_open_session,
+ .close_session_entry_point = pta_socket_close_session,
+ .invoke_command_entry_point = pta_socket_invoke_command);
diff --git a/core/arch/arm/tee/sub.mk b/core/arch/arm/tee/sub.mk
new file mode 100644
index 0000000..0ee9f64
--- /dev/null
+++ b/core/arch/arm/tee/sub.mk
@@ -0,0 +1,12 @@
+ifeq ($(CFG_WITH_USER_TA),y)
+srcs-$(CFG_ARM32_core) += arch_svc_a32.S
+srcs-$(CFG_ARM64_core) += arch_svc_a64.S
+srcs-$(CFG_CACHE_API) += svc_cache.c
+srcs-y += arch_svc.c
+srcs-$(CFG_GP_SOCKETS) += pta_socket.c
+else
+srcs-y += svc_dummy.c
+endif
+srcs-y += entry_std.c
+srcs-y += entry_fast.c
+srcs-y += init.c
diff --git a/core/arch/arm/tee/svc_cache.c b/core/arch/arm/tee/svc_cache.c
new file mode 100644
index 0000000..88b89a9
--- /dev/null
+++ b/core/arch/arm/tee/svc_cache.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <types_ext.h>
+#include <utee_types.h>
+#include <kernel/tee_ta_manager.h>
+#include <mm/tee_mmu.h>
+#include <mm/core_memprot.h>
+
+#include "svc_cache.h"
+
+/*
+ * cache_operation - dynamic cache clean/invalidate request from a TA.
+ * It follows the ARM recommendation:
+ *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
+ * Note that this implementation assumes that dsb operations are part of
+ * cache_maintenance_l1(), and that L2 cache sync is part of
+ * cache_maintenance_l2().
+ */
+static TEE_Result cache_operation(struct tee_ta_session *sess,
+ enum utee_cache_operation op, void *va, size_t len)
+{
+ TEE_Result ret;
+ paddr_t pa = 0;
+ struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);
+
+ if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
+ return TEE_ERROR_NOT_SUPPORTED;
+
+ /*
+ * TAs are allowed to operate cache maintenance on TA memref parameters
+ * only, not on the TA private memory.
+ */
+ if (tee_mmu_is_vbuf_intersect_ta_private(utc, va, len))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ |
+ TEE_MEMORY_ACCESS_ANY_OWNER,
+ (uaddr_t)va, len);
+ if (ret != TEE_SUCCESS)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ pa = virt_to_phys(va);
+ if (!pa)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ switch (op) {
+ case TEE_CACHEFLUSH:
+ /* Clean L1, Flush L2, Flush L1 */
+ ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
+ if (ret != TEE_SUCCESS)
+ return ret;
+ ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
+ if (ret != TEE_SUCCESS)
+ return ret;
+ return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);
+
+ case TEE_CACHECLEAN:
+ /* Clean L1, Clean L2 */
+ ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
+ if (ret != TEE_SUCCESS)
+ return ret;
+ return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);
+
+ case TEE_CACHEINVALIDATE:
+ /* Inval L2, Inval L1 */
+ ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
+ if (ret != TEE_SUCCESS)
+ return ret;
+ return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);
+
+ default:
+ return TEE_ERROR_NOT_SUPPORTED;
+ }
+}
+
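+/*
+ * Syscall wrapper: resolve the calling session, re-check the cache
+ * maintenance privilege and forward the request to cache_operation().
+ */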
+TEE_Result syscall_cache_operation(void *va, size_t len, unsigned long op)
+{
+ TEE_Result res;
+ struct tee_ta_session *s = NULL;
+
+ res = tee_ta_get_current_session(&s);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if ((s->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
+ return TEE_ERROR_NOT_SUPPORTED;
+
+ return cache_operation(s, op, va, len);
+}
diff --git a/core/arch/arm/tee/svc_cache.h b/core/arch/arm/tee/svc_cache.h
new file mode 100644
index 0000000..d5d4972
--- /dev/null
+++ b/core/arch/arm/tee/svc_cache.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SVC_CACHE_H
+#define SVC_CACHE_H
+
+#include <types_ext.h>
+#include <tee_api_types.h>
+
+#ifdef CFG_CACHE_API
+TEE_Result syscall_cache_operation(void *va, size_t len, unsigned long op);
+#else
+#define syscall_cache_operation syscall_not_supported
+#endif
+
+#endif /*SVC_CACHE_H*/
diff --git a/core/arch/arm/tee/svc_dummy.c b/core/arch/arm/tee/svc_dummy.c
new file mode 100644
index 0000000..4e8a924
--- /dev/null
+++ b/core/arch/arm/tee/svc_dummy.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <tee/arch_svc.h>
+
+void __noreturn tee_svc_handler(struct thread_svc_regs *regs __unused)
+{
+ /* "Can't happen" as we have no user space TAs */
+ panic();
+}