summaryrefslogtreecommitdiff
path: root/core/arch/arm
diff options
context:
space:
mode:
author r.tyminski <r.tyminski@partner.samsung.com> 2017-05-29 09:42:10 (GMT)
committer r.tyminski <r.tyminski@partner.samsung.com> 2017-05-29 09:49:50 (GMT)
commit f9a43781767007462965b21f3f518c4cfc0744c7 (patch)
tree 201509439b1d9798256227794dae6774345adf43 /core/arch/arm
parent 1fed20f5471aa0dad5e4b4f79d1f2843ac88734f (diff)
download tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.zip
tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.gz
tef-optee_os-f9a43781767007462965b21f3f518c4cfc0744c7.tar.bz2
Initial commit with upstream sources refs/changes/71/131471/1
Change-Id: Ie9460111f21fc955102fd8732a0173b2d0499a4a
Diffstat (limited to 'core/arch/arm')
-rw-r--r--core/arch/arm/arm.mk157
-rw-r--r--core/arch/arm/include/arm.h65
-rw-r--r--core/arch/arm/include/arm32.h606
-rw-r--r--core/arch/arm/include/arm32_macros.S215
-rw-r--r--core/arch/arm/include/arm32_macros_cortex_a9.S44
-rw-r--r--core/arch/arm/include/arm64.h310
-rw-r--r--core/arch/arm/include/arm64_macros.S125
-rw-r--r--core/arch/arm/include/kernel/abort.h57
-rw-r--r--core/arch/arm/include/kernel/generic_boot.h101
-rw-r--r--core/arch/arm/include/kernel/misc.h53
-rw-r--r--core/arch/arm/include/kernel/mutex.h98
-rw-r--r--core/arch/arm/include/kernel/pm_stubs.h37
-rw-r--r--core/arch/arm/include/kernel/pseudo_ta.h84
-rw-r--r--core/arch/arm/include/kernel/spinlock.h86
-rw-r--r--core/arch/arm/include/kernel/tee_l2cc_mutex.h72
-rw-r--r--core/arch/arm/include/kernel/thread.h559
-rw-r--r--core/arch/arm/include/kernel/thread_defs.h35
-rw-r--r--core/arch/arm/include/kernel/time_source.h44
-rw-r--r--core/arch/arm/include/kernel/tz_proc_def.h110
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce.h73
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_def.h141
-rw-r--r--core/arch/arm/include/kernel/tz_ssvce_pl310.h46
-rw-r--r--core/arch/arm/include/kernel/unwind.h77
-rw-r--r--core/arch/arm/include/kernel/user_ta.h93
-rw-r--r--core/arch/arm/include/kernel/vfp.h127
-rw-r--r--core/arch/arm/include/kernel/wait_queue.h85
-rw-r--r--core/arch/arm/include/mm/core_memprot.h103
-rw-r--r--core/arch/arm/include/mm/core_mmu.h399
-rw-r--r--core/arch/arm/include/mm/mobj.h130
-rw-r--r--core/arch/arm/include/mm/pgt_cache.h144
-rw-r--r--core/arch/arm/include/mm/tee_pager.h226
-rw-r--r--core/arch/arm/include/sm/optee_smc.h533
-rw-r--r--core/arch/arm/include/sm/psci.h60
-rw-r--r--core/arch/arm/include/sm/sm.h123
-rw-r--r--core/arch/arm/include/sm/std_smc.h22
-rw-r--r--core/arch/arm/include/sm/tee_mon.h38
-rw-r--r--core/arch/arm/include/sm/teesmc_opteed.h142
-rw-r--r--core/arch/arm/include/sm/teesmc_opteed_macros.h35
-rw-r--r--core/arch/arm/include/tee/arch_svc.h43
-rw-r--r--core/arch/arm/include/tee/entry_fast.h52
-rw-r--r--core/arch/arm/include/tee/entry_std.h38
-rw-r--r--core/arch/arm/kernel/abort.c582
-rw-r--r--core/arch/arm/kernel/asm-defines.c107
-rw-r--r--core/arch/arm/kernel/cache_helpers_a64.S207
-rw-r--r--core/arch/arm/kernel/elf32.h245
-rw-r--r--core/arch/arm/kernel/elf64.h248
-rw-r--r--core/arch/arm/kernel/elf_common.h1006
-rw-r--r--core/arch/arm/kernel/elf_load.c646
-rw-r--r--core/arch/arm/kernel/elf_load.h44
-rw-r--r--core/arch/arm/kernel/generic_boot.c710
-rw-r--r--core/arch/arm/kernel/generic_entry_a32.S503
-rw-r--r--core/arch/arm/kernel/generic_entry_a64.S315
-rw-r--r--core/arch/arm/kernel/kern.ld.S340
-rw-r--r--core/arch/arm/kernel/link.mk241
-rw-r--r--core/arch/arm/kernel/misc_a32.S90
-rw-r--r--core/arch/arm/kernel/misc_a64.S41
-rw-r--r--core/arch/arm/kernel/mutex.c279
-rw-r--r--core/arch/arm/kernel/pm_stubs.c41
-rw-r--r--core/arch/arm/kernel/proc_a32.S96
-rw-r--r--core/arch/arm/kernel/proc_a64.S71
-rw-r--r--core/arch/arm/kernel/pseudo_ta.c256
-rw-r--r--core/arch/arm/kernel/spin_lock_a32.S85
-rw-r--r--core/arch/arm/kernel/spin_lock_a64.S89
-rw-r--r--core/arch/arm/kernel/spin_lock_debug.c63
-rw-r--r--core/arch/arm/kernel/ssvce_a32.S334
-rw-r--r--core/arch/arm/kernel/ssvce_a64.S115
-rw-r--r--core/arch/arm/kernel/sub.mk45
-rw-r--r--core/arch/arm/kernel/tee_l2cc_mutex.c160
-rw-r--r--core/arch/arm/kernel/tee_time.c83
-rw-r--r--core/arch/arm/kernel/tee_time_arm_cntpct.c100
-rw-r--r--core/arch/arm/kernel/tee_time_ree.c62
-rw-r--r--core/arch/arm/kernel/thread.c1365
-rw-r--r--core/arch/arm/kernel/thread_a32.S645
-rw-r--r--core/arch/arm/kernel/thread_a64.S816
-rw-r--r--core/arch/arm/kernel/thread_private.h251
-rw-r--r--core/arch/arm/kernel/trace_ext.c50
-rw-r--r--core/arch/arm/kernel/tz_ssvce_pl310_a32.S258
-rw-r--r--core/arch/arm/kernel/unwind_arm32.c417
-rw-r--r--core/arch/arm/kernel/unwind_arm64.c84
-rw-r--r--core/arch/arm/kernel/user_ta.c826
-rw-r--r--core/arch/arm/kernel/vfp.c149
-rw-r--r--core/arch/arm/kernel/vfp_a32.S81
-rw-r--r--core/arch/arm/kernel/vfp_a64.S72
-rw-r--r--core/arch/arm/kernel/vfp_private.h53
-rw-r--r--core/arch/arm/kernel/wait_queue.c225
-rw-r--r--core/arch/arm/mm/core_mmu.c1177
-rw-r--r--core/arch/arm/mm/core_mmu_lpae.c890
-rw-r--r--core/arch/arm/mm/core_mmu_private.h43
-rw-r--r--core/arch/arm/mm/core_mmu_v7.c790
-rw-r--r--core/arch/arm/mm/mobj.c439
-rw-r--r--core/arch/arm/mm/pager_aes_gcm.c348
-rw-r--r--core/arch/arm/mm/pager_private.h45
-rw-r--r--core/arch/arm/mm/pgt_cache.c567
-rw-r--r--core/arch/arm/mm/sub.mk12
-rw-r--r--core/arch/arm/mm/tee_mm.c354
-rw-r--r--core/arch/arm/mm/tee_mmu.c896
-rw-r--r--core/arch/arm/mm/tee_pager.c1473
-rw-r--r--core/arch/arm/plat-d02/conf.mk34
-rw-r--r--core/arch/arm/plat-d02/kern.ld.S1
-rw-r--r--core/arch/arm/plat-d02/link.mk1
-rw-r--r--core/arch/arm/plat-d02/main.c96
-rw-r--r--core/arch/arm/plat-d02/platform_config.h137
-rw-r--r--core/arch/arm/plat-d02/sub.mk2
-rw-r--r--core/arch/arm/plat-hikey/conf.mk42
-rw-r--r--core/arch/arm/plat-hikey/hikey_peripherals.h97
-rw-r--r--core/arch/arm/plat-hikey/kern.ld.S1
-rw-r--r--core/arch/arm/plat-hikey/link.mk1
-rw-r--r--core/arch/arm/plat-hikey/main.c207
-rw-r--r--core/arch/arm/plat-hikey/platform_config.h130
-rw-r--r--core/arch/arm/plat-hikey/spi_test.c292
-rw-r--r--core/arch/arm/plat-hikey/sub.mk3
-rw-r--r--core/arch/arm/plat-imx/a9_plat_init.S109
-rw-r--r--core/arch/arm/plat-imx/conf.mk34
-rw-r--r--core/arch/arm/plat-imx/imx6ul.c57
-rw-r--r--core/arch/arm/plat-imx/imx_pl310.c63
-rw-r--r--core/arch/arm/plat-imx/kern.ld.S1
-rw-r--r--core/arch/arm/plat-imx/link.mk1
-rw-r--r--core/arch/arm/plat-imx/main.c202
-rw-r--r--core/arch/arm/plat-imx/platform_config.h410
-rw-r--r--core/arch/arm/plat-imx/psci.c78
-rw-r--r--core/arch/arm/plat-imx/sub.mk9
-rw-r--r--core/arch/arm/plat-ls/conf.mk20
-rw-r--r--core/arch/arm/plat-ls/kern.ld.S1
-rw-r--r--core/arch/arm/plat-ls/link.mk1
-rw-r--r--core/arch/arm/plat-ls/ls_core_pos.S41
-rw-r--r--core/arch/arm/plat-ls/main.c178
-rw-r--r--core/arch/arm/plat-ls/plat_init.S93
-rw-r--r--core/arch/arm/plat-ls/platform_config.h147
-rw-r--r--core/arch/arm/plat-ls/sub.mk4
-rw-r--r--core/arch/arm/plat-mediatek/conf.mk25
-rw-r--r--core/arch/arm/plat-mediatek/kern.ld.S1
-rw-r--r--core/arch/arm/plat-mediatek/link.mk1
-rw-r--r--core/arch/arm/plat-mediatek/main.c93
-rw-r--r--core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S51
-rw-r--r--core/arch/arm/plat-mediatek/mt8173_core_pos_a64.S47
-rw-r--r--core/arch/arm/plat-mediatek/platform_config.h108
-rw-r--r--core/arch/arm/plat-mediatek/sub.mk6
-rw-r--r--core/arch/arm/plat-rcar/conf.mk27
-rw-r--r--core/arch/arm/plat-rcar/kern.ld.S1
-rw-r--r--core/arch/arm/plat-rcar/link.mk7
-rw-r--r--core/arch/arm/plat-rcar/main.c95
-rw-r--r--core/arch/arm/plat-rcar/platform_config.h81
-rw-r--r--core/arch/arm/plat-rcar/sub.mk2
-rw-r--r--core/arch/arm/plat-rpi3/conf.mk39
-rw-r--r--core/arch/arm/plat-rpi3/kern.ld.S1
-rw-r--r--core/arch/arm/plat-rpi3/link.mk1
-rw-r--r--core/arch/arm/plat-rpi3/main.c94
-rw-r--r--core/arch/arm/plat-rpi3/platform_config.h95
-rw-r--r--core/arch/arm/plat-rpi3/sub.mk2
-rw-r--r--core/arch/arm/plat-sprd/conf.mk28
-rw-r--r--core/arch/arm/plat-sprd/console.c58
-rw-r--r--core/arch/arm/plat-sprd/kern.ld.S1
-rw-r--r--core/arch/arm/plat-sprd/link.mk1
-rw-r--r--core/arch/arm/plat-sprd/main.c79
-rw-r--r--core/arch/arm/plat-sprd/platform_config.h105
-rw-r--r--core/arch/arm/plat-sprd/sub.mk3
-rw-r--r--core/arch/arm/plat-stm/.gitignore1
-rw-r--r--core/arch/arm/plat-stm/asc.S108
-rw-r--r--core/arch/arm/plat-stm/asc.h35
-rw-r--r--core/arch/arm/plat-stm/conf.mk30
-rw-r--r--core/arch/arm/plat-stm/kern.ld.S1
-rw-r--r--core/arch/arm/plat-stm/link.mk1
-rw-r--r--core/arch/arm/plat-stm/main.c201
-rw-r--r--core/arch/arm/plat-stm/platform_config.h340
-rw-r--r--core/arch/arm/plat-stm/rng_support.c147
-rw-r--r--core/arch/arm/plat-stm/sub.mk6
-rw-r--r--core/arch/arm/plat-stm/tz_a9init.S101
-rw-r--r--core/arch/arm/plat-sunxi/conf.mk17
-rw-r--r--core/arch/arm/plat-sunxi/console.c59
-rw-r--r--core/arch/arm/plat-sunxi/entry.S107
-rw-r--r--core/arch/arm/plat-sunxi/head.c60
-rw-r--r--core/arch/arm/plat-sunxi/kern.ld.S198
-rw-r--r--core/arch/arm/plat-sunxi/link.mk54
-rw-r--r--core/arch/arm/plat-sunxi/main.c177
-rw-r--r--core/arch/arm/plat-sunxi/platform.c125
-rw-r--r--core/arch/arm/plat-sunxi/platform.h85
-rw-r--r--core/arch/arm/plat-sunxi/platform_config.h152
-rw-r--r--core/arch/arm/plat-sunxi/rng_support.c43
-rw-r--r--core/arch/arm/plat-sunxi/smp_boot.S104
-rw-r--r--core/arch/arm/plat-sunxi/smp_fixup.S116
-rw-r--r--core/arch/arm/plat-sunxi/sub.mk9
-rw-r--r--core/arch/arm/plat-ti/conf.mk24
-rw-r--r--core/arch/arm/plat-ti/console.c67
-rw-r--r--core/arch/arm/plat-ti/kern.ld.S1
-rw-r--r--core/arch/arm/plat-ti/link.mk1
-rw-r--r--core/arch/arm/plat-ti/main.c151
-rw-r--r--core/arch/arm/plat-ti/platform_config.h109
-rw-r--r--core/arch/arm/plat-ti/sub.mk3
-rw-r--r--core/arch/arm/plat-vexpress/conf.mk71
-rw-r--r--core/arch/arm/plat-vexpress/juno_core_pos_a32.S45
-rw-r--r--core/arch/arm/plat-vexpress/juno_core_pos_a64.S42
-rw-r--r--core/arch/arm/plat-vexpress/kern.ld.S1
-rw-r--r--core/arch/arm/plat-vexpress/link.mk1
-rw-r--r--core/arch/arm/plat-vexpress/main.c201
-rw-r--r--core/arch/arm/plat-vexpress/platform_config.h298
-rw-r--r--core/arch/arm/plat-vexpress/sub.mk7
-rw-r--r--core/arch/arm/plat-vexpress/vendor_props.c91
-rw-r--r--core/arch/arm/plat-zynq7k/conf.mk24
-rw-r--r--core/arch/arm/plat-zynq7k/kern.ld.S1
-rw-r--r--core/arch/arm/plat-zynq7k/link.mk1
-rw-r--r--core/arch/arm/plat-zynq7k/main.c276
-rw-r--r--core/arch/arm/plat-zynq7k/plat_init.S111
-rw-r--r--core/arch/arm/plat-zynq7k/platform_config.h276
-rw-r--r--core/arch/arm/plat-zynq7k/platform_smc.h81
-rw-r--r--core/arch/arm/plat-zynq7k/sub.mk3
-rw-r--r--core/arch/arm/plat-zynqmp/conf.mk29
-rw-r--r--core/arch/arm/plat-zynqmp/kern.ld.S1
-rw-r--r--core/arch/arm/plat-zynqmp/link.mk1
-rw-r--r--core/arch/arm/plat-zynqmp/main.c122
-rw-r--r--core/arch/arm/plat-zynqmp/platform_config.h126
-rw-r--r--core/arch/arm/plat-zynqmp/sub.mk2
-rw-r--r--core/arch/arm/pta/core_self_tests.c252
-rw-r--r--core/arch/arm/pta/core_self_tests.h37
-rw-r--r--core/arch/arm/pta/gprof.c221
-rw-r--r--core/arch/arm/pta/interrupt_tests.c239
-rw-r--r--core/arch/arm/pta/pta_self_tests.c251
-rw-r--r--core/arch/arm/pta/se_api_self_tests.c498
-rw-r--r--core/arch/arm/pta/stats.c161
-rw-r--r--core/arch/arm/pta/sub.mk14
-rw-r--r--core/arch/arm/pta/tee_fs_key_manager_tests.c375
-rw-r--r--core/arch/arm/sm/psci.c166
-rw-r--r--core/arch/arm/sm/sm.c58
-rw-r--r--core/arch/arm/sm/sm_a32.S291
-rw-r--r--core/arch/arm/sm/sm_private.h38
-rw-r--r--core/arch/arm/sm/std_smc.c77
-rw-r--r--core/arch/arm/sm/sub.mk3
-rw-r--r--core/arch/arm/tee/arch_svc.c269
-rw-r--r--core/arch/arm/tee/arch_svc_a32.S122
-rw-r--r--core/arch/arm/tee/arch_svc_a64.S205
-rw-r--r--core/arch/arm/tee/arch_svc_private.h38
-rw-r--r--core/arch/arm/tee/entry_fast.c231
-rw-r--r--core/arch/arm/tee/entry_std.c386
-rw-r--r--core/arch/arm/tee/init.c84
-rw-r--r--core/arch/arm/tee/pta_socket.c320
-rw-r--r--core/arch/arm/tee/sub.mk12
-rw-r--r--core/arch/arm/tee/svc_cache.c114
-rw-r--r--core/arch/arm/tee/svc_cache.h39
-rw-r--r--core/arch/arm/tee/svc_dummy.c35
238 files changed, 37643 insertions, 0 deletions
diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
new file mode 100644
index 0000000..4d8c6f0
--- /dev/null
+++ b/core/arch/arm/arm.mk
@@ -0,0 +1,157 @@
+CFG_LTC_OPTEE_THREAD ?= y
+# Size of emulated TrustZone protected SRAM, 360 kB.
+# Only applicable when paging is enabled.
+CFG_CORE_TZSRAM_EMUL_SIZE ?= 368640
+CFG_LPAE_ADDR_SPACE_SIZE ?= (1ull << 32)
+
+ifeq ($(CFG_ARM64_core),y)
+CFG_KERN_LINKER_FORMAT ?= elf64-littleaarch64
+CFG_KERN_LINKER_ARCH ?= aarch64
+endif
+ifeq ($(CFG_ARM32_core),y)
+CFG_KERN_LINKER_FORMAT ?= elf32-littlearm
+CFG_KERN_LINKER_ARCH ?= arm
+endif
+
+ifeq ($(CFG_TA_FLOAT_SUPPORT),y)
+# Use hard-float for floating point support in user TAs instead of
+# soft-float
+CFG_WITH_VFP ?= y
+ifeq ($(CFG_ARM64_core),y)
+# AArch64 has no fallback to soft-float
+$(call force,CFG_WITH_VFP,y)
+endif
+ifeq ($(CFG_WITH_VFP),y)
+platform-hard-float-enabled := y
+endif
+endif
+
+ifeq ($(CFG_WITH_PAGER),y)
+ifeq ($(CFG_CORE_SANITIZE_KADDRESS),y)
+$(error Error: CFG_CORE_SANITIZE_KADDRESS not compatible with CFG_WITH_PAGER)
+endif
+endif
+
+ifeq ($(CFG_ARM32_core),y)
+# Configuration directives related to ARMv7 OP-TEE boot arguments.
+# CFG_PAGEABLE_ADDR: if defined, forces pageable data physical address.
+# CFG_NS_ENTRY_ADDR: if defined, forces NS World physical entry address.
+# CFG_DT_ADDR: if defined, forces Device Tree data physical address.
+endif
+
+core-platform-cppflags += -I$(arch-dir)/include
+core-platform-subdirs += \
+ $(addprefix $(arch-dir)/, kernel mm tee pta) $(platform-dir)
+
+ifneq ($(CFG_WITH_ARM_TRUSTED_FW),y)
+core-platform-subdirs += $(arch-dir)/sm
+endif
+
+arm64-platform-cppflags += -DARM64=1 -D__LP64__=1
+arm32-platform-cppflags += -DARM32=1 -D__ILP32__=1
+
+platform-cflags-generic ?= -g -ffunction-sections -fdata-sections -pipe
+platform-aflags-generic ?= -g -pipe
+
+arm32-platform-cflags-no-hard-float ?= -mno-apcs-float -mfloat-abi=soft
+arm32-platform-cflags-hard-float ?= -mfloat-abi=hard -funsafe-math-optimizations
+arm32-platform-cflags-generic ?= -mthumb -mthumb-interwork \
+ -fno-short-enums -fno-common -mno-unaligned-access
+arm32-platform-aflags-no-hard-float ?=
+
+arm64-platform-cflags-no-hard-float ?= -mgeneral-regs-only
+arm64-platform-cflags-hard-float ?=
+arm64-platform-cflags-generic ?= -mstrict-align
+
+ifeq ($(DEBUG),1)
+platform-cflags-optimization ?= -O0
+else
+platform-cflags-optimization ?= -Os
+endif
+
+platform-cflags-debug-info ?= -g3
+platform-aflags-debug-info ?=
+
+core-platform-cflags += $(platform-cflags-optimization)
+core-platform-cflags += $(platform-cflags-generic)
+core-platform-cflags += $(platform-cflags-debug-info)
+
+core-platform-aflags += $(platform-aflags-generic)
+core-platform-aflags += $(platform-aflags-debug-info)
+
+ifeq ($(CFG_ARM64_core),y)
+arch-bits-core := 64
+core-platform-cppflags += $(arm64-platform-cppflags)
+core-platform-cflags += $(arm64-platform-cflags)
+core-platform-cflags += $(arm64-platform-cflags-generic)
+core-platform-cflags += $(arm64-platform-cflags-no-hard-float)
+core-platform-aflags += $(arm64-platform-aflags)
+else
+arch-bits-core := 32
+core-platform-cppflags += $(arm32-platform-cppflags)
+core-platform-cflags += $(arm32-platform-cflags)
+core-platform-cflags += $(arm32-platform-cflags-no-hard-float)
+ifeq ($(CFG_CORE_UNWIND),y)
+core-platform-cflags += -funwind-tables
+endif
+core-platform-cflags += $(arm32-platform-cflags-generic)
+core-platform-aflags += $(core_arm32-platform-aflags)
+core-platform-aflags += $(arm32-platform-aflags)
+endif
+
+ifneq ($(filter ta_arm32,$(ta-targets)),)
+# Variables for ta-target/sm "ta_arm32"
+CFG_ARM32_ta_arm32 := y
+arch-bits-ta_arm32 := 32
+ta_arm32-platform-cppflags += $(arm32-platform-cppflags)
+ta_arm32-platform-cflags += $(arm32-platform-cflags)
+ta_arm32-platform-cflags += $(platform-cflags-optimization)
+ta_arm32-platform-cflags += $(platform-cflags-debug-info)
+ta_arm32-platform-cflags += -fpie
+ta_arm32-platform-cflags += $(arm32-platform-cflags-generic)
+ifeq ($(platform-hard-float-enabled),y)
+ta_arm32-platform-cflags += $(arm32-platform-cflags-hard-float)
+else
+ta_arm32-platform-cflags += $(arm32-platform-cflags-no-hard-float)
+endif
+ta_arm32-platform-aflags += $(platform-aflags-debug-info)
+ta_arm32-platform-aflags += $(arm32-platform-aflags)
+
+ta-mk-file-export-vars-ta_arm32 += CFG_ARM32_ta_arm32
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-cppflags
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-cflags
+ta-mk-file-export-vars-ta_arm32 += ta_arm32-platform-aflags
+
+ta-mk-file-export-add-ta_arm32 += CROSS_COMPILE32 ?= $$(CROSS_COMPILE)_nl_
+ta-mk-file-export-add-ta_arm32 += CROSS_COMPILE_ta_arm32 ?= $$(CROSS_COMPILE32)_nl_
+endif
+
+ifneq ($(filter ta_arm64,$(ta-targets)),)
+# Variables for ta-target/sm "ta_arm64"
+CFG_ARM64_ta_arm64 := y
+arch-bits-ta_arm64 := 64
+ta_arm64-platform-cppflags += $(arm64-platform-cppflags)
+ta_arm64-platform-cflags += $(arm64-platform-cflags)
+ta_arm64-platform-cflags += $(platform-cflags-optimization)
+ta_arm64-platform-cflags += $(platform-cflags-debug-info)
+ta_arm64-platform-cflags += -fpie
+ta_arm64-platform-cflags += $(arm64-platform-cflags-generic)
+ifeq ($(platform-hard-float-enabled),y)
+ta_arm64-platform-cflags += $(arm64-platform-cflags-hard-float)
+else
+ta_arm64-platform-cflags += $(arm64-platform-cflags-no-hard-float)
+endif
+ta_arm64-platform-aflags += $(platform-aflags-debug-info)
+ta_arm64-platform-aflags += $(arm64-platform-aflags)
+
+ta-mk-file-export-vars-ta_arm64 += CFG_ARM64_ta_arm64
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-cppflags
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-cflags
+ta-mk-file-export-vars-ta_arm64 += ta_arm64-platform-aflags
+
+ta-mk-file-export-add-ta_arm64 += CROSS_COMPILE64 ?= $$(CROSS_COMPILE)_nl_
+ta-mk-file-export-add-ta_arm64 += CROSS_COMPILE_ta_arm64 ?= $$(CROSS_COMPILE64)_nl_
+endif
+
+# Set cross compiler prefix for each submodule
+$(foreach sm, core $(ta-targets), $(eval CROSS_COMPILE_$(sm) ?= $(CROSS_COMPILE$(arch-bits-$(sm)))))
diff --git a/core/arch/arm/include/arm.h b/core/arch/arm/include/arm.h
new file mode 100644
index 0000000..a644dd4
--- /dev/null
+++ b/core/arch/arm/include/arm.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ARM_H
+#define ARM_H
+
+
+#define MPIDR_CPU_MASK 0xff
+#define MPIDR_CLUSTER_SHIFT 8
+#define MPIDR_CLUSTER_MASK (0xff << MPIDR_CLUSTER_SHIFT)
+
+#define ARM32_CPSR_MODE_MASK 0x1f
+#define ARM32_CPSR_MODE_USR 0x10
+#define ARM32_CPSR_MODE_FIQ 0x11
+#define ARM32_CPSR_MODE_IRQ 0x12
+#define ARM32_CPSR_MODE_SVC 0x13
+#define ARM32_CPSR_MODE_MON 0x16
+#define ARM32_CPSR_MODE_ABT 0x17
+#define ARM32_CPSR_MODE_UND 0x1b
+#define ARM32_CPSR_MODE_SYS 0x1f
+
+#define ARM32_CPSR_T (1 << 5)
+#define ARM32_CPSR_F_SHIFT 6
+#define ARM32_CPSR_F (1 << 6)
+#define ARM32_CPSR_I (1 << 7)
+#define ARM32_CPSR_A (1 << 8)
+#define ARM32_CPSR_E (1 << 9)
+#define ARM32_CPSR_FIA (ARM32_CPSR_F | ARM32_CPSR_I | ARM32_CPSR_A)
+#define ARM32_CPSR_IT_MASK (ARM32_CPSR_IT_MASK1 | ARM32_CPSR_IT_MASK2)
+#define ARM32_CPSR_IT_MASK1 0x06000000
+#define ARM32_CPSR_IT_MASK2 0x0000fc00
+
+
+#ifdef ARM32
+#include <arm32.h>
+#endif
+
+#ifdef ARM64
+#include <arm64.h>
+#endif
+
+#endif /*ARM_H*/
diff --git a/core/arch/arm/include/arm32.h b/core/arch/arm/include/arm32.h
new file mode 100644
index 0000000..822ff95
--- /dev/null
+++ b/core/arch/arm/include/arm32.h
@@ -0,0 +1,606 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM32_H
+#define ARM32_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+#include <util.h>
+
+#define CPSR_MODE_MASK ARM32_CPSR_MODE_MASK
+#define CPSR_MODE_USR ARM32_CPSR_MODE_USR
+#define CPSR_MODE_FIQ ARM32_CPSR_MODE_FIQ
+#define CPSR_MODE_IRQ ARM32_CPSR_MODE_IRQ
+#define CPSR_MODE_SVC ARM32_CPSR_MODE_SVC
+#define CPSR_MODE_MON ARM32_CPSR_MODE_MON
+#define CPSR_MODE_ABT ARM32_CPSR_MODE_ABT
+#define CPSR_MODE_UND ARM32_CPSR_MODE_UND
+#define CPSR_MODE_SYS ARM32_CPSR_MODE_SYS
+
+#define CPSR_T ARM32_CPSR_T
+#define CPSR_F_SHIFT ARM32_CPSR_F_SHIFT
+#define CPSR_F ARM32_CPSR_F
+#define CPSR_I ARM32_CPSR_I
+#define CPSR_A ARM32_CPSR_A
+#define CPSR_FIA ARM32_CPSR_FIA
+#define CPSR_IT_MASK ARM32_CPSR_IT_MASK
+#define CPSR_IT_MASK1 ARM32_CPSR_IT_MASK1
+#define CPSR_IT_MASK2 ARM32_CPSR_IT_MASK2
+
+#define SCR_NS BIT32(0)
+#define SCR_IRQ BIT32(1)
+#define SCR_FIQ BIT32(2)
+#define SCR_EA BIT32(3)
+#define SCR_FW BIT32(4)
+#define SCR_AW BIT32(5)
+#define SCR_NET BIT32(6)
+#define SCR_SCD BIT32(7)
+#define SCR_HCE BIT32(8)
+#define SCR_SIF BIT32(9)
+
+#define SCTLR_M BIT32(0)
+#define SCTLR_A BIT32(1)
+#define SCTLR_C BIT32(2)
+#define SCTLR_CP15BEN BIT32(5)
+#define SCTLR_SW BIT32(10)
+#define SCTLR_Z BIT32(11)
+#define SCTLR_I BIT32(12)
+#define SCTLR_V BIT32(13)
+#define SCTLR_RR BIT32(14)
+#define SCTLR_HA BIT32(17)
+#define SCTLR_WXN BIT32(19)
+#define SCTLR_UWXN BIT32(20)
+#define SCTLR_FI BIT32(21)
+#define SCTLR_VE BIT32(24)
+#define SCTLR_EE BIT32(25)
+#define SCTLR_NMFI BIT32(26)
+#define SCTLR_TRE BIT32(28)
+#define SCTLR_AFE BIT32(29)
+#define SCTLR_TE BIT32(30)
+
+#define ACTLR_SMP BIT32(6)
+#define ACTLR_DODMBS BIT32(10)
+#define ACTLR_L2RADIS BIT32(11)
+#define ACTLR_L1RADIS BIT32(12)
+#define ACTLR_L1PCTL BIT32(13)
+#define ACTLR_DDVM BIT32(15)
+#define ACTLR_DDI BIT32(28)
+
+#define NSACR_CP10 BIT32(10)
+#define NSACR_CP11 BIT32(11)
+#define NSACR_NSD32DIS BIT32(14)
+#define NSACR_NSASEDIS BIT32(15)
+#define NSACR_NS_L2ERR BIT32(17)
+#define NSACR_NS_SMP BIT32(18)
+
+#define CPACR_ASEDIS BIT32(31)
+#define CPACR_D32DIS BIT32(30)
+#define CPACR_CP(co_proc, access) SHIFT_U32((access), ((co_proc) * 2))
+#define CPACR_CP_ACCESS_DENIED 0x0
+#define CPACR_CP_ACCESS_PL1_ONLY 0x1
+#define CPACR_CP_ACCESS_FULL 0x3
+
+
+#define DACR_DOMAIN(num, perm) SHIFT_U32((perm), ((num) * 2))
+#define DACR_DOMAIN_PERM_NO_ACCESS 0x0
+#define DACR_DOMAIN_PERM_CLIENT 0x1
+#define DACR_DOMAIN_PERM_MANAGER 0x3
+
+#define PAR_F BIT32(0)
+#define PAR_SS BIT32(1)
+#define PAR_LPAE BIT32(11)
+#define PAR_PA_SHIFT 12
+#define PAR32_PA_MASK (BIT32(20) - 1)
+#define PAR64_PA_MASK (BIT64(28) - 1)
+
+/*
+ * TTBCR has different register layout if LPAE is enabled or not.
+ * TTBCR.EAE == 0 => LPAE is not enabled
+ * TTBCR.EAE == 1 => LPAE is enabled
+ */
+#define TTBCR_EAE BIT32(31)
+
+/* When TTBCR.EAE == 0 */
+#define TTBCR_PD0 BIT32(4)
+#define TTBCR_PD1 BIT32(5)
+
+/* When TTBCR.EAE == 1 */
+#define TTBCR_T0SZ_SHIFT 0
+#define TTBCR_EPD0 BIT32(7)
+#define TTBCR_IRGN0_SHIFT 8
+#define TTBCR_ORGN0_SHIFT 10
+#define TTBCR_SH0_SHIFT 12
+#define TTBCR_T1SZ_SHIFT 16
+#define TTBCR_A1 BIT32(22)
+#define TTBCR_EPD1 BIT32(23)
+#define TTBCR_IRGN1_SHIFT 24
+#define TTBCR_ORGN1_SHIFT 26
+#define TTBCR_SH1_SHIFT 28
+
+/* Normal memory, Inner/Outer Non-cacheable */
+#define TTBCR_XRGNX_NC 0x0
+/* Normal memory, Inner/Outer Write-Back Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WB 0x1
+/* Normal memory, Inner/Outer Write-Through Cacheable */
+#define TTBCR_XRGNX_WT 0x2
+/* Normal memory, Inner/Outer Write-Back no Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WBWA 0x3
+
+/* Non-shareable */
+#define TTBCR_SHX_NSH 0x0
+/* Outer Shareable */
+#define TTBCR_SHX_OSH 0x2
+/* Inner Shareable */
+#define TTBCR_SHX_ISH 0x3
+
+#define TTBR_ASID_MASK 0xff
+#define TTBR_ASID_SHIFT 48
+
+
+#define FSR_LPAE BIT32(9)
+#define FSR_WNR BIT32(11)
+
+/* Valid if FSR.LPAE is 1 */
+#define FSR_STATUS_MASK (BIT32(6) - 1)
+
+/* Valid if FSR.LPAE is 0 */
+#define FSR_FS_MASK (BIT32(10) | (BIT32(4) - 1))
+
+#ifndef ASM
+static inline uint32_t read_mpidr(void)
+{
+ uint32_t mpidr;
+
+ asm volatile ("mrc p15, 0, %[mpidr], c0, c0, 5"
+ : [mpidr] "=r" (mpidr)
+ );
+
+ return mpidr;
+}
+
+static inline uint32_t read_sctlr(void)
+{
+ uint32_t sctlr;
+
+ asm volatile ("mrc p15, 0, %[sctlr], c1, c0, 0"
+ : [sctlr] "=r" (sctlr)
+ );
+
+ return sctlr;
+}
+
+static inline void write_sctlr(uint32_t sctlr)
+{
+ asm volatile ("mcr p15, 0, %[sctlr], c1, c0, 0"
+ : : [sctlr] "r" (sctlr)
+ );
+}
+
+static inline uint32_t read_cpacr(void)
+{
+ uint32_t cpacr;
+
+ asm volatile ("mrc p15, 0, %[cpacr], c1, c0, 2"
+ : [cpacr] "=r" (cpacr)
+ );
+
+ return cpacr;
+}
+
+static inline void write_cpacr(uint32_t cpacr)
+{
+ asm volatile ("mcr p15, 0, %[cpacr], c1, c0, 2"
+ : : [cpacr] "r" (cpacr)
+ );
+}
+
+static inline void write_ttbr0(uint32_t ttbr0)
+{
+ asm volatile ("mcr p15, 0, %[ttbr0], c2, c0, 0"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
+static inline void write_ttbr0_64bit(uint64_t ttbr0)
+{
+ asm volatile ("mcrr p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
+static inline uint32_t read_ttbr0(void)
+{
+ uint32_t ttbr0;
+
+ asm volatile ("mrc p15, 0, %[ttbr0], c2, c0, 0"
+ : [ttbr0] "=r" (ttbr0)
+ );
+
+ return ttbr0;
+}
+
+static inline uint64_t read_ttbr0_64bit(void)
+{
+ uint64_t ttbr0;
+
+ asm volatile ("mrrc p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : [ttbr0] "=r" (ttbr0)
+ );
+
+ return ttbr0;
+}
+
+static inline void write_ttbr1(uint32_t ttbr1)
+{
+ asm volatile ("mcr p15, 0, %[ttbr1], c2, c0, 1"
+ : : [ttbr1] "r" (ttbr1)
+ );
+}
+
+static inline void write_ttbr1_64bit(uint64_t ttbr1)
+{
+ asm volatile ("mcrr p15, 1, %Q[ttbr1], %R[ttbr1], c2"
+ : : [ttbr1] "r" (ttbr1)
+ );
+}
+
+static inline uint32_t read_ttbr1(void)
+{
+ uint32_t ttbr1;
+
+ asm volatile ("mrc p15, 0, %[ttbr1], c2, c0, 1"
+ : [ttbr1] "=r" (ttbr1)
+ );
+
+ return ttbr1;
+}
+
+
+static inline void write_ttbcr(uint32_t ttbcr)
+{
+ asm volatile ("mcr p15, 0, %[ttbcr], c2, c0, 2"
+ : : [ttbcr] "r" (ttbcr)
+ );
+}
+
+static inline uint32_t read_ttbcr(void)
+{
+ uint32_t ttbcr;
+
+ asm volatile ("mrc p15, 0, %[ttbcr], c2, c0, 2"
+ : [ttbcr] "=r" (ttbcr)
+ );
+
+ return ttbcr;
+}
+
+static inline void write_dacr(uint32_t dacr)
+{
+ asm volatile ("mcr p15, 0, %[dacr], c3, c0, 0"
+ : : [dacr] "r" (dacr)
+ );
+}
+
+static inline uint32_t read_ifar(void)
+{
+ uint32_t ifar;
+
+ asm volatile ("mrc p15, 0, %[ifar], c6, c0, 2"
+ : [ifar] "=r" (ifar)
+ );
+
+ return ifar;
+}
+
+static inline uint32_t read_dfar(void)
+{
+ uint32_t dfar;
+
+ asm volatile ("mrc p15, 0, %[dfar], c6, c0, 0"
+ : [dfar] "=r" (dfar)
+ );
+
+ return dfar;
+}
+
+static inline uint32_t read_dfsr(void)
+{
+ uint32_t dfsr;
+
+ asm volatile ("mrc p15, 0, %[dfsr], c5, c0, 0"
+ : [dfsr] "=r" (dfsr)
+ );
+
+ return dfsr;
+}
+
+static inline uint32_t read_ifsr(void)
+{
+ uint32_t ifsr;
+
+ asm volatile ("mrc p15, 0, %[ifsr], c5, c0, 1"
+ : [ifsr] "=r" (ifsr)
+ );
+
+ return ifsr;
+}
+
+static inline void write_scr(uint32_t scr)
+{
+ asm volatile ("mcr p15, 0, %[scr], c1, c1, 0"
+ : : [scr] "r" (scr)
+ );
+}
+
+static inline void isb(void)
+{
+ asm volatile ("isb");
+}
+
+static inline void dsb(void)
+{
+ asm volatile ("dsb");
+}
+
+static inline void dmb(void)
+{
+ asm volatile ("dmb");
+}
+
+static inline void sev(void)
+{
+ asm volatile ("sev");
+}
+
+static inline void wfe(void)
+{
+ asm volatile ("wfe");
+}
+
+/* Address translate privileged write translation (current state secure PL1) */
+static inline void write_ats1cpw(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 1" : : "r" (va));
+}
+
+static inline void write_ats1cpr(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+}
+
+static inline void write_ats1cpuw(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 3" : : "r" (va));
+}
+
+static inline void write_ats1cpur(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %0, c7, c8, 2" : : "r" (va));
+}
+
+static inline uint32_t read_par32(void)
+{
+ uint32_t val;
+
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (val));
+ return val;
+}
+
+#ifdef CFG_WITH_LPAE
+static inline uint64_t read_par64(void)
+{
+ uint64_t val;
+
+ asm volatile ("mrrc p15, 0, %Q0, %R0, c7" : "=r" (val));
+ return val;
+}
+#endif
+
+static inline void write_mair0(uint32_t mair0)
+{
+ asm volatile ("mcr p15, 0, %[mair0], c10, c2, 0"
+ : : [mair0] "r" (mair0)
+ );
+}
+
+static inline void write_prrr(uint32_t prrr)
+{
+ /*
+ * Same physical register as MAIR0.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR0 replaces the PRRR
+ */
+ write_mair0(prrr);
+}
+
+static inline void write_mair1(uint32_t mair1)
+{
+ asm volatile ("mcr p15, 0, %[mair1], c10, c2, 1"
+ : : [mair1] "r" (mair1)
+ );
+}
+
+static inline void write_nmrr(uint32_t nmrr)
+{
+ /*
+ * Same physical register as MAIR1.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR1 replaces the NMRR
+ */
+ write_mair1(nmrr);
+}
+
+static inline uint32_t read_contextidr(void)
+{
+ uint32_t contextidr;
+
+ asm volatile ("mrc p15, 0, %[contextidr], c13, c0, 1"
+ : [contextidr] "=r" (contextidr)
+ );
+
+ return contextidr;
+}
+
+static inline void write_contextidr(uint32_t contextidr)
+{
+ asm volatile ("mcr p15, 0, %[contextidr], c13, c0, 1"
+ : : [contextidr] "r" (contextidr)
+ );
+}
+
+static inline uint32_t read_cpsr(void)
+{
+ uint32_t cpsr;
+
+ asm volatile ("mrs %[cpsr], cpsr"
+ : [cpsr] "=r" (cpsr)
+ );
+ return cpsr;
+}
+
+static inline void write_cpsr(uint32_t cpsr)
+{
+ asm volatile ("msr cpsr_fsxc, %[cpsr]"
+ : : [cpsr] "r" (cpsr)
+ );
+}
+
+static inline uint32_t read_spsr(void)
+{
+ uint32_t spsr;
+
+ asm volatile ("mrs %[spsr], spsr"
+ : [spsr] "=r" (spsr)
+ );
+ return spsr;
+}
+
+static inline uint32_t read_actlr(void)
+{
+ uint32_t actlr;
+
+ asm volatile ("mrc p15, 0, %[actlr], c1, c0, 1"
+ : [actlr] "=r" (actlr)
+ );
+
+ return actlr;
+}
+
+static inline void write_actlr(uint32_t actlr)
+{
+ asm volatile ("mcr p15, 0, %[actlr], c1, c0, 1"
+ : : [actlr] "r" (actlr)
+ );
+}
+
+static inline uint32_t read_nsacr(void)
+{
+ uint32_t nsacr;
+
+ asm volatile ("mrc p15, 0, %[nsacr], c1, c1, 2"
+ : [nsacr] "=r" (nsacr)
+ );
+
+ return nsacr;
+}
+
+static inline void write_nsacr(uint32_t nsacr)
+{
+ asm volatile ("mcr p15, 0, %[nsacr], c1, c1, 2"
+ : : [nsacr] "r" (nsacr)
+ );
+}
+
+static inline uint64_t read_cntpct(void)
+{
+ uint64_t val;
+
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (val));
+ return val;
+}
+
+static inline uint32_t read_cntfrq(void)
+{
+ uint32_t frq;
+
+ asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (frq));
+ return frq;
+}
+
+static inline void write_cntfrq(uint32_t frq)
+{
+ asm volatile("mcr p15, 0, %0, c14, c0, 0" : : "r" (frq));
+}
+
+static __always_inline uint32_t read_pc(void)
+{
+ uint32_t val;
+
+ asm volatile ("adr %0, ." : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_sp(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, sp" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_lr(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, lr" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_fp(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, fp" : "=r" (val));
+ return val;
+}
+
+static __always_inline uint32_t read_r7(void)
+{
+ uint32_t val;
+
+ asm volatile ("mov %0, r7" : "=r" (val));
+ return val;
+}
+#endif /*ASM*/
+
+#endif /*ARM32_H*/
diff --git a/core/arch/arm/include/arm32_macros.S b/core/arch/arm/include/arm32_macros.S
new file mode 100644
index 0000000..0a4ca28
--- /dev/null
+++ b/core/arch/arm/include/arm32_macros.S
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+	/* Please keep them sorted based on the CRn register */
+
+	/* MPIDR: Multiprocessor Affinity Register */
+	.macro read_mpidr reg
+	mrc	p15, 0, \reg, c0, c0, 5
+	.endm
+
+	/* SCTLR: System Control Register */
+	.macro read_sctlr reg
+	mrc	p15, 0, \reg, c1, c0, 0
+	.endm
+
+	.macro write_sctlr reg
+	mcr	p15, 0, \reg, c1, c0, 0
+	.endm
+
+	/* ACTLR: Auxiliary Control Register */
+	.macro write_actlr reg
+	mcr	p15, 0, \reg, c1, c0, 1
+	.endm
+
+	.macro read_actlr reg
+	mrc	p15, 0, \reg, c1, c0, 1
+	.endm
+
+	/* CPACR: Coprocessor Access Control Register */
+	.macro write_cpacr reg
+	mcr	p15, 0, \reg, c1, c0, 2
+	.endm
+
+	.macro read_cpacr reg
+	mrc	p15, 0, \reg, c1, c0, 2
+	.endm
+
+	/* SCR: Secure Configuration Register */
+	.macro read_scr reg
+	mrc	p15, 0, \reg, c1, c1, 0
+	.endm
+
+	.macro write_scr reg
+	mcr	p15, 0, \reg, c1, c1, 0
+	.endm
+
+	/* NSACR: Non-Secure Access Control Register */
+	.macro write_nsacr reg
+	mcr	p15, 0, \reg, c1, c1, 2
+	.endm
+
+	.macro read_nsacr reg
+	mrc	p15, 0, \reg, c1, c1, 2
+	.endm
+
+	/* TTBR0: Translation Table Base Register 0 */
+	.macro write_ttbr0 reg
+	mcr	p15, 0, \reg, c2, c0, 0
+	.endm
+
+	.macro read_ttbr0 reg
+	mrc	p15, 0, \reg, c2, c0, 0
+	.endm
+
+	/* TTBR1: Translation Table Base Register 1 */
+	.macro write_ttbr1 reg
+	mcr	p15, 0, \reg, c2, c0, 1
+	.endm
+
+	.macro read_ttbr1 reg
+	mrc	p15, 0, \reg, c2, c0, 1
+	.endm
+
+	/* TTBCR: Translation Table Base Control Register */
+	.macro write_ttbcr reg
+	mcr	p15, 0, \reg, c2, c0, 2
+	.endm
+
+	.macro read_ttbcr reg
+	mrc	p15, 0, \reg, c2, c0, 2
+	.endm
+
+
+	/* DACR: Domain Access Control Register */
+	.macro write_dacr reg
+	mcr	p15, 0, \reg, c3, c0, 0
+	.endm
+
+	.macro read_dacr reg
+	mrc	p15, 0, \reg, c3, c0, 0
+	.endm
+
+	/* DFSR: Data Fault Status Register */
+	.macro read_dfsr reg
+	mrc	p15, 0, \reg, c5, c0, 0
+	.endm
+
+	.macro write_iciallu
+	/* ICIALLU: invalidate all instruction caches to PoU (reg ignored) */
+	mcr	p15, 0, r0, c7, c5, 0
+	.endm
+
+	.macro write_icialluis
+	/*
+	 * ICIALLUIS: invalidate all instruction caches to PoU, Inner
+	 * Shareable (register ignored)
+	 */
+	mcr	p15, 0, r0, c7, c1, 0
+	.endm
+
+	.macro write_bpiall
+	/*
+	 * BPIALL: invalidate entire branch predictor array (register
+	 * ignored). The encoding is c7, c5, 6 — c7, c5, 0 is ICIALLU
+	 * (instruction cache invalidate), not the branch predictor op.
+	 */
+	mcr	p15, 0, r0, c7, c5, 6
+	.endm
+
+	.macro write_bpiallis
+	/*
+	 * BPIALLIS: invalidate entire branch predictor array, Inner
+	 * Shareable (register ignored)
+	 */
+	mcr	p15, 0, r0, c7, c1, 6
+	.endm
+
+	.macro write_tlbiall
+	/* TLBIALL: invalidate entire unified TLB (register ignored) */
+	mcr	p15, 0, r0, c8, c7, 0
+	.endm
+
+	.macro write_tlbiallis
+	/*
+	 * TLBIALLIS: invalidate entire unified TLB, Inner Shareable
+	 * (register ignored)
+	 */
+	mcr	p15, 0, r0, c8, c3, 0
+	.endm
+
+	.macro write_tlbiasidis reg
+	/* TLBIASIDIS: invalidate unified TLB by ASID, Inner Shareable */
+	mcr	p15, 0, \reg, c8, c3, 2
+	.endm
+
+	/* PRRR: Primary Region Remap Register (TEX remap) */
+	.macro write_prrr reg
+	mcr	p15, 0, \reg, c10, c2, 0
+	.endm
+
+	.macro read_prrr reg
+	mrc	p15, 0, \reg, c10, c2, 0
+	.endm
+
+	/* NMRR: Normal Memory Remap Register (TEX remap) */
+	.macro write_nmrr reg
+	mcr	p15, 0, \reg, c10, c2, 1
+	.endm
+
+	.macro read_nmrr reg
+	mrc	p15, 0, \reg, c10, c2, 1
+	.endm
+
+	/* VBAR: Vector Base Address Register */
+	.macro read_vbar reg
+	mrc	p15, 0, \reg, c12, c0, 0
+	.endm
+
+	.macro write_vbar reg
+	mcr	p15, 0, \reg, c12, c0, 0
+	.endm
+
+	/* MVBAR: Monitor Vector Base Address Register */
+	.macro write_mvbar reg
+	mcr	p15, 0, \reg, c12, c0, 1
+	.endm
+
+	.macro read_mvbar reg
+	mrc	p15, 0, \reg, c12, c0, 1
+	.endm
+
+	/* FCSEIDR: FCSE Process ID Register */
+	.macro write_fcseidr reg
+	mcr	p15, 0, \reg, c13, c0, 0
+	.endm
+
+	.macro read_fcseidr reg
+	mrc	p15, 0, \reg, c13, c0, 0
+	.endm
+
+	/* CONTEXTIDR: Context ID Register */
+	.macro write_contextidr reg
+	mcr	p15, 0, \reg, c13, c0, 1
+	.endm
+
+	.macro read_contextidr reg
+	mrc	p15, 0, \reg, c13, c0, 1
+	.endm
+
+	/* TPIDRURO: user read-only Thread ID Register */
+	.macro write_tpidruro reg
+	mcr	p15, 0, \reg, c13, c0, 3
+	.endm
+
+	.macro read_tpidruro reg
+	mrc	p15, 0, \reg, c13, c0, 3
+	.endm
+
+	/*
+	 * Load a 32-bit immediate into \reg: a single mov when the value
+	 * fits in the low 16 bits, otherwise a movw/movt pair.
+	 */
+	.macro mov_imm reg, val
+	.if ((\val) & 0xffff0000) == 0
+	mov	\reg, #(\val)
+	.else
+	movw	\reg, #((\val) & 0xffff)
+	movt	\reg, #((\val) >> 16)
+	.endif
+	.endm
+
diff --git a/core/arch/arm/include/arm32_macros_cortex_a9.S b/core/arch/arm/include/arm32_macros_cortex_a9.S
new file mode 100644
index 0000000..57a2a10
--- /dev/null
+++ b/core/arch/arm/include/arm32_macros_cortex_a9.S
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+	/*
+	 * Cortex-A9 implementation-defined CP15 c15 registers (see the
+	 * Cortex-A9 TRM). c15, c0, 0 is the Power Control Register.
+	 */
+	.macro write_pcr reg
+	mcr	p15, 0, \reg, c15, c0, 0
+	.endm
+
+	.macro read_pcr reg
+	mrc	p15, 0, \reg, c15, c0, 0
+	.endm
+
+	/* c15, c0, 1 is the Diagnostic Register */
+	.macro write_diag reg
+	mcr	p15, 0, \reg, c15, c0, 1
+	.endm
+
+	.macro read_diag reg
+	mrc	p15, 0, \reg, c15, c0, 1
+	.endm
diff --git a/core/arch/arm/include/arm64.h b/core/arch/arm/include/arm64.h
new file mode 100644
index 0000000..148b761
--- /dev/null
+++ b/core/arch/arm/include/arm64.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ARM64_H
+#define ARM64_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+#include <util.h>
+
+#define SCTLR_M BIT32(0)
+#define SCTLR_A BIT32(1)
+#define SCTLR_C BIT32(2)
+#define SCTLR_SA BIT32(3)
+#define SCTLR_I BIT32(12)
+
+#define TTBR_ASID_MASK 0xff
+#define TTBR_ASID_SHIFT 48
+
+#define CLIDR_LOUIS_SHIFT 21
+#define CLIDR_LOC_SHIFT 24
+#define CLIDR_FIELD_WIDTH 3
+
+#define CSSELR_LEVEL_SHIFT 1
+
+#define DAIFBIT_FIQ BIT32(0)
+#define DAIFBIT_IRQ BIT32(1)
+#define DAIFBIT_ABT BIT32(2)
+#define DAIFBIT_DBG BIT32(3)
+#define DAIFBIT_ALL (DAIFBIT_FIQ | DAIFBIT_IRQ | \
+ DAIFBIT_ABT | DAIFBIT_DBG)
+
+#define DAIF_F_SHIFT 6
+#define DAIF_F BIT32(6)
+#define DAIF_I BIT32(7)
+#define DAIF_A BIT32(8)
+#define DAIF_D BIT32(9)
+#define DAIF_AIF (DAIF_A | DAIF_I | DAIF_F)
+
+#define SPSR_MODE_RW_SHIFT 4
+#define SPSR_MODE_RW_MASK 0x1
+#define SPSR_MODE_RW_64 0x0
+#define SPSR_MODE_RW_32 0x1
+
+#define SPSR_64_MODE_SP_SHIFT 0
+#define SPSR_64_MODE_SP_MASK 0x1
+#define SPSR_64_MODE_SP_EL0 0x0
+#define SPSR_64_MODE_SP_ELX 0x1
+
+#define SPSR_64_MODE_EL_SHIFT 2
+#define SPSR_64_MODE_EL_MASK 0x3
+#define SPSR_64_MODE_EL1 0x1
+#define SPSR_64_MODE_EL0 0x0
+
+#define SPSR_64_DAIF_SHIFT 6
+#define SPSR_64_DAIF_MASK 0xf
+
+#define SPSR_32_AIF_SHIFT 6
+#define SPSR_32_AIF_MASK 0x7
+
+#define SPSR_32_E_SHIFT 9
+#define SPSR_32_E_MASK 0x1
+#define SPSR_32_E_LITTLE 0x0
+#define SPSR_32_E_BIG 0x1
+
+#define SPSR_32_T_SHIFT 5
+#define SPSR_32_T_MASK 0x1
+#define SPSR_32_T_ARM 0x0
+#define SPSR_32_T_THUMB 0x1
+
+#define SPSR_32_MODE_SHIFT 0
+#define SPSR_32_MODE_MASK 0xf
+#define SPSR_32_MODE_USR 0x0
+
+
+#define SPSR_64(el, sp, daif) \
+ (SPSR_MODE_RW_64 << SPSR_MODE_RW_SHIFT | \
+ ((el) & SPSR_64_MODE_EL_MASK) << SPSR_64_MODE_EL_SHIFT | \
+ ((sp) & SPSR_64_MODE_SP_MASK) << SPSR_64_MODE_SP_SHIFT | \
+ ((daif) & SPSR_64_DAIF_MASK) << SPSR_64_DAIF_SHIFT)
+
+#define SPSR_32(mode, isa, aif) \
+ (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT | \
+ SPSR_32_E_LITTLE << SPSR_32_E_SHIFT | \
+ ((mode) & SPSR_32_MODE_MASK) << SPSR_32_MODE_SHIFT | \
+ ((isa) & SPSR_32_T_MASK) << SPSR_32_T_SHIFT | \
+ ((aif) & SPSR_32_AIF_MASK) << SPSR_32_AIF_SHIFT)
+
+
+#define TCR_T0SZ_SHIFT 0
+#define TCR_EPD0 BIT32(7)
+#define TCR_IRGN0_SHIFT 8
+#define TCR_ORGN0_SHIFT 10
+#define TCR_SH0_SHIFT 12
+#define TCR_T1SZ_SHIFT 16
+#define TCR_A1 BIT32(22)
+#define TCR_EPD1 BIT32(23)
+#define TCR_IRGN1_SHIFT 24
+#define TCR_ORGN1_SHIFT 26
+#define TCR_SH1_SHIFT 28
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_TG1_4KB SHIFT_U32(2, 30)
+#define TCR_RES1 BIT32(31)
+
+
+/* Normal memory, Inner/Outer Non-cacheable */
+#define TCR_XRGNX_NC 0x0
+/* Normal memory, Inner/Outer Write-Back Write-Allocate Cacheable */
+#define TCR_XRGNX_WB 0x1
+/* Normal memory, Inner/Outer Write-Through Cacheable */
+#define TCR_XRGNX_WT 0x2
+/* Normal memory, Inner/Outer Write-Back no Write-Allocate Cacheable */
+#define TCR_XRGNX_WBWA 0x3
+
+/* Non-shareable */
+#define TCR_SHX_NSH 0x0
+/* Outer Shareable */
+#define TCR_SHX_OSH 0x2
+/* Inner Shareable */
+#define TCR_SHX_ISH 0x3
+
+#define ESR_EC_SHIFT 26
+#define ESR_EC_MASK 0x3f
+
+#define ESR_EC_UNKNOWN 0x00
+#define ESR_EC_WFI 0x01
+#define ESR_EC_AARCH32_CP15_32 0x03
+#define ESR_EC_AARCH32_CP15_64 0x04
+#define ESR_EC_AARCH32_CP14_MR 0x05
+#define ESR_EC_AARCH32_CP14_LS 0x06
+#define ESR_EC_FP_ASIMD 0x07
+#define ESR_EC_AARCH32_CP10_ID 0x08
+#define ESR_EC_AARCH32_CP14_64 0x0c
+#define ESR_EC_ILLEGAL 0x0e
+#define ESR_EC_AARCH32_SVC 0x11
+#define ESR_EC_AARCH64_SVC 0x15
+#define ESR_EC_AARCH64_SYS 0x18
+#define ESR_EC_IABT_EL0 0x20
+#define ESR_EC_IABT_EL1 0x21
+#define ESR_EC_PC_ALIGN 0x22
+#define ESR_EC_DABT_EL0 0x24
+#define ESR_EC_DABT_EL1 0x25
+#define ESR_EC_SP_ALIGN 0x26
+#define ESR_EC_AARCH32_FP 0x28
+#define ESR_EC_AARCH64_FP 0x2c
+#define ESR_EC_SERROR 0x2f
+#define ESR_EC_BREAKPT_EL0 0x30
+#define ESR_EC_BREAKPT_EL1 0x31
+#define ESR_EC_SOFTSTP_EL0 0x32
+#define ESR_EC_SOFTSTP_EL1 0x33
+#define ESR_EC_WATCHPT_EL0 0x34
+#define ESR_EC_WATCHPT_EL1 0x35
+#define ESR_EC_AARCH32_BKPT 0x38
+#define ESR_EC_AARCH64_BRK 0x3c
+
+/* Combined defines for DFSC and IFSC */
+#define ESR_FSC_MASK 0x3f
+#define ESR_FSC_TRANS_L0 0x04
+#define ESR_FSC_TRANS_L1 0x05
+#define ESR_FSC_TRANS_L2 0x06
+#define ESR_FSC_TRANS_L3 0x07
+#define ESR_FSC_ACCF_L1 0x09
+#define ESR_FSC_ACCF_L2 0x0a
+#define ESR_FSC_ACCF_L3 0x0b
+#define ESR_FSC_PERMF_L1 0x0d
+#define ESR_FSC_PERMF_L2 0x0e
+#define ESR_FSC_PERMF_L3 0x0f
+#define ESR_FSC_ALIGN 0x21
+
+/* WnR for DABT and RES0 for IABT */
+#define ESR_ABT_WNR BIT32(6)
+
+#define CPACR_EL1_FPEN_SHIFT 20
+#define CPACR_EL1_FPEN_MASK 0x3
+#define CPACR_EL1_FPEN_NONE 0x0
+#define CPACR_EL1_FPEN_EL1 0x1
+#define CPACR_EL1_FPEN_EL0EL1 0x3
+#define CPACR_EL1_FPEN(x) ((x) >> CPACR_EL1_FPEN_SHIFT \
+ & CPACR_EL1_FPEN_MASK)
+
+
+#define PAR_F BIT32(0)
+#define PAR_PA_SHIFT 12
+#define PAR_PA_MASK (BIT64(36) - 1)
+
+#ifndef ASM
+/* Instruction Synchronization Barrier: flush the pipeline */
+static inline void isb(void)
+{
+	asm volatile ("isb");
+}
+
+/* Full-system Data Synchronization Barrier */
+static inline void dsb(void)
+{
+	asm volatile ("dsb sy");
+}
+
+/*
+ * AT S1E1R: stage-1 EL1 read address translation of va; the result is
+ * reported in PAR_EL1.
+ */
+static inline void write_at_s1e1r(uint64_t va)
+{
+	asm volatile ("at	S1E1R, %0" : : "r" (va));
+}
+
+/*
+ * Return the current program counter: "adr %0, ." loads the address of
+ * the instruction itself. __always_inline ensures the value reflects
+ * the caller's location rather than that of an out-of-line helper.
+ */
+static __always_inline uint64_t read_pc(void)
+{
+	uint64_t val;
+
+	asm volatile ("adr %0, ." : "=r" (val));
+	return val;
+}
+
+/* Return the frame pointer, x29 in the AArch64 procedure call standard */
+static __always_inline uint64_t read_fp(void)
+{
+	uint64_t val;
+
+	asm volatile ("mov %0, x29" : "=r" (val));
+	return val;
+}
+
+/*
+ * Templates for register read/write functions based on mrs/msr
+ */
+
+#define DEFINE_REG_READ_FUNC_(reg, type, asmreg) \
+static inline type read_##reg(void) \
+{ \
+ type val; \
+ \
+ asm volatile("mrs %0, " #asmreg : "=r" (val)); \
+ return val; \
+}
+
+#define DEFINE_REG_WRITE_FUNC_(reg, type, asmreg) \
+static inline void write_##reg(type val) \
+{ \
+ asm volatile("msr " #asmreg ", %0" : : "r" (val)); \
+}
+
+#define DEFINE_U32_REG_READ_FUNC(reg) \
+ DEFINE_REG_READ_FUNC_(reg, uint32_t, reg)
+
+#define DEFINE_U32_REG_WRITE_FUNC(reg) \
+ DEFINE_REG_WRITE_FUNC_(reg, uint32_t, reg)
+
+#define DEFINE_U32_REG_READWRITE_FUNCS(reg) \
+ DEFINE_U32_REG_READ_FUNC(reg) \
+ DEFINE_U32_REG_WRITE_FUNC(reg)
+
+#define DEFINE_U64_REG_READ_FUNC(reg) \
+ DEFINE_REG_READ_FUNC_(reg, uint64_t, reg)
+
+#define DEFINE_U64_REG_WRITE_FUNC(reg) \
+ DEFINE_REG_WRITE_FUNC_(reg, uint64_t, reg)
+
+#define DEFINE_U64_REG_READWRITE_FUNCS(reg) \
+ DEFINE_U64_REG_READ_FUNC(reg) \
+ DEFINE_U64_REG_WRITE_FUNC(reg)
+
+/*
+ * Define register access functions
+ */
+
+DEFINE_U32_REG_READWRITE_FUNCS(cpacr_el1)
+DEFINE_U32_REG_READWRITE_FUNCS(daif)
+DEFINE_U32_REG_READWRITE_FUNCS(fpcr)
+DEFINE_U32_REG_READWRITE_FUNCS(fpsr)
+
+DEFINE_U32_REG_READ_FUNC(contextidr_el1)
+DEFINE_U32_REG_READ_FUNC(sctlr_el1)
+
+DEFINE_REG_READ_FUNC_(cntfrq, uint32_t, cntfrq_el0)
+
+DEFINE_U64_REG_READWRITE_FUNCS(ttbr0_el1)
+DEFINE_U64_REG_READWRITE_FUNCS(ttbr1_el1)
+DEFINE_U64_REG_READWRITE_FUNCS(tcr_el1)
+
+DEFINE_U64_REG_READ_FUNC(esr_el1)
+DEFINE_U64_REG_READ_FUNC(far_el1)
+DEFINE_U64_REG_READ_FUNC(mpidr_el1)
+DEFINE_U64_REG_READ_FUNC(par_el1)
+
+DEFINE_U64_REG_WRITE_FUNC(mair_el1)
+
+DEFINE_REG_READ_FUNC_(cntpct, uint64_t, cntpct_el0)
+
+#endif /*ASM*/
+
+#endif /*ARM64_H*/
+
diff --git a/core/arch/arm/include/arm64_macros.S b/core/arch/arm/include/arm64_macros.S
new file mode 100644
index 0000000..981a150
--- /dev/null
+++ b/core/arch/arm/include/arm64_macros.S
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+	.altmacro
+
+	/*
+	 * This helper macro concatenates instr_prefix, instr_suffix, to
+	 * create a ldp/stp instruction. It also selects register name x/w
+	 * based on reg_bytes.
+	 */
+	.macro __do_dregs instr_prefix, instr_suffix, reg_bytes, base_reg, \
+			base_offs, reg0, reg1
+		.if \reg_bytes == 8
+			\instr_prefix\instr_suffix \
+				x\reg0, x\reg1, [\base_reg, #\base_offs]
+		.else
+			\instr_prefix\instr_suffix \
+				w\reg0, w\reg1, [\base_reg, #\base_offs]
+		.endif
+	.endm
+
+	/*
+	 * This helper macro concatenates instr_prefix, instr_suffix, to
+	 * create a ldr/str instruction. It also selects register name x/w
+	 * based on reg_bytes.
+	 */
+	.macro __do_reg instr_prefix, instr_suffix, reg_bytes, base_reg, \
+			base_offs, reg
+		.if \reg_bytes == 8
+			\instr_prefix\instr_suffix \
+				x\reg, [\base_reg, #\base_offs]
+		.else
+			\instr_prefix\instr_suffix \
+				w\reg, [\base_reg, #\base_offs]
+		.endif
+	.endm
+
+	/*
+	 * This helper macro uses recursion to create a loop which will
+	 * start with generating instructions for register pairs and if
+	 * it's an odd number of registers end with a single load/store.
+	 * The %() expressions need .altmacro (enabled above) to be
+	 * evaluated at expansion time.
+	 */
+	.macro _do_regs instr_prefix, reg_bytes, base_reg, base_offs, \
+			from_regnum, to_regnum
+		.if (\to_regnum - \from_regnum + 1) >= 2
+			__do_dregs \instr_prefix, p, \reg_bytes, \base_reg, \
+				\base_offs, \from_regnum, %(\from_regnum + 1)
+		.else
+			__do_reg \instr_prefix, r, \reg_bytes, \base_reg, \
+				\base_offs, \from_regnum
+		.endif
+		.if (\to_regnum - \from_regnum + 1) > 2
+			_do_regs \instr_prefix, \reg_bytes, \base_reg, \
+				%(\base_offs + 2 * \reg_bytes), \
+				%(\from_regnum + 2), \to_regnum
+		.endif
+	.endm
+
+	/*
+	 * Stores registers x[from_regnum]..x[to_regnum] at
+	 * [base_reg, #base_offs]
+	 */
+	.macro store_xregs base_reg, base_offs, from_regnum, to_regnum
+		_do_regs st 8 \base_reg, \base_offs, \from_regnum, \to_regnum
+	.endm
+
+	/*
+	 * Stores registers w[from_regnum]..w[to_regnum] at
+	 * [base_reg, #base_offs]
+	 */
+	.macro store_wregs base_reg, base_offs, from_regnum, to_regnum
+		_do_regs st 4 \base_reg, \base_offs, \from_regnum, \to_regnum
+	.endm
+
+	/*
+	 * Loads registers x[from_regnum]..x[to_regnum] at
+	 * [base_reg, #base_offs]
+	 */
+	.macro load_xregs base_reg, base_offs, from_regnum, to_regnum
+		_do_regs ld 8 \base_reg, \base_offs, \from_regnum, \to_regnum
+	.endm
+
+	/*
+	 * Loads registers w[from_regnum]..w[to_regnum] at
+	 * [base_reg, #base_offs]
+	 */
+	.macro load_wregs base_reg, base_offs, from_regnum, to_regnum
+		_do_regs ld 4 \base_reg, \base_offs, \from_regnum, \to_regnum
+	.endm
+
+
+	/* Push register pair on stack (16-byte SP alignment preserved) */
+	.macro push, r1, r2
+		stp     \r1, \r2, [sp, #-16]!
+	.endm
+
+	/* Pop register pair from stack */
+	.macro pop, r1, r2
+		ldp     \r1, \r2, [sp], #16
+	.endm
+
diff --git a/core/arch/arm/include/kernel/abort.h b/core/arch/arm/include/kernel/abort.h
new file mode 100644
index 0000000..0480f43
--- /dev/null
+++ b/core/arch/arm/include/kernel/abort.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_ABORT_H
+#define KERNEL_ABORT_H
+
+#define ABORT_TYPE_UNDEF 0
+#define ABORT_TYPE_PREFETCH 1
+#define ABORT_TYPE_DATA 2
+
+#ifndef ASM
+
+#include <compiler.h>
+#include <types_ext.h>
+
+/* Information collected about an abort (undef/prefetch/data) */
+struct abort_info {
+	uint32_t abort_type;	/* one of ABORT_TYPE_* */
+	uint32_t fault_descr;	/* only valid for data or prefetch abort */
+	vaddr_t va;		/* faulting virtual address */
+	uint32_t pc;		/* program counter at the abort */
+	struct thread_abort_regs *regs;	/* full register state at the abort */
+};
+
+/* Print information about the abort */
+void abort_print(struct abort_info *ai);
+/* Print information about the abort as an error */
+void abort_print_error(struct abort_info *ai);
+
+/* Handle an abort of the given ABORT_TYPE_* from the exception vectors */
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs);
+
+/* Tell whether the abort was taken from user mode code */
+bool abort_is_user_exception(struct abort_info *ai);
+
+#endif /*ASM*/
+#endif /*KERNEL_ABORT_H*/
+
diff --git a/core/arch/arm/include/kernel/generic_boot.h b/core/arch/arm/include/kernel/generic_boot.h
new file mode 100644
index 0000000..622c6ff
--- /dev/null
+++ b/core/arch/arm/include/kernel/generic_boot.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_GENERIC_BOOT_H
+#define KERNEL_GENERIC_BOOT_H
+
+#include <initcall.h>
+#include <types_ext.h>
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+unsigned long cpu_on_handler(unsigned long a0, unsigned long a1);
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long unused,
+ unsigned long fdt);
+unsigned long generic_boot_cpu_on_handler(unsigned long a0, unsigned long a1);
+#else
+void generic_boot_init_primary(unsigned long pageable_part,
+ unsigned long nsec_entry, unsigned long fdt);
+void generic_boot_init_secondary(unsigned long nsec_entry);
+#endif
+
+void main_init_gic(void);
+void main_secondary_init_gic(void);
+
+void init_sec_mon(unsigned long nsec_entry);
+
+const struct thread_handlers *generic_boot_get_handlers(void);
+
+/* weak routines eventually overridden by platform */
+void plat_cpu_reset_early(void);
+void plat_cpu_reset_late(void);
+void arm_cl2_config(vaddr_t pl310);
+void arm_cl2_enable(vaddr_t pl310);
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+extern paddr_t ns_entry_addrs[] __early_bss;
+int generic_boot_core_release(size_t core_idx, paddr_t entry);
+paddr_t generic_boot_core_hpen(void);
+#endif
+
+extern uint8_t __text_init_start[];
+extern uint8_t __text_start[];
+extern initcall_t __initcall_start;
+extern initcall_t __initcall_end;
+extern uint8_t __data_start[];
+extern uint8_t __data_end[];
+extern uint8_t __rodata_start[];
+extern uint8_t __rodata_end[];
+extern uint8_t __early_bss_start[];
+extern uint8_t __early_bss_end[];
+extern uint8_t __bss_start[];
+extern uint8_t __bss_end[];
+extern uint8_t __nozi_start[];
+extern uint8_t __nozi_end[];
+extern uint8_t __nozi_stack_start[];
+extern uint8_t __nozi_stack_end[];
+extern uint8_t __init_start[];
+extern uint8_t __init_size[];
+extern uint8_t __tmp_hashes_start[];
+extern uint8_t __tmp_hashes_size[];
+extern uint8_t __heap1_start[];
+extern uint8_t __heap1_end[];
+extern uint8_t __heap2_start[];
+extern uint8_t __heap2_end[];
+extern uint8_t __pageable_part_start[];
+extern uint8_t __pageable_part_end[];
+extern uint8_t __pageable_start[];
+extern uint8_t __pageable_end[];
+extern uint8_t __asan_shadow_start[];
+extern uint8_t __asan_shadow_end[];
+extern vaddr_t __ctor_list;
+extern vaddr_t __ctor_end;
+extern uint8_t __end[];
+
+/* Generated by core/arch/arm/kernel/link.mk */
+extern const char core_v_str[];
+
+#endif /* KERNEL_GENERIC_BOOT_H */
diff --git a/core/arch/arm/include/kernel/misc.h b/core/arch/arm/include/kernel/misc.h
new file mode 100644
index 0000000..a9174a8
--- /dev/null
+++ b/core/arch/arm/include/kernel/misc.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_MISC_H
+#define KERNEL_MISC_H
+
+#include <types_ext.h>
+#include <arm.h>
+#include <kernel/thread.h>
+
+size_t get_core_pos(void);
+
+uint32_t read_mode_sp(int cpu_mode);
+uint32_t read_mode_lr(int cpu_mode);
+
+/* Combine two 32-bit halves into a 64-bit value; reg0 is the high word */
+static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
+{
+	return (((uint64_t)reg0) << 32) | (uint64_t)reg1;
+}
+
+/* Split a 64-bit value; *reg0 receives the high word, *reg1 the low word */
+static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
+		uint32_t *reg1)
+{
+	*reg0 = (uint32_t)(val >> 32);
+	*reg1 = (uint32_t)val;
+}
+
+#endif /*KERNEL_MISC_H*/
+
diff --git a/core/arch/arm/include/kernel/mutex.h b/core/arch/arm/include/kernel/mutex.h
new file mode 100644
index 0000000..1698b35
--- /dev/null
+++ b/core/arch/arm/include/kernel/mutex.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_MUTEX_H
+#define KERNEL_MUTEX_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+#include <kernel/wait_queue.h>
+
+/* Lock state of a mutex */
+enum mutex_value {
+	MUTEX_VALUE_UNLOCKED,
+	MUTEX_VALUE_LOCKED,
+};
+
+/* Sleeping mutex: contending threads block on the wait queue */
+struct mutex {
+	enum mutex_value value;	/* current lock state */
+	unsigned spin_lock;	/* used when operating on this struct */
+	struct wait_queue wq;	/* threads waiting for the mutex */
+	int owner_id;		/* -1 when unlocked (cf. MUTEX_INITIALIZER) */
+	TAILQ_ENTRY(mutex) link;
+};
+/* Static initializer: unlocked, no owner, empty wait queue */
+#define MUTEX_INITIALIZER \
+	{ .value = MUTEX_VALUE_UNLOCKED, .owner_id = -1, \
+	  .wq = WAIT_QUEUE_INITIALIZER, }
+
+TAILQ_HEAD(mutex_head, mutex);
+
+void mutex_init(struct mutex *m);
+void mutex_destroy(struct mutex *m);
+
+#ifdef CFG_MUTEX_DEBUG
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_unlock(m) mutex_unlock_debug((m), __FILE__, __LINE__)
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_lock(m) mutex_lock_debug((m), __FILE__, __LINE__)
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno);
+#define mutex_trylock(m) mutex_trylock_debug((m), __FILE__, __LINE__)
+
+#else
+void mutex_unlock(struct mutex *m);
+void mutex_lock(struct mutex *m);
+bool mutex_trylock(struct mutex *m);
+#endif
+
+
+/* Condition variable, used together with a mutex (see condvar_wait()) */
+struct condvar {
+	unsigned spin_lock;	/* used when operating on this struct */
+	struct mutex *m;	/* mutex associated with current waiters */
+};
+#define CONDVAR_INITIALIZER { .m = NULL }
+
+void condvar_init(struct condvar *cv);
+void condvar_destroy(struct condvar *cv);
+
+#ifdef CFG_MUTEX_DEBUG
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_signal(cv) condvar_signal_debug((cv), __FILE__, __LINE__)
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno);
+#define condvar_broadcast(cv) condvar_broadcast_debug((cv), __FILE__, __LINE__)
+
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+ const char *fname, int lineno);
+#define condvar_wait(cv, m) condvar_wait_debug((cv), (m), __FILE__, __LINE__)
+#else
+void condvar_signal(struct condvar *cv);
+void condvar_broadcast(struct condvar *cv);
+void condvar_wait(struct condvar *cv, struct mutex *m);
+#endif
+
+#endif /*KERNEL_MUTEX_H*/
+
diff --git a/core/arch/arm/include/kernel/pm_stubs.h b/core/arch/arm/include/kernel/pm_stubs.h
new file mode 100644
index 0000000..6cbe897
--- /dev/null
+++ b/core/arch/arm/include/kernel/pm_stubs.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PM_STUBS_H
+#define PM_STUBS_H
+
+#include <stdint.h>
+#include <compiler.h>
+
+unsigned long pm_panic(unsigned long a0, unsigned long a1) __noreturn;
+unsigned long pm_do_nothing(unsigned long a0, unsigned long a1);
+
+#endif /* PM_STUBS_H */
diff --git a/core/arch/arm/include/kernel/pseudo_ta.h b/core/arch/arm/include/kernel/pseudo_ta.h
new file mode 100644
index 0000000..98316bd
--- /dev/null
+++ b/core/arch/arm/include/kernel/pseudo_ta.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_PSEUDO_TA_H
+#define KERNEL_PSEUDO_TA_H
+
+#include <assert.h>
+#include <compiler.h>
+#include <kernel/tee_ta_manager.h>
+#include <tee_api_types.h>
+#include <user_ta_header.h>
+#include <util.h>
+
+#define PTA_MANDATORY_FLAGS (TA_FLAG_SINGLE_INSTANCE | \
+ TA_FLAG_MULTI_SESSION | \
+ TA_FLAG_INSTANCE_KEEP_ALIVE)
+
+#define PTA_ALLOWED_FLAGS PTA_MANDATORY_FLAGS
+#define PTA_DEFAULT_FLAGS PTA_MANDATORY_FLAGS
+
+struct pseudo_ta_head {
+ TEE_UUID uuid;
+ const char *name;
+ uint32_t flags;
+
+ TEE_Result (*create_entry_point)(void);
+ void (*destroy_entry_point)(void);
+ TEE_Result (*open_session_entry_point)(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS],
+ void **ppSessionContext);
+ void (*close_session_entry_point)(void *pSessionContext);
+ TEE_Result (*invoke_command_entry_point)(void *pSessionContext,
+ uint32_t nCommandID, uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS]);
+};
+
+#define pseudo_ta_register(...) static const struct pseudo_ta_head __head \
+ __used __section("ta_head_section") = { __VA_ARGS__ }
+
+
+struct pseudo_ta_ctx {
+ const struct pseudo_ta_head *pseudo_ta;
+ struct tee_ta_ctx ctx;
+};
+
+static inline bool is_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+static inline struct pseudo_ta_ctx *to_pseudo_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_pseudo_ta_ctx(ctx));
+ return container_of(ctx, struct pseudo_ta_ctx, ctx);
+}
+
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+
+#endif /* KERNEL_PSEUDO_TA_H */
+
diff --git a/core/arch/arm/include/kernel/spinlock.h b/core/arch/arm/include/kernel/spinlock.h
new file mode 100644
index 0000000..c248673
--- /dev/null
+++ b/core/arch/arm/include/kernel/spinlock.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_SPINLOCK_H
+#define KERNEL_SPINLOCK_H
+
+#define SPINLOCK_LOCK 1
+#define SPINLOCK_UNLOCK 0
+
+#ifndef ASM
+#include <assert.h>
+#include <compiler.h>
+#include <stdbool.h>
+#include <kernel/thread.h>
+
+#ifdef CFG_TEE_CORE_DEBUG
+void spinlock_count_incr(void);
+void spinlock_count_decr(void);
+bool have_spinlock(void);
+static inline void assert_have_no_spinlock(void)
+{
+ assert(!have_spinlock());
+}
+#else
+static inline void spinlock_count_incr(void) { }
+static inline void spinlock_count_decr(void) { }
+static inline void assert_have_no_spinlock(void) { }
+#endif
+
+void __cpu_spin_lock(unsigned int *lock);
+void __cpu_spin_unlock(unsigned int *lock);
+/* returns 0 on locking success, non zero on failure */
+unsigned int __cpu_spin_trylock(unsigned int *lock);
+
+static inline void cpu_spin_lock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_lock(lock);
+ spinlock_count_incr();
+}
+
+static inline bool cpu_spin_trylock(unsigned int *lock)
+{
+ unsigned int rc;
+
+ assert(thread_irq_disabled());
+ rc = __cpu_spin_trylock(lock);
+ if (!rc)
+ spinlock_count_incr();
+ return !rc;
+}
+
+static inline void cpu_spin_unlock(unsigned int *lock)
+{
+ assert(thread_irq_disabled());
+ __cpu_spin_unlock(lock);
+ spinlock_count_decr();
+}
+#endif /* ASM */
+
+#endif /* KERNEL_SPINLOCK_H */
diff --git a/core/arch/arm/include/kernel/tee_l2cc_mutex.h b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
new file mode 100644
index 0000000..508a510
--- /dev/null
+++ b/core/arch/arm/include/kernel/tee_l2cc_mutex.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_L2CC_MUTEX_H
+#define TEE_L2CC_MUTEX_H
+#include <inttypes.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <compiler.h>
+
+#if defined(CFG_PL310)
+TEE_Result tee_enable_l2cc_mutex(void);
+TEE_Result tee_disable_l2cc_mutex(void);
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+void tee_l2cc_mutex_lock(void);
+void tee_l2cc_mutex_unlock(void);
+
+/*
+ * Store the pa of a mutex used for l2cc
+ * It is allocated from the boot
+ */
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa);
+
+#else
+static TEE_Result tee_enable_l2cc_mutex(void);
+static TEE_Result tee_disable_l2cc_mutex(void);
+static TEE_Result tee_get_l2cc_mutex(paddr_t *mutex);
+static TEE_Result tee_set_l2cc_mutex(paddr_t *mutex);
+
+static inline TEE_Result tee_enable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_disable_l2cc_mutex(void)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_get_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+static inline TEE_Result tee_set_l2cc_mutex(paddr_t *mutex __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+#endif /* TEE_L2CC_MUTEX_H */
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
new file mode 100644
index 0000000..175ba77
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_H
+#define KERNEL_THREAD_H
+
+#ifndef ASM
+#include <types_ext.h>
+#include <compiler.h>
+#include <optee_msg.h>
+#include <kernel/mutex.h>
+#include <kernel/vfp.h>
+#include <mm/pgt_cache.h>
+#endif
+
+#define THREAD_ID_0 0
+#define THREAD_ID_INVALID -1
+
+#define THREAD_RPC_MAX_NUM_PARAMS 4
+
+#ifndef ASM
+struct thread_vector_table {
+ uint32_t std_smc_entry;
+ uint32_t fast_smc_entry;
+ uint32_t cpu_on_entry;
+ uint32_t cpu_off_entry;
+ uint32_t cpu_resume_entry;
+ uint32_t cpu_suspend_entry;
+ uint32_t fiq_entry;
+ uint32_t system_off_entry;
+ uint32_t system_reset_entry;
+};
+extern struct thread_vector_table thread_vector_table;
+
+struct thread_specific_data {
+ TAILQ_HEAD(, tee_ta_session) sess_stack;
+ struct tee_ta_ctx *ctx;
+#ifdef CFG_SMALL_PAGE_USER_TA
+ struct pgt_cache pgt_cache;
+#endif
+ void *rpc_fs_payload;
+ paddr_t rpc_fs_payload_pa;
+ uint64_t rpc_fs_payload_cookie;
+ size_t rpc_fs_payload_size;
+};
+
+struct thread_user_vfp_state {
+ struct vfp_state vfp;
+ bool lazy_saved;
+ bool saved;
+};
+
+#ifdef ARM32
+struct thread_smc_args {
+ uint32_t a0; /* SMC function ID */
+ uint32_t a1; /* Parameter */
+ uint32_t a2; /* Parameter */
+ uint32_t a3; /* Thread ID when returning from RPC */
+ uint32_t a4; /* Not used */
+ uint32_t a5; /* Not used */
+ uint32_t a6; /* Not used */
+ uint32_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_smc_args {
+ uint64_t a0; /* SMC function ID */
+ uint64_t a1; /* Parameter */
+ uint64_t a2; /* Parameter */
+ uint64_t a3; /* Thread ID when returning from RPC */
+ uint64_t a4; /* Not used */
+ uint64_t a5; /* Not used */
+ uint64_t a6; /* Not used */
+ uint64_t a7; /* Hypervisor Client ID */
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_abort_regs {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t pad;
+ uint32_t spsr;
+ uint32_t elr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t ip;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_abort_regs {
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x15;
+ uint64_t x16;
+ uint64_t x17;
+ uint64_t x18;
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+ uint64_t x22;
+ uint64_t x23;
+ uint64_t x24;
+ uint64_t x25;
+ uint64_t x26;
+ uint64_t x27;
+ uint64_t x28;
+ uint64_t x29;
+ uint64_t x30;
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t sp_el0;
+};
+#endif /*ARM64*/
+
+#ifdef ARM32
+struct thread_svc_regs {
+ uint32_t spsr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t lr;
+};
+#endif /*ARM32*/
+#ifdef ARM64
+struct thread_svc_regs {
+ uint64_t elr;
+ uint64_t spsr;
+ uint64_t x0; /* r0_usr */
+ uint64_t x1; /* r1_usr */
+ uint64_t x2; /* r2_usr */
+ uint64_t x3; /* r3_usr */
+ uint64_t x4; /* r4_usr */
+ uint64_t x5; /* r5_usr */
+ uint64_t x6; /* r6_usr */
+ uint64_t x7; /* r7_usr */
+ uint64_t x8; /* r8_usr */
+ uint64_t x9; /* r9_usr */
+ uint64_t x10; /* r10_usr */
+ uint64_t x11; /* r11_usr */
+ uint64_t x12; /* r12_usr */
+ uint64_t x13; /* r13/sp_usr */
+ uint64_t x14; /* r14/lr_usr */
+ uint64_t x30;
+ uint64_t sp_el0;
+ uint64_t pad;
+} __aligned(16);
+#endif /*ARM64*/
+#endif /*ASM*/
+
+#ifndef ASM
+typedef void (*thread_smc_handler_t)(struct thread_smc_args *args);
+typedef void (*thread_fiq_handler_t)(void);
+typedef unsigned long (*thread_pm_handler_t)(unsigned long a0,
+ unsigned long a1);
+struct thread_handlers {
+ /*
+ * stdcall and fastcall are called as regular functions and
+ * normal ARM Calling Convention applies. Return values are passed
+ * in args->param{1-3} and forwarded into r0-r3 when returned to
+ * non-secure world.
+ *
+ * stdcall handles calls which can be preempted from non-secure
+ * world. This handler is executed with a large stack.
+ *
+ * fastcall handles fast calls which can't be preempted. This
+ * handler is executed with a limited stack. This handler must not
+ * cause any aborts or re-enable FIQs which are temporarily masked
+ * while executing this handler.
+ *
+ * TODO investigate if we should execute fastcalls and FIQs on
+ * different stacks allowing FIQs to be enabled during a fastcall.
+ */
+ thread_smc_handler_t std_smc;
+ thread_smc_handler_t fast_smc;
+
+ /*
+ * fiq is called as a regular function and normal ARM Calling
+ * Convention applies.
+ *
+ * This handler handles FIQs which can't be preempted. This handler
+ * is executed with a limited stack. This handler must not cause
+ * any aborts or re-enable FIQs which are temporarily masked while
+ * executing this handler.
+ */
+ thread_fiq_handler_t fiq;
+
+ /*
+ * Power management handlers triggered from ARM Trusted Firmware.
+ * Not used when using internal monitor.
+ */
+ thread_pm_handler_t cpu_on;
+ thread_pm_handler_t cpu_off;
+ thread_pm_handler_t cpu_suspend;
+ thread_pm_handler_t cpu_resume;
+ thread_pm_handler_t system_off;
+ thread_pm_handler_t system_reset;
+};
+void thread_init_primary(const struct thread_handlers *handlers);
+void thread_init_per_cpu(void);
+
+/*
+ * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
+ * first stack, THREAD_ID_0 + 1 for the next and so on.
+ *
+ * Returns true on success and false on errors.
+ */
+bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
+
+/*
+ * Initializes a thread to be used during boot
+ */
+void thread_init_boot_thread(void);
+
+/*
+ * Clears the current thread id
+ * Only supposed to be used during initialization.
+ */
+void thread_clr_boot_thread(void);
+
+/*
+ * Returns current thread id.
+ */
+int thread_get_id(void);
+
+/*
+ * Returns current thread id, return -1 on failure.
+ */
+int thread_get_id_may_fail(void);
+
+/* Returns Thread Specific Data (TSD) pointer. */
+struct thread_specific_data *thread_get_tsd(void);
+
+/*
+ * Sets IRQ status for current thread, must only be called from an
+ * active thread context.
+ *
+ * enable == true -> enable IRQ
+ * enable == false -> disable IRQ
+ */
+void thread_set_irq(bool enable);
+
+/*
+ * Restores the IRQ status (in CPSR) for current thread, must only be called
+ * from an active thread context.
+ */
+void thread_restore_irq(void);
+
+/*
+ * Defines the bits for the exception mask used by the
+ * thread_*_exceptions() functions below.
+ */
+#define THREAD_EXCP_FIQ (1 << 0)
+#define THREAD_EXCP_IRQ (1 << 1)
+#define THREAD_EXCP_ABT (1 << 2)
+#define THREAD_EXCP_ALL (THREAD_EXCP_FIQ | THREAD_EXCP_IRQ | THREAD_EXCP_ABT)
+
+/*
+ * thread_get_exceptions() - return current exception mask
+ */
+uint32_t thread_get_exceptions(void);
+
+/*
+ * thread_set_exceptions() - set exception mask
+ * @exceptions: exception mask to set
+ *
+ * Any previous exception mask is replaced by this exception mask, that is,
+ * old bits are cleared and replaced by these.
+ */
+void thread_set_exceptions(uint32_t exceptions);
+
+/*
+ * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
+ * @exceptions exceptions to mask
+ * @returns old exception state
+ */
+uint32_t thread_mask_exceptions(uint32_t exceptions);
+
+/*
+ * thread_unmask_exceptions() - Unmasks asynchronous exceptions
+ * @state Old asynchronous exception state to restore (returned by
+ * thread_mask_exceptions())
+ */
+void thread_unmask_exceptions(uint32_t state);
+
+
+static inline bool thread_irq_disabled(void)
+{
+ return !!(thread_get_exceptions() & THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+/*
+ * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
+ *
+ * IRQ is masked while VFP is enabled. User space must not be entered before
+ * thread_kernel_disable_vfp() has been called to disable VFP and restore the
+ * IRQ status.
+ *
+ * This function may only be called from an active thread context and may
+ * not be called again before thread_kernel_disable_vfp() has been called.
+ *
+ * VFP state is saved as needed.
+ *
+ * Returns a state variable that should be passed to
+ * thread_kernel_disable_vfp().
+ */
+uint32_t thread_kernel_enable_vfp(void);
+
+/*
+ * thread_kernel_disable_vfp() - Disables usage of VFP
+ * @state: state variable returned by thread_kernel_enable_vfp()
+ *
+ * Disables usage of VFP and restores IRQ status after a call to
+ * thread_kernel_enable_vfp().
+ *
+ * This function may only be called after a call to
+ * thread_kernel_enable_vfp().
+ */
+void thread_kernel_disable_vfp(uint32_t state);
+
+/*
+ * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
+ */
+void thread_kernel_save_vfp(void);
+
+/*
+ * thread_kernel_restore_vfp() - Restores kernel vfp state
+ */
+void thread_kernel_restore_vfp(void);
+
+/*
+ * thread_user_enable_vfp() - Enables vfp for user mode usage
+ * @uvfp: pointer to where to save the vfp state if needed
+ */
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
+#else /*CFG_WITH_VFP*/
+static inline void thread_kernel_save_vfp(void)
+{
+}
+
+static inline void thread_kernel_restore_vfp(void)
+{
+}
+#endif /*CFG_WITH_VFP*/
+
+/*
+ * thread_user_save_vfp() - Saves the user vfp state if enabled
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_save_vfp(void);
+#else
+static inline void thread_user_save_vfp(void)
+{
+}
+#endif
+
+/*
+ * thread_user_clear_vfp() - Clears the vfp state
+ * @uvfp: pointer to saved state to clear
+ */
+#ifdef CFG_WITH_VFP
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp);
+#else
+static inline void thread_user_clear_vfp(
+ struct thread_user_vfp_state *uvfp __unused)
+{
+}
+#endif
+
+
+/*
+ * thread_enter_user_mode() - Enters user mode
+ * @a0: Passed in r/x0 for user_func
+ * @a1: Passed in r/x1 for user_func
+ * @a2: Passed in r/x2 for user_func
+ * @a3: Passed in r/x3 for user_func
+ * @user_sp: Assigned sp value in user mode
+ * @user_func: Function to execute in user mode
+ * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
+ * @exit_status0: Pointer to opaque exit status 0
+ * @exit_status1: Pointer to opaque exit status 1
+ *
+ * This functions enters user mode with the argument described above,
+ * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
+ * when returning back to the caller of this function through an exception
+ * handler.
+ *
+ * @Returns what's passed in "ret" to thread_unwind_user_mode()
+ */
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long user_sp,
+ unsigned long entry_func, bool is_32bit,
+ uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * thread_unwind_user_mode() - Unwinds kernel stack from user entry
+ * @ret: Value to return from thread_enter_user_mode()
+ * @exit_status0: Exit status 0
+ * @exit_status1: Exit status 1
+ *
+ * This is the function that exception handlers can return into
+ * to resume execution in kernel mode instead of user mode.
+ *
+ * This function is closely coupled with thread_enter_user_mode() since it
+ * need to restore registers saved by thread_enter_user_mode() and when it
+ * returns make it look like thread_enter_user_mode() just returned. It is
+ * expected that the stack pointer is where thread_enter_user_mode() left
+ * it. The stack will be unwound and the function will return to where
+ * thread_enter_user_mode() was called from. Exit_status0 and exit_status1
+ * are filled in the corresponding pointers supplied to
+ * thread_enter_user_mode().
+ */
+void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ uint32_t exit_status1);
+
+#ifdef ARM64
+/*
+ * thread_get_saved_thread_sp() - Returns the saved sp of current thread
+ *
+ * When switching from the thread stack pointer the value is stored
+ * separately in the current thread context. This function returns this
+ * saved value.
+ *
+ * @returns stack pointer
+ */
+vaddr_t thread_get_saved_thread_sp(void);
+#endif /*ARM64*/
+
+bool thread_addr_is_in_stack(vaddr_t va);
+
+/*
+ * Adds a mutex to the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_add_mutex(struct mutex *m);
+
+/*
+ * Removes a mutex from the list of held mutexes for current thread
+ * Requires IRQs to be disabled.
+ */
+void thread_rem_mutex(struct mutex *m);
+
+/*
+ * Disables and empties the prealloc RPC cache one reference at a time. If
+ * all threads are idle this function returns true and a cookie of one shm
+ * object which was removed from the cache. When the cache is empty *cookie
+ * is set to 0 and the cache is disabled else a valid cookie value. If one
+ * thread isn't idle this function returns false.
+ */
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
+
+/*
+ * Enables the prealloc RPC cache. If all threads are idle the cache is
+ * enabled and this function returns true. If one thread isn't idle this
+ * function returns false.
+ */
+bool thread_enable_prealloc_rpc_cache(void);
+
+/**
+ * Allocates data for struct optee_msg_arg.
+ *
+ * @size: size in bytes of struct optee_msg_arg
+ * @arg: returned physical pointer to a struct optee_msg_arg buffer,
+ * 0 if allocation failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_arg()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_arg(uint64_t cookie);
+
+/**
+ * Allocates data for payload buffers.
+ *
+ * @size: size in bytes of payload buffer
+ * @payload: returned physical pointer to payload buffer, 0 if allocation
+ * failed.
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_payload()
+ *
+ * @cookie: cookie received when allocating the buffer
+ */
+void thread_rpc_free_payload(uint64_t cookie);
+
+/**
+ * Does an RPC using a preallocated argument buffer
+ * @cmd: RPC cmd
+ * @num_params: number of parameters (max 2)
+ * @params: RPC parameters
+ * @returns RPC return value
+ */
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct optee_msg_param *params);
+
+#endif /*ASM*/
+
+#endif /*KERNEL_THREAD_H*/
diff --git a/core/arch/arm/include/kernel/thread_defs.h b/core/arch/arm/include/kernel/thread_defs.h
new file mode 100644
index 0000000..0f54569
--- /dev/null
+++ b/core/arch/arm/include/kernel/thread_defs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_DEFS_H
+#define KERNEL_THREAD_DEFS_H
+
+#define THREAD_FLAGS_COPY_ARGS_ON_RETURN (1 << 0)
+#define THREAD_FLAGS_IRQ_ENABLE (1 << 1)
+#define THREAD_FLAGS_EXIT_ON_IRQ (1 << 2)
+
+#endif /*KERNEL_THREAD_DEFS_H*/
diff --git a/core/arch/arm/include/kernel/time_source.h b/core/arch/arm/include/kernel/time_source.h
new file mode 100644
index 0000000..ddabfe9
--- /dev/null
+++ b/core/arch/arm/include/kernel/time_source.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+
+struct time_source {
+ const char *name;
+ uint32_t protection_level;
+ TEE_Result (*get_sys_time)(TEE_Time *time);
+};
+void time_source_init(void);
+
+#define REGISTER_TIME_SOURCE(source) \
+ void time_source_init(void) { \
+ _time_source = source; \
+ }
+
+extern struct time_source _time_source;
+
+void arm_prng_add_jitter_entropy(void);
diff --git a/core/arch/arm/include/kernel/tz_proc_def.h b/core/arch/arm/include/kernel/tz_proc_def.h
new file mode 100644
index 0000000..abe281b
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_proc_def.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * General constants
+ */
+
+/*
+ * CP15 Multiprocessor Affinity register (MPIDR)
+ */
+#define CP15_CONFIG_CPU_ID_MASK 0x00000003
+#define CPU_ID0 0x00000000
+#define CPU_ID1 0x00000001
+
+/*
+ * CP15 Secure configuration register
+ */
+#define CP15_CONFIG_NS_MASK 0x00000001
+#define CP15_CONFIG_IRQ_MASK 0x00000002
+#define CP15_CONFIG_FIQ_MASK 0x00000004
+#define CP15_CONFIG_EA_MASK 0x00000008
+#define CP15_CONFIG_FW_MASK 0x00000010
+#define CP15_CONFIG_AW_MASK 0x00000020
+#define CP15_CONFIG_nET_MASK 0x00000040
+
+/*
+ * CP15 Control register
+ */
+#define CP15_CONTROL_M_MASK 0x00000001
+#define CP15_CONTROL_C_MASK 0x00000004
+#define CP15_CONTROL_Z_MASK 0x00000800
+#define CP15_CONTROL_I_MASK 0x00001000
+#define CP15_CONTROL_V_MASK 0x00002000
+#define CP15_CONTROL_HA_MASK 0x00020000
+#define CP15_CONTROL_EE_MASK 0x02000000
+#define CP15_CONTROL_NMFI_MASK 0x08000000
+#define CP15_CONTROL_TRE_MASK 0x10000000
+#define CP15_CONTROL_AFE_MASK 0x20000000
+#define CP15_CONTROL_TE_MASK 0x40000000
+
+/*
+ * CP15 Auxiliary Control register
+ */
+#define CP15_CONTROL_SMP_MASK 0x00000040
+#define CP15_CONTROL_EXCL_MASK 0x00000080
+
+/*
+ * CP15 Non secure access control register
+ */
+#define CP15_NSAC_TL_MASK 0x10000
+#define CP15_NSAC_CL_MASK 0x20000
+#define CP15_NSAC_CPN_MASK 0x3FFF
+
+/*
+ * CP15 Cache register
+ */
+#define CP15_CACHE_ADDR_R_BIT 12
+#define CP15_CACHE_ADDR_L_BIT (32-CP15_CACHE_ADDR_R_BIT)
+#define CP15_CACHE_RESULT_MASK 0x00000001
+
+/*
+ * CP15 TCM register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_TCM_ENABLE_MASK 0x00000001
+#define CP15_TCM_INSTR_TCM 0x2010000C
+#define CP15_TCM_DATA_TCM 0x2010100C
+
+/*
+ * CP15 cache lockdown register
+ */
+#define CP15_CACHE_LOCK_ALLWAYS_MASK 0x0000000F
+
+/*
+ * CP15 cache cleaning constant definition
+ */
+/* start of line number field offset in way/index format */
+#define LINE_FIELD_OFFSET 5
+/* Warning: this assumes a 256 lines/way cache (32kB cache) */
+#define LINE_FIELD_OVERFLOW 13
+/* start of way number field offset in way/index format */
+#define WAY_FIELD_OFFSET 30
diff --git a/core/arch/arm/include/kernel/tz_ssvce.h b/core/arch/arm/include/kernel/tz_ssvce.h
new file mode 100644
index 0000000..a886f9d
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_H
+#define TZ_SSVCE_H
+
+#ifndef ASM
+
+#include <types_ext.h>
+
+unsigned int secure_get_cpu_id(void);
+
+void arm_cl1_d_cleanbysetway(void);
+void arm_cl1_d_invbysetway(void);
+void arm_cl1_d_cleaninvbysetway(void);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_cleanbyva(void *start, void *end);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_invbyva(void *start, void *end);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_d_cleaninvbyva(void *start, void *end);
+void arm_cl1_i_inv_all(void);
+/* End address is included in the range (last address in range)*/
+void arm_cl1_i_inv(void *start, void *end);
+
+void secure_mmu_datatlbinvall(void);
+void secure_mmu_unifiedtlbinvall(void);
+void secure_mmu_unifiedtlbinvbymva(unsigned long addr);
+void secure_mmu_unifiedtlbinv_curasid(void);
+void secure_mmu_unifiedtlbinv_byasid(unsigned long asid);
+
+void secure_mmu_disable(void);
+#endif /*!ASM*/
+
+#ifdef ARM64
+/* D$ set/way op type defines */
+#define DCISW 0x0
+#define DCCISW 0x1
+#define DCCSW 0x2
+
+#ifndef ASM
+void flush_dcache_range(vaddr_t va, size_t len);
+void inv_dcache_range(vaddr_t va, size_t len);
+void dcsw_op_louis(uint32_t op);
+void dcsw_op_all(uint32_t op);
+#endif /*!ASM*/
+#endif /*ARM64*/
+
+#endif
diff --git a/core/arch/arm/include/kernel/tz_ssvce_def.h b/core/arch/arm/include/kernel/tz_ssvce_def.h
new file mode 100644
index 0000000..3e9f9fc
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_def.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TZ_SSVCE_DEF_H
+#define TZ_SSVCE_DEF_H
+
+#include <util.h>
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+#define CPSR_OFFSET 0x00
+#define STACK_INT_USAGE 0x04
+
+/*
+ * tee service IDs (TODO: align with the service ID list).
+ * Set by NSec in R4 before SMC to request a TEE service.
+ */
+#define SSAPI_RET_FROM_INT_SERV 4
+#define SSAPI_RET_FROM_RPC_SERV 5
+
+/*
+ * TEE monitor: status returned by the routine that checks the entry
+ * reason (valid Service ID / secure context).
+ */
+#define SEC_INVALID_ENTRY 0
+#define SEC_PRE_INIT_ENTRY 1
+#define SEC_RET_FROM_INT_ENTRY 2
+#define SEC_RET_FROM_RPC_ENTRY 3
+#define SEC_NORMAL_ENTRY 4
+
+/*
+ * teecore exit reason.
+ * Set by Secure in R4 before SMC to request a switch to NSec.
+ */
+#define SEC_EXIT_NORMAL 1
+#define SEC_EXIT_START_EXT_CODE 2
+#define SEC_EXIT_INT 3
+#define SEC_EXIT_RPC_CALL 4
+#define SEC_EXIT_FIRST 5
+#define SEC_EXIT_DEEP_SLEEP 6
+
+/* misc */
+
+#define SEC_UNDEF_STACK_OFFSET 4
+#define SEC_ABORT_STACK_OFFSET 12
+
+#define SEC_ENTRY_STATUS_NOK 0
+#define SEC_ENTRY_STATUS_OK 1
+
+/*
+ * Outer cache iomem
+ */
+#define PL310_LINE_SIZE 32
+#define PL310_8_WAYS 8
+
+/* reg1 */
+#define PL310_CTRL 0x100
+#define PL310_AUX_CTRL 0x104
+#define PL310_TAG_RAM_CTRL 0x108
+#define PL310_DATA_RAM_CTRL 0x10C
+/* reg7 */
+#define PL310_SYNC 0x730
+#define PL310_INV_BY_WAY 0x77C
+#define PL310_CLEAN_BY_WAY 0x7BC
+#define PL310_FLUSH_BY_WAY 0x7FC
+#define PL310_INV_BY_PA 0x770
+#define PL310_CLEAN_BY_PA 0x7B0
+#define PL310_FLUSH_BY_PA 0x7F0
+#define PL310_FLUSH_BY_INDEXWAY 0x7F8
+/* reg9 */
+#define PL310_DCACHE_LOCKDOWN_BASE 0x900
+#define PL310_ICACHE_LOCKDOWN_BASE 0x904
+/* reg12 */
+#define PL310_ADDR_FILT_START 0xC00
+#define PL310_ADDR_FILT_END 0xC04
+/* reg15 */
+#define PL310_DEBUG_CTRL 0xF40
+#define PL310_PREFETCH_CTRL 0xF60
+#define PL310_POWER_CTRL 0xF80
+
+#define PL310_CTRL_ENABLE_BIT BIT32(0)
+#define PL310_AUX_16WAY_BIT BIT32(16)
+
+/*
+ * SCU iomem
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_POWER 0x08
+#define SCU_INV_SEC 0x0C
+#define SCU_FILT_SA 0x40
+#define SCU_FILT_EA 0x44
+#define SCU_SAC 0x50
+#define SCU_NSAC 0x54
+#define SCU_ERRATA744369 0x30
+
+#define SCU_ACCESS_CONTROL_CPU0 BIT32(0)
+#define SCU_ACCESS_CONTROL_CPU1 BIT32(1)
+#define SCU_ACCESS_CONTROL_CPU2 BIT32(2)
+#define SCU_ACCESS_CONTROL_CPU3 BIT32(3)
+#define SCU_NSAC_SCU_SHIFT 0
+#define SCU_NSAC_PTIMER_SHIFT 4
+#define SCU_NSAC_GTIMER_SHIFT 8
+
+/*
+ * GIC iomem
+ */
+#define GIC_DIST_ISR0 0x080
+#define GIC_DIST_ISR1 0x084
+
+/*
+ * CPU iomem
+ */
+#define CORE_ICC_ICCPMR 0x0004
+
+#endif /* TZ_SSVCE_DEF_H */
diff --git a/core/arch/arm/include/kernel/tz_ssvce_pl310.h b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
new file mode 100644
index 0000000..88b91e1
--- /dev/null
+++ b/core/arch/arm/include/kernel/tz_ssvce_pl310.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_PL310_H
+#define TZ_SSVCE_PL310_H
+
+#include <util.h>
+#include <kernel/tz_ssvce_def.h>
+#include <types_ext.h>
+
+vaddr_t pl310_base(void);
+/*
+ * For the by-PA functions below, the end address is included in the
+ * range (i.e. 'end' is the last address of the range).
+ */
+void arm_cl2_cleaninvbyway(vaddr_t pl310_base);
+void arm_cl2_invbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbyway(vaddr_t pl310_base);
+void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+
+#endif /* TZ_SSVCE_PL310_H */
diff --git a/core/arch/arm/include/kernel/unwind.h b/core/arch/arm/include/kernel/unwind.h
new file mode 100644
index 0000000..cc5ff5a
--- /dev/null
+++ b/core/arch/arm/include/kernel/unwind.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef KERNEL_UNWIND
+#define KERNEL_UNWIND
+
+#ifndef ASM
+#include <types_ext.h>
+
+#ifdef ARM32
+/*
+ * The state of the ARM32 unwind process.
+ * NOTE(review): field semantics assumed from typical ARM EHABI-style
+ * unwinders -- confirm against the unwinder implementation.
+ */
+struct unwind_state {
+ uint32_t registers[16]; /* snapshot of r0-r15 for the current frame */
+ uint32_t start_pc; /* PC value when unwinding of this frame started */
+ uint32_t *insn; /* pointer into the unwind instruction stream */
+ unsigned entries; /* remaining instruction words -- TODO confirm */
+ unsigned byte; /* current byte within *insn -- TODO confirm */
+ uint16_t update_mask; /* registers updated while unwinding this frame */
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Frame-pointer based unwind state for AArch64 */
+struct unwind_state {
+ uint64_t fp; /* frame pointer of the current frame */
+ uint64_t sp; /* stack pointer */
+ uint64_t pc; /* program counter / return address */
+};
+#endif /*ARM64*/
+
+bool unwind_stack(struct unwind_state *state);
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+/* Prints a stack backtrace at trace level @level */
+void print_stack(int level);
+#else
+/* No-op stub when unwinding or tracing is compiled out */
+static inline void print_stack(int level __unused)
+{
+}
+#endif
+
+#endif /*ASM*/
+
+#ifdef CFG_CORE_UNWIND
+#define UNWIND(...) __VA_ARGS__
+#else
+#define UNWIND(...)
+#endif
+
+#endif /*KERNEL_UNWIND*/
diff --git a/core/arch/arm/include/kernel/user_ta.h b/core/arch/arm/include/kernel/user_ta.h
new file mode 100644
index 0000000..196c0af
--- /dev/null
+++ b/core/arch/arm/include/kernel/user_ta.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_USER_TA_H
+#define KERNEL_USER_TA_H
+
+#include <assert.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <mm/tee_mm.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+TAILQ_HEAD(tee_cryp_state_head, tee_cryp_state);
+TAILQ_HEAD(tee_obj_head, tee_obj);
+TAILQ_HEAD(tee_storage_enum_head, tee_storage_enum);
+
+/* Context of a loaded user mode TA; embeds the generic struct tee_ta_ctx */
+struct user_ta_ctx {
+ uaddr_t entry_func; /* TA entry point (address in the TA's address space) */
+ bool is_32bit; /* true if 32-bit ta, false if 64-bit ta */
+ /* list of sessions opened by this TA */
+ struct tee_ta_session_head open_sessions;
+ /* List of cryp states created by this TA */
+ struct tee_cryp_state_head cryp_states;
+ /* List of storage objects opened by this TA */
+ struct tee_obj_head objects;
+ /* List of storage enumerators opened by this TA */
+ struct tee_storage_enum_head storage_enums;
+ struct mobj *mobj_code; /* secure world memory */
+ struct mobj *mobj_stack; /* stack */
+ uint32_t load_addr; /* elf load addr (from TAs address space) */
+ uint32_t context; /* Context ID of the process */
+ struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
+ void *ta_time_offs; /* Time reference used by the TA */
+ struct tee_pager_area_head *areas; /* pager areas -- see tee_pager */
+#if defined(CFG_SE_API)
+ struct tee_se_service *se_service; /* Secure Element service state */
+#endif
+#if defined(CFG_WITH_VFP)
+ struct thread_user_vfp_state vfp; /* saved user mode VFP state */
+#endif
+ struct tee_ta_ctx ctx; /* generic TA context, used by to_user_ta_ctx() */
+
+};
+
+/* Returns true if @ctx belongs to a user mode TA (TA_FLAG_USER_MODE set) */
+static inline bool is_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ return !!(ctx->flags & TA_FLAG_USER_MODE);
+}
+
+/*
+ * Downcasts generic context @ctx to the enclosing struct user_ta_ctx.
+ * Asserts that @ctx really is a user TA context.
+ */
+static inline struct user_ta_ctx *to_user_ta_ctx(struct tee_ta_ctx *ctx)
+{
+ assert(is_user_ta_ctx(ctx));
+ return container_of(ctx, struct user_ta_ctx, ctx);
+}
+
+#ifdef CFG_WITH_USER_TA
+/* Initializes session @s towards the user TA identified by @uuid */
+TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
+ struct tee_ta_session *s);
+#else
+/* User TA support compiled out: report that the TA cannot be found */
+static inline TEE_Result tee_ta_init_user_ta_session(
+ const TEE_UUID *uuid __unused,
+ struct tee_ta_session *s __unused)
+{
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
+#endif
+
+#endif /*KERNEL_USER_TA_H*/
diff --git a/core/arch/arm/include/kernel/vfp.h b/core/arch/arm/include/kernel/vfp.h
new file mode 100644
index 0000000..267dee2
--- /dev/null
+++ b/core/arch/arm/include/kernel/vfp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_VFP_H
+#define KERNEL_VFP_H
+
+#include <types_ext.h>
+#include <compiler.h>
+
+#ifdef ARM32
+/*
+ * Advanced SIMD/floating point state on ARMv7-A or ARMv8-A AArch32 has:
+ * - 32 64-bit data registers
+ * - FPSCR (32 bits)
+ * - FPEXC (32 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+/* One 64-bit VFP/Advanced SIMD data register */
+struct vfp_reg {
+ uint64_t v;
+};
+
+/* Saved AArch32 VFP state, see the block comment above for contents */
+struct vfp_state {
+ uint32_t fpexc; /* Floating-Point Exception Control register */
+ uint32_t fpscr; /* Floating-Point Status and Control register */
+ struct vfp_reg reg[VFP_NUM_REGS]; /* the 32 data registers */
+};
+#endif
+
+#ifdef ARM64
+/*
+ * Advanced SIMD/floating point state on ARMv8-A AArch64 has:
+ * - 32 128-bit data registers
+ * - FPSR (32 bits)
+ * - FPCR (32 bits)
+ * - CPACR_EL1.FPEN (2 bits)
+ */
+
+#define VFP_NUM_REGS 32
+
+/* One 128-bit SIMD&FP data register, kept 16-byte aligned */
+struct vfp_reg {
+ uint8_t v[16];
+} __aligned(16);
+
+/* Saved AArch64 SIMD/FP state, see the block comment above for contents */
+struct vfp_state {
+ struct vfp_reg reg[VFP_NUM_REGS]; /* the 32 data registers */
+ uint32_t fpsr; /* Floating-Point Status Register */
+ uint32_t fpcr; /* Floating-Point Control Register */
+ uint32_t cpacr_el1; /* saved CPACR_EL1, holds the FPEN bits */
+ bool force_save; /* Save to reg even if VFP was not enabled */
+};
+#endif
+
+#ifdef CFG_WITH_VFP
+/* vfp_is_enabled() - Returns true if VFP is enabled */
+bool vfp_is_enabled(void);
+
+/* vfp_enable() - Enables vfp */
+void vfp_enable(void);
+
+/* vfp_disable() - Disables vfp */
+void vfp_disable(void);
+#else
+/* Stubs when VFP support is compiled out: VFP is always reported disabled */
+static inline bool vfp_is_enabled(void)
+{
+ return false;
+}
+
+static inline void vfp_enable(void)
+{
+}
+
+static inline void vfp_disable(void)
+{
+}
+#endif
+
+/*
+ * vfp_lazy_save_state_init() - Saves VFP enable status and disables VFP
+ * @state: VFP state structure to initialize
+ */
+void vfp_lazy_save_state_init(struct vfp_state *state);
+
+/*
+ * vfp_lazy_save_state_final() - Saves rest of VFP state
+ * @state: VFP state to save to
+ *
+ * If VFP was enabled when vfp_lazy_save_state_init() was called: save rest
+ * of state and disable VFP. Otherwise, do nothing.
+ */
+void vfp_lazy_save_state_final(struct vfp_state *state);
+
+/*
+ * vfp_lazy_restore_state() - Lazy restore VFP state
+ * @state: VFP state to restore
+ *
+ * Restores VFP enable status and also restores rest of VFP state if
+ * vfp_lazy_save_state_final() was called on this state.
+ */
+void vfp_lazy_restore_state(struct vfp_state *state, bool full_state);
+
+#endif /*KERNEL_VFP_H*/
diff --git a/core/arch/arm/include/kernel/wait_queue.h b/core/arch/arm/include/kernel/wait_queue.h
new file mode 100644
index 0000000..eb8f881
--- /dev/null
+++ b/core/arch/arm/include/kernel/wait_queue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_WAIT_QUEUE_H
+#define KERNEL_WAIT_QUEUE_H
+
+#include <types_ext.h>
+#include <sys/queue.h>
+
+struct wait_queue_elem;
+SLIST_HEAD(wait_queue, wait_queue_elem);
+
+#define WAIT_QUEUE_INITIALIZER { .slh_first = NULL }
+
+struct condvar;
+/*
+ * One waiter in a wait queue, used between wq_wait_init*() and
+ * wq_wait_final().
+ */
+struct wait_queue_elem {
+ short handle; /* NOTE(review): presumably identifies the waiter to the wakeup side -- confirm */
+ bool done; /* NOTE(review): looks like it is set when the waiter is woken -- confirm */
+ struct condvar *cv; /* condvar the waiter waits on, NULL via wq_wait_init() */
+ SLIST_ENTRY(wait_queue_elem) link; /* membership in struct wait_queue */
+};
+
+/*
+ * Initializes a wait queue
+ */
+void wq_init(struct wait_queue *wq);
+
+/*
+ * Initializes a wait queue element and adds it to the wait queue. This
+ * function is supposed to be called before the lock that protects the
+ * resource we need to wait for is released.
+ *
+ * One call to this function must be followed by one call to wq_wait_final()
+ * on the same wait queue element.
+ */
+void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ struct condvar *cv);
+
+/* As wq_wait_init_condvar(), but with no condition variable attached */
+static inline void wq_wait_init(struct wait_queue *wq,
+ struct wait_queue_elem *wqe)
+{
+ wq_wait_init_condvar(wq, wqe, NULL);
+}
+
+/* Waits for the wait queue element to be awakened. */
+void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
+ const void *sync_obj, const char *fname, int lineno);
+
+/* Wakes up the first wait queue element in the wait queue, if there is one */
+void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
+ const char *fname, int lineno);
+
+/* Returns true if the wait queue doesn't contain any elements */
+bool wq_is_empty(struct wait_queue *wq);
+
+void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
+ bool only_one, const void *sync_obj, const char *fname,
+ int lineno);
+bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv);
+
+#endif /*KERNEL_WAIT_QUEUE_H*/
+
diff --git a/core/arch/arm/include/mm/core_memprot.h b/core/arch/arm/include/mm/core_memprot.h
new file mode 100644
index 0000000..b7ccd21
--- /dev/null
+++ b/core/arch/arm/include/mm/core_memprot.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MEMPROT_H
+#define CORE_MEMPROT_H
+
+#include <mm/core_mmu.h>
+#include <types_ext.h>
+
+/*
+ * "pbuf_is" support.
+ *
+ * core_vbuf_is()/core_pbuf_is() can be used to check if a teecore mapped
+ * virtual address or a physical address is "Secure", "Unsecure", "external
+ * RAM" and some other fancy attributes.
+ *
+ * DO NOT use 'buf_is(Secure, buffer)==false' as an assumption that the buffer
+ * is unsecure! This is NOT a valid assumption! A buffer is certified unsecure
+ * only if 'buf_is(UnSecure, buffer)==true'.
+ */
+
+/* Memory attributes tested by core_pbuf_is()/core_vbuf_is() */
+enum buf_is_attr {
+ CORE_MEM_SEC, /* secure memory */
+ CORE_MEM_NON_SEC, /* non-secure memory */
+ CORE_MEM_TEE_RAM, /* TEE core private RAM */
+ CORE_MEM_TA_RAM, /* RAM where TAs are loaded */
+ CORE_MEM_NSEC_SHM, /* non-secure shared memory */
+ CORE_MEM_EXTRAM, /* external RAM */
+ CORE_MEM_INTRAM, /* internal RAM */
+ CORE_MEM_CACHED, /* cached memory */
+};
+
+/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
+#define tee_pbuf_is core_pbuf_is
+#define tee_vbuf_is core_vbuf_is
+
+/* Convenience macros */
+#define tee_pbuf_is_non_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_NON_SEC, (paddr_t)(buf), (len))
+
+#define tee_pbuf_is_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_SEC, (paddr_t)(buf), (len))
+
+#define tee_vbuf_is_non_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))
+
+#define tee_vbuf_is_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))
+
+/*
+ * This function returns true if the buffer complies with the supplied flags.
+ * If it returns false, the buffer doesn't comply with the supplied flags
+ * or something went wrong.
+ *
+ * Note that returning false doesn't guarantee that the buffer complies with
+ * the complement of the supplied flags.
+ */
+bool core_pbuf_is(uint32_t flags, paddr_t pbuf, size_t len);
+
+/*
+ * Translates the supplied virtual address to a physical address and uses
+ * core_pbuf_is() to check the compliance of the buffer.
+ */
+bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
+
+/*
+ * Translate physical address to virtual address using specified mapping
+ * Returns NULL on failure or a valid virtual address on success.
+ */
+void *phys_to_virt(paddr_t pa, enum teecore_memtypes m);
+
+/*
+ * Translate virtual address to physical address
+ * Returns 0 on failure or a valid physical address on success.
+ */
+paddr_t virt_to_phys(void *va);
+
+#endif /* CORE_MEMPROT_H */
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
new file mode 100644
index 0000000..03ad93d
--- /dev/null
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MMU_H
+#define CORE_MMU_H
+
+#include <compiler.h>
+#include <kernel/user_ta.h>
+#include <mm/tee_mmu_types.h>
+#include <types_ext.h>
+
+/* A small page is the smallest unit of memory that can be mapped */
+#define SMALL_PAGE_SHIFT 12
+#define SMALL_PAGE_MASK 0x00000fff
+#define SMALL_PAGE_SIZE 0x00001000
+
+/*
+ * PGDIR is the translation table above the translation table that holds
+ * the pages.
+ */
+#ifdef CFG_WITH_LPAE
+#define CORE_MMU_PGDIR_SHIFT 21
+#else
+#define CORE_MMU_PGDIR_SHIFT 20
+#endif
+#define CORE_MMU_PGDIR_SIZE (1 << CORE_MMU_PGDIR_SHIFT)
+#define CORE_MMU_PGDIR_MASK (CORE_MMU_PGDIR_SIZE - 1)
+
+/* Devices are mapped using this granularity */
+#define CORE_MMU_DEVICE_SHIFT CORE_MMU_PGDIR_SHIFT
+#define CORE_MMU_DEVICE_SIZE (1 << CORE_MMU_DEVICE_SHIFT)
+#define CORE_MMU_DEVICE_MASK (CORE_MMU_DEVICE_SIZE - 1)
+
+/* TA user space code, data, stack and heap are mapped using this granularity */
+#ifdef CFG_SMALL_PAGE_USER_TA
+#define CORE_MMU_USER_CODE_SHIFT SMALL_PAGE_SHIFT
+#else
+#define CORE_MMU_USER_CODE_SHIFT CORE_MMU_PGDIR_SHIFT
+#endif
+#define CORE_MMU_USER_CODE_SIZE (1 << CORE_MMU_USER_CODE_SHIFT)
+#define CORE_MMU_USER_CODE_MASK (CORE_MMU_USER_CODE_SIZE - 1)
+
+/* TA user space parameters are mapped using this granularity */
+#ifdef CFG_SMALL_PAGE_USER_TA
+#define CORE_MMU_USER_PARAM_SHIFT SMALL_PAGE_SHIFT
+#else
+#define CORE_MMU_USER_PARAM_SHIFT CORE_MMU_PGDIR_SHIFT
+#endif
+#define CORE_MMU_USER_PARAM_SIZE (1 << CORE_MMU_USER_PARAM_SHIFT)
+#define CORE_MMU_USER_PARAM_MASK (CORE_MMU_USER_PARAM_SIZE - 1)
+
+/*
+ * Memory area type:
+ * MEM_AREA_NOTYPE: Undefined type. Used as end of table.
+ * MEM_AREA_TEE_RAM: teecore execution RAM (secure, reserved to TEE, unused)
+ * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
+ * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances.
+ * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
+ * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
+ * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
+ * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
+ * MEM_AREA_IO_SEC: Secure HW mapped registers
+ * MEM_AREA_RES_VASPACE: Reserved virtual memory space
+ * MEM_AREA_TA_VASPACE: TA va space, only used with phys_to_virt()
+ * MEM_AREA_MAXTYPE: lower invalid 'type' value
+ */
+enum teecore_memtypes {
+ MEM_AREA_NOTYPE = 0,
+ MEM_AREA_TEE_RAM,
+ MEM_AREA_TEE_COHERENT,
+ MEM_AREA_TA_RAM,
+ MEM_AREA_NSEC_SHM,
+ MEM_AREA_RAM_NSEC,
+ MEM_AREA_RAM_SEC,
+ MEM_AREA_IO_NSEC,
+ MEM_AREA_IO_SEC,
+ MEM_AREA_RES_VASPACE,
+ MEM_AREA_TA_VASPACE,
+ MEM_AREA_MAXTYPE
+};
+
+struct core_mmu_phys_mem {
+ const char *name;
+ enum teecore_memtypes type;
+ paddr_t addr;
+ size_t size;
+};
+
+#define register_phys_mem(type, addr, size) \
+ static const struct core_mmu_phys_mem __phys_mem_ ## addr \
+ __used __section("phys_mem_map_section") = \
+ { #addr, (type), (addr), (size) }
+
+
+/* Default NSec shared memory allocated from NSec world */
+extern unsigned long default_nsec_shm_paddr;
+extern unsigned long default_nsec_shm_size;
+
+void core_init_mmu_map(void);
+void core_init_mmu_regs(void);
+
+bool core_mmu_place_tee_ram_at_top(paddr_t paddr);
+
+#ifdef CFG_WITH_LPAE
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @user_map: physical address of user map translation table
+ * @asid: ASID for the user map
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint64_t user_map;
+ uint32_t asid;
+};
+#else
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @ttbr0: content of ttbr0
+ * @ctxid: content of contextidr
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint32_t ttbr0;
+ uint32_t ctxid;
+};
+#endif
+
+#ifdef CFG_WITH_LPAE
+bool core_mmu_user_va_range_is_defined(void);
+#else
+static inline bool core_mmu_user_va_range_is_defined(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * core_mmu_get_user_va_range() - Return range of user va space
+ * @base: Lowest user virtual address
+ * @size: Size in bytes of user address space
+ */
+void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
+
+/*
+ * enum core_mmu_fault - different kinds of faults
+ * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
+ * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
+ * @CORE_MMU_FAULT_TRANSLATION: translation fault
+ * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
+ * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
+ * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
+ * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
+ * @CORE_MMU_FAULT_OTHER: Other/unknown fault
+ */
+enum core_mmu_fault {
+ CORE_MMU_FAULT_ALIGNMENT,
+ CORE_MMU_FAULT_DEBUG_EVENT,
+ CORE_MMU_FAULT_TRANSLATION,
+ CORE_MMU_FAULT_WRITE_PERMISSION,
+ CORE_MMU_FAULT_READ_PERMISSION,
+ CORE_MMU_FAULT_ASYNC_EXTERNAL,
+ CORE_MMU_FAULT_ACCESS_BIT,
+ CORE_MMU_FAULT_OTHER,
+};
+
+/*
+ * core_mmu_get_fault_type() - get fault type
+ * @fault_descr: Content of fault status or exception syndrome register
+ * @returns an enum describing the content of fault status register.
+ */
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
+
+/*
+ * core_mm_type_to_attr() - convert memory type to attribute
+ * @t: memory type
+ * @returns an attribute that can be passed to core_mm_set_entry() and friends
+ */
+uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);
+
+/*
+ * core_mmu_create_user_map() - Create user space mapping
+ * @utc: Pointer to user TA context
+ * @map: MMU configuration to use when activating this VA space
+ */
+void core_mmu_create_user_map(struct user_ta_ctx *utc,
+ struct core_mmu_user_map *map);
+/*
+ * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
+ * @map: MMU configuration for current user VA space.
+ */
+void core_mmu_get_user_map(struct core_mmu_user_map *map);
+
+/*
+ * core_mmu_set_user_map() - Set new MMU configuration for user VA space
+ * @map: If NULL will disable user VA space, if not NULL the user
+ * VA space to activate.
+ */
+void core_mmu_set_user_map(struct core_mmu_user_map *map);
+
+/*
+ * struct core_mmu_table_info - Properties for a translation table
+ * @table: Pointer to translation table
+ * @va_base: VA base address of the translation table
+ * @level: Translation table level
+ * @shift: The shift of each entry in the table
+ * @num_entries: Number of entries in this table.
+ */
+struct core_mmu_table_info {
+ void *table;
+ vaddr_t va_base;
+ unsigned level;
+ unsigned shift;
+ unsigned num_entries;
+};
+
+/*
+ * core_mmu_find_table() - Locates a translation table
+ * @va: Virtual address for the table to cover
+ * @max_level: Don't traverse beyond this level
+ * @tbl_info: Pointer to where to store properties.
+ * @return true if a translation table was found, false on error
+ */
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info);
+
+/*
+ * core_mmu_divide_block() - divide larger block/section into smaller ones
+ * @tbl_info: table where target record located
+ * @idx: index of record
+ * @return true if function was able to divide block, false on error
+ */
+bool core_mmu_divide_block(struct core_mmu_table_info *tbl_info,
+ unsigned int idx);
+
+void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
+ paddr_t pa, uint32_t attr);
+
+void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);
+
+/*
+ * core_mmu_set_entry() - Set entry in translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to update
+ * @pa: Physical address to assign entry
+ * @attr: Attributes to assign entry
+ */
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr);
+
+void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
+ paddr_t *pa, uint32_t *attr);
+
+/*
+ * core_mmu_get_entry() - Get entry from translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to read
+ * @pa: Physical address is returned here if pa is not NULL
+ * @attr: Attributes are returned here if attr is not NULL
+ */
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr);
+
+/*
+ * core_mmu_va2idx() - Translate from virtual address to table index
+ * @tbl_info: Translation table properties
+ * @va: Virtual address to translate
+ * @returns index in translation table
+ */
+static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
+ vaddr_t va)
+{
+ return (va - tbl_info->va_base) >> tbl_info->shift;
+}
+
+/*
+ * core_mmu_idx2va() - Translate from table index to virtual address
+ * @tbl_info: Translation table properties
+ * @idx: Index to translate
+ * @returns Virtual address
+ */
+static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
+ unsigned idx)
+{
+ return (idx << tbl_info->shift) + tbl_info->va_base;
+}
+
+/*
+ * core_mmu_get_block_offset() - Get offset inside a block/page
+ * @tbl_info: Translation table properties
+ * @pa: Physical address
+ * @returns offset within one block of the translation table
+ */
+static inline size_t core_mmu_get_block_offset(
+ struct core_mmu_table_info *tbl_info, paddr_t pa)
+{
+ return pa & ((1 << tbl_info->shift) - 1);
+}
+
+/*
+ * core_mmu_user_mapping_is_active() - Report if user mapping is active
+ * @returns true if a user VA space is active, false if user VA space is
+ * inactive.
+ */
+bool core_mmu_user_mapping_is_active(void);
+
+/*
+ * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
+ * @returns true if the attributes can be used, false if not.
+ */
+bool core_mmu_mattr_is_ok(uint32_t mattr);
+
+void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
+ vaddr_t *e);
+
+enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);
+
+/* Function is deprecated, use virt_to_phys() instead */
+int core_va2pa_helper(void *va, paddr_t *pa);
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void);
+
+bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
+
+/* L1/L2 cache maintenance (op: refer to ???) */
+unsigned int cache_maintenance_l1(int op, void *va, size_t len);
+#ifdef CFG_PL310
+unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len);
+#else
+static inline unsigned int cache_maintenance_l2(int op __unused,
+ paddr_t pa __unused,
+ size_t len __unused)
+{
+ /* Nothing to do about L2 Cache Maintenance when no PL310 */
+ return TEE_SUCCESS;
+}
+#endif
+
+/* various invalidate secure TLB */
+enum teecore_tlb_op {
+ TLBINV_UNIFIEDTLB, /* invalidate unified tlb */
+ TLBINV_CURRENT_ASID, /* invalidate unified tlb for current ASID */
+ TLBINV_BY_ASID, /* invalidate unified tlb by ASID */
+ TLBINV_BY_MVA, /* invalidate unified tlb by MVA */
+};
+
+int core_tlb_maintenance(int op, unsigned int a);
+
+/* Cache maintenance operation type */
+typedef enum {
+ DCACHE_CLEAN = 0x1,
+ DCACHE_AREA_CLEAN = 0x2,
+ DCACHE_INVALIDATE = 0x3,
+ DCACHE_AREA_INVALIDATE = 0x4,
+ ICACHE_INVALIDATE = 0x5,
+ ICACHE_AREA_INVALIDATE = 0x6,
+ WRITE_BUFFER_DRAIN = 0x7,
+ DCACHE_CLEAN_INV = 0x8,
+ DCACHE_AREA_CLEAN_INV = 0x9,
+ L2CACHE_INVALIDATE = 0xA,
+ L2CACHE_AREA_INVALIDATE = 0xB,
+ L2CACHE_CLEAN = 0xC,
+ L2CACHE_AREA_CLEAN = 0xD,
+ L2CACHE_CLEAN_INV = 0xE,
+ L2CACHE_AREA_CLEAN_INV = 0xF
+} t_cache_operation_id;
+
+/* Check cpu mmu enabled or not */
+bool cpu_mmu_enabled(void);
+
+#endif /* CORE_MMU_H */
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
new file mode 100644
index 0000000..d5eeb69
--- /dev/null
+++ b/core/arch/arm/include/mm/mobj.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MM_MOBJ_H
+#define __MM_MOBJ_H
+
+#include <compiler.h>
+#include <mm/core_memprot.h>
+#include <optee_msg.h>
+#include <sys/queue.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+
+
+struct mobj {
+ const struct mobj_ops *ops;
+ size_t size;
+};
+
+struct mobj_ops {
+ void *(*get_va)(struct mobj *mobj, size_t offs);
+ TEE_Result (*get_pa)(struct mobj *mobj, size_t offs, size_t granule,
+ paddr_t *pa);
+ TEE_Result (*get_cattr)(struct mobj *mobj, uint32_t *cattr);
+ bool (*matches)(struct mobj *mobj, enum buf_is_attr attr);
+ void (*free)(struct mobj *mobj);
+ void (*update_mapping)(struct mobj *mobj, struct user_ta_ctx *utc,
+ vaddr_t va);
+};
+
+extern struct mobj mobj_virt;
+extern struct mobj *mobj_sec_ddr;
+
+static inline void *mobj_get_va(struct mobj *mobj, size_t offset)
+{
+ if (mobj && mobj->ops && mobj->ops->get_va)
+ return mobj->ops->get_va(mobj, offset);
+ return NULL;
+}
+
+static inline TEE_Result mobj_get_pa(struct mobj *mobj, size_t offs,
+ size_t granule, paddr_t *pa)
+{
+ if (mobj && mobj->ops && mobj->ops->get_pa)
+ return mobj->ops->get_pa(mobj, offs, granule, pa);
+ return TEE_ERROR_GENERIC;
+}
+
+static inline TEE_Result mobj_get_cattr(struct mobj *mobj, uint32_t *cattr)
+{
+ if (mobj && mobj->ops && mobj->ops->get_cattr)
+ return mobj->ops->get_cattr(mobj, cattr);
+ return TEE_ERROR_GENERIC;
+}
+
+static inline bool mobj_matches(struct mobj *mobj, enum buf_is_attr attr)
+{
+ if (mobj && mobj->ops && mobj->ops->matches)
+ return mobj->ops->matches(mobj, attr);
+ return false;
+}
+
+static inline void mobj_free(struct mobj *mobj)
+{
+ if (mobj && mobj->ops && mobj->ops->free)
+ mobj->ops->free(mobj);
+}
+
+
+static inline void mobj_update_mapping(struct mobj *mobj,
+ struct user_ta_ctx *utc, vaddr_t va)
+{
+ if (mobj && mobj->ops && mobj->ops->update_mapping)
+ mobj->ops->update_mapping(mobj, utc, va);
+}
+
+static inline bool mobj_is_nonsec(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_NON_SEC);
+}
+
+static inline bool mobj_is_secure(struct mobj *mobj)
+{
+ return mobj_matches(mobj, CORE_MEM_SEC);
+}
+
+struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
+ tee_mm_pool_t *pool);
+
+struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
+ enum buf_is_attr battr);
+
+struct mobj *mobj_paged_alloc(size_t size);
+
+#ifdef CFG_PAGED_USER_TA
+bool mobj_is_paged(struct mobj *mobj);
+#else
+static inline bool mobj_is_paged(struct mobj *mobj __unused)
+{
+ return false;
+}
+#endif
+
+struct mobj *mobj_seccpy_shm_alloc(size_t size);
+
+#endif /*__MM_MOBJ_H*/
diff --git a/core/arch/arm/include/mm/pgt_cache.h b/core/arch/arm/include/mm/pgt_cache.h
new file mode 100644
index 0000000..8812758
--- /dev/null
+++ b/core/arch/arm/include/mm/pgt_cache.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MM_PGT_CACHE_H
+#define MM_PGT_CACHE_H
+
+#ifdef CFG_WITH_LPAE
+#define PGT_SIZE (4 * 1024)
+#define PGT_NUM_PGT_PER_PAGE 1
+#else
+#define PGT_SIZE (1 * 1024)
+#define PGT_NUM_PGT_PER_PAGE 4
+#endif
+
+#include <kernel/tee_ta_manager.h>
+#include <sys/queue.h>
+#include <types_ext.h>
+#include <util.h>
+
+struct pgt {
+ void *tbl;
+#if defined(CFG_PAGED_USER_TA)
+ vaddr_t vabase;
+ struct tee_ta_ctx *ctx;
+ size_t num_used_entries;
+#endif
+#if defined(CFG_WITH_PAGER)
+#if !defined(CFG_WITH_LPAE)
+ struct pgt_parent *parent;
+#endif
+#endif
+#ifdef CFG_SMALL_PAGE_USER_TA
+ SLIST_ENTRY(pgt) link;
+#endif
+};
+
+#ifdef CFG_SMALL_PAGE_USER_TA
+/*
+ * Reserve 2 page tables per thread, but at least 4 page tables in total
+ */
+#if CFG_NUM_THREADS < 2
+#define PGT_CACHE_SIZE 4
+#else
+#define PGT_CACHE_SIZE ROUNDUP(CFG_NUM_THREADS * 2, PGT_NUM_PGT_PER_PAGE)
+#endif
+
+SLIST_HEAD(pgt_cache, pgt);
+
+static inline bool pgt_check_avail(size_t num_tbls)
+{
+ return num_tbls <= PGT_CACHE_SIZE;
+}
+
+void pgt_alloc(struct pgt_cache *pgt_cache, void *owning_ctx,
+ vaddr_t begin, vaddr_t last);
+void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx);
+
+#ifdef CFG_PAGED_USER_TA
+void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, void *ctx,
+ vaddr_t begin, vaddr_t last);
+#else
+static inline void pgt_flush_ctx_range(struct pgt_cache *pgt_cache __unused,
+ void *ctx __unused,
+ vaddr_t begin __unused,
+ vaddr_t last __unused)
+{
+}
+#endif
+
+void pgt_transfer(struct pgt_cache *pgt_cache, void *old_ctx, vaddr_t old_va,
+ void *new_ctx, vaddr_t new_va, size_t size);
+
+void pgt_init(void);
+
+#else
+
+static inline void pgt_init(void)
+{
+}
+
+#endif
+
+#if defined(CFG_PAGED_USER_TA)
+void pgt_flush_ctx(struct tee_ta_ctx *ctx);
+
+static inline void pgt_inc_used_entries(struct pgt *pgt)
+{
+ pgt->num_used_entries++;
+}
+
+static inline void pgt_dec_used_entries(struct pgt *pgt)
+{
+ pgt->num_used_entries--;
+}
+
+static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
+{
+ pgt->num_used_entries = val;
+}
+
+#else
+static inline void pgt_flush_ctx(struct tee_ta_ctx *ctx __unused)
+{
+}
+
+static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
+{
+}
+
+static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
+{
+}
+
+static inline void pgt_set_used_entries(struct pgt *pgt __unused,
+ size_t val __unused)
+{
+}
+
+#endif
+
+#endif /*MM_PGT_CACHE_H*/
diff --git a/core/arch/arm/include/mm/tee_pager.h b/core/arch/arm/include/mm/tee_pager.h
new file mode 100644
index 0000000..d48577a
--- /dev/null
+++ b/core/arch/arm/include/mm/tee_pager.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MM_TEE_PAGER_H
+#define MM_TEE_PAGER_H
+
+#include <kernel/abort.h>
+#include <kernel/panic.h>
+#include <kernel/user_ta.h>
+#include <mm/tee_mm.h>
+#include <string.h>
+#include <trace.h>
+
+/*
+ * Reference to translation table used to map the virtual memory range
+ * covered by the pager.
+ */
+extern struct core_mmu_table_info tee_pager_tbl_info;
+
+struct tee_pager_area_head;
+
+/*
+ * tee_pager_init() - Initializes the pager
+ * @mm_alias: The alias area where all physical pages managed by the
+ * pager are aliased
+ *
+ * Panics if called twice or some other error occurs.
+ */
+void tee_pager_init(tee_mm_entry_t *mm_alias);
+
+/*
+ * tee_pager_add_core_area() - Adds a pageable core area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ * @flags: describes attributes of mapping
+ * @store: backing store for the memory area
+ * @hashes: hashes of the pages in the backing store
+ *
+ * TEE_MATTR_PW - read-write mapping else read-only mapping
+ * TEE_MATTR_PX - executable mapping
+ * TEE_MATTR_LOCKED - on demand locked mapping, requires TEE_MATTR_PW,
+ * will only be unmapped by a call to
+ * tee_pager_release_phys()
+ *
+ * !TEE_MATTR_PW requires store and hashes to be !NULL while
+ * TEE_MATTR_PW requires store and hashes to be NULL.
+ *
+ * Invalid use of flags or non-page aligned base or size or size == 0 will
+ * cause a panic.
+ *
+ * Return true on success or false if area can't be added.
+ */
+bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
+ const void *store, const void *hashes);
+
+/*
+ * tee_pager_add_uta_area() - Adds a pageable user ta area
+ * @utc: user ta context of the area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ *
+ * The mapping is created suitable to initialize the memory content while
+ * loading the TA. Once the TA is properly loaded the areas should be
+ * finalized with tee_pager_set_uta_area_attr() to get more strict settings.
+ *
+ * Return true on success or false if the area can't be added
+ */
+bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size);
+
+/*
+ * tee_pager_set_uta_area_attr() - Set attributes of a initialized memory area
+ * @utc: user ta context of the area
+ * @base: base of covered memory area
+ * @size: size of covered memory area
+ * @flags: TEE_MATTR_U* flags describing permissions of the area
+ *
+ * Return true on success or false if the area can't be updated
+ */
+bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size, uint32_t flags);
+
+void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
+ vaddr_t src_base,
+ struct user_ta_ctx *dst_utc,
+ vaddr_t dst_base, struct pgt **dst_pgt,
+ size_t size);
+void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
+ size_t size);
+
+/*
+ * tee_pager_rem_uta_areas() - Remove all user ta areas
+ * @utc: user ta context
+ *
+ * This function is called when a user ta context is torn down.
+ */
+#ifdef CFG_PAGED_USER_TA
+void tee_pager_rem_uta_areas(struct user_ta_ctx *utc);
+#else
+static inline void tee_pager_rem_uta_areas(struct user_ta_ctx *utc __unused)
+{
+}
+#endif
+
+/*
+ * tee_pager_assign_uta_tables() - Assigns translation table to a user ta
+ * @utc: user ta context
+ *
+ * This function is called to assign translation tables for the pageable
+ * areas of a user TA.
+ */
+#ifdef CFG_PAGED_USER_TA
+void tee_pager_assign_uta_tables(struct user_ta_ctx *utc);
+#else
+static inline void tee_pager_assign_uta_tables(struct user_ta_ctx *utc __unused)
+{
+}
+#endif
+
+/*
+ * Adds physical pages to the pager to use. The supplied virtual address range
+ * is searched for mapped physical pages and unmapped pages are ignored.
+ *
+ * vaddr is the first virtual address
+ * npages is the number of pages to add
+ */
+void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap);
+
+/*
+ * tee_pager_alloc() - Allocate read-write virtual memory from pager.
+ * @size: size of memory in bytes
+ * @flags: flags for allocation
+ *
+ * Allocates read-write memory from pager, all flags but the optional
+ * TEE_MATTR_LOCKED is ignored.
+ *
+ * @return NULL on failure or a pointer to the virtual memory on success.
+ */
+void *tee_pager_alloc(size_t size, uint32_t flags);
+
+#ifdef CFG_PAGED_USER_TA
+/*
+ * tee_pager_pgt_save_and_release_entries() - Save dirty pages to backing store
+ * and remove physical page from translation table
+ * @pgt: page table descriptor
+ *
+ * This function is called when a translation table needs to be recycled
+ */
+void tee_pager_pgt_save_and_release_entries(struct pgt *pgt);
+#endif
+
+/*
+ * tee_pager_release_phys() - Release physical pages used for mapping
+ * @addr: virtual address of first page to release
+ * @size: number of bytes to release
+ *
+ * Only pages completely covered by the supplied range are affected. This
+ * function only supplies a hint to the pager that the physical page can be
+ * reused. The caller can't expect a released memory range to hold a
+ * specific bit pattern when used next time.
+ *
+ * Note that the virtual memory allocation is still valid after this
+ * function has returned, it's just the content that may or may not have
+ * changed.
+ */
+#ifdef CFG_WITH_PAGER
+void tee_pager_release_phys(void *addr, size_t size);
+#else
+static inline void tee_pager_release_phys(void *addr __unused,
+ size_t size __unused)
+{
+}
+#endif
+
+/*
+ * Statistics on the pager
+ */
+struct tee_pager_stats {
+ size_t hidden_hits;
+ size_t ro_hits;
+ size_t rw_hits;
+ size_t zi_released;
+ size_t npages; /* number of load pages */
+ size_t npages_all; /* number of pages */
+};
+
+#ifdef CFG_WITH_PAGER
+void tee_pager_get_stats(struct tee_pager_stats *stats);
+bool tee_pager_handle_fault(struct abort_info *ai);
+#else /*CFG_WITH_PAGER*/
+static inline bool tee_pager_handle_fault(struct abort_info *ai __unused)
+{
+ return false;
+}
+
+static inline void tee_pager_get_stats(struct tee_pager_stats *stats)
+{
+ memset(stats, 0, sizeof(struct tee_pager_stats));
+}
+#endif /*CFG_WITH_PAGER*/
+
+#endif /*MM_TEE_PAGER_H*/
diff --git a/core/arch/arm/include/sm/optee_smc.h b/core/arch/arm/include/sm/optee_smc.h
new file mode 100644
index 0000000..b6fcd65
--- /dev/null
+++ b/core/arch/arm/include/sm/optee_smc.h
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure
+ * world and normal world kernel driver. We're following ARM SMC Calling
+ * Convention as specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ *
+ * This file depends on optee_msg.h being included to expand the SMC id
+ * macros below.
+ */
+
+#define OPTEE_SMC_32 0
+#define OPTEE_SMC_64 0x40000000
+#define OPTEE_SMC_FAST_CALL 0x80000000
+#define OPTEE_SMC_STD_CALL 0
+
+#define OPTEE_SMC_OWNER_MASK 0x3F
+#define OPTEE_SMC_OWNER_SHIFT 24
+
+#define OPTEE_SMC_FUNC_MASK 0xFFFF
+
+#define OPTEE_SMC_IS_FAST_CALL(smc_val) ((smc_val) & OPTEE_SMC_FAST_CALL)
+#define OPTEE_SMC_IS_64(smc_val) ((smc_val) & OPTEE_SMC_64)
+#define OPTEE_SMC_FUNC_NUM(smc_val) ((smc_val) & OPTEE_SMC_FUNC_MASK)
+#define OPTEE_SMC_OWNER_NUM(smc_val) \
+ (((smc_val) >> OPTEE_SMC_OWNER_SHIFT) & OPTEE_SMC_OWNER_MASK)
+
+#define OPTEE_SMC_CALL_VAL(type, calling_convention, owner, func_num) \
+ ((type) | (calling_convention) | \
+ (((owner) & OPTEE_SMC_OWNER_MASK) << \
+ OPTEE_SMC_OWNER_SHIFT) |\
+ ((func_num) & OPTEE_SMC_FUNC_MASK))
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_STD_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
+
+#define OPTEE_SMC_OWNER_ARCH 0
+#define OPTEE_SMC_OWNER_CPU 1
+#define OPTEE_SMC_OWNER_SIP 2
+#define OPTEE_SMC_OWNER_OEM 3
+#define OPTEE_SMC_OWNER_STANDARD 4
+#define OPTEE_SMC_OWNER_TRUSTED_APP 48
+#define OPTEE_SMC_OWNER_TRUSTED_OS 50
+
+#define OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED 62
+#define OPTEE_SMC_OWNER_TRUSTED_OS_API 63
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 are used as register names in the descriptions below, on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's
+ * 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return the following UID if using API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * see also OPTEE_MSG_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_API, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2 Size of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+/*
+ * Configures L2CC mutex
+ *
+ * Disables, enables usage of L2CC mutex. Returns or sets physical address
+ * of L2CC mutex.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_L2CC_MUTEX
+ * a1 OPTEE_SMC_L2CC_MUTEX_GET_ADDR Get physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_SET_ADDR Set physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_ENABLE Enable usage of mutex
+ * OPTEE_SMC_L2CC_MUTEX_DISABLE Disable usage of mutex
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Preserved
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL Physical address not available
+ * OPTEE_SMC_RETURN_EBADADDR Bad supplied physical address
+ * OPTEE_SMC_RETURN_EBADCMD Unsupported value in a1
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_L2CC_MUTEX_GET_ADDR 0
+#define OPTEE_SMC_L2CC_MUTEX_SET_ADDR 1
+#define OPTEE_SMC_L2CC_MUTEX_ENABLE 2
+#define OPTEE_SMC_L2CC_MUTEX_DISABLE 3
+#define OPTEE_SMC_FUNCID_L2CC_MUTEX 8
+#define OPTEE_SMC_L2CC_MUTEX \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_L2CC_MUTEX)
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR (1 << 0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM (1 << 0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM (1 << 1)
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+/*
+ * Disable and empties cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Release of secondary cores
+ *
+ * OP-TEE in secure world is in charge of the release process of secondary
+ * cores. The Rich OS issues this request to ask OP-TEE to boot up the
+ * secondary cores, go through the OP-TEE per-core initialization, and then
+ * switch to the Non-Secure world with the Rich OS provided entry address.
+ * The secondary cores enter Non-Secure world in SVC mode, with Thumb, FIQ,
+ * IRQ and Abort bits disabled.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_BOOT_SECONDARY
+ * a1 Index of secondary core to boot
+ * a2 Upper 32 bits of a 64-bit Non-Secure world entry physical address
+ * a3 Lower 32 bits of a 64-bit Non-Secure world entry physical address
+ * a4-7 Not used
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_EBADCMD Core index out of range
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_BOOT_SECONDARY 12
+#define OPTEE_SMC_BOOT_SECONDARY \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_IRQ
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_IRQ 4
+#define OPTEE_SMC_RETURN_RPC_IRQ \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ * - param_attrs attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) \
+ (((ret) != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) && \
+ ((((ret) & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == \
+ OPTEE_SMC_RETURN_RPC_PREFIX)))
+
+#endif /* OPTEE_SMC_H */
diff --git a/core/arch/arm/include/sm/psci.h b/core/arch/arm/include/sm/psci.h
new file mode 100644
index 0000000..57d8f07
--- /dev/null
+++ b/core/arch/arm/include/sm/psci.h
@@ -0,0 +1,60 @@
+#include <kernel/thread.h>
+#include <stdint.h>
+
+#define PSCI_FN_BASE (0x84000000U)
+#define PSCI_FN(n) (PSCI_FN_BASE + (n))
+
+#define PSCI_VERSION_0_2 (0x00000002)
+#define PSCI_VERSION_1_0 (0x00010000)
+#define PSCI_VERSION PSCI_FN(0)
+#define PSCI_CPU_SUSPEND PSCI_FN(1)
+#define PSCI_CPU_OFF PSCI_FN(2)
+#define PSCI_CPU_ON PSCI_FN(3)
+#define PSCI_AFFINITY_INFO PSCI_FN(4)
+#define PSCI_MIGRATE PSCI_FN(5)
+#define PSCI_MIGRATE_INFO_TYPE PSCI_FN(6)
+#define PSCI_MIGRATE_INFO_UP_CPU PSCI_FN(7)
+#define PSCI_SYSTEM_OFF PSCI_FN(8)
+#define PSCI_SYSTEM_RESET PSCI_FN(9)
+#define PSCI_PSCI_FEATURES PSCI_FN(10)
+#define PSCI_CPU_FREEZE PSCI_FN(11)
+#define PSCI_CPU_DEFAULT_SUSPEND PSCI_FN(12)
+#define PSCI_NODE_HW_STATE PSCI_FN(13)
+#define PSCI_SYSTEM_SUSPEND PSCI_FN(14)
+#define PSCI_PSCI_SET_SUSPEND_MODE PSCI_FN(15)
+#define PSCI_FN_STAT_RESIDENCY PSCI_FN(16)
+#define PSCI_FN_STAT_COUNT PSCI_FN(17)
+
+#define PSCI_NUM_CALLS 18
+
+#define PSCI_AFFINITY_LEVEL_ON 0
+#define PSCI_AFFINITY_LEVEL_OFF 1
+#define PSCI_AFFINITY_LEVEL_ON_PENDING 2
+
+#define PSCI_RET_SUCCESS (0)
+#define PSCI_RET_NOT_SUPPORTED (-1)
+#define PSCI_RET_INVALID_PARAMETERS (-2)
+#define PSCI_RET_DENIED (-3)
+#define PSCI_RET_ALREADY_ON (-4)
+#define PSCI_RET_ON_PENDING (-5)
+#define PSCI_RET_INTERNAL_FAILURE (-6)
+#define PSCI_RET_NOT_PRESENT (-7)
+#define PSCI_RET_DISABLED (-8)
+#define PSCI_RET_INVALID_ADDRESS (-9)
+
+uint32_t psci_version(void);
+int psci_cpu_suspend(uint32_t power_state, uintptr_t entry,
+ uint32_t context_id);
+int psci_cpu_off(void);
+int psci_cpu_on(uint32_t cpu_id, uint32_t entry, uint32_t context_id);
+int psci_affinity_info(uint32_t affinity, uint32_t lowest_affnity_level);
+int psci_migrate(uint32_t cpu_id);
+int psci_migrate_info_type(void);
+int psci_migrate_info_up_cpu(void);
+void psci_system_off(void);
+void psci_system_reset(void);
+int psci_features(uint32_t psci_fid);
+int psci_node_hw_state(uint32_t cpu_id, uint32_t power_level);
+int psci_stat_residency(uint32_t cpu_id, uint32_t power_state);
+int psci_stat_count(uint32_t cpu_id, uint32_t power_state);
+void tee_psci_handler(struct thread_smc_args *args);
diff --git a/core/arch/arm/include/sm/sm.h b/core/arch/arm/include/sm/sm.h
new file mode 100644
index 0000000..6368359
--- /dev/null
+++ b/core/arch/arm/include/sm/sm.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SM_SM_H
+#define SM_SM_H
+
+#include <types_ext.h>
+
+struct sm_mode_regs {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t irq_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+ uint32_t fiq_spsr;
+ uint32_t fiq_sp;
+ uint32_t fiq_lr;
+ /*
+ * Note that fiq_r{8-12} are not saved here. Instead thread_fiq_handler
+ * preserves r{8-12}.
+ */
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t abt_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t und_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+};
+
+struct sm_nsec_ctx {
+ struct sm_mode_regs mode_regs;
+
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+
+ /* return state */
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
+struct sm_sec_ctx {
+ struct sm_mode_regs mode_regs;
+
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+
+ /* return state */
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
+struct sm_ctx {
+ uint32_t pad;
+ struct sm_sec_ctx sec;
+ struct sm_nsec_ctx nsec;
+};
+
+/*
+ * The secure monitor reserves space at top of stack_tmp to hold struct
+ * sm_ctx.
+ */
+#define SM_STACK_TMP_RESERVE_SIZE sizeof(struct sm_ctx)
+
+
+
+/* Returns storage location of non-secure context for current CPU */
+struct sm_nsec_ctx *sm_get_nsec_ctx(void);
+
+/* Returns stack pointer to use in monitor mode for current CPU */
+void *sm_get_sp(void);
+
+/*
+ * Initializes secure monitor, must be called by each CPU
+ */
+void sm_init(vaddr_t stack_pointer);
+
+#endif /*SM_SM_H*/
diff --git a/core/arch/arm/include/sm/std_smc.h b/core/arch/arm/include/sm/std_smc.h
new file mode 100644
index 0000000..2b2e54d
--- /dev/null
+++ b/core/arch/arm/include/sm/std_smc.h
@@ -0,0 +1,22 @@
+#ifndef __STD_SMC_H__
+#define __STD_SMC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT 0x8400ff00
+#define ARM_STD_SVC_UID 0x8400ff01
+/* 0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION 0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR 0x0
+#define STD_SVC_VERSION_MINOR 0x1
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK 0xffe0u
+#define PSCI_FID_VALUE 0u
+#define is_psci_fid(_fid) \
+ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+void smc_std_handler(struct thread_smc_args *args);
+#endif
diff --git a/core/arch/arm/include/sm/tee_mon.h b/core/arch/arm/include/sm/tee_mon.h
new file mode 100644
index 0000000..725afb9
--- /dev/null
+++ b/core/arch/arm/include/sm/tee_mon.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_MON_H
+#define TEE_MON_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include "tee_api_types.h"
+#include "user_ta_header.h"
+
+extern TEE_Result init_teecore(void);
+
+#endif /* TEE_MON_H */
diff --git a/core/arch/arm/include/sm/teesmc_opteed.h b/core/arch/arm/include/sm/teesmc_opteed.h
new file mode 100644
index 0000000..c6e25e2
--- /dev/null
+++ b/core/arch/arm/include/sm/teesmc_opteed.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEESMC_OPTEED_H
+#define TEESMC_OPTEED_H
+
+/*
+ * This file specifies SMC function IDs used when returning from TEE to the
+ * secure monitor.
+ *
+ * All SMC Function IDs indicate SMC32 Calling Convention but will carry
+ * full 64 bit values in the argument registers if invoked from Aarch64
+ * mode. This violates the SMC Calling Convention, but since this
+ * convention only covers API towards Normal World it's something that
+ * only concerns the OP-TEE Dispatcher in ARM Trusted Firmware and OP-TEE
+ * OS at Secure EL1.
+ */
+
+/*
+ * Issued when returning from initial entry.
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ENTRY_DONE
+ * r1/x1 Pointer to entry vector
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE 0
+#define TEESMC_OPTEED_RETURN_ENTRY_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE)
+
+
+
+/*
+ * Issued when returning from "cpu_on" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ON_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ON_DONE 1
+#define TEESMC_OPTEED_RETURN_ON_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ON_DONE)
+
+/*
+ * Issued when returning from "cpu_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_OFF_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE 2
+#define TEESMC_OPTEED_RETURN_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE)
+
+/*
+ * Issued when returning from "cpu_suspend" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE 3
+#define TEESMC_OPTEED_RETURN_SUSPEND_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE)
+
+/*
+ * Issued when returning from "cpu_resume" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_RESUME_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE 4
+#define TEESMC_OPTEED_RETURN_RESUME_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE)
+
+/*
+ * Issued when returning from "std_smc" or "fast_smc" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_CALL_DONE
+ * r1-4/x1-4 Return value 0-3 which will passed to normal world in
+ * r0-3/x0-3
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE 5
+#define TEESMC_OPTEED_RETURN_CALL_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE)
+
+/*
+ * Issued when returning from "fiq" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_FIQ_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE 6
+#define TEESMC_OPTEED_RETURN_FIQ_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE)
+
+/*
+ * Issued when returning from "system_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE 7
+#define TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE)
+
+/*
+ * Issued when returning from "system_reset" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE 8
+#define TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE)
+
+#endif /*TEESMC_OPTEED_H*/
diff --git a/core/arch/arm/include/sm/teesmc_opteed_macros.h b/core/arch/arm/include/sm/teesmc_opteed_macros.h
new file mode 100644
index 0000000..00e9eed
--- /dev/null
+++ b/core/arch/arm/include/sm/teesmc_opteed_macros.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEESMC_OPTEED_MACROS_H
+#define TEESMC_OPTEED_MACROS_H
+
+#define TEESMC_OPTEED_RV(func_num) \
+ OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
+ OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED, (func_num))
+
+#endif /*TEESMC_OPTEED_MACROS_H*/
diff --git a/core/arch/arm/include/tee/arch_svc.h b/core/arch/arm/include/tee/arch_svc.h
new file mode 100644
index 0000000..1848865
--- /dev/null
+++ b/core/arch/arm/include/tee/arch_svc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_ARCH_SVC_H
+#define TEE_ARCH_SVC_H
+
+struct thread_svc_regs;
+
+void tee_svc_handler(struct thread_svc_regs *regs);
+
+/*
+ * Called from the assembly functions syscall_sys_return() and
+ * syscall_panic() to update the register values in the struct
+ * thread_svc_regs to return back to TEE Core from an earlier call to
+ * thread_enter_user_mode().
+ */
+uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
+ uint32_t panic_code, struct thread_svc_regs *regs);
+
+#endif /*TEE_ARCH_SVC_H*/
diff --git a/core/arch/arm/include/tee/entry_fast.h b/core/arch/arm/include/tee/entry_fast.h
new file mode 100644
index 0000000..a9951f2
--- /dev/null
+++ b/core/arch/arm/include/tee/entry_fast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_ENTRY_FAST_H
+#define TEE_ENTRY_FAST_H
+
+#include <kernel/thread.h>
+
+/* These functions are overridable by the specific target */
+void tee_entry_get_api_call_count(struct thread_smc_args *args);
+void tee_entry_get_api_uuid(struct thread_smc_args *args);
+void tee_entry_get_api_revision(struct thread_smc_args *args);
+void tee_entry_get_os_uuid(struct thread_smc_args *args);
+void tee_entry_get_os_revision(struct thread_smc_args *args);
+
+/*
+ * Returns the number of calls recognized by tee_entry(). Used by the
+ * specific target to calculate the total number of supported calls when
+ * overriding tee_entry_get_api_call_count().
+ */
+size_t tee_entry_generic_get_api_call_count(void);
+
+/* Fast call entry */
+void tee_entry_fast(struct thread_smc_args *args);
+
+#endif /* TEE_ENTRY_FAST_H */
diff --git a/core/arch/arm/include/tee/entry_std.h b/core/arch/arm/include/tee/entry_std.h
new file mode 100644
index 0000000..d545912
--- /dev/null
+++ b/core/arch/arm/include/tee/entry_std.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_ENTRY_STD_H
+#define TEE_ENTRY_STD_H
+
+#include <kernel/thread.h>
+
+/* Standard call entry */
+void tee_entry_std(struct thread_smc_args *args);
+
+#endif /* TEE_ENTRY_STD_H */
diff --git a/core/arch/arm/kernel/abort.c b/core/arch/arm/kernel/abort.c
new file mode 100644
index 0000000..3d29521
--- /dev/null
+++ b/core/arch/arm/kernel/abort.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/abort.h>
+#include <kernel/misc.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/panic.h>
+#include <kernel/user_ta.h>
+#include <kernel/unwind.h>
+#include <mm/core_mmu.h>
+#include <mm/tee_pager.h>
+#include <tee/tee_svc.h>
+#include <trace.h>
+#include <arm.h>
+
+enum fault_type {
+ FAULT_TYPE_USER_TA_PANIC,
+ FAULT_TYPE_USER_TA_VFP,
+ FAULT_TYPE_PAGEABLE,
+ FAULT_TYPE_IGNORE,
+};
+
+#ifdef CFG_CORE_UNWIND
+#ifdef ARM32
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.registers[0] = ai->regs->r0;
+ state.registers[1] = ai->regs->r1;
+ state.registers[2] = ai->regs->r2;
+ state.registers[3] = ai->regs->r3;
+ state.registers[4] = ai->regs->r4;
+ state.registers[5] = ai->regs->r5;
+ state.registers[6] = ai->regs->r6;
+ state.registers[7] = ai->regs->r7;
+ state.registers[8] = ai->regs->r8;
+ state.registers[9] = ai->regs->r9;
+ state.registers[10] = ai->regs->r10;
+ state.registers[11] = ai->regs->r11;
+ state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[15] = ai->pc;
+
+ do {
+ EMSG_RAW(" pc 0x%08x", state.registers[15]);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = ai->regs->elr;
+ state.fp = ai->regs->x29;
+
+ do {
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM64*/
+
+static void print_stack_unwind(struct abort_info *ai)
+{
+ EMSG_RAW("Call stack:");
+ __print_stack_unwind(ai);
+}
+#else /*CFG_CORE_UNWIND*/
+static void print_stack_unwind(struct abort_info *ai __unused)
+{
+}
+#endif /*CFG_CORE_UNWIND*/
+
+static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
+{
+ if (abort_type == ABORT_TYPE_DATA)
+ return "data";
+ if (abort_type == ABORT_TYPE_PREFETCH)
+ return "prefetch";
+ return "undef";
+}
+
+static __maybe_unused const char *fault_to_str(uint32_t abort_type,
+ uint32_t fault_descr)
+{
+ /* fault_descr is only valid for data or prefetch abort */
+ if (abort_type != ABORT_TYPE_DATA && abort_type != ABORT_TYPE_PREFETCH)
+ return "";
+
+ switch (core_mmu_get_fault_type(fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ return " (alignment fault)";
+ case CORE_MMU_FAULT_TRANSLATION:
+ return " (translation fault)";
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return " (read permission fault)";
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ return " (write permission fault)";
+ default:
+ return "";
+ }
+}
+
+static __maybe_unused void print_detailed_abort(
+ struct abort_info *ai __maybe_unused,
+ const char *ctx __maybe_unused)
+{
+ EMSG_RAW("\n");
+ EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s\n",
+ ctx, abort_type_to_str(ai->abort_type), ai->va,
+ fault_to_str(ai->abort_type, ai->fault_descr));
+#ifdef ARM32
+ EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0(), read_ttbr1(),
+ read_contextidr());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), ai->regs->spsr);
+ EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
+ ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
+ EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
+ ai->regs->r1, ai->regs->r5, ai->regs->r9,
+ read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
+ ai->regs->r2, ai->regs->r6, ai->regs->r10,
+ read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
+ EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
+ ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64 " cidr 0x%X\n",
+ ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
+ read_contextidr_el1());
+ EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
+ get_core_pos(), (uint32_t)ai->regs->spsr);
+ EMSG_RAW("x0 %016" PRIx64 " x1 %016" PRIx64,
+ ai->regs->x0, ai->regs->x1);
+ EMSG_RAW("x2 %016" PRIx64 " x3 %016" PRIx64,
+ ai->regs->x2, ai->regs->x3);
+ EMSG_RAW("x4 %016" PRIx64 " x5 %016" PRIx64,
+ ai->regs->x4, ai->regs->x5);
+ EMSG_RAW("x6 %016" PRIx64 " x7 %016" PRIx64,
+ ai->regs->x6, ai->regs->x7);
+ EMSG_RAW("x8 %016" PRIx64 " x9 %016" PRIx64,
+ ai->regs->x8, ai->regs->x9);
+ EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
+ ai->regs->x10, ai->regs->x11);
+ EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
+ ai->regs->x12, ai->regs->x13);
+ EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
+ ai->regs->x14, ai->regs->x15);
+ EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
+ ai->regs->x16, ai->regs->x17);
+ EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
+ ai->regs->x18, ai->regs->x19);
+ EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
+ ai->regs->x20, ai->regs->x21);
+ EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
+ ai->regs->x22, ai->regs->x23);
+ EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
+ ai->regs->x24, ai->regs->x25);
+ EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
+ ai->regs->x26, ai->regs->x27);
+ EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
+ ai->regs->x28, ai->regs->x29);
+ EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
+ ai->regs->x30, ai->regs->elr);
+ EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
+#endif /*ARM64*/
+}
+
+static void print_user_abort(struct abort_info *ai __maybe_unused)
+{
+#ifdef CFG_TEE_CORE_TA_TRACE
+ print_detailed_abort(ai, "user TA");
+ tee_ta_dump_current();
+#endif
+}
+
+void abort_print(struct abort_info *ai __maybe_unused)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+ print_detailed_abort(ai, "core");
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+}
+
+void abort_print_error(struct abort_info *ai)
+{
+#if (TRACE_LEVEL >= TRACE_INFO)
+ /* full verbose log at DEBUG level */
+ print_detailed_abort(ai, "core");
+#else
+#ifdef ARM32
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
+ "CPUID 0x%x CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
+ read_mpidr(), read_spsr());
+#endif /*ARM32*/
+#ifdef ARM64
+ EMSG("%s-abort at 0x%" PRIxVA "\n"
+ "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X\n"
+ "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
+ read_contextidr_el1(),
+ read_mpidr_el1(), (uint32_t)ai->regs->spsr);
+#endif /*ARM64*/
+#endif /*TRACE_LEVEL >= TRACE_INFO*/
+ print_stack_unwind(ai);
+}
+
+#ifdef ARM32
+static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
+ struct abort_info *ai)
+{
+ switch (abort_type) {
+ case ABORT_TYPE_DATA:
+ ai->fault_descr = read_dfsr();
+ ai->va = read_dfar();
+ break;
+ case ABORT_TYPE_PREFETCH:
+ ai->fault_descr = read_ifsr();
+ ai->va = read_ifar();
+ break;
+ default:
+ ai->fault_descr = 0;
+ ai->va = regs->elr;
+ break;
+ }
+ ai->abort_type = abort_type;
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_abort_info(uint32_t abort_type __unused,
+ struct thread_abort_regs *regs, struct abort_info *ai)
+{
+ ai->fault_descr = read_esr_el1();
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_IABT_EL0:
+ case ESR_EC_IABT_EL1:
+ ai->abort_type = ABORT_TYPE_PREFETCH;
+ ai->va = read_far_el1();
+ break;
+ case ESR_EC_DABT_EL0:
+ case ESR_EC_DABT_EL1:
+ case ESR_EC_SP_ALIGN:
+ ai->abort_type = ABORT_TYPE_DATA;
+ ai->va = read_far_el1();
+ break;
+ default:
+ ai->abort_type = ABORT_TYPE_UNDEF;
+ ai->va = regs->elr;
+ }
+ ai->pc = regs->elr;
+ ai->regs = regs;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->r1 = true;
+ ai->regs->r2 = 0xdeadbeef;
+ ai->regs->elr = (uint32_t)thread_unwind_user_mode;
+ ai->regs->spsr = read_cpsr();
+ ai->regs->spsr &= ~CPSR_MODE_MASK;
+ ai->regs->spsr |= CPSR_MODE_SVC;
+ ai->regs->spsr &= ~CPSR_FIA;
+ ai->regs->spsr |= read_spsr() & CPSR_FIA;
+ /* Select Thumb or ARM mode */
+ if (ai->regs->elr & 1)
+ ai->regs->spsr |= CPSR_T;
+ else
+ ai->regs->spsr &= ~CPSR_T;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void handle_user_ta_panic(struct abort_info *ai)
+{
+ uint32_t daif;
+
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
+ ai->regs->x1 = true;
+ ai->regs->x2 = 0xdeadbeef;
+ ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
+ ai->regs->sp_el0 = thread_get_saved_thread_sp();
+
+ daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
+ /* XXX what about DAIF_D? */
+ ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
+}
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+static void handle_user_ta_vfp(void)
+{
+ struct tee_ta_session *s;
+
+ if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
+ panic();
+
+ thread_user_enable_vfp(&to_user_ta_ctx(s->ctx)->vfp);
+}
+#endif /*CFG_WITH_VFP*/
+
+#ifdef CFG_WITH_USER_TA
+#ifdef ARM32
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from user mode */
+bool abort_is_user_exception(struct abort_info *ai)
+{
+ uint32_t spsr = ai->regs->spsr;
+
+ if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+ return true;
+ if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+ SPSR_64_MODE_EL0)
+ return true;
+ return false;
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_USER_TA*/
+bool abort_is_user_exception(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_USER_TA*/
+
+#ifdef ARM32
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai)
+{
+ return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Returns true if the exception originated from abort mode */
+static bool is_abort_in_abort_handler(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*ARM64*/
+
+
+#if defined(CFG_WITH_VFP) && defined(CFG_WITH_USER_TA)
+#ifdef ARM32
+
+#define T32_INSTR(w1, w0) \
+ ((((uint32_t)(w0) & 0xffff) << 16) | ((uint32_t)(w1) & 0xffff))
+
+#define T32_VTRANS32_MASK T32_INSTR(0xff << 8, (7 << 9) | 1 << 4)
+#define T32_VTRANS32_VAL T32_INSTR(0xee << 8, (5 << 9) | 1 << 4)
+
+#define T32_VTRANS64_MASK T32_INSTR((0xff << 8) | (7 << 5), 7 << 9)
+#define T32_VTRANS64_VAL T32_INSTR((0xec << 8) | (2 << 5), 5 << 9)
+
+#define T32_VLDST_MASK T32_INSTR((0xff << 8) | (1 << 4), 0)
+#define T32_VLDST_VAL T32_INSTR( 0xf9 << 8 , 0)
+
+#define T32_VXLDST_MASK T32_INSTR(0xfc << 8, 7 << 9)
+#define T32_VXLDST_VAL T32_INSTR(0xec << 8, 5 << 9)
+
+#define T32_VPROC_MASK T32_INSTR(0xef << 8, 0)
+#define T32_VPROC_VAL T32_VPROC_MASK
+
+#define A32_INSTR(x) ((uint32_t)(x))
+
+#define A32_VTRANS32_MASK A32_INSTR(SHIFT_U32(0xf, 24) | \
+ SHIFT_U32(7, 9) | BIT32(4))
+#define A32_VTRANS32_VAL A32_INSTR(SHIFT_U32(0xe, 24) | \
+ SHIFT_U32(5, 9) | BIT32(4))
+
+#define A32_VTRANS64_MASK A32_INSTR(SHIFT_U32(0x7f, 21) | SHIFT_U32(7, 9))
+#define A32_VTRANS64_VAL A32_INSTR(SHIFT_U32(0x62, 21) | SHIFT_U32(5, 9))
+
+#define A32_VLDST_MASK A32_INSTR(SHIFT_U32(0xff, 24) | BIT32(20))
+#define A32_VLDST_VAL A32_INSTR(SHIFT_U32(0xf4, 24))
+#define A32_VXLDST_MASK A32_INSTR(SHIFT_U32(7, 25) | SHIFT_U32(7, 9))
+#define A32_VXLDST_VAL A32_INSTR(SHIFT_U32(6, 25) | SHIFT_U32(5, 9))
+
+#define A32_VPROC_MASK A32_INSTR(SHIFT_U32(0x7f, 25))
+#define A32_VPROC_VAL A32_INSTR(SHIFT_U32(0x79, 25))
+
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ TEE_Result res;
+ uint32_t instr;
+
+ if ((ai->abort_type != ABORT_TYPE_UNDEF) || vfp_is_enabled())
+ return false;
+
+ res = tee_svc_copy_from_user(&instr, (void *)ai->pc, sizeof(instr));
+ if (res != TEE_SUCCESS)
+ return false;
+
+ if (ai->regs->spsr & CPSR_T) {
+ /* Thumb mode */
+ return ((instr & T32_VTRANS32_MASK) == T32_VTRANS32_VAL) ||
+ ((instr & T32_VTRANS64_MASK) == T32_VTRANS64_VAL) ||
+ ((instr & T32_VLDST_MASK) == T32_VLDST_VAL) ||
+ ((instr & T32_VXLDST_MASK) == T32_VXLDST_VAL) ||
+ ((instr & T32_VPROC_MASK) == T32_VPROC_VAL);
+ } else {
+ /* ARM mode */
+ return ((instr & A32_VTRANS32_MASK) == A32_VTRANS32_VAL) ||
+ ((instr & A32_VTRANS64_MASK) == A32_VTRANS64_VAL) ||
+ ((instr & A32_VLDST_MASK) == A32_VLDST_VAL) ||
+ ((instr & A32_VXLDST_MASK) == A32_VXLDST_VAL) ||
+ ((instr & A32_VPROC_MASK) == A32_VPROC_VAL);
+ }
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static bool is_vfp_fault(struct abort_info *ai)
+{
+ switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
+ case ESR_EC_FP_ASIMD:
+ case ESR_EC_AARCH32_FP:
+ case ESR_EC_AARCH64_FP:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /*ARM64*/
+#else /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+static bool is_vfp_fault(struct abort_info *ai __unused)
+{
+ return false;
+}
+#endif /*CFG_WITH_VFP && CFG_WITH_USER_TA*/
+
+static enum fault_type get_fault_type(struct abort_info *ai)
+{
+ if (abort_is_user_exception(ai)) {
+ if (is_vfp_fault(ai))
+ return FAULT_TYPE_USER_TA_VFP;
+#ifndef CFG_WITH_PAGER
+ return FAULT_TYPE_USER_TA_PANIC;
+#endif
+ }
+
+ if (is_abort_in_abort_handler(ai)) {
+ abort_print_error(ai);
+ panic("[abort] abort in abort handler (trap CPU)");
+ }
+
+ if (ai->abort_type == ABORT_TYPE_UNDEF) {
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] undefined abort (trap CPU)");
+ }
+
+ switch (core_mmu_get_fault_type(ai->fault_descr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] alignement fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_ACCESS_BIT:
+ if (abort_is_user_exception(ai))
+ return FAULT_TYPE_USER_TA_PANIC;
+ abort_print_error(ai);
+ panic("[abort] access bit fault! (trap CPU)");
+ break;
+
+ case CORE_MMU_FAULT_DEBUG_EVENT:
+ abort_print(ai);
+ DMSG("[abort] Ignoring debug event!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_WRITE_PERMISSION:
+ case CORE_MMU_FAULT_READ_PERMISSION:
+ return FAULT_TYPE_PAGEABLE;
+
+ case CORE_MMU_FAULT_ASYNC_EXTERNAL:
+ abort_print(ai);
+ DMSG("[abort] Ignoring async external abort!");
+ return FAULT_TYPE_IGNORE;
+
+ case CORE_MMU_FAULT_OTHER:
+ default:
+ abort_print(ai);
+ DMSG("[abort] Unhandled fault!");
+ return FAULT_TYPE_IGNORE;
+ }
+}
+
+void abort_handler(uint32_t abort_type, struct thread_abort_regs *regs)
+{
+ struct abort_info ai;
+ bool handled;
+
+ set_abort_info(abort_type, regs, &ai);
+
+ switch (get_fault_type(&ai)) {
+ case FAULT_TYPE_IGNORE:
+ break;
+ case FAULT_TYPE_USER_TA_PANIC:
+ DMSG("[abort] abort in User mode (TA will panic)");
+ print_user_abort(&ai);
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ break;
+#ifdef CFG_WITH_VFP
+ case FAULT_TYPE_USER_TA_VFP:
+ handle_user_ta_vfp();
+ break;
+#endif
+ case FAULT_TYPE_PAGEABLE:
+ default:
+ thread_kernel_save_vfp();
+ handled = tee_pager_handle_fault(&ai);
+ thread_kernel_restore_vfp();
+ if (!handled) {
+ if (!abort_is_user_exception(&ai)) {
+ abort_print_error(&ai);
+ panic("unhandled pageable abort");
+ }
+ print_user_abort(&ai);
+ DMSG("[abort] abort in User mode (TA will panic)");
+ vfp_disable();
+ handle_user_ta_panic(&ai);
+ }
+ break;
+ }
+}
diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c
new file mode 100644
index 0000000..99c0a63
--- /dev/null
+++ b/core/arch/arm/kernel/asm-defines.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/thread.h>
+#include <sm/sm.h>
+#include <types_ext.h>
+#include "thread_private.h"
+
+#define DEFINES void __defines(void); void __defines(void)
+
+#define DEFINE(def, val) \
+ asm volatile("\n==>" #def " %0 " #val : : "i" (val))
+
+DEFINES
+{
+#ifdef ARM32
+ DEFINE(SM_NSEC_CTX_R0, offsetof(struct sm_nsec_ctx, r0));
+ DEFINE(SM_NSEC_CTX_R8, offsetof(struct sm_nsec_ctx, r8));
+ DEFINE(SM_SEC_CTX_R0, offsetof(struct sm_sec_ctx, r0));
+ DEFINE(SM_SEC_CTX_MON_LR, offsetof(struct sm_sec_ctx, mon_lr));
+ DEFINE(SM_CTX_SIZE, sizeof(struct sm_ctx));
+ DEFINE(SM_CTX_NSEC, offsetof(struct sm_ctx, nsec));
+ DEFINE(SM_CTX_SEC, offsetof(struct sm_ctx, sec));
+
+ DEFINE(THREAD_VECTOR_TABLE_FIQ_ENTRY,
+ offsetof(struct thread_vector_table, fiq_entry));
+
+ DEFINE(THREAD_SVC_REG_R0, offsetof(struct thread_svc_regs, r0));
+ DEFINE(THREAD_SVC_REG_R5, offsetof(struct thread_svc_regs, r5));
+ DEFINE(THREAD_SVC_REG_R6, offsetof(struct thread_svc_regs, r6));
+#endif /*ARM32*/
+
+#ifdef ARM64
+ DEFINE(THREAD_SMC_ARGS_X0, offsetof(struct thread_smc_args, a0));
+ DEFINE(THREAD_SMC_ARGS_SIZE, sizeof(struct thread_smc_args));
+
+ DEFINE(THREAD_SVC_REG_X0, offsetof(struct thread_svc_regs, x0));
+ DEFINE(THREAD_SVC_REG_X5, offsetof(struct thread_svc_regs, x5));
+ DEFINE(THREAD_SVC_REG_X6, offsetof(struct thread_svc_regs, x6));
+ DEFINE(THREAD_SVC_REG_X30, offsetof(struct thread_svc_regs, x30));
+ DEFINE(THREAD_SVC_REG_ELR, offsetof(struct thread_svc_regs, elr));
+ DEFINE(THREAD_SVC_REG_SPSR, offsetof(struct thread_svc_regs, spsr));
+ DEFINE(THREAD_SVC_REG_SP_EL0, offsetof(struct thread_svc_regs, sp_el0));
+ DEFINE(THREAD_SVC_REG_SIZE, sizeof(struct thread_svc_regs));
+
+ /* struct thread_abort_regs */
+ DEFINE(THREAD_ABT_REG_X0, offsetof(struct thread_abort_regs, x0));
+ DEFINE(THREAD_ABT_REG_X2, offsetof(struct thread_abort_regs, x2));
+ DEFINE(THREAD_ABT_REG_X30, offsetof(struct thread_abort_regs, x30));
+ DEFINE(THREAD_ABT_REG_SPSR, offsetof(struct thread_abort_regs, spsr));
+ DEFINE(THREAD_ABT_REGS_SIZE, sizeof(struct thread_abort_regs));
+
+ /* struct thread_ctx */
+ DEFINE(THREAD_CTX_KERN_SP, offsetof(struct thread_ctx, kern_sp));
+ DEFINE(THREAD_CTX_SIZE, sizeof(struct thread_ctx));
+
+ /* struct thread_ctx_regs */
+ DEFINE(THREAD_CTX_REGS_SP, offsetof(struct thread_ctx_regs, sp));
+ DEFINE(THREAD_CTX_REGS_X0, offsetof(struct thread_ctx_regs, x[0]));
+ DEFINE(THREAD_CTX_REGS_X1, offsetof(struct thread_ctx_regs, x[1]));
+ DEFINE(THREAD_CTX_REGS_X4, offsetof(struct thread_ctx_regs, x[4]));
+ DEFINE(THREAD_CTX_REGS_X19, offsetof(struct thread_ctx_regs, x[19]));
+
+ /* struct thread_user_mode_rec */
+ DEFINE(THREAD_USER_MODE_REC_EXIT_STATUS0_PTR,
+ offsetof(struct thread_user_mode_rec, exit_status0_ptr));
+ DEFINE(THREAD_USER_MODE_REC_X19,
+ offsetof(struct thread_user_mode_rec, x[0]));
+ DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));
+
+ /* struct thread_core_local */
+ DEFINE(THREAD_CORE_LOCAL_TMP_STACK_VA_END,
+ offsetof(struct thread_core_local, tmp_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_CURR_THREAD,
+ offsetof(struct thread_core_local, curr_thread));
+ DEFINE(THREAD_CORE_LOCAL_FLAGS,
+ offsetof(struct thread_core_local, flags));
+ DEFINE(THREAD_CORE_LOCAL_ABT_STACK_VA_END,
+ offsetof(struct thread_core_local, abt_stack_va_end));
+ DEFINE(THREAD_CORE_LOCAL_X0, offsetof(struct thread_core_local, x[0]));
+ DEFINE(THREAD_CORE_LOCAL_X2, offsetof(struct thread_core_local, x[2]));
+#endif /*ARM64*/
+}
diff --git a/core/arch/arm/kernel/cache_helpers_a64.S b/core/arch/arm/kernel/cache_helpers_a64.S
new file mode 100644
index 0000000..d3a0248
--- /dev/null
+++ b/core/arch/arm/kernel/cache_helpers_a64.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm64.h>
+#include <asm.S>
+
+	/*
+	 * Set \reg to the smallest data cache line size, in bytes.
+	 * CTR_EL0.DminLine (bits [19:16]) is log2 of the line size in
+	 * 4-byte words, so size = 4 << DminLine.  \tmp is clobbered.
+	 */
+	.macro dcache_line_size reg, tmp
+	mrs	\tmp, ctr_el0
+	ubfx	\tmp, \tmp, #16, #4
+	mov	\reg, #4
+	lsl	\reg, \reg, \tmp
+	.endm
+
+	/*
+	 * Set \reg to the smallest instruction cache line size, in bytes.
+	 * CTR_EL0.IminLine (bits [3:0]) is log2 of the line size in
+	 * 4-byte words, so size = 4 << IminLine.  \tmp is clobbered.
+	 */
+	.macro icache_line_size reg, tmp
+	mrs	\tmp, ctr_el0
+	and	\tmp, \tmp, #0xf
+	mov	\reg, #4
+	lsl	\reg, \reg, \tmp
+	.endm
+
+
+	/* ------------------------------------------
+	 * flush_dcache_range: clean+invalidate the
+	 * data cache by VA (dc civac) over the range
+	 * [addr, addr + size).
+	 * 'x0' = addr, 'x1' = size
+	 * Clobbers x0-x3.
+	 * ------------------------------------------
+	 */
+FUNC flush_dcache_range , :
+	dcache_line_size x2, x3		// x2 = line size in bytes
+	add	x1, x0, x1		// x1 = end address (exclusive)
+	sub	x3, x2, #1		// x3 = line-size mask
+	bic	x0, x0, x3		// align start down to a line boundary
+flush_loop:
+	dc	civac, x0		// clean+invalidate line by VA to PoC
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	flush_loop
+	dsb	sy			// complete all maintenance before return
+	ret
+END_FUNC flush_dcache_range
+
+
+	/* ------------------------------------------
+	 * inv_dcache_range: invalidate the data
+	 * cache by VA (dc ivac) over the range
+	 * [addr, addr + size).
+	 * 'x0' = addr, 'x1' = size
+	 * Clobbers x0-x3.
+	 *
+	 * NOTE(review): the start is aligned down and
+	 * whole lines are invalidated, so dirty data
+	 * sharing an unaligned first/last line with
+	 * the range is discarded — callers should
+	 * pass cache-line-aligned buffers. Confirm
+	 * with call sites.
+	 * ------------------------------------------
+	 */
+FUNC inv_dcache_range , :
+	dcache_line_size x2, x3		// x2 = line size in bytes
+	add	x1, x0, x1		// x1 = end address (exclusive)
+	sub	x3, x2, #1		// x3 = line-size mask
+	bic	x0, x0, x3		// align start down to a line boundary
+inv_loop:
+	dc	ivac, x0		// invalidate line by VA to PoC
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	inv_loop
+	dsb	sy			// complete all maintenance before return
+	ret
+END_FUNC inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * and will carry out the operation on each data cache from level 0
+ * to the level in x3 in sequence
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+	/*
+	 * Read clidr_el1, extract the \fw-bit cache-level field at bit
+	 * position \shift, shift it into CSSELR level position (\ls) to
+	 * form the x3 argument, then tail-branch into do_dcsw_op.
+	 * x0 must already hold the operation index expected there.
+	 */
+	.macro dcsw_op shift, fw, ls
+	mrs	x9, clidr_el1
+	ubfx	x3, x9, \shift, \fw
+	lsl	x3, x3, \ls
+	b	do_dcsw_op
+	.endm
+
+/*
+ * Walk every data/unified cache level from 0 up to the level given in
+ * x3 and perform a set/way maintenance operation on each.
+ *
+ * Register contract:
+ *   x0 = operation index selecting the dcsw_loop_table entry:
+ *        0 = dc isw, 1 = dc cisw, 2 = dc csw (each entry is
+ *        8 instructions = 32 bytes, hence the "lsl #5" below)
+ *   x3 = last cache level to operate on, pre-shifted by
+ *        CSSELR_LEVEL_SHIFT; 0 means nothing to do (early exit)
+ *   x9 = clidr_el1
+ * Clobbers x0-x2, x4-x11, x14, x16, x17 and csselr_el1.
+ */
+LOCAL_FUNC do_dcsw_op , :
+	cbz	x3, exit
+	mov	x10, xzr
+	adr	x14, dcsw_loop_table	// compute inner loop address
+	add	x14, x14, x0, lsl #5	// inner loop is 8x32-bit instructions
+	mov	x0, x9
+	mov	w8, #1
+loop1:
+	add	x2, x10, x10, lsr #1	// work out 3x current cache level
+	lsr	x1, x0, x2		// extract cache type bits from clidr
+	and	x1, x1, #7		// mask the bits for current cache only
+	cmp	x1, #2			// see what cache we have at this level
+	b.lt	level_done		// nothing to do if no cache or icache
+
+	msr	csselr_el1, x10		// select current cache level in csselr
+	isb				// isb to sych the new cssr&csidr
+	mrs	x1, ccsidr_el1		// read the new ccsidr
+	and	x2, x1, #7		// extract the length of the cache lines
+	add	x2, x2, #4		// add 4 (line length offset)
+	ubfx	x4, x1, #3, #10		// maximum way number
+	clz	w5, w4			// bit position of way size increment
+	lsl	w9, w4, w5		// w9 = aligned max way number
+	lsl	w16, w8, w5		// w16 = way number loop decrement
+	orr	w9, w10, w9		// w9 = combine way and cache number
+	ubfx	w6, w1, #13, #15	// w6 = max set number
+	lsl	w17, w8, w2		// w17 = set number loop decrement
+	dsb	sy			// barrier before we start this level
+	br	x14			// jump to DC operation specific loop
+
+	/*
+	 * One copy of the set/way inner loop is emitted per operation
+	 * (see dcsw_loop_table); do_dcsw_op branches into the right one.
+	 */
+	.macro	dcsw_loop _op
+loop2_\_op:
+	lsl	w7, w6, w2		// w7 = aligned max set number
+
+loop3_\_op:
+	orr	w11, w9, w7		// combine cache, way and set number
+	dc	\_op, x11
+	subs	w7, w7, w17		// decrement set number
+	b.ge	loop3_\_op
+
+	subs	x9, x9, x16		// decrement way number
+	b.ge	loop2_\_op
+
+	b	level_done
+	.endm
+
+level_done:
+	add	x10, x10, #2		// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+	msr	csselr_el1, xzr		// select cache level 0 in csselr
+	dsb	sy			// barrier to complete final cache op
+	isb
+exit:
+	ret
+
+dcsw_loop_table:
+	dcsw_loop	isw
+	dcsw_loop	cisw
+	dcsw_loop	csw
+END_FUNC do_dcsw_op
+
+
+/*
+ * Apply the set/way operation selected by x0 (see do_dcsw_op) to all
+ * data cache levels up to the Level of Unification Inner Shareable,
+ * taken from the CLIDR_EL1.LoUIS field.
+ */
+FUNC dcsw_op_louis , :
+	dcsw_op #CLIDR_LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_louis
+
+
+/*
+ * Apply the set/way operation selected by x0 (see do_dcsw_op) to all
+ * data cache levels up to the Level of Coherency, taken from the
+ * CLIDR_EL1.LoC field.
+ */
+FUNC dcsw_op_all , :
+	dcsw_op #CLIDR_LOC_SHIFT, #CLIDR_FIELD_WIDTH, #CSSELR_LEVEL_SHIFT
+END_FUNC dcsw_op_all
diff --git a/core/arch/arm/kernel/elf32.h b/core/arch/arm/kernel/elf32.h
new file mode 100644
index 0000000..d374208
--- /dev/null
+++ b/core/arch/arm/kernel/elf32.h
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF32_H_
+#define _SYS_ELF32_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 32-bit architectures.
+ */
+
+typedef uint32_t Elf32_Addr;
+typedef uint16_t Elf32_Half;
+typedef uint32_t Elf32_Off;
+typedef int32_t Elf32_Sword;
+typedef uint32_t Elf32_Word;
+typedef uint64_t Elf32_Lword;
+
+/* Type of dynamic symbol hash table bucket and chain elements. */
+typedef Elf32_Word Elf32_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf32_Word Elf32_Size;
+typedef Elf32_Sword Elf32_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+	unsigned char e_ident[EI_NIDENT]; /* File identification. */
+	Elf32_Half e_type; /* File type. */
+	Elf32_Half e_machine; /* Machine architecture. */
+	Elf32_Word e_version; /* ELF format version. */
+	Elf32_Addr e_entry; /* Entry point. */
+	Elf32_Off e_phoff; /* Program header file offset. */
+	Elf32_Off e_shoff; /* Section header file offset. */
+	Elf32_Word e_flags; /* Architecture-specific flags. */
+	Elf32_Half e_ehsize; /* Size of ELF header in bytes. */
+	Elf32_Half e_phentsize; /* Size of program header entry. */
+	Elf32_Half e_phnum; /* Number of program header entries. */
+	Elf32_Half e_shentsize; /* Size of section header entry. */
+	Elf32_Half e_shnum; /* Number of section header entries. */
+	Elf32_Half e_shstrndx; /* Section name strings section. */
+} Elf32_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+	Elf32_Word sh_name; /* Section name (index into the
+				section header string table). */
+	Elf32_Word sh_type; /* Section type. */
+	Elf32_Word sh_flags; /* Section flags. */
+	Elf32_Addr sh_addr; /* Address in memory image. */
+	Elf32_Off sh_offset; /* Offset in file. */
+	Elf32_Word sh_size; /* Size in bytes. */
+	Elf32_Word sh_link; /* Index of a related section. */
+	Elf32_Word sh_info; /* Depends on section type. */
+	Elf32_Word sh_addralign; /* Alignment in bytes. */
+	Elf32_Word sh_entsize; /* Size of each entry in section. */
+} Elf32_Shdr;
+
+/*
+ * Program header.
+ */
+
+typedef struct {
+	Elf32_Word p_type; /* Entry type. */
+	Elf32_Off p_offset; /* File offset of contents. */
+	Elf32_Addr p_vaddr; /* Virtual address in memory image. */
+	Elf32_Addr p_paddr; /* Physical address (not used). */
+	Elf32_Word p_filesz; /* Size of contents in file. */
+	Elf32_Word p_memsz; /* Size of contents in memory. */
+	Elf32_Word p_flags; /* Access permission flags. */
+	Elf32_Word p_align; /* Alignment in memory and file. */
+} Elf32_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+	Elf32_Sword d_tag; /* Entry type. */
+	union {
+		Elf32_Word d_val; /* Integer value. */
+		Elf32_Addr d_ptr; /* Address value. */
+	} d_un;
+} Elf32_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+	Elf32_Addr r_offset; /* Location to be relocated. */
+	Elf32_Word r_info; /* Relocation type and symbol index. */
+} Elf32_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+	Elf32_Addr r_offset; /* Location to be relocated. */
+	Elf32_Word r_info; /* Relocation type and symbol index. */
+	Elf32_Sword r_addend; /* Addend. */
+} Elf32_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF32_R_SYM(info) ((info) >> 8)
+#define ELF32_R_TYPE(info) ((unsigned char)(info))
+
+/* Macro for constructing r_info from field values. */
+#define ELF32_R_INFO(sym, type) (((sym) << 8) + (unsigned char)(type))
+
+/*
+ * Note entry header (layout shared with ELF64; see elf_common.h).
+ */
+typedef Elf_Note Elf32_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+	Elf32_Lword m_value; /* symbol value */
+	Elf32_Word m_info; /* size + index */
+	Elf32_Word m_poffset; /* symbol offset */
+	Elf32_Half m_repeat; /* repeat count */
+	Elf32_Half m_stride; /* stride info */
+} Elf32_Move;
+
+/*
+ * The macros compose and decompose values for Move.r_info
+ *
+ * sym = ELF32_M_SYM(M.m_info)
+ * size = ELF32_M_SIZE(M.m_info)
+ * M.m_info = ELF32_M_INFO(sym, size)
+ */
+#define ELF32_M_SYM(info) ((info)>>8)
+#define ELF32_M_SIZE(info) ((unsigned char)(info))
+#define ELF32_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+	Elf32_Word c_tag; /* how to interpret value */
+	union {
+		Elf32_Word c_val;
+		Elf32_Addr c_ptr;
+	} c_un;
+} Elf32_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+	Elf32_Word st_name; /* String table index of name. */
+	Elf32_Addr st_value; /* Symbol value. */
+	Elf32_Word st_size; /* Size of associated object. */
+	unsigned char st_info; /* Type and binding information. */
+	unsigned char st_other; /* Reserved (not used). */
+	Elf32_Half st_shndx; /* Section index of symbol. */
+} Elf32_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF32_ST_BIND(info) ((info) >> 4)
+#define ELF32_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF32_ST_VISIBILITY(oth) ((oth) & 0x3)
+
+/* Structures used by Sun & GNU symbol versioning. */
+typedef struct
+{
+	Elf32_Half vd_version;
+	Elf32_Half vd_flags;
+	Elf32_Half vd_ndx;
+	Elf32_Half vd_cnt;
+	Elf32_Word vd_hash;
+	Elf32_Word vd_aux;
+	Elf32_Word vd_next;
+} Elf32_Verdef;
+
+typedef struct
+{
+	Elf32_Word vda_name;
+	Elf32_Word vda_next;
+} Elf32_Verdaux;
+
+typedef struct
+{
+	Elf32_Half vn_version;
+	Elf32_Half vn_cnt;
+	Elf32_Word vn_file;
+	Elf32_Word vn_aux;
+	Elf32_Word vn_next;
+} Elf32_Verneed;
+
+typedef struct
+{
+	Elf32_Word vna_hash;
+	Elf32_Half vna_flags;
+	Elf32_Half vna_other;
+	Elf32_Word vna_name;
+	Elf32_Word vna_next;
+} Elf32_Vernaux;
+
+/* Version symbol index, one per dynamic symbol table entry. */
+typedef Elf32_Half Elf32_Versym;
+
+typedef struct {
+	Elf32_Half si_boundto; /* direct bindings - symbol bound to */
+	Elf32_Half si_flags; /* per symbol flags */
+} Elf32_Syminfo;
+
+#endif /* !_SYS_ELF32_H_ */
diff --git a/core/arch/arm/kernel/elf64.h b/core/arch/arm/kernel/elf64.h
new file mode 100644
index 0000000..c468dcd
--- /dev/null
+++ b/core/arch/arm/kernel/elf64.h
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF64_H_
+#define _SYS_ELF64_H_ 1
+
+#include "elf_common.h"
+
+/*
+ * ELF definitions common to all 64-bit architectures.
+ */
+
+typedef uint64_t Elf64_Addr;
+typedef uint16_t Elf64_Half;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Sword;
+typedef int64_t Elf64_Sxword;
+typedef uint32_t Elf64_Word;
+typedef uint64_t Elf64_Lword;
+typedef uint64_t Elf64_Xword;
+
+/*
+ * Types of dynamic symbol hash table bucket and chain elements.
+ *
+ * This is inconsistent among 64 bit architectures, so a machine dependent
+ * typedef is required.
+ */
+
+typedef Elf64_Word Elf64_Hashelt;
+
+/* Non-standard class-dependent datatype used for abstraction. */
+typedef Elf64_Xword Elf64_Size;
+typedef Elf64_Sxword Elf64_Ssize;
+
+/*
+ * ELF header.
+ */
+
+typedef struct {
+	unsigned char e_ident[EI_NIDENT]; /* File identification. */
+	Elf64_Half e_type; /* File type. */
+	Elf64_Half e_machine; /* Machine architecture. */
+	Elf64_Word e_version; /* ELF format version. */
+	Elf64_Addr e_entry; /* Entry point. */
+	Elf64_Off e_phoff; /* Program header file offset. */
+	Elf64_Off e_shoff; /* Section header file offset. */
+	Elf64_Word e_flags; /* Architecture-specific flags. */
+	Elf64_Half e_ehsize; /* Size of ELF header in bytes. */
+	Elf64_Half e_phentsize; /* Size of program header entry. */
+	Elf64_Half e_phnum; /* Number of program header entries. */
+	Elf64_Half e_shentsize; /* Size of section header entry. */
+	Elf64_Half e_shnum; /* Number of section header entries. */
+	Elf64_Half e_shstrndx; /* Section name strings section. */
+} Elf64_Ehdr;
+
+/*
+ * Section header.
+ */
+
+typedef struct {
+	Elf64_Word sh_name; /* Section name (index into the
+				section header string table). */
+	Elf64_Word sh_type; /* Section type. */
+	Elf64_Xword sh_flags; /* Section flags. */
+	Elf64_Addr sh_addr; /* Address in memory image. */
+	Elf64_Off sh_offset; /* Offset in file. */
+	Elf64_Xword sh_size; /* Size in bytes. */
+	Elf64_Word sh_link; /* Index of a related section. */
+	Elf64_Word sh_info; /* Depends on section type. */
+	Elf64_Xword sh_addralign; /* Alignment in bytes. */
+	Elf64_Xword sh_entsize; /* Size of each entry in section. */
+} Elf64_Shdr;
+
+/*
+ * Program header.
+ * Note: unlike ELF32, p_flags immediately follows p_type here.
+ */
+
+typedef struct {
+	Elf64_Word p_type; /* Entry type. */
+	Elf64_Word p_flags; /* Access permission flags. */
+	Elf64_Off p_offset; /* File offset of contents. */
+	Elf64_Addr p_vaddr; /* Virtual address in memory image. */
+	Elf64_Addr p_paddr; /* Physical address (not used). */
+	Elf64_Xword p_filesz; /* Size of contents in file. */
+	Elf64_Xword p_memsz; /* Size of contents in memory. */
+	Elf64_Xword p_align; /* Alignment in memory and file. */
+} Elf64_Phdr;
+
+/*
+ * Dynamic structure. The ".dynamic" section contains an array of them.
+ */
+
+typedef struct {
+	Elf64_Sxword d_tag; /* Entry type. */
+	union {
+		Elf64_Xword d_val; /* Integer value. */
+		Elf64_Addr d_ptr; /* Address value. */
+	} d_un;
+} Elf64_Dyn;
+
+/*
+ * Relocation entries.
+ */
+
+/* Relocations that don't need an addend field. */
+typedef struct {
+	Elf64_Addr r_offset; /* Location to be relocated. */
+	Elf64_Xword r_info; /* Relocation type and symbol index. */
+} Elf64_Rel;
+
+/* Relocations that need an addend field. */
+typedef struct {
+	Elf64_Addr r_offset; /* Location to be relocated. */
+	Elf64_Xword r_info; /* Relocation type and symbol index. */
+	Elf64_Sxword r_addend; /* Addend. */
+} Elf64_Rela;
+
+/* Macros for accessing the fields of r_info. */
+#define ELF64_R_SYM(info) ((info) >> 32)
+#define ELF64_R_TYPE(info) ((info) & 0xffffffffL)
+
+/* Macro for constructing r_info from field values. */
+#define ELF64_R_INFO(sym, type) (((sym) << 32) + ((type) & 0xffffffffL))
+
+#define ELF64_R_TYPE_DATA(info) (((Elf64_Xword)(info)<<32)>>40)
+#define ELF64_R_TYPE_ID(info) (((Elf64_Xword)(info)<<56)>>56)
+#define ELF64_R_TYPE_INFO(data, type) \
+	(((Elf64_Xword)(data)<<8)+(Elf64_Xword)(type))
+
+/*
+ * Note entry header (layout shared with ELF32; see elf_common.h).
+ */
+typedef Elf_Note Elf64_Nhdr;
+
+/*
+ * Move entry
+ */
+typedef struct {
+	Elf64_Lword m_value; /* symbol value */
+	Elf64_Xword m_info; /* size + index */
+	Elf64_Xword m_poffset; /* symbol offset */
+	Elf64_Half m_repeat; /* repeat count */
+	Elf64_Half m_stride; /* stride info */
+} Elf64_Move;
+
+/* Compose/decompose the size and index packed in m_info. */
+#define ELF64_M_SYM(info) ((info)>>8)
+#define ELF64_M_SIZE(info) ((unsigned char)(info))
+#define ELF64_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
+
+/*
+ * Hardware/Software capabilities entry
+ */
+typedef struct {
+	Elf64_Xword c_tag; /* how to interpret value */
+	union {
+		Elf64_Xword c_val;
+		Elf64_Addr c_ptr;
+	} c_un;
+} Elf64_Cap;
+
+/*
+ * Symbol table entries.
+ */
+
+typedef struct {
+	Elf64_Word st_name; /* String table index of name. */
+	unsigned char st_info; /* Type and binding information. */
+	unsigned char st_other; /* Reserved (not used). */
+	Elf64_Half st_shndx; /* Section index of symbol. */
+	Elf64_Addr st_value; /* Symbol value. */
+	Elf64_Xword st_size; /* Size of associated object. */
+} Elf64_Sym;
+
+/* Macros for accessing the fields of st_info. */
+#define ELF64_ST_BIND(info) ((info) >> 4)
+#define ELF64_ST_TYPE(info) ((info) & 0xf)
+
+/* Macro for constructing st_info from field values. */
+#define ELF64_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
+
+/* Macro for accessing the fields of st_other. */
+#define ELF64_ST_VISIBILITY(oth) ((oth) & 0x3)
+
+/* Structures used by Sun & GNU-style symbol versioning. */
+typedef struct {
+	Elf64_Half vd_version;
+	Elf64_Half vd_flags;
+	Elf64_Half vd_ndx;
+	Elf64_Half vd_cnt;
+	Elf64_Word vd_hash;
+	Elf64_Word vd_aux;
+	Elf64_Word vd_next;
+} Elf64_Verdef;
+
+typedef struct {
+	Elf64_Word vda_name;
+	Elf64_Word vda_next;
+} Elf64_Verdaux;
+
+typedef struct {
+	Elf64_Half vn_version;
+	Elf64_Half vn_cnt;
+	Elf64_Word vn_file;
+	Elf64_Word vn_aux;
+	Elf64_Word vn_next;
+} Elf64_Verneed;
+
+typedef struct {
+	Elf64_Word vna_hash;
+	Elf64_Half vna_flags;
+	Elf64_Half vna_other;
+	Elf64_Word vna_name;
+	Elf64_Word vna_next;
+} Elf64_Vernaux;
+
+/* Version symbol index, one per dynamic symbol table entry. */
+typedef Elf64_Half Elf64_Versym;
+
+typedef struct {
+	Elf64_Half si_boundto; /* direct bindings - symbol bound to */
+	Elf64_Half si_flags; /* per symbol flags */
+} Elf64_Syminfo;
+
+#endif /* !_SYS_ELF64_H_ */
diff --git a/core/arch/arm/kernel/elf_common.h b/core/arch/arm/kernel/elf_common.h
new file mode 100644
index 0000000..dd8cd50
--- /dev/null
+++ b/core/arch/arm/kernel/elf_common.h
@@ -0,0 +1,1006 @@
+/*-
+ * Copyright (c) 2000, 2001, 2008, 2011, David E. O'Brien
+ * Copyright (c) 1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ELF_COMMON_H_
+#define _SYS_ELF_COMMON_H_ 1
+
+/*
+ * ELF definitions that are independent of architecture or word size.
+ */
+
+/*
+ * Note header. The ".note" section contains an array of notes. Each
+ * begins with this header, aligned to a word boundary. Immediately
+ * following the note header is n_namesz bytes of name, padded to the
+ * next word boundary. Then comes n_descsz bytes of descriptor, again
+ * padded to a word boundary. The values of n_namesz and n_descsz do
+ * not include the padding.
+ */
+
+typedef struct {
+ uint32_t n_namesz; /* Length of name. */
+ uint32_t n_descsz; /* Length of descriptor. */
+ uint32_t n_type; /* Type of this note. */
+} Elf_Note;
+
+/*
+ * The header for GNU-style hash sections.
+ */
+
+typedef struct {
+ uint32_t gh_nbuckets; /* Number of hash buckets. */
+ uint32_t gh_symndx; /* First visible symbol in .dynsym. */
+ uint32_t gh_maskwords; /* #maskwords used in bloom filter. */
+ uint32_t gh_shift2; /* Bloom filter shift count. */
+} Elf_GNU_Hash_Header;
+
+/* Indexes into the e_ident array. Keep synced with
+ http://www.sco.com/developers/gabi/latest/ch4.eheader.html */
+#define EI_MAG0 0 /* Magic number, byte 0. */
+#define EI_MAG1 1 /* Magic number, byte 1. */
+#define EI_MAG2 2 /* Magic number, byte 2. */
+#define EI_MAG3 3 /* Magic number, byte 3. */
+#define EI_CLASS 4 /* Class of machine. */
+#define EI_DATA 5 /* Data format. */
+#define EI_VERSION 6 /* ELF format version. */
+#define EI_OSABI 7 /* Operating system / ABI identification */
+#define EI_ABIVERSION 8 /* ABI version */
+#define OLD_EI_BRAND 8 /* Start of architecture identification. */
+#define EI_PAD 9 /* Start of padding (per SVR4 ABI). */
+#define EI_NIDENT 16 /* Size of e_ident array. */
+
+/* Values for the magic number bytes. */
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF" /* magic string */
+#define SELFMAG 4 /* magic string size */
+
+/* Values for e_ident[EI_VERSION] and e_version. */
+#define EV_NONE 0
+#define EV_CURRENT 1
+
+/* Values for e_ident[EI_CLASS]. */
+#define ELFCLASSNONE 0 /* Unknown class. */
+#define ELFCLASS32 1 /* 32-bit architecture. */
+#define ELFCLASS64 2 /* 64-bit architecture. */
+
+/* Values for e_ident[EI_DATA]. */
+#define ELFDATANONE 0 /* Unknown data format. */
+#define ELFDATA2LSB 1 /* 2's complement little-endian. */
+#define ELFDATA2MSB 2 /* 2's complement big-endian. */
+
+/* Values for e_ident[EI_OSABI]. */
+#define ELFOSABI_NONE 0 /* UNIX System V ABI */
+#define ELFOSABI_HPUX 1 /* HP-UX operating system */
+#define ELFOSABI_NETBSD 2 /* NetBSD */
+#define ELFOSABI_LINUX 3 /* GNU/Linux */
+#define ELFOSABI_HURD 4 /* GNU/Hurd */
+#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
+#define ELFOSABI_SOLARIS 6 /* Solaris */
+#define ELFOSABI_AIX 7 /* AIX */
+#define ELFOSABI_IRIX 8 /* IRIX */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD */
+#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD */
+#define ELFOSABI_OPENVMS 13 /* Open VMS */
+#define ELFOSABI_NSK 14 /* HP Non-Stop Kernel */
+#define ELFOSABI_AROS 15 /* Amiga Research OS */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+#define ELFOSABI_SYSV ELFOSABI_NONE /* symbol used in old spec */
+#define ELFOSABI_MONTEREY ELFOSABI_AIX /* Monterey */
+
+/* e_ident */
+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
+ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
+ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
+ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
+
+/* Values for e_type. */
+#define ET_NONE 0 /* Unknown type. */
+#define ET_REL 1 /* Relocatable. */
+#define ET_EXEC 2 /* Executable. */
+#define ET_DYN 3 /* Shared object. */
+#define ET_CORE 4 /* Core file. */
+#define ET_LOOS 0xfe00 /* First operating system specific. */
+#define ET_HIOS 0xfeff /* Last operating system-specific. */
+#define ET_LOPROC 0xff00 /* First processor-specific. */
+#define ET_HIPROC 0xffff /* Last processor-specific. */
+
+/* Values for e_machine. */
+#define EM_NONE 0 /* Unknown machine. */
+#define EM_M32 1 /* AT&T WE32100. */
+#define EM_SPARC 2 /* Sun SPARC. */
+#define EM_386 3 /* Intel i386. */
+#define EM_68K 4 /* Motorola 68000. */
+#define EM_88K 5 /* Motorola 88000. */
+#define EM_860 7 /* Intel i860. */
+#define EM_MIPS 8 /* MIPS R3000 Big-Endian only. */
+#define EM_S370 9 /* IBM System/370. */
+#define EM_MIPS_RS3_LE 10 /* MIPS R3000 Little-Endian. */
+#define EM_PARISC 15 /* HP PA-RISC. */
+#define EM_VPP500 17 /* Fujitsu VPP500. */
+#define EM_SPARC32PLUS 18 /* SPARC v8plus. */
+#define EM_960 19 /* Intel 80960. */
+#define EM_PPC 20 /* PowerPC 32-bit. */
+#define EM_PPC64 21 /* PowerPC 64-bit. */
+#define EM_S390 22 /* IBM System/390. */
+#define EM_V800 36 /* NEC V800. */
+#define EM_FR20 37 /* Fujitsu FR20. */
+#define EM_RH32 38 /* TRW RH-32. */
+#define EM_RCE 39 /* Motorola RCE. */
+#define EM_ARM 40 /* ARM. */
+#define EM_SH 42 /* Hitachi SH. */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit. */
+#define EM_TRICORE 44 /* Siemens TriCore embedded processor. */
+#define EM_ARC 45 /* Argonaut RISC Core. */
+#define EM_H8_300 46 /* Hitachi H8/300. */
+#define EM_H8_300H 47 /* Hitachi H8/300H. */
+#define EM_H8S 48 /* Hitachi H8S. */
+#define EM_H8_500 49 /* Hitachi H8/500. */
+#define EM_IA_64 50 /* Intel IA-64 Processor. */
+#define EM_MIPS_X 51 /* Stanford MIPS-X. */
+#define EM_COLDFIRE 52 /* Motorola ColdFire. */
+#define EM_68HC12 53 /* Motorola M68HC12. */
+#define EM_MMA 54 /* Fujitsu MMA. */
+#define EM_PCP 55 /* Siemens PCP. */
+#define EM_NCPU 56 /* Sony nCPU. */
+#define EM_NDR1 57 /* Denso NDR1 microprocessor. */
+#define EM_STARCORE 58 /* Motorola Star*Core processor. */
+#define EM_ME16 59 /* Toyota ME16 processor. */
+#define EM_ST100 60 /* STMicroelectronics ST100 processor. */
+#define EM_TINYJ 61 /* Advanced Logic Corp. TinyJ processor. */
+#define EM_X86_64 62 /* Advanced Micro Devices x86-64 */
+#define EM_AMD64 EM_X86_64 /* Advanced Micro Devices x86-64 (compat) */
+#define EM_PDSP 63 /* Sony DSP Processor. */
+#define EM_FX66 66 /* Siemens FX66 microcontroller. */
+#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16
+ microcontroller. */
+#define EM_ST7 68 /* STmicroelectronics ST7 8-bit
+ microcontroller. */
+#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller. */
+#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller. */
+#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller. */
+#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller. */
+#define EM_SVX 73 /* Silicon Graphics SVx. */
+#define EM_ST19 74 /* STMicroelectronics ST19 8-bit mc. */
+#define EM_VAX 75 /* Digital VAX. */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded
+ processor. */
+#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded
+ processor. */
+#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor. */
+#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor. */
+#define EM_MMIX 80 /* Donald Knuth's educational 64-bit proc. */
+#define EM_HUANY 81 /* Harvard University machine-independent
+ object files. */
+#define EM_PRISM 82 /* SiTera Prism. */
+#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller. */
+#define EM_FR30 84 /* Fujitsu FR30. */
+#define EM_D10V 85 /* Mitsubishi D10V. */
+#define EM_D30V 86 /* Mitsubishi D30V. */
+#define EM_V850 87 /* NEC v850. */
+#define EM_M32R 88 /* Mitsubishi M32R. */
+#define EM_MN10300 89 /* Matsushita MN10300. */
+#define EM_MN10200 90 /* Matsushita MN10200. */
+#define EM_PJ 91 /* picoJava. */
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor. */
+#define EM_ARC_A5 93 /* ARC Cores Tangent-A5. */
+#define EM_XTENSA 94 /* Tensilica Xtensa Architecture. */
+#define EM_VIDEOCORE 95 /* Alphamosaic VideoCore processor. */
+#define EM_TMM_GPP 96 /* Thompson Multimedia General Purpose
+ Processor. */
+#define EM_NS32K 97 /* National Semiconductor 32000 series. */
+#define EM_TPC 98 /* Tenor Network TPC processor. */
+#define EM_SNP1K 99 /* Trebia SNP 1000 processor. */
+#define EM_ST200 100 /* STMicroelectronics ST200 microcontroller. */
+#define EM_IP2K 101 /* Ubicom IP2xxx microcontroller family. */
+#define EM_MAX 102 /* MAX Processor. */
+#define EM_CR 103 /* National Semiconductor CompactRISC
+ microprocessor. */
+#define EM_F2MC16 104 /* Fujitsu F2MC16. */
+#define EM_MSP430 105 /* Texas Instruments embedded microcontroller
+ msp430. */
+#define EM_BLACKFIN 106 /* Analog Devices Blackfin (DSP) processor. */
+#define EM_SE_C33 107 /* S1C33 Family of Seiko Epson processors. */
+#define EM_SEP 108 /* Sharp embedded microprocessor. */
+#define EM_ARCA 109 /* Arca RISC Microprocessor. */
+#define EM_UNICORE 110 /* Microprocessor series from PKU-Unity Ltd.
+ and MPRC of Peking University */
+#define EM_AARCH64 183 /* AArch64 (64-bit ARM) */
+
+/* Non-standard or deprecated. */
+#define EM_486 6 /* Intel i486. */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
+#define EM_ALPHA_STD 41 /* Digital Alpha (standard value). */
+#define EM_ALPHA 0x9026 /* Alpha (written in the absence of an ABI) */
+
+/* e_flags for EM_ARM */
+#define EF_ARM_ABI_VERSION 0x05000000 /* ABI version 5 */
+#define EF_ARM_ABIMASK 0xFF000000
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_ABI_FLOAT_HARD 0x00000400 /* ABI version 5 and later */
+#define EF_ARM_ABI_FLOAT_SOFT 0x00000200 /* ABI version 5 and later */
+
+/* Special section indexes. */
+#define SHN_UNDEF 0 /* Undefined, missing, irrelevant. */
+#define SHN_LORESERVE 0xff00 /* First of reserved range. */
+#define SHN_LOPROC 0xff00 /* First processor-specific. */
+#define SHN_HIPROC 0xff1f /* Last processor-specific. */
+#define SHN_LOOS 0xff20 /* First operating system-specific. */
+#define SHN_HIOS 0xff3f /* Last operating system-specific. */
+#define SHN_ABS 0xfff1 /* Absolute values. */
+#define SHN_COMMON 0xfff2 /* Common data. */
+#define SHN_XINDEX 0xffff /* Escape -- index stored elsewhere. */
+#define SHN_HIRESERVE 0xffff /* Last of reserved range. */
+
+/* sh_type */
+#define SHT_NULL 0 /* inactive */
+#define SHT_PROGBITS 1 /* program defined information */
+#define SHT_SYMTAB 2 /* symbol table section */
+#define SHT_STRTAB 3 /* string table section */
+#define SHT_RELA 4 /* relocation section with addends */
+#define SHT_HASH 5 /* symbol hash table section */
+#define SHT_DYNAMIC 6 /* dynamic section */
+#define SHT_NOTE 7 /* note section */
+#define SHT_NOBITS 8 /* no space section */
+#define SHT_REL 9 /* relocation section - no addends */
+#define SHT_SHLIB 10 /* reserved - purpose unknown */
+#define SHT_DYNSYM 11 /* dynamic symbol table section */
+#define SHT_INIT_ARRAY 14 /* Initialization function pointers. */
+#define SHT_FINI_ARRAY 15 /* Termination function pointers. */
+#define SHT_PREINIT_ARRAY 16 /* Pre-initialization function ptrs. */
+#define SHT_GROUP 17 /* Section group. */
+#define SHT_SYMTAB_SHNDX 18 /* Section indexes (see SHN_XINDEX). */
+#define SHT_LOOS 0x60000000 /* First of OS specific semantics */
+#define SHT_LOSUNW 0x6ffffff4
+#define SHT_SUNW_dof 0x6ffffff4
+#define SHT_SUNW_cap 0x6ffffff5
+#define SHT_SUNW_SIGNATURE 0x6ffffff6
+#define SHT_GNU_HASH 0x6ffffff6
+#define SHT_GNU_LIBLIST 0x6ffffff7
+#define SHT_SUNW_ANNOTATE 0x6ffffff7
+#define SHT_SUNW_DEBUGSTR 0x6ffffff8
+#define SHT_SUNW_DEBUG 0x6ffffff9
+#define SHT_SUNW_move 0x6ffffffa
+#define SHT_SUNW_COMDAT 0x6ffffffb
+#define SHT_SUNW_syminfo 0x6ffffffc
+#define SHT_SUNW_verdef 0x6ffffffd
+#define SHT_GNU_verdef 0x6ffffffd /* Symbol versions provided */
+#define SHT_SUNW_verneed 0x6ffffffe
+#define SHT_GNU_verneed 0x6ffffffe /* Symbol versions required */
+#define SHT_SUNW_versym 0x6fffffff
+#define SHT_GNU_versym 0x6fffffff /* Symbol version table */
+#define SHT_HISUNW 0x6fffffff
+#define SHT_HIOS 0x6fffffff /* Last of OS specific semantics */
+#define SHT_LOPROC 0x70000000 /* reserved range for processor */
+#define SHT_AMD64_UNWIND 0x70000001 /* unwind information */
+#define SHT_ARM_EXIDX 0x70000001 /* Exception index table. */
+#define SHT_ARM_PREEMPTMAP 0x70000002 /* BPABI DLL dynamic linking
+ pre-emption map. */
+#define SHT_ARM_ATTRIBUTES 0x70000003 /* Object file compatibility
+ attributes. */
+#define SHT_ARM_DEBUGOVERLAY 0x70000004 /* See DBGOVL for details. */
+#define SHT_ARM_OVERLAYSECTION 0x70000005 /* See DBGOVL for details. */
+#define SHT_MIPS_REGINFO 0x70000006
+#define SHT_MIPS_OPTIONS 0x7000000d
+#define SHT_MIPS_DWARF 0x7000001e /* MIPS gcc uses MIPS_DWARF */
+#define SHT_HIPROC 0x7fffffff /* specific section header types */
+#define SHT_LOUSER 0x80000000 /* reserved range for application */
+#define SHT_HIUSER 0xffffffff /* specific indexes */
+
+/* Flags for sh_flags. */
+#define SHF_WRITE 0x1 /* Section contains writable data. */
+#define SHF_ALLOC 0x2 /* Section occupies memory. */
+#define SHF_EXECINSTR 0x4 /* Section contains instructions. */
+#define SHF_MERGE 0x10 /* Section may be merged. */
+#define SHF_STRINGS 0x20 /* Section contains strings. */
+#define SHF_INFO_LINK 0x40 /* sh_info holds section index. */
+#define SHF_LINK_ORDER 0x80 /* Special ordering requirements. */
+#define SHF_OS_NONCONFORMING 0x100 /* OS-specific processing required. */
+#define SHF_GROUP 0x200 /* Member of section group. */
+#define SHF_TLS 0x400 /* Section contains TLS data. */
+#define SHF_MASKOS 0x0ff00000 /* OS-specific semantics. */
+#define SHF_MASKPROC 0xf0000000 /* Processor-specific semantics. */
+
+/* Values for p_type. */
+#define PT_NULL 0 /* Unused entry. */
+#define PT_LOAD 1 /* Loadable segment. */
+#define PT_DYNAMIC 2 /* Dynamic linking information segment. */
+#define PT_INTERP 3 /* Pathname of interpreter. */
+#define PT_NOTE 4 /* Auxiliary information. */
+#define PT_SHLIB 5 /* Reserved (not used). */
+#define PT_PHDR 6 /* Location of program header itself. */
+#define PT_TLS 7 /* Thread local storage segment */
+#define PT_LOOS 0x60000000 /* First OS-specific. */
+#define PT_SUNW_UNWIND 0x6464e550 /* amd64 UNWIND program header */
+#define PT_GNU_EH_FRAME 0x6474e550
+#define PT_GNU_STACK 0x6474e551
+#define PT_GNU_RELRO 0x6474e552
+#define PT_LOSUNW 0x6ffffffa
+#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
+#define PT_SUNWSTACK 0x6ffffffb /* describes the stack segment */
+#define PT_SUNWDTRACE 0x6ffffffc /* private */
+#define PT_SUNWCAP 0x6ffffffd /* hard/soft capabilities segment */
+#define PT_HISUNW 0x6fffffff
+#define PT_HIOS 0x6fffffff /* Last OS-specific. */
+#define PT_LOPROC 0x70000000 /* First processor-specific type. */
+#define PT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for p_flags. */
+#define PF_X 0x1 /* Executable. */
+#define PF_W 0x2 /* Writable. */
+#define PF_R 0x4 /* Readable. */
+#define PF_MASKOS 0x0ff00000 /* Operating system-specific. */
+#define PF_MASKPROC 0xf0000000 /* Processor-specific. */
+
+/* Extended program header index. */
+#define PN_XNUM 0xffff
+
+/* Values for d_tag. */
+#define DT_NULL 0 /* Terminating entry. */
+#define DT_NEEDED 1 /* String table offset of a needed shared
+ library. */
+#define DT_PLTRELSZ 2 /* Total size in bytes of PLT relocations. */
+#define DT_PLTGOT 3 /* Processor-dependent address. */
+#define DT_HASH 4 /* Address of symbol hash table. */
+#define DT_STRTAB 5 /* Address of string table. */
+#define DT_SYMTAB 6 /* Address of symbol table. */
+#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */
+#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */
+#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */
+#define DT_STRSZ 10 /* Size of string table. */
+#define DT_SYMENT 11 /* Size of each symbol table entry. */
+#define DT_INIT 12 /* Address of initialization function. */
+#define DT_FINI 13 /* Address of finalization function. */
+#define DT_SONAME 14 /* String table offset of shared object
+ name. */
+#define DT_RPATH 15 /* String table offset of library path. [sup] */
+#define DT_SYMBOLIC 16 /* Indicates "symbolic" linking. [sup] */
+#define DT_REL 17 /* Address of ElfNN_Rel relocations. */
+#define DT_RELSZ 18 /* Total size of ElfNN_Rel relocations. */
+#define DT_RELENT 19 /* Size of each ElfNN_Rel relocation. */
+#define DT_PLTREL 20 /* Type of relocation used for PLT. */
+#define DT_DEBUG 21 /* Reserved (not used). */
+#define DT_TEXTREL 22 /* Indicates there may be relocations in
+ non-writable segments. [sup] */
+#define DT_JMPREL 23 /* Address of PLT relocations. */
+#define DT_BIND_NOW 24 /* [sup] */
+#define DT_INIT_ARRAY 25 /* Address of the array of pointers to
+ initialization functions */
+#define DT_FINI_ARRAY 26 /* Address of the array of pointers to
+ termination functions */
+#define DT_INIT_ARRAYSZ 27 /* Size in bytes of the array of
+ initialization functions. */
+#define DT_FINI_ARRAYSZ 28 /* Size in bytes of the array of
+ termination functions. */
+#define DT_RUNPATH 29 /* String table offset of a null-terminated
+ library search path string. */
+#define DT_FLAGS 30 /* Object specific flag values. */
+#define DT_ENCODING 32 /* Values greater than or equal to DT_ENCODING
+ and less than DT_LOOS follow the rules for
+ the interpretation of the d_un union
+ as follows: even == 'd_ptr', odd == 'd_val'
+ or none */
+#define DT_PREINIT_ARRAY 32 /* Address of the array of pointers to
+ pre-initialization functions. */
+#define DT_PREINIT_ARRAYSZ 33 /* Size in bytes of the array of
+ pre-initialization functions. */
+#define DT_MAXPOSTAGS 34 /* number of positive tags */
+#define DT_LOOS 0x6000000d /* First OS-specific */
+#define DT_SUNW_AUXILIARY 0x6000000d /* symbol auxiliary name */
+#define DT_SUNW_RTLDINF 0x6000000e /* ld.so.1 info (private) */
+#define DT_SUNW_FILTER 0x6000000f /* symbol filter name */
+#define DT_SUNW_CAP 0x60000010 /* hardware/software */
+#define DT_HIOS 0x6ffff000 /* Last OS-specific */
+
+/*
+ * DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
+ * Dyn.d_un.d_val field of the Elf*_Dyn structure.
+ */
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_CHECKSUM 0x6ffffdf8 /* elf checksum */
+#define DT_PLTPADSZ 0x6ffffdf9 /* pltpadding size */
+#define DT_MOVEENT 0x6ffffdfa /* move table entry size */
+#define DT_MOVESZ 0x6ffffdfb /* move table size */
+#define DT_FEATURE_1 0x6ffffdfc /* feature holder */
+#define DT_POSFLAG_1 0x6ffffdfd /* flags for DT_* entries, affecting */
+ /* the following DT_* entry. */
+ /* See DF_P1_* definitions */
+#define DT_SYMINSZ 0x6ffffdfe /* syminfo table size (in bytes) */
+#define DT_SYMINENT 0x6ffffdff /* syminfo entry size (in bytes) */
+#define DT_VALRNGHI 0x6ffffdff
+
+/*
+ * DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
+ * Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
+ *
+ * If any adjustment is made to the ELF object after it has been
+ * built, these entries will need to be adjusted.
+ */
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table */
+#define DT_CONFIG 0x6ffffefa /* configuration information */
+#define DT_DEPAUDIT 0x6ffffefb /* dependency auditing */
+#define DT_AUDIT 0x6ffffefc /* object auditing */
+#define DT_PLTPAD 0x6ffffefd /* pltpadding (sparcv9) */
+#define DT_MOVETAB 0x6ffffefe /* move table */
+#define DT_SYMINFO 0x6ffffeff /* syminfo table */
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0 /* Address of versym section. */
+#define DT_RELACOUNT 0x6ffffff9 /* number of RELATIVE relocations */
+#define DT_RELCOUNT 0x6ffffffa /* number of RELATIVE relocations */
+#define DT_FLAGS_1 0x6ffffffb /* state flags - see DF_1_* defs */
+#define DT_VERDEF 0x6ffffffc /* Address of verdef section. */
+#define DT_VERDEFNUM 0x6ffffffd /* Number of elems in verdef section */
+#define DT_VERNEED 0x6ffffffe /* Address of verneed section. */
+#define DT_VERNEEDNUM 0x6fffffff /* Number of elems in verneed section */
+
+#define DT_LOPROC 0x70000000 /* First processor-specific type. */
+#define DT_DEPRECATED_SPARC_REGISTER 0x7000001
+#define DT_AUXILIARY 0x7ffffffd /* shared library auxiliary name */
+#define DT_USED 0x7ffffffe /* ignored - same as needed */
+#define DT_FILTER 0x7fffffff /* shared library filter name */
+#define DT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
+/* Values for DT_FLAGS */
+#define DF_ORIGIN 0x0001 /* Indicates that the object being loaded may
+ make reference to the $ORIGIN substitution
+ string */
+#define DF_SYMBOLIC 0x0002 /* Indicates "symbolic" linking. */
+#define DF_TEXTREL 0x0004 /* Indicates there may be relocations in
+ non-writable segments. */
+#define DF_BIND_NOW 0x0008 /* Indicates that the dynamic linker should
+ process all relocations for the object
+ containing this entry before transferring
+ control to the program. */
+#define DF_STATIC_TLS 0x0010 /* Indicates that the shared object or
+ executable contains code using a static
+ thread-local storage scheme. */
+
+/* Values for DT_FLAGS_1 */
+#define DF_1_BIND_NOW 0x00000001 /* Same as DF_BIND_NOW */
+#define DF_1_GLOBAL 0x00000002 /* Set the RTLD_GLOBAL for object */
+#define DF_1_NODELETE 0x00000008 /* Set the RTLD_NODELETE for object */
+#define DF_1_LOADFLTR 0x00000010 /* Immediate loading of filtees */
+#define DF_1_NOOPEN 0x00000040 /* Do not allow loading on dlopen() */
+#define DF_1_ORIGIN 0x00000080 /* Process $ORIGIN */
+#define DF_1_INTERPOSE 0x00000400 /* Interpose all objects but main */
+#define DF_1_NODEFLIB 0x00000800 /* Do not search default paths */
+
+/* Values for n_type. Used in core files. */
+#define NT_PRSTATUS 1 /* Process status. */
+#define NT_FPREGSET 2 /* Floating point registers. */
+#define NT_PRPSINFO 3 /* Process state info. */
+#define NT_THRMISC 7 /* Thread miscellaneous info. */
+#define NT_PROCSTAT_PROC 8 /* Procstat proc data. */
+#define NT_PROCSTAT_FILES 9 /* Procstat files data. */
+#define NT_PROCSTAT_VMMAP 10 /* Procstat vmmap data. */
+#define NT_PROCSTAT_GROUPS 11 /* Procstat groups data. */
+#define NT_PROCSTAT_UMASK 12 /* Procstat umask data. */
+#define NT_PROCSTAT_RLIMIT 13 /* Procstat rlimit data. */
+#define NT_PROCSTAT_OSREL 14 /* Procstat osreldate data. */
+#define NT_PROCSTAT_PSSTRINGS 15 /* Procstat ps_strings data. */
+#define NT_PROCSTAT_AUXV 16 /* Procstat auxv data. */
+
+/* Symbol Binding - ELFNN_ST_BIND - st_info */
+#define STB_LOCAL 0 /* Local symbol */
+#define STB_GLOBAL 1 /* Global symbol */
+#define STB_WEAK 2 /* like global - lower precedence */
+#define STB_LOOS 10 /* Reserved range for operating system */
+#define STB_HIOS 12 /* specific semantics. */
+#define STB_LOPROC 13 /* reserved range for processor */
+#define STB_HIPROC 15 /* specific semantics. */
+
+/* Symbol type - ELFNN_ST_TYPE - st_info */
+#define STT_NOTYPE 0 /* Unspecified type. */
+#define STT_OBJECT 1 /* Data object. */
+#define STT_FUNC 2 /* Function. */
+#define STT_SECTION 3 /* Section. */
+#define STT_FILE 4 /* Source file. */
+#define STT_COMMON 5 /* Uninitialized common block. */
+#define STT_TLS 6 /* TLS object. */
+#define STT_NUM 7
+#define STT_LOOS 10 /* Reserved range for operating system */
+#define STT_GNU_IFUNC 10
+#define STT_HIOS 12 /* specific semantics. */
+#define STT_LOPROC 13 /* reserved range for processor */
+#define STT_HIPROC 15 /* specific semantics. */
+
+/* Symbol visibility - ELFNN_ST_VISIBILITY - st_other */
+#define STV_DEFAULT 0x0 /* Default visibility (see binding). */
+#define STV_INTERNAL 0x1 /* Special meaning in relocatable objects. */
+#define STV_HIDDEN 0x2 /* Not visible. */
+#define STV_PROTECTED 0x3 /* Visible but not preemptible. */
+#define STV_EXPORTED 0x4
+#define STV_SINGLETON 0x5
+#define STV_ELIMINATE 0x6
+
+/* Special symbol table indexes. */
+#define STN_UNDEF 0 /* Undefined symbol index. */
+
+/* Symbol versioning flags. */
+#define VER_DEF_CURRENT 1
+#define VER_DEF_IDX(x) VER_NDX(x)
+
+#define VER_FLG_BASE 0x01
+#define VER_FLG_WEAK 0x02
+
+#define VER_NEED_CURRENT 1
+#define VER_NEED_WEAK (1u << 15)
+#define VER_NEED_HIDDEN VER_NDX_HIDDEN
+#define VER_NEED_IDX(x) VER_NDX(x)
+
+#define VER_NDX_LOCAL 0
+#define VER_NDX_GLOBAL 1
+#define VER_NDX_GIVEN 2
+
+#define VER_NDX_HIDDEN (1u << 15)
+#define VER_NDX(x) ((x) & ~(1u << 15))
+
+#define CA_SUNW_NULL 0
+#define CA_SUNW_HW_1 1 /* first hardware capabilities entry */
+#define CA_SUNW_SF_1 2 /* first software capabilities entry */
+
+/*
+ * Syminfo flag values
+ */
+#define SYMINFO_FLG_DIRECT 0x0001 /* symbol ref has direct association */
+ /* to object containing defn. */
+#define SYMINFO_FLG_PASSTHRU 0x0002 /* ignored - see SYMINFO_FLG_FILTER */
+#define SYMINFO_FLG_COPY 0x0004 /* symbol is a copy-reloc */
+#define SYMINFO_FLG_LAZYLOAD 0x0008 /* object containing defn should be */
+ /* lazily-loaded */
+#define SYMINFO_FLG_DIRECTBIND 0x0010 /* ref should be bound directly to */
+ /* object containing defn. */
+#define SYMINFO_FLG_NOEXTDIRECT 0x0020 /* don't let an external reference */
+ /* directly bind to this symbol */
+#define SYMINFO_FLG_FILTER 0x0002 /* symbol ref is associated to a */
+#define SYMINFO_FLG_AUXILIARY 0x0040 /* standard or auxiliary filter */
+
+/*
+ * Syminfo.si_boundto values.
+ */
+#define SYMINFO_BT_SELF 0xffff /* symbol bound to self */
+#define SYMINFO_BT_PARENT 0xfffe /* symbol bound to parent */
+#define SYMINFO_BT_NONE 0xfffd /* no special symbol binding */
+#define SYMINFO_BT_EXTERN 0xfffc /* symbol defined as external */
+#define SYMINFO_BT_LOWRESERVE 0xff00 /* beginning of reserved entries */
+
+/*
+ * Syminfo version values.
+ */
+#define SYMINFO_NONE 0 /* Syminfo version */
+#define SYMINFO_CURRENT 1
+#define SYMINFO_NUM 2
+
+/*
+ * Relocation types.
+ *
+ * All machine architectures are defined here to allow tools on one to
+ * handle others.
+ */
+
+#define R_386_NONE 0 /* No relocation. */
+#define R_386_32 1 /* Add symbol value. */
+#define R_386_PC32 2 /* Add PC-relative symbol value. */
+#define R_386_GOT32 3 /* Add PC-relative GOT offset. */
+#define R_386_PLT32 4 /* Add PC-relative PLT offset. */
+#define R_386_COPY 5 /* Copy data from shared object. */
+#define R_386_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_386_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_386_RELATIVE 8 /* Add load address of shared object. */
+#define R_386_GOTOFF 9 /* Add GOT-relative symbol address. */
+#define R_386_GOTPC 10 /* Add PC-relative GOT table address. */
+#define R_386_TLS_TPOFF 14 /* Negative offset in static TLS block */
+#define R_386_TLS_IE 15 /* Absolute address of GOT for -ve static TLS */
+#define R_386_TLS_GOTIE 16 /* GOT entry for negative static TLS block */
+#define R_386_TLS_LE 17 /* Negative offset relative to static TLS */
+#define R_386_TLS_GD 18 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_LDM 19 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_GD_32 24 /* 32 bit offset to GOT (index,off) pair */
+#define R_386_TLS_GD_PUSH 25 /* pushl instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_CALL 26 /* call instruction for Sun ABI GD sequence */
+#define R_386_TLS_GD_POP 27 /* popl instruction for Sun ABI GD sequence */
+#define R_386_TLS_LDM_32 28 /* 32 bit offset to GOT (index,zero) pair */
+#define R_386_TLS_LDM_PUSH 29 /* pushl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_CALL 30 /* call instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDM_POP 31 /* popl instruction for Sun ABI LD sequence */
+#define R_386_TLS_LDO_32 32 /* 32 bit offset from start of TLS block */
+#define R_386_TLS_IE_32 33 /* 32 bit offset to GOT static TLS offset entry */
+#define R_386_TLS_LE_32 34 /* 32 bit offset within static TLS block */
+#define R_386_TLS_DTPMOD32 35 /* GOT entry containing TLS index */
+#define R_386_TLS_DTPOFF32 36 /* GOT entry containing TLS offset */
+#define R_386_TLS_TPOFF32 37 /* GOT entry of -ve static TLS offset */
+#define R_386_IRELATIVE 42 /* PLT entry resolved indirectly at runtime */
+
+#define R_AARCH64_RELATIVE 1027
+
+#define R_ARM_NONE 0 /* No relocation. */
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5
+#define R_ARM_ABS12 6
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+/* TLS relocations */
+#define R_ARM_TLS_DTPMOD32 17 /* ID of module containing symbol */
+#define R_ARM_TLS_DTPOFF32 18 /* Offset in TLS block */
+#define R_ARM_TLS_TPOFF32 19 /* Offset in static TLS block */
+#define R_ARM_COPY 20 /* Copy data from shared object. */
+#define R_ARM_GLOB_DAT 21 /* Set GOT entry to data address. */
+#define R_ARM_JUMP_SLOT 22 /* Set GOT entry to code address. */
+#define R_ARM_RELATIVE 23 /* Add load address of shared object. */
+#define R_ARM_GOTOFF 24 /* Add GOT-relative symbol address. */
+#define R_ARM_GOTPC 25 /* Add PC-relative GOT table address. */
+#define R_ARM_GOT32 26 /* Add PC-relative GOT offset. */
+#define R_ARM_PLT32 27 /* Add PC-relative PLT offset. */
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS32 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+
+/* Name Value Field Calculation */
+#define R_IA_64_NONE 0 /* None */
+#define R_IA_64_IMM14 0x21 /* immediate14 S + A */
+#define R_IA_64_IMM22 0x22 /* immediate22 S + A */
+#define R_IA_64_IMM64 0x23 /* immediate64 S + A */
+#define R_IA_64_DIR32MSB 0x24 /* word32 MSB S + A */
+#define R_IA_64_DIR32LSB 0x25 /* word32 LSB S + A */
+#define R_IA_64_DIR64MSB 0x26 /* word64 MSB S + A */
+#define R_IA_64_DIR64LSB 0x27 /* word64 LSB S + A */
+#define R_IA_64_GPREL22 0x2a /* immediate22 @gprel(S + A) */
+#define R_IA_64_GPREL64I 0x2b /* immediate64 @gprel(S + A) */
+#define R_IA_64_GPREL32MSB 0x2c /* word32 MSB @gprel(S + A) */
+#define R_IA_64_GPREL32LSB 0x2d /* word32 LSB @gprel(S + A) */
+#define R_IA_64_GPREL64MSB 0x2e /* word64 MSB @gprel(S + A) */
+#define R_IA_64_GPREL64LSB 0x2f /* word64 LSB @gprel(S + A) */
+#define R_IA_64_LTOFF22 0x32 /* immediate22 @ltoff(S + A) */
+#define R_IA_64_LTOFF64I 0x33 /* immediate64 @ltoff(S + A) */
+#define R_IA_64_PLTOFF22 0x3a /* immediate22 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64I 0x3b /* immediate64 @pltoff(S + A) */
+#define R_IA_64_PLTOFF64MSB 0x3e /* word64 MSB @pltoff(S + A) */
+#define R_IA_64_PLTOFF64LSB 0x3f /* word64 LSB @pltoff(S + A) */
+#define R_IA_64_FPTR64I 0x43 /* immediate64 @fptr(S + A) */
+#define R_IA_64_FPTR32MSB 0x44 /* word32 MSB @fptr(S + A) */
+#define R_IA_64_FPTR32LSB 0x45 /* word32 LSB @fptr(S + A) */
+#define R_IA_64_FPTR64MSB 0x46 /* word64 MSB @fptr(S + A) */
+#define R_IA_64_FPTR64LSB 0x47 /* word64 LSB @fptr(S + A) */
+#define R_IA_64_PCREL60B 0x48 /* immediate60 form1 S + A - P */
+#define R_IA_64_PCREL21B 0x49 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL21M 0x4a /* immediate21 form2 S + A - P */
+#define R_IA_64_PCREL21F 0x4b /* immediate21 form3 S + A - P */
+#define R_IA_64_PCREL32MSB 0x4c /* word32 MSB S + A - P */
+#define R_IA_64_PCREL32LSB 0x4d /* word32 LSB S + A - P */
+#define R_IA_64_PCREL64MSB 0x4e /* word64 MSB S + A - P */
+#define R_IA_64_PCREL64LSB 0x4f /* word64 LSB S + A - P */
+#define R_IA_64_LTOFF_FPTR22 0x52 /* immediate22 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64I 0x53 /* immediate64 @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32MSB 0x54 /* word32 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR32LSB 0x55 /* word32 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64MSB 0x56 /* word64 MSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_LTOFF_FPTR64LSB 0x57 /* word64 LSB @ltoff(@fptr(S + A)) */
+#define R_IA_64_SEGREL32MSB 0x5c /* word32 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL32LSB 0x5d /* word32 LSB @segrel(S + A) */
+#define R_IA_64_SEGREL64MSB 0x5e /* word64 MSB @segrel(S + A) */
+#define R_IA_64_SEGREL64LSB 0x5f /* word64 LSB @segrel(S + A) */
+#define R_IA_64_SECREL32MSB 0x64 /* word32 MSB @secrel(S + A) */
+#define R_IA_64_SECREL32LSB 0x65 /* word32 LSB @secrel(S + A) */
+#define R_IA_64_SECREL64MSB 0x66 /* word64 MSB @secrel(S + A) */
+#define R_IA_64_SECREL64LSB 0x67 /* word64 LSB @secrel(S + A) */
+#define R_IA_64_REL32MSB 0x6c /* word32 MSB BD + A */
+#define R_IA_64_REL32LSB 0x6d /* word32 LSB BD + A */
+#define R_IA_64_REL64MSB 0x6e /* word64 MSB BD + A */
+#define R_IA_64_REL64LSB 0x6f /* word64 LSB BD + A */
+#define R_IA_64_LTV32MSB 0x74 /* word32 MSB S + A */
+#define R_IA_64_LTV32LSB 0x75 /* word32 LSB S + A */
+#define R_IA_64_LTV64MSB 0x76 /* word64 MSB S + A */
+#define R_IA_64_LTV64LSB 0x77 /* word64 LSB S + A */
+#define R_IA_64_PCREL21BI 0x79 /* immediate21 form1 S + A - P */
+#define R_IA_64_PCREL22 0x7a /* immediate22 S + A - P */
+#define R_IA_64_PCREL64I 0x7b /* immediate64 S + A - P */
+#define R_IA_64_IPLTMSB 0x80 /* function descriptor MSB special */
+#define R_IA_64_IPLTLSB 0x81 /* function descriptor LSB special */
+#define R_IA_64_SUB 0x85 /* immediate64 A - S */
+#define R_IA_64_LTOFF22X 0x86 /* immediate22 special */
+#define R_IA_64_LDXMOV 0x87 /* immediate22 special */
+#define R_IA_64_TPREL14 0x91 /* imm14 @tprel(S + A) */
+#define R_IA_64_TPREL22 0x92 /* imm22 @tprel(S + A) */
+#define R_IA_64_TPREL64I 0x93 /* imm64 @tprel(S + A) */
+#define R_IA_64_TPREL64MSB 0x96 /* word64 MSB @tprel(S + A) */
+#define R_IA_64_TPREL64LSB 0x97 /* word64 LSB @tprel(S + A) */
+#define R_IA_64_LTOFF_TPREL22 0x9a /* imm22 @ltoff(@tprel(S+A)) */
+#define R_IA_64_DTPMOD64MSB 0xa6 /* word64 MSB @dtpmod(S + A) */
+#define R_IA_64_DTPMOD64LSB 0xa7 /* word64 LSB @dtpmod(S + A) */
+#define R_IA_64_LTOFF_DTPMOD22 0xaa /* imm22 @ltoff(@dtpmod(S+A)) */
+#define R_IA_64_DTPREL14 0xb1 /* imm14 @dtprel(S + A) */
+#define R_IA_64_DTPREL22 0xb2 /* imm22 @dtprel(S + A) */
+#define R_IA_64_DTPREL64I 0xb3 /* imm64 @dtprel(S + A) */
+#define R_IA_64_DTPREL32MSB 0xb4 /* word32 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL32LSB 0xb5 /* word32 LSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64MSB 0xb6 /* word64 MSB @dtprel(S + A) */
+#define R_IA_64_DTPREL64LSB 0xb7 /* word64 LSB @dtprel(S + A) */
+#define R_IA_64_LTOFF_DTPREL22 0xba /* imm22 @ltoff(@dtprel(S+A)) */
+
+#define R_MIPS_NONE 0 /* No reloc */
+#define R_MIPS_16 1 /* Direct 16 bit */
+#define R_MIPS_32 2 /* Direct 32 bit */
+#define R_MIPS_REL32 3 /* PC relative 32 bit */
+#define R_MIPS_26 4 /* Direct 26 bit shifted */
+#define R_MIPS_HI16 5 /* High 16 bit */
+#define R_MIPS_LO16 6 /* Low 16 bit */
+#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
+#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
+#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
+#define R_MIPS_PC16 10 /* PC relative 16 bit */
+#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
+#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
+#define R_MIPS_64 18 /* Direct 64 bit */
+#define R_MIPS_GOTHI16 21 /* GOT HI 16 bit */
+#define R_MIPS_GOTLO16 22 /* GOT LO 16 bit */
+#define R_MIPS_CALLHI16 30 /* upper 16 bit GOT entry for function */
+#define R_MIPS_CALLLO16 31 /* lower 16 bit GOT entry for function */
+
+#define R_PPC_NONE 0 /* No relocation. */
+#define R_PPC_ADDR32 1
+#define R_PPC_ADDR24 2
+#define R_PPC_ADDR16 3
+#define R_PPC_ADDR16_LO 4
+#define R_PPC_ADDR16_HI 5
+#define R_PPC_ADDR16_HA 6
+#define R_PPC_ADDR14 7
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10
+#define R_PPC_REL14 11
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/*
+ * 64-bit relocations
+ */
+#define R_PPC64_ADDR64 38
+#define R_PPC64_ADDR16_HIGHER 39
+#define R_PPC64_ADDR16_HIGHERA 40
+#define R_PPC64_ADDR16_HIGHEST 41
+#define R_PPC64_ADDR16_HIGHESTA 42
+#define R_PPC64_UADDR64 43
+#define R_PPC64_REL64 44
+#define R_PPC64_PLT64 45
+#define R_PPC64_PLTREL64 46
+#define R_PPC64_TOC16 47
+#define R_PPC64_TOC16_LO 48
+#define R_PPC64_TOC16_HI 49
+#define R_PPC64_TOC16_HA 50
+#define R_PPC64_TOC 51
+#define R_PPC64_DTPMOD64 68
+#define R_PPC64_TPREL64 73
+#define R_PPC64_DTPREL64 78
+
+/*
+ * TLS relocations
+ */
+#define R_PPC_TLS 67
+#define R_PPC_DTPMOD32 68
+#define R_PPC_TPREL16 69
+#define R_PPC_TPREL16_LO 70
+#define R_PPC_TPREL16_HI 71
+#define R_PPC_TPREL16_HA 72
+#define R_PPC_TPREL32 73
+#define R_PPC_DTPREL16 74
+#define R_PPC_DTPREL16_LO 75
+#define R_PPC_DTPREL16_HI 76
+#define R_PPC_DTPREL16_HA 77
+#define R_PPC_DTPREL32 78
+#define R_PPC_GOT_TLSGD16 79
+#define R_PPC_GOT_TLSGD16_LO 80
+#define R_PPC_GOT_TLSGD16_HI 81
+#define R_PPC_GOT_TLSGD16_HA 82
+#define R_PPC_GOT_TLSLD16 83
+#define R_PPC_GOT_TLSLD16_LO 84
+#define R_PPC_GOT_TLSLD16_HI 85
+#define R_PPC_GOT_TLSLD16_HA 86
+#define R_PPC_GOT_TPREL16 87
+#define R_PPC_GOT_TPREL16_LO 88
+#define R_PPC_GOT_TPREL16_HI 89
+#define R_PPC_GOT_TPREL16_HA 90
+
+/*
+ * The remaining relocs are from the Embedded ELF ABI, and are not in the
+ * SVR4 ELF ABI.
+ */
+
+#define R_PPC_EMB_NADDR32 101
+#define R_PPC_EMB_NADDR16 102
+#define R_PPC_EMB_NADDR16_LO 103
+#define R_PPC_EMB_NADDR16_HI 104
+#define R_PPC_EMB_NADDR16_HA 105
+#define R_PPC_EMB_SDAI16 106
+#define R_PPC_EMB_SDA2I16 107
+#define R_PPC_EMB_SDA2REL 108
+#define R_PPC_EMB_SDA21 109
+#define R_PPC_EMB_MRKREF 110
+#define R_PPC_EMB_RELSEC16 111
+#define R_PPC_EMB_RELST_LO 112
+#define R_PPC_EMB_RELST_HI 113
+#define R_PPC_EMB_RELST_HA 114
+#define R_PPC_EMB_BIT_FLD 115
+#define R_PPC_EMB_RELSDA 116
+
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_PC_HH22 37
+#define R_SPARC_PC_HM10 38
+#define R_SPARC_PC_LM22 39
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_GLOB_JMP 42
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
+#define R_SPARC_DISP64 46
+#define R_SPARC_PLT64 47
+#define R_SPARC_HIX22 48
+#define R_SPARC_LOX10 49
+#define R_SPARC_H44 50
+#define R_SPARC_M44 51
+#define R_SPARC_L44 52
+#define R_SPARC_REGISTER 53
+#define R_SPARC_UA64 54
+#define R_SPARC_UA16 55
+#define R_SPARC_TLS_GD_HI22 56
+#define R_SPARC_TLS_GD_LO10 57
+#define R_SPARC_TLS_GD_ADD 58
+#define R_SPARC_TLS_GD_CALL 59
+#define R_SPARC_TLS_LDM_HI22 60
+#define R_SPARC_TLS_LDM_LO10 61
+#define R_SPARC_TLS_LDM_ADD 62
+#define R_SPARC_TLS_LDM_CALL 63
+#define R_SPARC_TLS_LDO_HIX22 64
+#define R_SPARC_TLS_LDO_LOX10 65
+#define R_SPARC_TLS_LDO_ADD 66
+#define R_SPARC_TLS_IE_HI22 67
+#define R_SPARC_TLS_IE_LO10 68
+#define R_SPARC_TLS_IE_LD 69
+#define R_SPARC_TLS_IE_LDX 70
+#define R_SPARC_TLS_IE_ADD 71
+#define R_SPARC_TLS_LE_HIX22 72
+#define R_SPARC_TLS_LE_LOX10 73
+#define R_SPARC_TLS_DTPMOD32 74
+#define R_SPARC_TLS_DTPMOD64 75
+#define R_SPARC_TLS_DTPOFF32 76
+#define R_SPARC_TLS_DTPOFF64 77
+#define R_SPARC_TLS_TPOFF32 78
+#define R_SPARC_TLS_TPOFF64 79
+
+#define R_X86_64_NONE 0 /* No relocation. */
+#define R_X86_64_64 1 /* Add 64 bit symbol value. */
+#define R_X86_64_PC32 2 /* PC-relative 32 bit signed sym value. */
+#define R_X86_64_GOT32 3 /* PC-relative 32 bit GOT offset. */
+#define R_X86_64_PLT32 4 /* PC-relative 32 bit PLT offset. */
+#define R_X86_64_COPY 5 /* Copy data from shared object. */
+#define R_X86_64_GLOB_DAT 6 /* Set GOT entry to data address. */
+#define R_X86_64_JMP_SLOT 7 /* Set GOT entry to code address. */
+#define R_X86_64_RELATIVE 8 /* Add load address of shared object. */
+#define R_X86_64_GOTPCREL 9 /* Add 32 bit signed pcrel offset to GOT. */
+#define R_X86_64_32 10 /* Add 32 bit zero extended symbol value */
+#define R_X86_64_32S 11 /* Add 32 bit sign extended symbol value */
+#define R_X86_64_16 12 /* Add 16 bit zero extended symbol value */
+#define R_X86_64_PC16 13 /* Add 16 bit signed extended pc relative symbol value */
+#define R_X86_64_8 14 /* Add 8 bit zero extended symbol value */
+#define R_X86_64_PC8 15 /* Add 8 bit signed extended pc relative symbol value */
+#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
+#define R_X86_64_DTPOFF64 17 /* Offset in TLS block */
+#define R_X86_64_TPOFF64 18 /* Offset in static TLS block */
+#define R_X86_64_TLSGD 19 /* PC relative offset to GD GOT entry */
+#define R_X86_64_TLSLD 20 /* PC relative offset to LD GOT entry */
+#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
+#define R_X86_64_GOTTPOFF 22 /* PC relative offset to IE GOT entry */
+#define R_X86_64_TPOFF32 23 /* Offset in static TLS block */
+#define R_X86_64_IRELATIVE 37
+
+
+#endif /* !_SYS_ELF_COMMON_H_ */
diff --git a/core/arch/arm/kernel/elf_load.c b/core/arch/arm/kernel/elf_load.c
new file mode 100644
index 0000000..420ba59
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <types_ext.h>
+#include <tee_api_types.h>
+#include <tee_api_defines.h>
+#include <kernel/tee_misc.h>
+#include <tee/tee_cryp_provider.h>
+#include <stdlib.h>
+#include <string.h>
+#include <util.h>
+#include <trace.h>
+#include "elf_load.h"
+#include "elf_common.h"
+#include "elf32.h"
+#include "elf64.h"
+
+/*
+ * State kept while streaming a TA ELF image from non-secure shared
+ * memory. The image is consumed strictly sequentially, each byte read
+ * exactly once, and hashed as it is read (see elf_load_head()).
+ */
+struct elf_load_state {
+	bool is_32bit;		/* ELFCLASS32 if true, else ELFCLASS64 */
+
+	uint8_t *nwdata;	/* ELF image in non-secure shared memory */
+	size_t nwdata_len;	/* total size of the image in bytes */
+
+	void *hash_ctx;		/* hash context, owned by the caller */
+	uint32_t hash_algo;	/* algorithm used with hash_ctx */
+
+	size_t next_offs;	/* next offset to read/hash in nwdata */
+
+	void *ta_head;		/* malloc()ed copy of the TA header */
+	size_t ta_head_size;	/* bytes copied into ta_head */
+
+	void *ehdr;		/* malloc()ed Elf32_Ehdr or Elf64_Ehdr */
+	void *phdr;		/* malloc()ed program header table */
+
+	size_t vasize;		/* virtual memory needed to map the TA */
+	void *shdr;		/* malloc()ed section header table */
+};
+
+/* Replicates the fields we need from Elf{32,64}_Ehdr */
+struct elf_ehdr {
+	size_t e_phoff;		/* program header table file offset */
+	size_t e_shoff;		/* section header table file offset */
+	uint32_t e_phentsize;	/* size of one program header entry */
+	uint32_t e_phnum;	/* number of program header entries */
+	uint32_t e_shentsize;	/* size of one section header entry */
+	uint32_t e_shnum;	/* number of section header entries */
+};
+
+/* Replicates the fields we need from Elf{32,64}_Phdr */
+struct elf_phdr {
+	uint32_t p_type;	/* segment type (PT_LOAD etc.) */
+	uint32_t p_flags;	/* segment flags (PF_R/PF_W/PF_X) */
+	uintptr_t p_vaddr;	/* virtual address of the segment */
+	size_t p_filesz;	/* bytes of the segment present in the file */
+	size_t p_memsz;		/* bytes the segment occupies in memory */
+	size_t p_offset;	/* file offset of the segment */
+};
+
+#ifdef ARM64
+/*
+ * Expands to the 32- or 64-bit variant of a statement depending on the
+ * class of the ELF currently being loaded (state->is_32bit).
+ */
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) \
+	do { \
+		if ((state)->is_32bit) { \
+			is_32bit_action; \
+		} else { \
+			is_64bit_action; \
+		} \
+	} while (0)
+#else
+/* No need to assert state->is_32bit since that is caught before this is used */
+#define DO_ACTION(state, is_32bit_action, is_64bit_action) is_32bit_action
+#endif
+
+/* Copies the fields we use into the class-independent struct elf_ehdr */
+#define COPY_EHDR(dst, src) \
+	do { \
+		(dst)->e_phoff = (src)->e_phoff; \
+		(dst)->e_shoff = (src)->e_shoff; \
+		(dst)->e_phentsize = (src)->e_phentsize; \
+		(dst)->e_phnum = (src)->e_phnum; \
+		(dst)->e_shentsize = (src)->e_shentsize; \
+		(dst)->e_shnum = (src)->e_shnum; \
+	} while (0)
+static void copy_ehdr(struct elf_ehdr *ehdr, struct elf_load_state *state)
+{
+	DO_ACTION(state, COPY_EHDR(ehdr, ((Elf32_Ehdr *)state->ehdr)),
+			 COPY_EHDR(ehdr, ((Elf64_Ehdr *)state->ehdr)));
+}
+
+/* Returns sh_type of section header @idx for either ELF class */
+static uint32_t get_shdr_type(struct elf_load_state *state, size_t idx)
+{
+	DO_ACTION(state, return ((Elf32_Shdr *)state->shdr + idx)->sh_type,
+			 return ((Elf64_Shdr *)state->shdr + idx)->sh_type);
+}
+
+/* Copies program header @idx into the class-independent struct elf_phdr */
+#define COPY_PHDR(dst, src) \
+	do { \
+		(dst)->p_type = (src)->p_type; \
+		(dst)->p_vaddr = (src)->p_vaddr; \
+		(dst)->p_filesz = (src)->p_filesz; \
+		(dst)->p_memsz = (src)->p_memsz; \
+		(dst)->p_offset = (src)->p_offset; \
+		(dst)->p_flags = (src)->p_flags; \
+	} while (0)
+static void copy_phdr(struct elf_phdr *phdr, struct elf_load_state *state,
+			size_t idx)
+{
+	DO_ACTION(state, COPY_PHDR(phdr, ((Elf32_Phdr *)state->phdr + idx)),
+			 COPY_PHDR(phdr, ((Elf64_Phdr *)state->phdr + idx)));
+}
+
+/*
+ * Advances the sequential read position to @offs, hashing the bytes
+ * skipped over. Moving backwards is refused (TEE_ERROR_BAD_STATE)
+ * since every byte of the image must be read and hashed exactly once;
+ * an offset beyond the image is a TEE_ERROR_SECURITY.
+ */
+static TEE_Result advance_to(struct elf_load_state *state, size_t offs)
+{
+	TEE_Result res;
+
+	if (offs < state->next_offs)
+		return TEE_ERROR_BAD_STATE;
+	if (offs == state->next_offs)
+		return TEE_SUCCESS;
+
+	if (offs > state->nwdata_len)
+		return TEE_ERROR_SECURITY;
+
+	res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+				     state->nwdata + state->next_offs,
+				     offs - state->next_offs);
+	if (res != TEE_SUCCESS)
+		return res;
+	state->next_offs = offs;
+	return res;
+}
+
+/*
+ * Reads @len bytes at file offset @offs into @dst + @dst_offs while
+ * hashing them. Any gap between the current read position and @offs is
+ * hashed too (advance_to()). Both destination and source ranges are
+ * bounds-checked, including overflow of the offset additions, since
+ * all offsets ultimately come from the untrusted ELF headers.
+ */
+static TEE_Result copy_to(struct elf_load_state *state,
+			void *dst, size_t dst_size, size_t dst_offs,
+			size_t offs, size_t len)
+{
+	TEE_Result res;
+
+	res = advance_to(state, offs);
+	if (res != TEE_SUCCESS)
+		return res;
+	if (!len)
+		return TEE_SUCCESS;
+
+	/* Check for integer overflow */
+	if ((len + dst_offs) < dst_offs || (len + dst_offs) > dst_size ||
+	    (len + offs) < offs || (len + offs) > state->nwdata_len)
+		return TEE_ERROR_SECURITY;
+
+	memcpy((uint8_t *)dst + dst_offs, state->nwdata + offs, len);
+	res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo,
+				     (uint8_t *)dst + dst_offs, len);
+	if (res != TEE_SUCCESS)
+		return res;
+	state->next_offs = offs + len;
+	return res;
+}
+
+/*
+ * malloc()s a @len byte buffer and fills it from file offset @offs.
+ * On success ownership of the buffer transfers to *@p; on error the
+ * buffer is freed and *@p is left untouched.
+ */
+static TEE_Result alloc_and_copy_to(void **p, struct elf_load_state *state,
+			size_t offs, size_t len)
+{
+	TEE_Result res;
+	void *buf;
+
+	buf = malloc(len);
+	if (!buf)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	res = copy_to(state, buf, len, 0, offs, len);
+	if (res == TEE_SUCCESS)
+		*p = buf;
+	else
+		free(buf);
+	return res;
+}
+
+/*
+ * Allocates and initializes a load state for the ELF at @nwdata
+ * (@nwdata_len bytes of non-secure shared memory). @hash_ctx must be
+ * an initialized context for @hash_algo; it is updated as the image is
+ * consumed but remains owned by the caller. Free the returned state
+ * with elf_load_final().
+ */
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+			size_t nwdata_len, struct elf_load_state **ret_state)
+{
+	struct elf_load_state *state;
+
+	state = calloc(1, sizeof(*state));
+	if (!state)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	state->hash_ctx = hash_ctx;
+	state->hash_algo = hash_algo;
+	state->nwdata = nwdata;
+	state->nwdata_len = nwdata_len;
+	*ret_state = state;
+	return TEE_SUCCESS;
+}
+
+/*
+ * Validates @ehdr as a 32-bit ARM ELF: little-endian ET_DYN with the
+ * expected ABI version and header entry sizes. Hard-float images are
+ * rejected unless the core is built with CFG_WITH_VFP. On success a
+ * copy of the header is stored in the state and the class is recorded.
+ */
+static TEE_Result e32_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *ehdr)
+{
+	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
+	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
+	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
+	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
+	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
+#ifndef CFG_WITH_VFP
+	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
+#endif
+	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
+	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
+		return TEE_ERROR_BAD_FORMAT;
+
+	state->ehdr = malloc(sizeof(*ehdr));
+	if (!state->ehdr)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	memcpy(state->ehdr, ehdr, sizeof(*ehdr));
+	state->is_32bit = true;
+	return TEE_SUCCESS;
+}
+
+#ifdef ARM64
+/*
+ * Validates the header as a 64-bit AArch64 ELF. Only sizeof(Elf32_Ehdr)
+ * bytes have been read from the image so far; the tail of the larger
+ * Elf64_Ehdr is read here before the 64-bit-only fields are checked.
+ * state->ehdr is assigned before copy_to() so elf_load_final() frees
+ * the allocation also on the error paths.
+ */
+static TEE_Result e64_load_ehdr(struct elf_load_state *state, Elf32_Ehdr *eh32)
+{
+	TEE_Result res;
+	Elf64_Ehdr *ehdr = NULL;
+
+	if (eh32->e_ident[EI_VERSION] != EV_CURRENT ||
+	    eh32->e_ident[EI_CLASS] != ELFCLASS64 ||
+	    eh32->e_ident[EI_DATA] != ELFDATA2LSB ||
+	    eh32->e_ident[EI_OSABI] != ELFOSABI_NONE ||
+	    eh32->e_type != ET_DYN || eh32->e_machine != EM_AARCH64)
+		return TEE_ERROR_BAD_FORMAT;
+
+	ehdr = malloc(sizeof(*ehdr));
+	if (!ehdr)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	state->ehdr = ehdr;
+	memcpy(ehdr, eh32, sizeof(*eh32));
+	res = copy_to(state, ehdr, sizeof(*ehdr), sizeof(*eh32),
+		      sizeof(*eh32), sizeof(*ehdr) - sizeof(*eh32));
+	if (res != TEE_SUCCESS)
+		return res;
+
+	if (ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
+	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
+		return TEE_ERROR_BAD_FORMAT;
+
+	state->ehdr = ehdr;	/* redundant: already assigned above */
+	state->is_32bit = false;
+	return TEE_SUCCESS;
+}
+#else /*ARM64*/
+/* 64-bit TAs cannot be loaded by a 32-bit core */
+static TEE_Result e64_load_ehdr(struct elf_load_state *state __unused,
+			Elf32_Ehdr *eh32 __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
+
+/*
+ * Parses the program headers: verifies the first segment is a PT_LOAD
+ * at virtual address 0 large enough to hold the @head_size byte TA
+ * header, computes the total virtual size needed for the image
+ * (state->vasize) and reads the TA header into state->ta_head.
+ */
+static TEE_Result load_head(struct elf_load_state *state, size_t head_size)
+{
+	TEE_Result res;
+	size_t n;
+	void *p;
+	struct elf_ehdr ehdr;
+	struct elf_phdr phdr;
+	struct elf_phdr phdr0;
+
+	copy_ehdr(&ehdr, state);
+	/*
+	 * Program headers are supposed to be arranged as:
+	 * PT_LOAD [0] : .ta_head ...
+	 * ...
+	 * PT_LOAD [n]
+	 *
+	 * .ta_head must be located first in the first program header,
+	 * which also has to be of PT_LOAD type.
+	 *
+	 * A PT_DYNAMIC segment may appear, but is ignored. Any other
+	 * segment except PT_LOAD and PT_DYNAMIC will cause an error. All
+	 * sections not included by a PT_LOAD segment are ignored.
+	 */
+	if (ehdr.e_phnum < 1)
+		return TEE_ERROR_BAD_FORMAT;
+
+	/* Check for integer overflow */
+	if (((uint64_t)ehdr.e_phnum * ehdr.e_phentsize) > SIZE_MAX)
+		return TEE_ERROR_SECURITY;
+
+	res = alloc_and_copy_to(&p, state, ehdr.e_phoff,
+				ehdr.e_phnum * ehdr.e_phentsize);
+	if (res != TEE_SUCCESS)
+		return res;
+	state->phdr = p;
+
+	/*
+	 * Check that the first program header is a PT_LOAD (not strictly
+	 * needed but our link script is supposed to arrange it that way)
+	 * and that it starts at virtual address 0.
+	 */
+	copy_phdr(&phdr0, state, 0);
+	if (phdr0.p_type != PT_LOAD || phdr0.p_vaddr != 0)
+		return TEE_ERROR_BAD_FORMAT;
+
+	/*
+	 * Calculate amount of required virtual memory for TA. Find the max
+	 * address used by a PT_LOAD type. Note that last PT_LOAD type
+	 * dictates the total amount of needed memory. Eventual holes in
+	 * the memory will also be allocated.
+	 *
+	 * Note that this loop will terminate at n = 0 if not earlier
+	 * as we already know from above that state->phdr[0].p_type == PT_LOAD
+	 */
+	n = ehdr.e_phnum;
+	do {
+		n--;
+		copy_phdr(&phdr, state, n);
+	} while (phdr.p_type != PT_LOAD);
+	state->vasize = phdr.p_vaddr + phdr.p_memsz;
+
+	/* Check for integer overflow */
+	if (state->vasize < phdr.p_vaddr)
+		return TEE_ERROR_SECURITY;
+
+	/*
+	 * Read .ta_head from first segment, make sure the segment is large
+	 * enough. We're only interested in seeing that the
+	 * TA_FLAG_EXEC_DDR flag is set. If that's true we set that flag in
+	 * the TA context to enable mapping the TA. Later when this
+	 * function has returned and the hash has been verified the flags
+	 * field will be updated with eventual other flags.
+	 */
+	if (phdr0.p_filesz < head_size)
+		return TEE_ERROR_BAD_FORMAT;
+	res = alloc_and_copy_to(&p, state, phdr0.p_offset, head_size);
+	if (res == TEE_SUCCESS) {
+		state->ta_head = p;
+		state->ta_head_size = head_size;
+	}
+	return res;
+}
+
+/*
+ * Reads and validates the ELF header (32-bit ARM first, then 64-bit
+ * AArch64) and parses the program headers. On success returns the TA
+ * head in *@head (valid until elf_load_final()), the amount of virtual
+ * memory needed in *@vasize and the ELF class in *@is_32bit.
+ */
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+			void **head, size_t *vasize, bool *is_32bit)
+{
+	TEE_Result res;
+	Elf32_Ehdr ehdr;
+
+	/*
+	 * The ELF resides in shared memory, to avoid attacks based on
+	 * modifying the ELF while we're parsing it here we only read each
+	 * byte from the ELF once. We're also hashing the ELF while reading
+	 * so we're limited to only read the ELF sequentially from start to
+	 * end.
+	 */
+
+	res = copy_to(state, &ehdr, sizeof(ehdr), 0, 0, sizeof(Elf32_Ehdr));
+	if (res != TEE_SUCCESS)
+		return res;
+
+	if (!IS_ELF(ehdr))
+		return TEE_ERROR_BAD_FORMAT;
+	res = e32_load_ehdr(state, &ehdr);
+	if (res == TEE_ERROR_BAD_FORMAT)
+		res = e64_load_ehdr(state, &ehdr);
+	if (res != TEE_SUCCESS)
+		return res;
+
+	res = load_head(state, head_size);
+	if (res == TEE_SUCCESS) {
+		*head = state->ta_head;
+		*vasize = state->vasize;
+		*is_32bit = state->is_32bit;
+	}
+	return res;
+}
+
+/*
+ * Iterates over PT_LOAD segments. *@idx is the program header index to
+ * start searching from and is advanced past the returned segment, so
+ * starting with *@idx == 0 and calling repeatedly enumerates every
+ * loadable segment. @vaddr, @size and @flags may each be NULL if the
+ * caller is not interested. Returns TEE_ERROR_ITEM_NOT_FOUND when the
+ * table is exhausted.
+ */
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+			vaddr_t *vaddr, size_t *size, uint32_t *flags)
+{
+	struct elf_ehdr ehdr;
+
+	copy_ehdr(&ehdr, state);
+	while (*idx < ehdr.e_phnum) {
+		struct elf_phdr phdr;
+
+		copy_phdr(&phdr, state, *idx);
+		(*idx)++;
+		if (phdr.p_type == PT_LOAD) {
+			if (vaddr)
+				*vaddr = phdr.p_vaddr;
+			if (size)
+				*size = phdr.p_memsz;
+			if (flags)
+				*flags = phdr.p_flags;
+			return TEE_SUCCESS;
+		}
+	}
+	return TEE_ERROR_ITEM_NOT_FOUND;
+}
+
+/*
+ * Applies the REL relocations of section @rel_sidx to the image mapped
+ * at @vabase. Only R_ARM_ABS32 (needs the symbol table referenced via
+ * sh_link) and R_ARM_RELATIVE are supported. Every address taken from
+ * the (untrusted) tables is checked against state->vasize before it is
+ * dereferenced, and alignment is verified before each access.
+ *
+ * NOTE(review): the "sh_addr + sh_size" bounds checks below are done in
+ * the 32-bit Elf32_Word domain on 32-bit cores and could in principle
+ * wrap for hostile header values — confirm the surrounding size checks
+ * make that unreachable.
+ */
+static TEE_Result e32_process_rel(struct elf_load_state *state, size_t rel_sidx,
+			vaddr_t vabase)
+{
+	Elf32_Ehdr *ehdr = state->ehdr;
+	Elf32_Shdr *shdr = state->shdr;
+	Elf32_Rel *rel;
+	Elf32_Rel *rel_end;
+	size_t sym_tab_idx;
+	Elf32_Sym *sym_tab = NULL;
+	size_t num_syms = 0;
+
+	if (shdr[rel_sidx].sh_type != SHT_REL)
+		return TEE_ERROR_NOT_IMPLEMENTED;
+
+	if (shdr[rel_sidx].sh_entsize != sizeof(Elf32_Rel))
+		return TEE_ERROR_BAD_FORMAT;
+
+	sym_tab_idx = shdr[rel_sidx].sh_link;
+	if (sym_tab_idx) {
+		if (sym_tab_idx >= ehdr->e_shnum)
+			return TEE_ERROR_BAD_FORMAT;
+
+		if (shdr[sym_tab_idx].sh_entsize != sizeof(Elf32_Sym))
+			return TEE_ERROR_BAD_FORMAT;
+
+		/* Check the address is inside TA memory */
+		if (shdr[sym_tab_idx].sh_addr > state->vasize ||
+		    (shdr[sym_tab_idx].sh_addr +
+				shdr[sym_tab_idx].sh_size) > state->vasize)
+			return TEE_ERROR_BAD_FORMAT;
+
+		sym_tab = (Elf32_Sym *)(vabase + shdr[sym_tab_idx].sh_addr);
+		if (!ALIGNMENT_IS_OK(sym_tab, Elf32_Sym))
+			return TEE_ERROR_BAD_FORMAT;
+
+		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);
+	}
+
+	/* Check the address is inside TA memory */
+	if (shdr[rel_sidx].sh_addr >= state->vasize)
+		return TEE_ERROR_BAD_FORMAT;
+	rel = (Elf32_Rel *)(vabase + shdr[rel_sidx].sh_addr);
+	if (!ALIGNMENT_IS_OK(rel, Elf32_Rel))
+		return TEE_ERROR_BAD_FORMAT;
+
+	/* Check the address is inside TA memory */
+	if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+		return TEE_ERROR_BAD_FORMAT;
+	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
+	for (; rel < rel_end; rel++) {
+		Elf32_Addr *where;
+		size_t sym_idx;
+
+		/* Check the address is inside TA memory */
+		if (rel->r_offset >= state->vasize)
+			return TEE_ERROR_BAD_FORMAT;
+
+		where = (Elf32_Addr *)(vabase + rel->r_offset);
+		if (!ALIGNMENT_IS_OK(where, Elf32_Addr))
+			return TEE_ERROR_BAD_FORMAT;
+
+		switch (ELF32_R_TYPE(rel->r_info)) {
+		case R_ARM_ABS32:
+			sym_idx = ELF32_R_SYM(rel->r_info);
+			if (sym_idx >= num_syms)
+				return TEE_ERROR_BAD_FORMAT;
+
+			*where += vabase + sym_tab[sym_idx].st_value;
+			break;
+		case R_ARM_RELATIVE:
+			*where += vabase;
+			break;
+		default:
+			EMSG("Unknown relocation type %d",
+			     ELF32_R_TYPE(rel->r_info));
+			return TEE_ERROR_BAD_FORMAT;
+		}
+	}
+	return TEE_SUCCESS;
+}
+
+#ifdef ARM64
+/*
+ * Applies the RELA relocations of section @rel_sidx to the image
+ * mapped at @vabase. Only R_AARCH64_RELATIVE is supported; as with the
+ * 32-bit variant, every address taken from the untrusted tables is
+ * checked against state->vasize and for alignment before use.
+ */
+static TEE_Result e64_process_rel(struct elf_load_state *state,
+			size_t rel_sidx, vaddr_t vabase)
+{
+	Elf64_Shdr *shdr = state->shdr;
+	Elf64_Rela *rela;
+	Elf64_Rela *rela_end;
+
+	if (shdr[rel_sidx].sh_type != SHT_RELA)
+		return TEE_ERROR_NOT_IMPLEMENTED;
+
+	if (shdr[rel_sidx].sh_entsize != sizeof(Elf64_Rela))
+		return TEE_ERROR_BAD_FORMAT;
+
+	/* Check the address is inside TA memory */
+	if (shdr[rel_sidx].sh_addr >= state->vasize)
+		return TEE_ERROR_BAD_FORMAT;
+	rela = (Elf64_Rela *)(vabase + shdr[rel_sidx].sh_addr);
+	if (!ALIGNMENT_IS_OK(rela, Elf64_Rela))
+		return TEE_ERROR_BAD_FORMAT;
+
+	/* Check the address is inside TA memory */
+	if ((shdr[rel_sidx].sh_addr + shdr[rel_sidx].sh_size) >= state->vasize)
+		return TEE_ERROR_BAD_FORMAT;
+	rela_end = rela + shdr[rel_sidx].sh_size / sizeof(Elf64_Rela);
+	for (; rela < rela_end; rela++) {
+		Elf64_Addr *where;
+
+		/* Check the address is inside TA memory */
+		if (rela->r_offset >= state->vasize)
+			return TEE_ERROR_BAD_FORMAT;
+
+		where = (Elf64_Addr *)(vabase + rela->r_offset);
+		if (!ALIGNMENT_IS_OK(where, Elf64_Addr))
+			return TEE_ERROR_BAD_FORMAT;
+
+		switch (ELF64_R_TYPE(rela->r_info)) {
+		case R_AARCH64_RELATIVE:
+			*where = rela->r_addend + vabase;
+			break;
+		default:
+			EMSG("Unknown relocation type %zd",
+			     ELF64_R_TYPE(rela->r_info));
+			return TEE_ERROR_BAD_FORMAT;
+		}
+	}
+	return TEE_SUCCESS;
+}
+#else /*ARM64*/
+/* 64-bit relocations cannot occur on a 32-bit core */
+static TEE_Result e64_process_rel(struct elf_load_state *state __unused,
+			size_t rel_sidx __unused, vaddr_t vabase __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*ARM64*/
+
+/*
+ * Copies all PT_LOAD segments of the ELF into the TA memory mapped at
+ * @vabase, hashes the remainder of the image and applies relocations.
+ *
+ * state->ta_head (already read by elf_load_head()) supplies the first
+ * state->ta_head_size bytes of the first segment, so copying of that
+ * segment starts after it; load_head() guaranteed p_filesz is at least
+ * that size, hence "p_filesz - offs" cannot underflow.
+ */
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase)
+{
+	TEE_Result res;
+	size_t n;
+	void *p;
+	uint8_t *dst = (uint8_t *)vabase;
+	struct elf_ehdr ehdr;
+	size_t offs;
+
+	copy_ehdr(&ehdr, state);
+
+	/*
+	 * Zero initialize everything to make sure that all memory not
+	 * updated from the ELF is zero (covering .bss and eventual gaps).
+	 */
+	memset(dst, 0, state->vasize);
+
+	/*
+	 * Copy the segments
+	 */
+	memcpy(dst, state->ta_head, state->ta_head_size);
+	offs = state->ta_head_size;
+	for (n = 0; n < ehdr.e_phnum; n++) {
+		struct elf_phdr phdr;
+
+		copy_phdr(&phdr, state, n);
+		if (phdr.p_type != PT_LOAD)
+			continue;
+
+		res = copy_to(state, dst, state->vasize,
+			      phdr.p_vaddr + offs,
+			      phdr.p_offset + offs,
+			      phdr.p_filesz - offs);
+		if (res != TEE_SUCCESS)
+			return res;
+		offs = 0;
+	}
+
+	/*
+	 * We have now loaded all segments into TA memory, now we need to
+	 * process relocation information. To find relocation information
+	 * we need to locate the section headers. The section headers are
+	 * located somewhere between the last segment and the end of the
+	 * ELF.
+	 */
+	if (ehdr.e_shoff) {
+		/*
+		 * We have section headers. Check for integer overflow of
+		 * the table size before allocating, mirroring the check
+		 * done for the program headers in load_head(); e_shnum
+		 * and e_shentsize come from the untrusted image.
+		 */
+		if (((uint64_t)ehdr.e_shnum * ehdr.e_shentsize) > SIZE_MAX)
+			return TEE_ERROR_SECURITY;
+
+		res = alloc_and_copy_to(&p, state, ehdr.e_shoff,
+					ehdr.e_shnum * ehdr.e_shentsize);
+		if (res != TEE_SUCCESS)
+			return res;
+		state->shdr = p;
+	}
+
+	/* Hash until end of ELF */
+	res = advance_to(state, state->nwdata_len);
+	if (res != TEE_SUCCESS)
+		return res;
+
+	if (state->shdr) {
+		TEE_Result (*process_rel)(struct elf_load_state *state,
+					size_t rel_sidx, vaddr_t vabase);
+
+		/* Pick the relocation handler matching the ELF class */
+		if (state->is_32bit)
+			process_rel = e32_process_rel;
+		else
+			process_rel = e64_process_rel;
+
+		/* Process relocation */
+		for (n = 0; n < ehdr.e_shnum; n++) {
+			uint32_t sh_type = get_shdr_type(state, n);
+
+			if (sh_type == SHT_REL || sh_type == SHT_RELA) {
+				res = process_rel(state, n, vabase);
+				if (res != TEE_SUCCESS)
+					return res;
+			}
+		}
+	}
+
+	return TEE_SUCCESS;
+}
+
+/* Frees @state and every allocation it owns. Accepts NULL. */
+void elf_load_final(struct elf_load_state *state)
+{
+	if (state) {
+		free(state->ta_head);
+		free(state->ehdr);
+		free(state->phdr);
+		free(state->shdr);
+		free(state);
+	}
+}
diff --git a/core/arch/arm/kernel/elf_load.h b/core/arch/arm/kernel/elf_load.h
new file mode 100644
index 0000000..4944e3a
--- /dev/null
+++ b/core/arch/arm/kernel/elf_load.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ELF_LOAD_H
+#define ELF_LOAD_H
+
+#include <types_ext.h>
+#include <tee_api_types.h>
+
+struct elf_load_state;
+
+/* Creates a load state for the ELF image at @nwdata (shared memory) */
+TEE_Result elf_load_init(void *hash_ctx, uint32_t hash_algo, uint8_t *nwdata,
+			size_t nwdata_len, struct elf_load_state **state);
+/* Reads the ELF header; returns the TA head, needed VA size and class */
+TEE_Result elf_load_head(struct elf_load_state *state, size_t head_size,
+			void **head, size_t *vasize, bool *is_32bit);
+/* Copies the PT_LOAD segments to @vabase and applies relocations */
+TEE_Result elf_load_body(struct elf_load_state *state, vaddr_t vabase);
+/* Enumerates PT_LOAD segments, starting at program header *@idx */
+TEE_Result elf_load_get_next_segment(struct elf_load_state *state, size_t *idx,
+			vaddr_t *vaddr, size_t *size, uint32_t *flags);
+/* Frees @state and all resources it owns. Accepts NULL. */
+void elf_load_final(struct elf_load_state *state);
+
+#endif /*ELF_LOAD_H*/
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
new file mode 100644
index 0000000..8f13c36
--- /dev/null
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <assert.h>
+#include <compiler.h>
+#include <inttypes.h>
+#include <keep.h>
+#include <kernel/generic_boot.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <kernel/misc.h>
+#include <kernel/asan.h>
+#include <malloc.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <sm/tee_mon.h>
+#include <trace.h>
+#include <tee/tee_cryp_provider.h>
+#include <utee_defines.h>
+#include <util.h>
+#include <stdio.h>
+
+#include <platform_config.h>
+
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+#include <sm/sm.h>
+#endif
+
+#if defined(CFG_WITH_VFP)
+#include <kernel/vfp.h>
+#endif
+
+#if defined(CFG_DT)
+#include <libfdt.h>
+#endif
+
+/*
+ * In this file we're using unsigned long to represent physical pointers as
+ * they are received in a single register when OP-TEE is initially entered.
+ * This limits 32-bit systems to only use make use of the lower 32 bits
+ * of a physical address for initial parameters.
+ *
+ * 64-bit systems on the other hand can use full 64-bit physical pointers.
+ */
+#define PADDR_INVALID ULONG_MAX
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+paddr_t ns_entry_addrs[CFG_TEE_CORE_NB_CORE] __early_bss;
+static uint32_t spin_table[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+#ifdef CFG_BOOT_SYNC_CPU
+/*
+ * Array used when booting, to synchronize cpu.
+ * When 0, the cpu has not started.
+ * When 1, it has started
+ */
+uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE] __early_bss;
+#endif
+
+/*
+ * Default no-op implementations of the platform boot hooks. Each is
+ * declared __weak so plat-$(PLATFORM)/main.c can supply a real one;
+ * KEEP_PAGER keeps the reset hooks resident when paging is enabled.
+ */
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_late(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_late);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void plat_cpu_reset_early(void)
+{
+}
+KEEP_PAGER(plat_cpu_reset_early);
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_init_gic(void)
+{
+}
+
+/* May be overridden in plat-$(PLATFORM)/main.c */
+__weak void main_secondary_init_gic(void)
+{
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+/* With ARM TF there is no secure monitor in OP-TEE to initialize */
+void init_sec_mon(unsigned long nsec_entry __maybe_unused)
+{
+	assert(nsec_entry == PADDR_INVALID);
+	/* Do nothing as we don't have a secure monitor */
+}
+#else
+/* May be overridden in plat-$(PLATFORM)/main.c */
+/*
+ * Records the normal world entry point in the monitor's saved
+ * non-secure context: the first return to normal world enters
+ * @nsec_entry in SVC mode with IRQs masked.
+ */
+__weak void init_sec_mon(unsigned long nsec_entry)
+{
+	struct sm_nsec_ctx *nsec_ctx;
+
+	assert(nsec_entry != PADDR_INVALID);
+
+	/* Initialize secure monitor */
+	nsec_ctx = sm_get_nsec_ctx();
+	nsec_ctx->mon_lr = nsec_entry;
+	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
+
+}
+#endif
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+/* NSACR is owned by ARM TF in this configuration */
+static void init_vfp_nsec(void)
+{
+}
+#else
+static void init_vfp_nsec(void)
+{
+	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
+	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
+}
+#endif
+
+#if defined(CFG_WITH_VFP)
+
+#ifdef ARM32
+/*
+ * Grants secure side full CP10/CP11 access so VFP/SIMD can later be
+ * enabled on demand (lazy enable via FPEXC elsewhere).
+ */
+static void init_vfp_sec(void)
+{
+	uint32_t cpacr = read_cpacr();
+
+	/*
+	 * Enable Advanced SIMD functionality.
+	 * Enable use of D16-D31 of the Floating-point Extension register
+	 * file.
+	 */
+	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
+	/*
+	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
+	 * mode.
+	 */
+	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
+	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
+	write_cpacr(cpacr);
+}
+#endif /* ARM32 */
+
+#ifdef ARM64
+static void init_vfp_sec(void)
+{
+	/* Not using VFP until thread_kernel_enable_vfp() */
+	vfp_disable();
+}
+#endif /* ARM64 */
+
+#else /* CFG_WITH_VFP */
+
+static void init_vfp_sec(void)
+{
+	/* Not using VFP */
+}
+#endif
+
+#ifdef CFG_WITH_PAGER
+
+/*
+ * Returns the size in bytes covered by one entry of the translation
+ * table level above the one mapping the TEE core, i.e. the granularity
+ * used when carving out the pager's virtual memory pool. Panics if the
+ * translation tables cannot be located.
+ */
+static size_t get_block_size(void)
+{
+	struct core_mmu_table_info tbl_info;
+	unsigned l;
+
+	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
+		panic("can't find mmu tables");
+
+	l = tbl_info.level - 1;
+	if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
+		panic("can't find mmu table upper level");
+
+	return 1 << tbl_info.shift;
+}
+
+/*
+ * Pager variant of runtime init: zeroes BSS, starts the boot thread
+ * and heaps, copies the init + pageable parts of the core into secure
+ * DDR backing store, verifies each page against the build-time SHA256
+ * hashes and hands the pageable virtual range over to the pager.
+ * @pageable_part is the physical address of the pageable part as
+ * passed by the boot loader. Order of operations matters throughout.
+ */
+static void init_runtime(unsigned long pageable_part)
+{
+	size_t n;
+	size_t init_size = (size_t)__init_size;
+	size_t pageable_size = __pageable_end - __pageable_start;
+	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
+			   TEE_SHA256_HASH_SIZE;
+	tee_mm_entry_t *mm;
+	uint8_t *paged_store;
+	uint8_t *hashes;
+	size_t block_size;
+
+	assert(pageable_size % SMALL_PAGE_SIZE == 0);
+	assert(hash_size == (size_t)__tmp_hashes_size);
+
+	/*
+	 * Zero BSS area. Note that globals that would normally would go
+	 * into BSS which are used before this has to be put into .nozi.*
+	 * to avoid getting overwritten.
+	 */
+	memset(__bss_start, 0, __bss_end - __bss_start);
+
+	/*
+	 * This needs to be initialized early to support address lookup
+	 * in MEM_AREA_TEE_RAM
+	 */
+	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
+				 &tee_pager_tbl_info))
+		panic("can't find mmu tables");
+
+	if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
+		panic("Unsupported page size in translation table");
+
+	thread_init_boot_thread();
+
+	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
+
+	/* The hashes must survive paging; keep a heap copy of them */
+	hashes = malloc(hash_size);
+	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
+	assert(hashes);
+	memcpy(hashes, __tmp_hashes_start, hash_size);
+
+	/*
+	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
+	 * DDR below.
+	 */
+	teecore_init_ta_ram();
+
+	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
+	assert(mm);
+	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
+	/* Copy init part into pageable area */
+	memcpy(paged_store, __init_start, init_size);
+	/* Copy pageable part after init part into pageable area */
+	memcpy(paged_store + init_size,
+	       phys_to_virt(pageable_part,
+			    core_mmu_get_type_by_pa(pageable_part)),
+	       __pageable_part_end - __pageable_part_start);
+
+	/* Check that hashes of what's in pageable area is OK */
+	DMSG("Checking hashes of pageable area");
+	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
+		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
+		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
+		TEE_Result res;
+
+		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
+		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
+		if (res != TEE_SUCCESS) {
+			EMSG("Hash failed for page %zu at %p: res 0x%x",
+			     n, page, res);
+			panic();
+		}
+	}
+
+	/*
+	 * Copy what's not initialized in the last init page. Needed
+	 * because we're not going fault in the init pages again. We can't
+	 * fault in pages until we've switched to the new vector by calling
+	 * thread_init_handlers() below.
+	 */
+	if (init_size % SMALL_PAGE_SIZE) {
+		uint8_t *p;
+
+		memcpy(__init_start + init_size, paged_store + init_size,
+		       SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));
+
+		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
+				~SMALL_PAGE_MASK);
+
+		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
+		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
+				     SMALL_PAGE_SIZE);
+	}
+
+	/*
+	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
+	 * is supplied to tee_pager_init() below.
+	 */
+	block_size = get_block_size();
+	if (!tee_mm_init(&tee_mm_vcore,
+			 ROUNDDOWN(CFG_TEE_RAM_START, block_size),
+			 ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
+				 block_size),
+			 SMALL_PAGE_SHIFT, 0))
+		panic("tee_mm_vcore init failed");
+
+	/*
+	 * Assign alias area for pager end of the small page block the rest
+	 * of the binary is loaded into. We're taking more than needed, but
+	 * we're guaranteed to not need more than the physical amount of
+	 * TZSRAM.
+	 */
+	mm = tee_mm_alloc2(&tee_mm_vcore,
+		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
+	assert(mm);
+	tee_pager_init(mm);
+
+	/*
+	 * Claim virtual memory which isn't paged, note that there migth be
+	 * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
+	 * claimed to avoid later allocations to get that memory.
+	 * Linear memory (flat map core memory) ends there.
+	 */
+	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
+			(vaddr_t)(__pageable_start - tee_mm_vcore.lo));
+	assert(mm);
+
+	/*
+	 * Allocate virtual memory for the pageable area and let the pager
+	 * take charge of all the pages already assigned to that memory.
+	 */
+	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
+			   pageable_size);
+	assert(mm);
+	if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+				     TEE_MATTR_PRX, paged_store, hashes))
+		panic("failed to add pageable to vcore");
+
+	tee_pager_add_pages((vaddr_t)__pageable_start,
+		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
+	tee_pager_add_pages((vaddr_t)__pageable_start +
+				ROUNDUP(init_size, SMALL_PAGE_SIZE),
+			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
+				SMALL_PAGE_SIZE, true);
+
+}
+#else
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+static void init_run_constructors(void)
+{
+ vaddr_t *ctor;
+
+ for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
+ ((void (*)(void))(*ctor))();
+}
+
+/*
+ * Set up Kernel Address Sanitizer support: verify the shadow offset the
+ * compiler was built with, mark the shadowed region, open access to the
+ * areas used before constructors have run, then enable sanitizing.
+ */
+static void init_asan(void)
+{
+
+	/*
+	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
+	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
+	 * Since all the values needed to calculate
+	 * CFG_ASAN_SHADOW_OFFSET aren't available to make, we need to
+	 * calculate it in advance and hard code it into the platform
+	 * conf.mk. Here, where we have all the needed values, we double
+	 * check that the compiler is supplied the correct value.
+	 */
+
+#define __ASAN_SHADOW_START \
+	ROUNDUP(CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
+	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
+#define __CFG_ASAN_SHADOW_OFFSET \
+	(__ASAN_SHADOW_START - (CFG_TEE_RAM_START / 8))
+	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
+#undef __ASAN_SHADOW_START
+#undef __CFG_ASAN_SHADOW_OFFSET
+
+	/*
+	 * Assign area covered by the shadow area, everything from start up
+	 * to the beginning of the shadow area.
+	 */
+	asan_set_shadowed((void *)CFG_TEE_LOAD_ADDR, &__asan_shadow_start);
+
+	/*
+	 * Add access to areas that aren't opened automatically by a
+	 * constructor.
+	 */
+	asan_tag_access(&__initcall_start, &__initcall_end);
+	asan_tag_access(&__ctor_list, &__ctor_end);
+	asan_tag_access(__rodata_start, __rodata_end);
+	asan_tag_access(__early_bss_start, __early_bss_end);
+	asan_tag_access(__nozi_start, __nozi_end);
+
+	/* Run C constructors (see init_run_constructors() above). */
+	init_run_constructors();
+
+	/* Everything is tagged correctly, let's start address sanitizing. */
+	asan_start();
+}
+#else /*CFG_CORE_SANITIZE_KADDRESS*/
+/* KASAN disabled: nothing to initialize. */
+static void init_asan(void)
+{
+}
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+/*
+ * Non-pager runtime initialization: clear BSS, register the boot
+ * thread, initialize KASAN (if enabled) and the heap, then hand the
+ * TA RAM area over to the core.
+ */
+static void init_runtime(unsigned long pageable_part __unused)
+{
+	/*
+	 * Zero BSS area. Note that globals that would normally go into
+	 * BSS, but are used before this point, have to be put into
+	 * .nozi.* to avoid getting overwritten here.
+	 */
+	memset(__bss_start, 0, __bss_end - __bss_start);
+
+	thread_init_boot_thread();
+
+	init_asan();
+	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+
+	/*
+	 * Initialized at this stage in the pager version of this function
+	 * above
+	 */
+	teecore_init_ta_ram();
+}
+#endif
+
+#ifdef CFG_DT
+/*
+ * Make sure the non-secure device tree carries a /firmware/optee node
+ * advertising the SMC-based OP-TEE interface.  Creates /firmware when
+ * it's missing.  Returns 0 on success (or if the node already exists),
+ * -1 on any libfdt failure.
+ */
+static int add_optee_dt_node(void *fdt)
+{
+	int node;
+
+	/* A previous boot stage may already have added the node */
+	if (fdt_path_offset(fdt, "/firmware/optee") >= 0) {
+		IMSG("OP-TEE Device Tree node already exists!\n");
+		return 0;
+	}
+
+	node = fdt_path_offset(fdt, "/firmware");
+	if (node < 0) {
+		int root = fdt_path_offset(fdt, "/");
+
+		if (root < 0)
+			return -1;
+		node = fdt_add_subnode(fdt, root, "firmware");
+		if (node < 0)
+			return -1;
+	}
+
+	node = fdt_add_subnode(fdt, node, "optee");
+	if (node < 0)
+		return -1;
+
+	if (fdt_setprop_string(fdt, node, "compatible", "linaro,optee-tz") < 0)
+		return -1;
+	if (fdt_setprop_string(fdt, node, "method", "smc") < 0)
+		return -1;
+	return 0;
+}
+
+/*
+ * Read a one-cell property (#address-cells / #size-cells) from node
+ * @offs.  On success stores the value in @cell_size and returns 0;
+ * returns -1 if the property is absent, malformed or holds a value
+ * other than 1 or 2.
+ */
+static int get_dt_cell_size(void *fdt, int offs, const char *cell_name,
+			    uint32_t *cell_size)
+{
+	int len;
+	const uint32_t *cell = fdt_getprop(fdt, offs, cell_name, &len);
+
+	/*
+	 * fdt_getprop() returns NULL with a negative @len on error; check
+	 * the pointer explicitly rather than relying on the implicit
+	 * signed/unsigned promotion in the length comparison to reject a
+	 * negative length.
+	 */
+	if (!cell || len != sizeof(*cell))
+		return -1;
+	*cell_size = fdt32_to_cpu(*cell);
+	/* Only 1-cell (32-bit) and 2-cell (64-bit) addressing is supported */
+	if (*cell_size != 1 && *cell_size != 2)
+		return -1;
+	return 0;
+}
+
+/*
+ * Store @val at @data using device tree cell encoding: a single
+ * big-endian 32-bit cell when @cell_size is 1, otherwise two cells
+ * (a big-endian 64-bit value).
+ */
+static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
+{
+	if (cell_size == 1) {
+		uint32_t cell32 = cpu_to_fdt32((uint32_t)val);
+
+		memcpy(data, &cell32, sizeof(cell32));
+		return;
+	}
+
+	{
+		uint64_t cell64 = cpu_to_fdt64(val);
+
+		memcpy(data, &cell64, sizeof(cell64));
+	}
+}
+
+/*
+ * Describe the non-secure shared memory carve-out to normal world by
+ * adding a /reserved-memory/optee@<pa> node with "reg" and "no-map"
+ * properties.  Creates /reserved-memory (with 2-cell addressing) if it
+ * doesn't exist; otherwise reuses its existing cell sizes.
+ * Returns 0 on success, -1 on any libfdt failure.
+ */
+static int add_optee_res_mem_dt_node(void *fdt)
+{
+	int offs;
+	int ret;
+	uint32_t addr_size = 2;
+	uint32_t len_size = 2;
+	vaddr_t shm_va_start;
+	vaddr_t shm_va_end;
+	paddr_t shm_pa;
+	char subnode_name[80];
+
+	offs = fdt_path_offset(fdt, "/reserved-memory");
+	if (offs >= 0) {
+		/* Node exists: honor its declared cell sizes */
+		ret = get_dt_cell_size(fdt, offs, "#address-cells", &addr_size);
+		if (ret < 0)
+			return -1;
+		ret = get_dt_cell_size(fdt, offs, "#size-cells", &len_size);
+		if (ret < 0)
+			return -1;
+	} else {
+		offs = fdt_path_offset(fdt, "/");
+		if (offs < 0)
+			return -1;
+		offs = fdt_add_subnode(fdt, offs, "reserved-memory");
+		if (offs < 0)
+			return -1;
+		ret = fdt_setprop_cell(fdt, offs, "#address-cells", addr_size);
+		if (ret < 0)
+			return -1;
+		ret = fdt_setprop_cell(fdt, offs, "#size-cells", len_size);
+		if (ret < 0)
+			return -1;
+		ret = fdt_setprop(fdt, offs, "ranges", NULL, 0);
+		if (ret < 0)
+			return -1;
+	}
+
+	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
+	shm_pa = virt_to_phys((void *)shm_va_start);
+	snprintf(subnode_name, sizeof(subnode_name),
+		 "optee@0x%" PRIxPA, shm_pa);
+	offs = fdt_add_subnode(fdt, offs, subnode_name);
+	if (offs >= 0) {
+		/* VLA sized in 32-bit cells: addr_size + len_size cells */
+		uint32_t data[addr_size + len_size] ;
+
+		set_dt_val(data, addr_size, shm_pa);
+		set_dt_val(data + addr_size, len_size,
+			   shm_va_end - shm_va_start);
+		ret = fdt_setprop(fdt, offs, "reg", data, sizeof(data));
+		if (ret < 0)
+			return -1;
+		/* "no-map": normal world must not map this region normally */
+		ret = fdt_setprop(fdt, offs, "no-map", NULL, 0);
+		if (ret < 0)
+			return -1;
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Map the device tree passed by the boot loader at @phys_fdt, amend it
+ * with the OP-TEE firmware and reserved-memory nodes, and repack it.
+ * Panics on mapping or libfdt failure; silently returns when no DT was
+ * supplied.
+ */
+static void init_fdt(unsigned long phys_fdt)
+{
+	void *fdt;
+	int ret;
+
+	if (!phys_fdt) {
+		EMSG("Device Tree missing");
+		/*
+		 * No need to panic as we're not using the DT in OP-TEE
+		 * yet, we're only adding some nodes for normal world use.
+		 * This makes the switch to using DT easier as we can boot
+		 * a newer OP-TEE with older boot loaders. Once we start to
+		 * initialize devices based on DT we'll likely panic
+		 * instead of returning here.
+		 */
+		return;
+	}
+
+	if (!core_mmu_add_mapping(MEM_AREA_IO_NSEC, phys_fdt, CFG_DTB_MAX_SIZE))
+		panic("failed to map fdt");
+
+	fdt = phys_to_virt(phys_fdt, MEM_AREA_IO_NSEC);
+	if (!fdt)
+		panic();
+
+	/* Re-open in place with room up to CFG_DTB_MAX_SIZE for our nodes */
+	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
+	if (ret < 0) {
+		EMSG("Invalid Device Tree at 0x%" PRIxPA ": error %d",
+		     phys_fdt, ret);
+		panic();
+	}
+
+	if (add_optee_dt_node(fdt))
+		panic("Failed to add OP-TEE Device Tree node");
+
+	if (add_optee_res_mem_dt_node(fdt))
+		panic("Failed to add OP-TEE reserved memory DT node");
+
+	ret = fdt_pack(fdt);
+	if (ret < 0) {
+		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
+		     phys_fdt, ret);
+		panic();
+	}
+}
+#else
+/* CFG_DT disabled: device tree is ignored. */
+static void init_fdt(unsigned long phys_fdt __unused)
+{
+}
+#endif /*!CFG_DT*/
+
+/*
+ * Common primary CPU initialization: bring up the runtime (pager or
+ * non-pager variant above), threading, secure monitor, DT, GIC and VFP
+ * before returning to the entry code which exits to normal world.
+ */
+static void init_primary_helper(unsigned long pageable_part,
+				unsigned long nsec_entry, unsigned long fdt)
+{
+	/*
+	 * Mask asynchronous exceptions before switch to the thread vector
+	 * as the thread handler requires those to be masked while
+	 * executing with the temporary stack. The thread subsystem also
+	 * asserts that IRQ is blocked when using most of its functions.
+	 */
+	thread_set_exceptions(THREAD_EXCP_ALL);
+	init_vfp_sec();
+
+	init_runtime(pageable_part);
+
+	IMSG("Initializing (%s)\n", core_v_str);
+
+	thread_init_primary(generic_boot_get_handlers());
+	thread_init_per_cpu();
+	init_sec_mon(nsec_entry);
+	init_fdt(fdt);
+	main_init_gic();
+	init_vfp_nsec();
+
+	if (init_teecore() != TEE_SUCCESS)
+		panic();
+	DMSG("Primary CPU switching to normal world boot\n");
+}
+
+/*
+ * Common secondary CPU initialization: per-CPU thread state, secure
+ * monitor, GIC CPU interface and VFP.  Runtime/global state was already
+ * set up by the primary CPU.
+ */
+static void init_secondary_helper(unsigned long nsec_entry)
+{
+	/*
+	 * Mask asynchronous exceptions before switch to the thread vector
+	 * as the thread handler requires those to be masked while
+	 * executing with the temporary stack. The thread subsystem also
+	 * asserts that IRQ is blocked when using most of its functions.
+	 */
+	thread_set_exceptions(THREAD_EXCP_ALL);
+
+	thread_init_per_cpu();
+	init_sec_mon(nsec_entry);
+	main_secondary_init_gic();
+	init_vfp_sec();
+	init_vfp_nsec();
+
+	DMSG("Secondary CPU Switching to normal world boot\n");
+}
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+/*
+ * ARM Trusted Firmware entry: nsec_entry is unused (ARM TF handles the
+ * normal world entry itself), returns the thread vector table for ARM TF
+ * to dispatch subsequent calls through.
+ */
+struct thread_vector_table *
+generic_boot_init_primary(unsigned long pageable_part, unsigned long u __unused,
+			  unsigned long fdt)
+{
+	init_primary_helper(pageable_part, PADDR_INVALID, fdt);
+	return &thread_vector_table;
+}
+
+/*
+ * PSCI CPU_ON entry for a secondary CPU when booting under ARM TF.
+ * a0/a1 are only used for the debug trace.  Always returns 0.
+ */
+unsigned long generic_boot_cpu_on_handler(unsigned long a0 __maybe_unused,
+					  unsigned long a1 __unused)
+{
+	DMSG("cpu %zu: a0 0x%lx", get_core_pos(), a0);
+	init_secondary_helper(PADDR_INVALID);
+	return 0;
+}
+#else
+/* Non-ARM-TF primary entry: the built-in monitor uses nsec_entry. */
+void generic_boot_init_primary(unsigned long pageable_part,
+			       unsigned long nsec_entry, unsigned long fdt)
+{
+	init_primary_helper(pageable_part, nsec_entry, fdt);
+}
+
+/* Non-ARM-TF secondary entry, called from reset_secondary. */
+void generic_boot_init_secondary(unsigned long nsec_entry)
+{
+	init_secondary_helper(nsec_entry);
+}
+#endif
+
+#if defined(CFG_BOOT_SECONDARY_REQUEST)
+/*
+ * Release a secondary core parked in the boot spin loop.  The entry
+ * address is published before the spin-table flag is raised (dmb orders
+ * the two stores), then sev() wakes cores waiting in wfe.
+ * Returns 0 on success, -1 for the primary core (index 0) or an
+ * out-of-range index.
+ */
+int generic_boot_core_release(size_t core_idx, paddr_t entry)
+{
+	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
+		return -1;
+
+	ns_entry_addrs[core_idx] = entry;
+	dmb();
+	spin_table[core_idx] = 1;
+	dsb();
+	sev();
+
+	return 0;
+}
+
+/*
+ * spin until secondary boot request, then returns with
+ * the secondary core entry address.
+ */
+paddr_t generic_boot_core_hpen(void)
+{
+#ifdef CFG_PSCI_ARM32
+	/* With PSCI the core is only started on request: no need to spin */
+	return ns_entry_addrs[get_core_pos()];
+#else
+	do {
+		wfe();
+	} while (!spin_table[get_core_pos()]);
+	/* Order the flag read before the entry address read */
+	dmb();
+	return ns_entry_addrs[get_core_pos()];
+#endif
+}
+#endif
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
new file mode 100644
index 0000000..27717d5
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+#include <kernel/asan.h>
+
+.section .data
+.balign 4
+
+#ifdef CFG_BOOT_SYNC_CPU
+.equ SEM_CPU_READY, 1
+#endif
+
+#ifdef CFG_PL310
+.section .rodata.init
+panic_boot_file:
+ .asciz __FILE__
+
+/*
+ * void assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
+ */
+.section .text.init
+/*
+ * Panic unless @r0 (vaddr) is flat-mapped (virt == phys) while the MMU
+ * is on.  @r1 carries the source line for the panic report.  Used to
+ * guarantee the PL310 maintenance code can pass virtual addresses as
+ * physical ones.
+ */
+LOCAL_FUNC __assert_flat_mapped_range , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	push	{ r4-r6, lr }
+	mov	r4, r0
+	mov	r5, r1
+	bl	cpu_mmu_enabled
+	cmp	r0, #0
+	beq	1f		/* MMU off: identity by definition */
+	mov	r0, r4
+	bl	virt_to_phys
+	cmp	r0, r4
+	beq	1f		/* flat mapped: OK */
+	/*
+	 * this must be compliant with the panic generic routine:
+	 * __do_panic(__FILE__, __LINE__, __func__, str)
+	 */
+	ldr	r0, =panic_boot_file
+	mov	r1, r5
+	mov	r2, #0
+	mov	r3, #0
+	bl	__do_panic
+	b	.		/* should NOT return */
+1:	pop	{ r4-r6, pc }
+UNWIND(	.fnend)
+END_FUNC __assert_flat_mapped_range
+
+	/* panic if mmu is enable and vaddr != paddr (scratch lr) */
+	.macro assert_flat_mapped_range va, line
+		ldr	r0, =(\va)
+		ldr	r1, =\line
+		bl	__assert_flat_mapped_range
+	.endm
+#endif /* CFG_PL310 */
+
+.section .text.boot
+/*
+ * Boot-time exception vector table, also installed in VBAR by reset
+ * below.  Only the reset vector is handled; every other exception
+ * parks the CPU in an infinite loop.
+ */
+FUNC _start , :
+	b	reset
+	b	.	/* Undef */
+	b	.	/* Syscall */
+	b	.	/* Prefetch abort */
+	b	.	/* Data abort */
+	b	.	/* Reserved */
+	b	.	/* IRQ */
+	b	.	/* FIQ */
+END_FUNC _start
+
+	/* Mark this CPU ready in its sem_cpu_sync slot and wake waiters */
+	.macro cpu_is_ready
+#ifdef CFG_BOOT_SYNC_CPU
+	bl	get_core_pos
+	lsl	r0, r0, #2	/* word index into sem_cpu_sync */
+	ldr	r1,=sem_cpu_sync
+	ldr	r2, =SEM_CPU_READY
+	str	r2, [r1, r0]
+	dsb
+	sev
+#endif
+	.endm
+
+	/* Secondary CPUs: wfe-spin until the primary's slot reads ready */
+	.macro wait_primary
+#ifdef CFG_BOOT_SYNC_CPU
+	ldr	r0, =sem_cpu_sync
+	mov	r2, #SEM_CPU_READY
+	sev
+1:
+	ldr	r1, [r0]
+	cmp	r1, r2
+	wfene
+	bne	1b
+#endif
+	.endm
+
+	/* Primary CPU: wfe-spin until every secondary slot reads ready */
+	.macro wait_secondary
+#ifdef CFG_BOOT_SYNC_CPU
+	ldr	r0, =sem_cpu_sync
+	mov	r3, #CFG_TEE_CORE_NB_CORE
+	mov	r2, #SEM_CPU_READY
+	sev
+1:
+	subs	r3, r3, #1
+	beq	3f		/* all cores checked */
+	add	r0, r0, #4	/* next core's slot */
+2:
+	ldr	r1, [r0]
+	cmp	r1, r2
+	wfene
+	bne	2b
+	b	1b
+3:
+#endif
+	.endm
+
+	/*
+	 * Save boot arguments
+	 * entry r0, saved r4: pagestore
+	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
+	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
+	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
+	 */
+	.macro bootargs_entry
+#if defined(CFG_NS_ENTRY_ADDR)
+	ldr	r5, =CFG_NS_ENTRY_ADDR
+#else
+	mov	r5, lr
+#endif
+#if defined(CFG_PAGEABLE_ADDR)
+	ldr	r4, =CFG_PAGEABLE_ADDR
+#else
+	mov	r4, r0
+#endif
+#if defined(CFG_DT_ADDR)
+	ldr	r6, =CFG_DT_ADDR
+#else
+	mov	r6, r2
+#endif
+	mov	r7, r1
+	.endm
+
+/*
+ * Common reset entry: stash boot arguments, set a safe SCTLR, run early
+ * platform init, point VBAR at our vector table and branch to the
+ * primary or secondary reset path.
+ */
+LOCAL_FUNC reset , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+
+	bootargs_entry
+
+	/* Enable alignment checks and disable data and instruction cache. */
+	read_sctlr r0
+	orr	r0, r0, #SCTLR_A
+	bic	r0, r0, #SCTLR_C
+	bic	r0, r0, #SCTLR_I
+	write_sctlr r0
+	isb
+
+	/* Early ARM secure MP specific configuration */
+	bl	plat_cpu_reset_early
+
+	/* Route exceptions to our own vector table */
+	ldr	r0, =_start
+	write_vbar r0
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+	/* With ARM TF only the primary CPU enters through reset */
+	b	reset_primary
+#else
+	bl	get_core_pos
+	cmp	r0, #0
+	beq	reset_primary
+	b	reset_secondary
+#endif
+UNWIND(	.fnend)
+END_FUNC reset
+
+ /*
+ * Setup sp to point to the top of the tmp stack for the current CPU:
+ * sp is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ */
+ .macro set_sp
+ bl get_core_pos
+ cmp r0, #CFG_TEE_CORE_NB_CORE
+ /* Unsupported CPU, park it before it breaks something */
+ bge unhandled_cpu
+ add r0, r0, #1
+ ldr r2, =stack_tmp_stride
+ ldr r1, [r2]
+ mul r2, r0, r1
+ ldr r1, =stack_tmp
+ add r1, r1, r2
+ ldr r2, =stack_tmp_offset
+ ldr r2, [r2]
+ sub sp, r1, r2
+ .endm
+
+ /*
+ * Cache maintenance during entry: handle outer cache.
+ * End address is exclusive: first byte not to be changed.
+ * Note however arm_clX_inv/cleanbyva operate on full cache lines.
+ *
+ * Use ANSI #define to trap source file line number for PL310 assertion
+ */
+ .macro __inval_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_invbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_invbyva
+ .endm
+
+ .macro __flush_cache_vrange vbase, vend, line
+#ifdef CFG_PL310
+ assert_flat_mapped_range (\vbase), (\line)
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleanbyva
+ bl pl310_base
+ ldr r1, =(\vbase)
+ ldr r2, =(\vend)
+ bl arm_cl2_cleaninvbypa
+#endif
+ ldr r0, =(\vbase)
+ ldr r1, =(\vend)
+ bl arm_cl1_d_cleaninvbyva
+ .endm
+
+#define inval_cache_vrange(vbase, vend) \
+ __inval_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#define flush_cache_vrange(vbase, vend) \
+ __flush_cache_vrange (vbase), ((vend) - 1), __LINE__
+
+#ifdef CFG_BOOT_SYNC_CPU
+#define flush_cpu_semaphores \
+ flush_cache_vrange(sem_cpu_sync, \
+ (sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
+#else
+#define flush_cpu_semaphores
+#endif
+
+/*
+ * Primary CPU cold boot path: relocate pager init code (if configured),
+ * prepare the ASan shadow, set up the stack, caches and MMU, run the C
+ * initialization (generic_boot_init_primary) and finally exit to normal
+ * world via SMC.
+ */
+LOCAL_FUNC reset_primary , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+
+	/* preserve r4-r7: bootargs */
+
+#ifdef CFG_WITH_PAGER
+	/*
+	 * Move init code into correct location and move hashes to a
+	 * temporary safe location until the heap is initialized.
+	 *
+	 * The binary is built as:
+	 * [Pager code, rodata and data] : In correct location
+	 * [Init code and rodata] : Should be copied to __text_init_start
+	 * [Hashes] : Should be saved before initializing pager
+	 *
+	 */
+	ldr	r0, =__text_init_start	/* dst */
+	ldr	r1, =__data_end		/* src */
+	ldr	r2, =__tmp_hashes_end	/* dst limit */
+	/* Copy backwards (as memmove) in case we're overlapping */
+	sub	r2, r2, r0		/* len */
+	add	r0, r0, r2
+	add	r1, r1, r2
+	ldr	r2, =__text_init_start
+copy_init:
+	/* NOTE: sp is used as a scratch register here, set_sp runs later */
+	ldmdb	r1!, {r3, r8-r12, sp}
+	stmdb	r0!, {r3, r8-r12, sp}
+	cmp	r0, r2
+	bgt	copy_init
+#endif
+
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+	/* First initialize the entire shadow area with no access */
+	ldr	r0, =__asan_shadow_start	/* start */
+	ldr	r1, =__asan_shadow_end	/* limit */
+	mov	r2, #ASAN_DATA_RED_ZONE
+shadow_no_access:
+	str	r2, [r0], #4
+	cmp	r0, r1
+	bls	shadow_no_access
+
+	/* Mark the entire stack area as OK */
+	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
+	ldr	r0, =__nozi_stack_start	/* start */
+	lsr	r0, r0, #ASAN_BLOCK_SHIFT
+	add	r0, r0, r2
+	ldr	r1, =__nozi_stack_end	/* limit */
+	lsr	r1, r1, #ASAN_BLOCK_SHIFT
+	add	r1, r1, r2
+	mov	r2, #0
+shadow_stack_access_ok:
+	strb	r2, [r0], #1
+	cmp	r0, r1
+	bls	shadow_stack_access_ok
+#endif
+
+	set_sp
+
+	/* complete ARM secure MP common configuration */
+	bl	plat_cpu_reset_late
+
+	/* Enable Console */
+	bl	console_init
+
+#ifdef CFG_PL310
+	bl	pl310_base
+	bl	arm_cl2_config
+#endif
+
+	/*
+	 * Invalidate dcache for all memory used during initialization to
+	 * avoid nasty surprises when the cache is turned on. We must not
+	 * invalidate memory not used by OP-TEE since we may invalidate
+	 * entries used by for instance ARM Trusted Firmware.
+	 */
+#ifdef CFG_WITH_PAGER
+	inval_cache_vrange(__text_start, __tmp_hashes_end)
+#else
+	inval_cache_vrange(__text_start, __end)
+#endif
+
+#ifdef CFG_PL310
+	/* Enable PL310 if not yet enabled */
+	bl	pl310_base
+	bl	arm_cl2_enable
+#endif
+
+	bl	core_init_mmu_map
+	bl	core_init_mmu_regs
+	bl	cpu_mmu_enable
+	bl	cpu_mmu_enable_icache
+	bl	cpu_mmu_enable_dcache
+
+	mov	r0, r4		/* pageable part address */
+	mov	r1, r5		/* ns-entry address */
+	mov	r2, r6		/* DT address */
+	bl	generic_boot_init_primary
+	/* Save return value: the thread vector table (ARM TF config) */
+	mov	r4, r0
+
+	/*
+	 * In case we've touched memory that secondary CPUs will use before
+	 * they have turned on their D-cache, clean and invalidate the
+	 * D-cache before exiting to normal world.
+	 */
+#ifdef CFG_WITH_PAGER
+	flush_cache_vrange(__text_start, __init_end)
+#else
+	flush_cache_vrange(__text_start, __end)
+#endif
+
+	/* release secondary boot cores and sync with them */
+	cpu_is_ready
+	flush_cpu_semaphores
+	wait_secondary
+
+#ifdef CFG_PL310_LOCKED
+	/* lock/invalidate all lines: pl310 behaves as if disable */
+	bl	pl310_base
+	bl	arm_cl2_lockallways
+	bl	pl310_base
+	bl	arm_cl2_cleaninvbyway
+#endif
+
+	/*
+	 * Clear current thread id now to allow the thread to be reused on
+	 * next entry. Matches the thread_init_boot_thread() in
+	 * generic_boot.c.
+	 */
+	bl	thread_clr_boot_thread
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+	/* Pass the vector address returned from main_init */
+	mov	r1, r4
+#else
+	/* relay standard bootarg #1 and #2 to non secure entry */
+	mov	r4, #0
+	mov	r3, r6		/* std bootarg #2 for register R2 */
+	mov	r2, r7		/* std bootarg #1 for register R1 */
+	mov	r1, #0
+#endif /* CFG_WITH_ARM_TRUSTED_FW */
+
+	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC reset_primary
+
+
+/* Park a CPU whose index exceeds CFG_TEE_CORE_NB_CORE. */
+LOCAL_FUNC unhandled_cpu , :
+UNWIND(	.fnstart)
+	wfi
+	b	unhandled_cpu
+UNWIND(	.fnend)
+END_FUNC unhandled_cpu
+
+#if defined(CFG_WITH_ARM_TRUSTED_FW)
+/*
+ * Secondary CPU entry when booting under ARM Trusted Firmware (PSCI
+ * CPU_ON).  Enables alignment checks, sets up vectors, stack and MMU,
+ * then calls generic_boot_cpu_on_handler() and returns to ARM TF.
+ */
+FUNC cpu_on_handler , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/* Preserve args (r0/r1) and return address across init calls */
+	mov	r4, r0
+	mov	r5, r1
+	mov	r6, lr
+	read_sctlr r0
+	orr	r0, r0, #SCTLR_A
+	write_sctlr r0
+
+	ldr	r0, =_start
+	write_vbar r0
+
+	/*
+	 * NOTE(review): this overwrites the r0 (a0) value saved in r4 at
+	 * entry, so generic_boot_cpu_on_handler() below receives the lr
+	 * value as its first argument instead.  Looks like a leftover
+	 * line - confirm against upstream before relying on a0.
+	 */
+	mov	r4, lr
+	set_sp
+
+	bl	core_init_mmu_regs
+	bl	cpu_mmu_enable
+	bl	cpu_mmu_enable_icache
+	bl	cpu_mmu_enable_dcache
+
+	mov	r0, r4
+	mov	r1, r5
+	bl	generic_boot_cpu_on_handler
+
+	bx	r6	/* return to ARM TF */
+UNWIND(	.fnend)
+END_FUNC cpu_on_handler
+
+#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
+
+/*
+ * Secondary CPU boot path without ARM TF: wait for the primary to
+ * finish global init, set up stack and MMU, run the per-CPU C init and
+ * exit to the non-secure entry address via SMC.
+ */
+LOCAL_FUNC reset_secondary , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+
+	wait_primary
+
+	set_sp
+
+	bl	plat_cpu_reset_late
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+	/* if L1 is not invalidated before, do it here */
+	bl	arm_cl1_d_invbysetway
+#endif
+
+	bl	core_init_mmu_regs
+	bl	cpu_mmu_enable
+	bl	cpu_mmu_enable_icache
+	bl	cpu_mmu_enable_dcache
+
+	cpu_is_ready
+
+#if defined (CFG_BOOT_SECONDARY_REQUEST)
+	/* generic_boot_core_hpen return value (r0) is ns entry point */
+	bl	generic_boot_core_hpen
+#else
+	mov	r0, r5	/* ns-entry address */
+#endif
+	bl	generic_boot_init_secondary
+
+	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+	mov	r1, #0
+	mov	r2, #0
+	mov	r3, #0
+	mov	r4, #0
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC reset_secondary
diff --git a/core/arch/arm/kernel/generic_entry_a64.S b/core/arch/arm/kernel/generic_entry_a64.S
new file mode 100644
index 0000000..5a5dd53
--- /dev/null
+++ b/core/arch/arm/kernel/generic_entry_a64.S
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <asm.S>
+#include <arm.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+
+ /*
+ * Setup SP_EL0 and SPEL1, SP will be set to SP_EL0.
+ * SP_EL0 is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
+ * stack_tmp_offset
+ * SP_EL1 is assigned thread_core_local[cpu_id]
+ */
+	.macro set_sp
+		bl	get_core_pos
+		cmp	x0, #CFG_TEE_CORE_NB_CORE
+		/* Unsupported CPU, park it before it breaks something */
+		bge	unhandled_cpu
+		add	x0, x0, #1
+		adr	x2, stack_tmp_stride
+		ldr	w1, [x2]
+		mul	x2, x0, x1	/* (cpu_id + 1) * stride */
+		adrp	x1, stack_tmp
+		add	x1, x1, :lo12:stack_tmp
+		add	x1, x1, x2
+		adr	x2, stack_tmp_offset
+		ldr	w2, [x2]
+		sub	x1, x1, x2
+		/* SP_EL0 <- top of this CPU's tmp stack */
+		msr	spsel, #0
+		mov	sp, x1
+		/* SP_EL1 <- this CPU's thread_core_local */
+		bl	thread_get_core_local
+		msr	spsel, #1
+		mov	sp, x0
+		/* Leave SP selecting SP_EL0 */
+		msr	spsel, #0
+	.endm
+
+.section .text.boot
+/*
+ * AArch64 primary CPU entry (always under ARM Trusted Firmware):
+ * install vectors, relocate pager init code if configured, set up
+ * stacks, caches and MMU, run generic_boot_init_primary() and return
+ * its vector table to ARM TF via SMC.
+ */
+FUNC _start , :
+	mov	x19, x0		/* Save pagable part address */
+	mov	x20, x2		/* Save DT address */
+
+	adr	x0, reset_vect_table
+	msr	vbar_el1, x0
+	isb
+
+	/* Enable I-cache, alignment and SP alignment checks */
+	mrs	x0, sctlr_el1
+	mov	x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+	orr	x0, x0, x1
+	msr	sctlr_el1, x0
+	isb
+
+#ifdef CFG_WITH_PAGER
+	/*
+	 * Move init code into correct location
+	 *
+	 * The binary is built as:
+	 * [Pager code, rodata and data] : In correct location
+	 * [Init code and rodata] : Should be copied to __text_init_start
+	 * [Hashes] : Should be saved before clearing bss
+	 *
+	 * When we copy init code and rodata into correct location we don't
+	 * need to worry about hashes being overwritten as size of .bss,
+	 * .heap, .nozi and .heap3 is much larger than the size of init
+	 * code and rodata and hashes.
+	 */
+	adr	x0, __text_init_start	/* dst */
+	adr	x1, __data_end		/* src */
+	adr	x2, __rodata_init_end	/* dst limit */
+copy_init:
+	ldp	x3, x4, [x1], #16
+	stp	x3, x4, [x0], #16
+	cmp	x0, x2
+	b.lt	copy_init
+#endif
+
+	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+	set_sp
+
+	/* Enable aborts now that we can receive exceptions */
+	msr	daifclr, #DAIFBIT_ABT
+
+	/* Invalidate D-cache for everything we use during init */
+	adr	x0, __text_start
+#ifdef CFG_WITH_PAGER
+	adrp	x1, __init_end
+	add	x1, x1, :lo12:__init_end
+#else
+	adrp	x1, __end
+	add	x1, x1, :lo12:__end
+#endif
+	sub	x1, x1, x0	/* length */
+	bl	inv_dcache_range
+
+	/* Enable Console */
+	bl	console_init
+
+	bl	core_init_mmu_map
+	bl	core_init_mmu_regs
+	bl	cpu_mmu_enable
+	bl	cpu_mmu_enable_icache
+	bl	cpu_mmu_enable_dcache
+
+	mov	x0, x19		/* pagable part address */
+	/* nsec entry is unused with ARM TF (see generic_boot_init_primary) */
+	mov	x1, #-1
+	mov	x2, x20		/* DT address */
+	bl	generic_boot_init_primary
+
+	/*
+	 * In case we've touched memory that secondary CPUs will use before
+	 * they have turned on their D-cache, clean and invalidate the
+	 * D-cache before exiting to normal world.
+	 */
+	mov	x19, x0		/* save returned vector table */
+	adr	x0, __text_start
+#ifdef CFG_WITH_PAGER
+	adrp	x1, __init_end
+	add	x1, x1, :lo12:__init_end
+#else
+	adrp	x1, __end
+	add	x1, x1, :lo12:__end
+#endif
+	sub	x1, x1, x0
+	bl	flush_dcache_range
+
+
+	/*
+	 * Clear current thread id now to allow the thread to be reused on
+	 * next entry. Matches the thread_init_boot_thread in
+	 * generic_boot.c.
+	 */
+	bl	thread_clr_boot_thread
+
+	/* Pass the vector address returned from main_init */
+	mov	x1, x19
+	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC _start
+
+
+.section .text.cpu_on_handler
+/*
+ * AArch64 secondary CPU entry (PSCI CPU_ON under ARM TF): install
+ * vectors, set SCTLR, stacks and MMU, then tail-call
+ * generic_boot_cpu_on_handler() with the original args and return
+ * address (x30) restored.
+ */
+FUNC cpu_on_handler , :
+	/* Preserve args and return address across the init calls */
+	mov	x19, x0
+	mov	x20, x1
+	mov	x21, x30
+
+	adr	x0, reset_vect_table
+	msr	vbar_el1, x0
+	isb
+
+	mrs	x0, sctlr_el1
+	mov	x1, #(SCTLR_I | SCTLR_A | SCTLR_SA)
+	orr	x0, x0, x1
+	msr	sctlr_el1, x0
+	isb
+
+	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
+	set_sp
+
+	/* Enable aborts now that we can receive exceptions */
+	msr	daifclr, #DAIFBIT_ABT
+
+	bl	core_init_mmu_regs
+	bl	cpu_mmu_enable
+	bl	cpu_mmu_enable_icache
+	bl	cpu_mmu_enable_dcache
+
+	mov	x0, x19
+	mov	x1, x20
+	mov	x30, x21
+	b	generic_boot_cpu_on_handler	/* tail call, returns to ARM TF */
+END_FUNC cpu_on_handler
+
+/* Park a CPU whose index exceeds CFG_TEE_CORE_NB_CORE. */
+LOCAL_FUNC unhandled_cpu , :
+	wfi
+	b	unhandled_cpu
+END_FUNC unhandled_cpu
+
+ /*
+ * This macro verifies that the a given vector doesn't exceed the
+ * architectural limit of 32 instructions. This is meant to be placed
+ * immedately after the last instruction in the vector. It takes the
+ * vector entry as the parameter
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+	/*
+	 * EL1 exception vector table used during boot (2KB aligned as the
+	 * architecture requires).  Every entry is a parking stub: no
+	 * exception is expected to be taken this early.
+	 */
+	.align	11
+LOCAL_FUNC reset_vect_table , :
+	/* -----------------------------------------------------
+	 * Current EL with SP0 : 0x0 - 0x180
+	 * -----------------------------------------------------
+	 */
+SynchronousExceptionSP0:
+	b	SynchronousExceptionSP0
+	check_vector_size SynchronousExceptionSP0
+
+	.align	7
+IrqSP0:
+	b	IrqSP0
+	check_vector_size IrqSP0
+
+	.align	7
+FiqSP0:
+	b	FiqSP0
+	check_vector_size FiqSP0
+
+	.align	7
+SErrorSP0:
+	b	SErrorSP0
+	check_vector_size SErrorSP0
+
+	/* -----------------------------------------------------
+	 * Current EL with SPx: 0x200 - 0x380
+	 * -----------------------------------------------------
+	 */
+	.align	7
+SynchronousExceptionSPx:
+	b	SynchronousExceptionSPx
+	check_vector_size SynchronousExceptionSPx
+
+	.align	7
+IrqSPx:
+	b	IrqSPx
+	check_vector_size IrqSPx
+
+	.align	7
+FiqSPx:
+	b	FiqSPx
+	check_vector_size FiqSPx
+
+	.align	7
+SErrorSPx:
+	b	SErrorSPx
+	check_vector_size SErrorSPx
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x580
+	 * -----------------------------------------------------
+	 */
+	.align	7
+SynchronousExceptionA64:
+	b	SynchronousExceptionA64
+	check_vector_size SynchronousExceptionA64
+
+	.align	7
+IrqA64:
+	b	IrqA64
+	check_vector_size IrqA64
+
+	.align	7
+FiqA64:
+	b	FiqA64
+	check_vector_size FiqA64
+
+	.align	7
+SErrorA64:
+	b	SErrorA64
+	check_vector_size SErrorA64
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x780
+	 * -----------------------------------------------------
+	 */
+	.align	7
+SynchronousExceptionA32:
+	b	SynchronousExceptionA32
+	check_vector_size SynchronousExceptionA32
+
+	.align	7
+IrqA32:
+	b	IrqA32
+	check_vector_size IrqA32
+
+	.align	7
+FiqA32:
+	b	FiqA32
+	check_vector_size FiqA32
+
+	.align	7
+SErrorA32:
+	b	SErrorA32
+	check_vector_size SErrorA32
+
+END_FUNC reset_vect_table
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
new file mode 100644
index 0000000..10dac6e
--- /dev/null
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2008-2010 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <platform_config.h>
+
+OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
+OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)
+
+ENTRY(_start)
+SECTIONS
+{
+ . = CFG_TEE_LOAD_ADDR;
+
+ /* text/read-only data */
+ .text : {
+ __text_start = .;
+ KEEP(*(.text.boot.vectab1))
+ KEEP(*(.text.boot.vectab2))
+ KEEP(*(.text.boot))
+
+ . = ALIGN(8);
+ __initcall_start = .;
+ KEEP(*(.initcall1))
+ KEEP(*(.initcall2))
+ KEEP(*(.initcall3))
+ KEEP(*(.initcall4))
+ __initcall_end = .;
+
+#ifdef CFG_WITH_PAGER
+ *(.text)
+/* Include list of sections needed for paging */
+#include <text_unpaged.ld.S>
+#else
+ *(.text .text.*)
+#endif
+ *(.sram.text.glue_7* .gnu.linkonce.t.*)
+ . = ALIGN(8);
+ __text_end = .;
+ }
+
+ .rodata : ALIGN(8) {
+ __rodata_start = .;
+ *(.gnu.linkonce.r.*)
+#ifdef CFG_WITH_PAGER
+ *(.rodata .rodata.__unpaged)
+#include <rodata_unpaged.ld.S>
+#else
+ *(.rodata .rodata.*)
+
+ /*
+ * 8 to avoid unwanted padding between __start_ta_head_section
+ * and the first structure in ta_head_section, in 64-bit
+ * builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+#endif
+ . = ALIGN(8);
+ __rodata_end = .;
+ }
+
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x9090
+ .plt : { *(.plt) }
+
+ /* .ARM.exidx is sorted, so has to go in its own output section. */
+ .ARM.exidx : {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ }
+
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
+ .data : ALIGN(8) {
+ /* writable data */
+ __data_start_rom = .;
+ /* in one segment binaries, the rom data address is on top
+ of the ram data address */
+ __early_bss_start = .;
+ *(.early_bss .early_bss.*)
+ . = ALIGN(8);
+ __early_bss_end = .;
+ __data_start = .;
+ *(.data .data.* .gnu.linkonce.d.*)
+ . = ALIGN(8);
+ }
+
+ .ctors : ALIGN(8) {
+ __ctor_list = .;
+ KEEP(*(.ctors .ctors.* .init_array .init_array.*))
+ __ctor_end = .;
+ }
+ .dtors : ALIGN(8) {
+ __dtor_list = .;
+ KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
+ __dtor_end = .;
+ }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+
+ __data_end = .;
+ /* unintialized data */
+ .bss : ALIGN(8) {
+ __bss_start = .;
+ *(.bss .bss.*)
+ *(.gnu.linkonce.b.*)
+ *(COMMON)
+ . = ALIGN(8);
+ __bss_end = .;
+ }
+
+	.heap1 (NOLOAD) : {
+		/*
+		 * We're keeping track of the padding added before the
+		 * .nozi section so we can do something useful with
+		 * this otherwise wasted memory.
+		 */
+		__heap1_start = .;
+#ifndef CFG_WITH_PAGER
+		/* Non-paged build: the whole core heap lives here. */
+		. += CFG_CORE_HEAP_SIZE;
+#endif
+		/*
+		 * Pad up to the 16 KiB boundary .nozi requires below; with
+		 * the pager enabled the heap is only this padding plus the
+		 * .heap2 section further down.
+		 */
+		. = ALIGN(16 * 1024);
+		__heap1_end = .;
+	}
+
+	/*
+	 * Uninitialized data that shouldn't be zero initialized at
+	 * runtime.
+	 *
+	 * L1 mmu table requires 16 KiB alignment
+	 */
+	.nozi (NOLOAD) : ALIGN(16 * 1024) {
+		__nozi_start = .;
+		KEEP(*(.nozi .nozi.*))
+		. = ALIGN(16);
+		__nozi_end = .;
+		/*
+		 * Stack area (.nozi_stack input sections); the final
+		 * 8-byte alignment presumably matches AAPCS stack
+		 * alignment — NOTE(review): confirm against thread.c.
+		 */
+		__nozi_stack_start = .;
+		KEEP(*(.nozi_stack))
+		. = ALIGN(8);
+		__nozi_stack_end = .;
+	}
+
+#ifdef CFG_WITH_PAGER
+ .heap2 (NOLOAD) : {
+ __heap2_start = .;
+ /*
+ * Reserve additional memory for heap, the total should be
+ * at least CFG_CORE_HEAP_SIZE, but count what has already
+ * been reserved in .heap1
+ */
+ . += CFG_CORE_HEAP_SIZE - (__heap1_end - __heap1_start);
+ . = ALIGN(4 * 1024);
+ __heap2_end = .;
+ }
+
+ .text_init : ALIGN(4 * 1024) {
+ __text_init_start = .;
+/*
+ * Include list of sections needed for boot initialization, this list
+ * overlaps with unpaged.ld.S but since unpaged.ld.S is first all those
+ * sections will go into the unpaged area.
+ */
+#include <text_init.ld.S>
+ . = ALIGN(8);
+ __text_init_end = .;
+ }
+
+ .rodata_init : ALIGN(8) {
+ __rodata_init_start = .;
+#include <rodata_init.ld.S>
+ . = ALIGN(8);
+ __start_phys_mem_map_section = . ;
+ KEEP(*(phys_mem_map_section))
+ __end_phys_mem_map_section = . ;
+ . = ALIGN(8);
+ __rodata_init_end = .;
+ }
+ __init_start = __text_init_start;
+ __init_end = .;
+ __init_size = __init_end - __text_init_start;
+
+ .text_pageable : ALIGN(8) {
+ __text_pageable_start = .;
+ *(.text*)
+ . = ALIGN(8);
+ __text_pageable_end = .;
+ }
+
+ .rodata_pageable : ALIGN(8) {
+ __rodata_pageable_start = .;
+ *(.rodata*)
+ /*
+ * 8 to avoid unwanted padding between __start_ta_head_section
+ * and the first structure in ta_head_section, in 64-bit
+ * builds
+ */
+ . = ALIGN(8);
+ __start_ta_head_section = . ;
+ KEEP(*(ta_head_section))
+ __stop_ta_head_section = . ;
+ . = ALIGN(4 * 1024);
+ __rodata_pageable_end = .;
+ }
+
+ __pageable_part_start = __rodata_init_end;
+ __pageable_part_end = __rodata_pageable_end;
+ __pageable_start = __text_init_start;
+ __pageable_end = __pageable_part_end;
+
+ /*
+ * Assign a safe spot to store the hashes of the pages before
+ * heap is initialized.
+ */
+ __tmp_hashes_start = __rodata_init_end;
+ __tmp_hashes_size = ((__pageable_end - __pageable_start) /
+ (4 * 1024)) * 32;
+ __tmp_hashes_end = __tmp_hashes_start + __tmp_hashes_size;
+
+ __init_mem_usage = __tmp_hashes_end - CFG_TEE_LOAD_ADDR;
+
+ ASSERT(CFG_TEE_LOAD_ADDR >= CFG_TEE_RAM_START,
+ "Load address before start of physical memory")
+ ASSERT(CFG_TEE_LOAD_ADDR < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "Load address after end of physical memory")
+ ASSERT(__tmp_hashes_end < (CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE),
+ "OP-TEE can't fit init part into available physical memory")
+ ASSERT((CFG_TEE_RAM_START + CFG_TEE_RAM_PH_SIZE - __init_end) >
+ 1 * 4096, "Too few free pages to initialize paging")
+
+
+#endif /*CFG_WITH_PAGER*/
+
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+ . = CFG_TEE_RAM_START + (CFG_TEE_RAM_VA_SIZE * 8) / 9 - 8;
+ . = ALIGN(8);
+ .asan_shadow : {
+ __asan_shadow_start = .;
+ . += CFG_TEE_RAM_VA_SIZE / 9;
+ __asan_shadow_end = .;
+ }
+#endif /*CFG_CORE_SANITIZE_KADDRESS*/
+
+ __end = .;
+
+#ifndef CFG_WITH_PAGER
+ __init_size = __data_end - CFG_TEE_LOAD_ADDR;
+ __init_mem_usage = __end - CFG_TEE_LOAD_ADDR;
+#endif
+ . = CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE;
+ _end_of_ram = .;
+
+ /DISCARD/ : {
+ /* Strip unnecessary stuff */
+ *(.comment .note .eh_frame)
+ /* Strip meta variables */
+ *(__keep_meta_vars*)
+ }
+
+}
diff --git a/core/arch/arm/kernel/link.mk b/core/arch/arm/kernel/link.mk
new file mode 100644
index 0000000..4a7bd8e
--- /dev/null
+++ b/core/arch/arm/kernel/link.mk
@@ -0,0 +1,241 @@
+link-out-dir = $(out-dir)/core
+
+link-script = $(platform-dir)/kern.ld.S
+link-script-pp = $(link-out-dir)/kern.ld
+link-script-dep = $(link-out-dir)/.kern.ld.d
+
+AWK = awk
+
+
+link-ldflags = $(LDFLAGS)
+link-ldflags += -T $(link-script-pp) -Map=$(link-out-dir)/tee.map
+link-ldflags += --sort-section=alignment
+link-ldflags += --fatal-warnings
+link-ldflags += --gc-sections
+
+link-ldadd = $(LDADD)
+link-ldadd += $(addprefix -L,$(libdirs))
+link-ldadd += $(addprefix -l,$(libnames))
+ldargs-tee.elf := $(link-ldflags) $(objs) $(link-out-dir)/version.o \
+ $(link-ldadd) $(libgcccore)
+
+link-script-cppflags := -DASM=1 \
+ $(filter-out $(CPPFLAGS_REMOVE) $(cppflags-remove), \
+ $(nostdinccore) $(CPPFLAGS) \
+ $(addprefix -I,$(incdirscore) $(link-out-dir)) \
+ $(cppflagscore))
+
+entries-unpaged += thread_init_vbar
+entries-unpaged += sm_init
+entries-unpaged += core_init_mmu_regs
+entries-unpaged += sem_cpu_sync
+entries-unpaged += generic_boot_get_handlers
+
+ldargs-all_objs := -i $(objs) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/all_objs.o
+$(link-out-dir)/all_objs.o: $(objs) $(libdeps) $(MAKEFILE_LIST)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-all_objs) -o $@
+
+cleanfiles += $(link-out-dir)/unpaged_entries.txt
+$(link-out-dir)/unpaged_entries.txt: $(link-out-dir)/all_objs.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) $< | \
+ $(AWK) '/ ____keep_pager/ { printf "-u%s ", $$3 }' > $@
+
+objs-unpaged-rem += core/arch/arm/tee/entry_std.o
+objs-unpaged-rem += core/arch/arm/tee/arch_svc.o
+objs-unpaged := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-unpaged-rem)), $(objs))
+ldargs-unpaged = -i --gc-sections $(addprefix -u, $(entries-unpaged))
+ldargs-unpaged-objs := $(objs-unpaged) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/unpaged.o
+$(link-out-dir)/unpaged.o: $(link-out-dir)/unpaged_entries.txt
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-unpaged) \
+ `cat $(link-out-dir)/unpaged_entries.txt` \
+ $(ldargs-unpaged-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_unpaged.ld.S
+$(link-out-dir)/text_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_unpaged.ld.S
+$(link-out-dir)/rodata_unpaged.ld.S: $(link-out-dir)/unpaged.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+
+# Emit the symbols tagged for the init area as linker keep-options, one
+# "-uSYM " per symbol.  The trailing space inside printf is required:
+# this file is later expanded with `cat` into a single linker command
+# line (see the $(link-out-dir)/init.o rule), so without it consecutive
+# entries would concatenate into one bogus "-usym1-usym2" option.  This
+# matches the unpaged_entries.txt rule above.
+cleanfiles += $(link-out-dir)/init_entries.txt
+$(link-out-dir)/init_entries.txt: $(link-out-dir)/all_objs.o
+	@$(cmd-echo-silent) ' GEN $@'
+	$(q)$(NMcore) $< | \
+		$(AWK) '/ ____keep_init/ { printf "-u%s ", $$3 }' > $@
+
+objs-init-rem += core/arch/arm/tee/arch_svc.o
+objs-init-rem += core/arch/arm/tee/arch_svc_asm.o
+objs-init-rem += core/arch/arm/tee/init.o
+objs-init-rem += core/arch/arm/tee/entry_std.o
+entries-init += _start
+objs-init := \
+ $(filter-out $(addprefix $(out-dir)/, $(objs-init-rem)), $(objs) \
+ $(link-out-dir)/version.o)
+ldargs-init := -i --gc-sections $(addprefix -u, $(entries-init))
+
+ldargs-init-objs := $(objs-init) $(link-ldadd) $(libgcccore)
+cleanfiles += $(link-out-dir)/init.o
+$(link-out-dir)/init.o: $(link-out-dir)/init_entries.txt
+ $(call gen-version-o)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-init) \
+ `cat $(link-out-dir)/init_entries.txt` \
+ $(ldargs-init-objs) -o $@
+
+cleanfiles += $(link-out-dir)/text_init.ld.S
+$(link-out-dir)/text_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/gen_ld_text_sects.awk > $@
+
+cleanfiles += $(link-out-dir)/rodata_init.ld.S
+$(link-out-dir)/rodata_init.ld.S: $(link-out-dir)/init.o
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | \
+ ${AWK} -f ./scripts/gen_ld_rodata_sects.awk > $@
+
+-include $(link-script-dep)
+
+link-script-extra-deps += $(link-out-dir)/text_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_unpaged.ld.S
+link-script-extra-deps += $(link-out-dir)/text_init.ld.S
+link-script-extra-deps += $(link-out-dir)/rodata_init.ld.S
+link-script-extra-deps += $(conf-file)
+cleanfiles += $(link-script-pp) $(link-script-dep)
+$(link-script-pp): $(link-script) $(link-script-extra-deps)
+ @$(cmd-echo-silent) ' CPP $@'
+ @mkdir -p $(dir $@)
+ $(q)$(CPPcore) -Wp,-P,-MT,$@,-MD,$(link-script-dep) \
+ $(link-script-cppflags) $< > $@
+
+define update-buildcount
+ @$(cmd-echo-silent) ' UPD $(1)'
+ $(q)if [ ! -f $(1) ]; then \
+ mkdir -p $(dir $(1)); \
+ echo 1 >$(1); \
+ else \
+ expr 0`cat $(1)` + 1 >$(1); \
+ fi
+endef
+
+# Build version.o embedding core_v_str:
+#   "<TEE_IMPL_VERSION> #<buildcount> <date -u> <linker arch>"
+version-o-cflags = $(filter-out -g3,$(core-platform-cflags) \
+			$(platform-cflags)) # Workaround objdump warning
+# Backquoted commands are expanded by the shell when the recipe runs,
+# not when make parses this file, so the date/count are always fresh.
+DATE_STR = `date -u`
+BUILD_COUNT_STR = `cat $(link-out-dir)/.buildcount`
+# NOTE(review): "echo -e" and the trailing \n are bash/GNU-echo
+# specific; a strictly POSIX /bin/sh would print "-e" literally —
+# verify the build shell before relying on this.
+define gen-version-o
+	$(call update-buildcount,$(link-out-dir)/.buildcount)
+	@$(cmd-echo-silent) ' GEN $(link-out-dir)/version.o'
+	$(q)echo -e "const char core_v_str[] =" \
+		"\"$(TEE_IMPL_VERSION) \"" \
+		"\"#$(BUILD_COUNT_STR) \"" \
+		"\"$(DATE_STR) \"" \
+		"\"$(CFG_KERN_LINKER_ARCH)\";\n" \
+		| $(CCcore) $(version-o-cflags) \
+		-xc - -c -o $(link-out-dir)/version.o
+endef
+# Fallback rule in case no other rule (e.g. init.o) generated it first.
+$(link-out-dir)/version.o:
+	$(call gen-version-o)
+
+all: $(link-out-dir)/tee.elf
+cleanfiles += $(link-out-dir)/tee.elf $(link-out-dir)/tee.map
+cleanfiles += $(link-out-dir)/version.o
+cleanfiles += $(link-out-dir)/.buildcount
+$(link-out-dir)/tee.elf: $(objs) $(libdeps) $(link-script-pp)
+ @$(cmd-echo-silent) ' LD $@'
+ $(q)$(LDcore) $(ldargs-tee.elf) -o $@
+
+all: $(link-out-dir)/tee.dmp
+cleanfiles += $(link-out-dir)/tee.dmp
+$(link-out-dir)/tee.dmp: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJDUMP $@'
+ $(q)$(OBJDUMPcore) -l -x -d $< > $@
+
+pageable_sections := .*_pageable
+init_sections := .*_init
+cleanfiles += $(link-out-dir)/tee-pager.bin
+$(link-out-dir)/tee-pager.bin: $(link-out-dir)/tee.elf \
+ $(link-out-dir)/tee-data_end.txt
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --remove-section="$(pageable_sections)" \
+ --remove-section="$(init_sections)" \
+ --pad-to `cat $(link-out-dir)/tee-data_end.txt` \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-pageable.bin
+$(link-out-dir)/tee-pageable.bin: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' OBJCOPY $@'
+ $(q)$(OBJCOPYcore) -O binary \
+ --only-section="$(init_sections)" \
+ --only-section="$(pageable_sections)" \
+ $< $@
+
+cleanfiles += $(link-out-dir)/tee-data_end.txt
+$(link-out-dir)/tee-data_end.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __data_end | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_size.txt
+$(link-out-dir)/tee-init_size.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep __init_size | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_load_addr.txt
+$(link-out-dir)/tee-init_load_addr.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' _start' | sed 's/ .*$$//' >> $@
+
+cleanfiles += $(link-out-dir)/tee-init_mem_usage.txt
+$(link-out-dir)/tee-init_mem_usage.txt: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ @echo -n 0x > $@
+ $(q)$(NMcore) $< | grep ' __init_mem_usage' | sed 's/ .*$$//' >> $@
+
+all: $(link-out-dir)/tee.bin
+cleanfiles += $(link-out-dir)/tee.bin
+$(link-out-dir)/tee.bin: $(link-out-dir)/tee-pager.bin \
+ $(link-out-dir)/tee-pageable.bin \
+ $(link-out-dir)/tee-init_size.txt \
+ $(link-out-dir)/tee-init_load_addr.txt \
+ $(link-out-dir)/tee-init_mem_usage.txt \
+ ./scripts/gen_hashed_bin.py
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)load_addr=`cat $(link-out-dir)/tee-init_load_addr.txt` && \
+ ./scripts/gen_hashed_bin.py \
+ --arch $(if $(filter y,$(CFG_ARM64_core)),arm64,arm32) \
+ --init_size `cat $(link-out-dir)/tee-init_size.txt` \
+ --init_load_addr_hi $$(($$load_addr >> 32 & 0xffffffff)) \
+ --init_load_addr_lo $$(($$load_addr & 0xffffffff)) \
+ --init_mem_usage `cat $(link-out-dir)/tee-init_mem_usage.txt` \
+ --tee_pager_bin $(link-out-dir)/tee-pager.bin \
+ --tee_pageable_bin $(link-out-dir)/tee-pageable.bin \
+ --out $@
+
+
+all: $(link-out-dir)/tee.symb_sizes
+cleanfiles += $(link-out-dir)/tee.symb_sizes
+$(link-out-dir)/tee.symb_sizes: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(NMcore) --print-size --reverse-sort --size-sort $< > $@
+
+cleanfiles += $(link-out-dir)/tee.mem_usage
+ifneq ($(filter mem_usage,$(MAKECMDGOALS)),)
+mem_usage: $(link-out-dir)/tee.mem_usage
+
+$(link-out-dir)/tee.mem_usage: $(link-out-dir)/tee.elf
+ @$(cmd-echo-silent) ' GEN $@'
+ $(q)$(READELFcore) -a -W $< | ${AWK} -f ./scripts/mem_usage.awk > $@
+endif
diff --git a/core/arch/arm/kernel/misc_a32.S b/core/arch/arm/kernel/misc_a32.S
new file mode 100644
index 0000000..48fd8ba
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a32.S
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <kernel/unwind.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+/*
+ * get_core_pos() - linear index of the current CPU, derived from MPIDR.
+ *
+ * CorePos = (ClusterId * 4) + CoreId, i.e. at most 4 cores per cluster.
+ * Declared .weak above so platforms with a different topology can
+ * override it.  Result in r0; clobbers r1 (AAPCS scratch register).
+ */
+FUNC get_core_pos , :
+UNWIND(	.fnstart)
+	read_mpidr r0
+	/* Calculate CorePos = (ClusterId * 4) + CoreId */
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	/*
+	 * LSR #6 turns the cluster field into ClusterId * 4 — assumes
+	 * MPIDR_CLUSTER_MASK sits at bit 8 (TODO confirm against arm.h).
+	 */
+	add	r0, r1, r0, LSR #6
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC get_core_pos
+
+/*
+ * uint32_t temp_set_mode(int cpu_mode)
+ *
+ * Helper for read_mode_sp()/read_mode_lr(): returns in r0 the current
+ * CPSR with the A/I/F exception bits masked and the mode field replaced
+ * by @cpu_mode.  USR is mapped to SYS: SYS shares USR's banked sp/lr
+ * but stays privileged, so the caller can switch back afterwards.
+ * Side effect: executes cpsid aif, so exceptions remain masked in the
+ * caller's own CPSR too.  Clobbers r1.
+ */
+LOCAL_FUNC temp_set_mode , :
+UNWIND(	.fnstart)
+	mov	r1, r0
+	cmp	r1, #CPSR_MODE_USR	/* update mode: usr -> sys */
+	moveq	r1, #CPSR_MODE_SYS
+	cpsid	aif			/* disable interrupts */
+	mrs	r0, cpsr		/* get cpsr with disabled its*/
+	bic	r0, #CPSR_MODE_MASK	/* clear mode */
+	orr	r0, r1			/* set expected mode */
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC temp_set_mode
+
+/*
+ * uint32_t read_mode_sp(int cpu_mode)
+ *
+ * Returns the banked stack pointer of @cpu_mode by briefly switching to
+ * that mode (exceptions masked via temp_set_mode) and reading SP, then
+ * restoring the original CPSR.  Must run in a privileged mode for the
+ * msr cpsr writes to take effect.
+ */
+FUNC read_mode_sp , :
+UNWIND(	.fnstart)
+	push	{r4, lr}
+UNWIND(	.save	{r4, lr})
+	mrs	r4, cpsr	/* save cpsr */
+	bl	temp_set_mode
+	msr	cpsr, r0	/* set the new mode */
+	mov	r0, sp		/* get the function result */
+	msr	cpsr, r4	/* back to the old mode */
+	pop	{r4, pc}
+UNWIND(	.fnend)
+END_FUNC read_mode_sp
+
+/*
+ * uint32_t read_mode_lr(int cpu_mode)
+ *
+ * Returns the banked link register of @cpu_mode.  Same technique as
+ * read_mode_sp(): switch mode with exceptions masked, read LR, restore
+ * the saved CPSR.  Must run in a privileged mode.
+ */
+FUNC read_mode_lr , :
+UNWIND(	.fnstart)
+	push	{r4, lr}
+UNWIND(	.save	{r4, lr})
+	mrs	r4, cpsr	/* save cpsr */
+	bl	temp_set_mode
+	msr	cpsr, r0	/* set the new mode */
+	mov	r0, lr		/* get the function result */
+	msr	cpsr, r4	/* back to the old mode */
+	pop	{r4, pc}
+UNWIND(	.fnend)
+END_FUNC read_mode_lr
diff --git a/core/arch/arm/kernel/misc_a64.S b/core/arch/arm/kernel/misc_a64.S
new file mode 100644
index 0000000..2b4da4a
--- /dev/null
+++ b/core/arch/arm/kernel/misc_a64.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+
+/* Let platforms override this if needed */
+.weak get_core_pos
+
+/*
+ * get_core_pos() - linear index of the current CPU from MPIDR_EL1.
+ *
+ * CorePos = (ClusterId * 4) + CoreId (at most 4 cores per cluster);
+ * the AArch64 counterpart of misc_a32.S:get_core_pos.  Declared .weak
+ * above so platforms can override it.  Result in x0; clobbers x1.
+ * LSR #6 assumes MPIDR_CLUSTER_MASK sits at bit 8, making it
+ * ClusterId * 4 — TODO confirm against arm.h.
+ */
+FUNC get_core_pos , :
+	mrs	x0, mpidr_el1
+	/* Calculate CorePos = (ClusterId * 4) + CoreId */
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+END_FUNC get_core_pos
diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
new file mode 100644
index 0000000..0e1b836
--- /dev/null
+++ b/core/arch/arm/kernel/mutex.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+/*
+ * mutex_init() - reset @m to the unlocked state with an empty wait
+ * queue.  Must be done before any other mutex operation on @m.
+ */
+void mutex_init(struct mutex *m)
+{
+	*m = (struct mutex)MUTEX_INITIALIZER;
+}
+
+/*
+ * Sleeping lock acquisition.  Must be called from thread context
+ * (asserted via thread_get_id_may_fail()) with no spinlock held, since
+ * the caller may block.  @fname/@lineno carry the caller's location for
+ * the CFG_MUTEX_DEBUG wrappers and may be NULL/-1.
+ */
+static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
+{
+	assert_have_no_spinlock();
+	assert(thread_get_id_may_fail() != -1);
+
+	/* Retry until the lock transition from unlocked succeeds. */
+	while (true) {
+		uint32_t old_itr_status;
+		enum mutex_value old_value;
+		struct wait_queue_elem wqe;
+
+		/*
+		 * If the mutex is locked we need to initialize the wqe
+		 * before releasing the spinlock to guarantee that we don't
+		 * miss the wakeup from mutex_unlock().
+		 *
+		 * If the mutex is unlocked we don't need to use the wqe at
+		 * all.
+		 */
+
+		/* Exceptions stay masked while the spinlock is held. */
+		old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+		cpu_spin_lock(&m->spin_lock);
+
+		old_value = m->value;
+		if (old_value == MUTEX_VALUE_LOCKED) {
+			wq_wait_init(&m->wq, &wqe);
+		} else {
+			m->value = MUTEX_VALUE_LOCKED;
+			/* Presumably links @m into this thread's list of
+			 * held mutexes — see thread_add_mutex(). */
+			thread_add_mutex(m);
+		}
+
+		cpu_spin_unlock(&m->spin_lock);
+		thread_unmask_exceptions(old_itr_status);
+
+		if (old_value == MUTEX_VALUE_LOCKED) {
+			/*
+			 * Someone else is holding the lock, wait in normal
+			 * world for the lock to become available.
+			 */
+			wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+		} else
+			return;
+	}
+}
+
+/*
+ * Release @m.  Panics if @m is not currently locked.  The waiter wakeup
+ * (wq_wake_one) is done after the spinlock is dropped and exceptions
+ * are unmasked.  @fname/@lineno as for __mutex_lock().
+ */
+static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
+{
+	uint32_t old_itr_status;
+
+	assert_have_no_spinlock();
+	assert(thread_get_id_may_fail() != -1);
+
+	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+	cpu_spin_lock(&m->spin_lock);
+
+	/* Unlocking a mutex we do not hold locked is a fatal bug. */
+	if (m->value != MUTEX_VALUE_LOCKED)
+		panic();
+
+	thread_rem_mutex(m);
+	m->value = MUTEX_VALUE_UNLOCKED;
+
+	cpu_spin_unlock(&m->spin_lock);
+	thread_unmask_exceptions(old_itr_status);
+
+	/* Wake at most one thread queued in __mutex_lock(). */
+	wq_wake_one(&m->wq, m, fname, lineno);
+}
+
+/*
+ * Non-blocking lock attempt: returns true if @m was taken, false if it
+ * was already locked.  Never sleeps, so @fname/@lineno are unused here.
+ */
+static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
+			int lineno __unused)
+{
+	uint32_t old_itr_status;
+	enum mutex_value old_value;
+
+	assert_have_no_spinlock();
+	assert(thread_get_id_may_fail() != -1);
+
+	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+	cpu_spin_lock(&m->spin_lock);
+
+	old_value = m->value;
+	if (old_value == MUTEX_VALUE_UNLOCKED) {
+		m->value = MUTEX_VALUE_LOCKED;
+		thread_add_mutex(m);
+	}
+
+	cpu_spin_unlock(&m->spin_lock);
+	thread_unmask_exceptions(old_itr_status);
+
+	/* True exactly when the locked transition above was ours. */
+	return old_value == MUTEX_VALUE_UNLOCKED;
+}
+
+#ifdef CFG_MUTEX_DEBUG
+/*
+ * Debug build: public entry points take the caller's file/line so lock
+ * waits can be attributed (presumably wired up by macros in
+ * kernel/mutex.h — the plain names map to the *_debug variants).
+ */
+void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
+{
+	__mutex_unlock(m, fname, lineno);
+}
+
+void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
+{
+	__mutex_lock(m, fname, lineno);
+}
+
+bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
+{
+	return __mutex_trylock(m, fname, lineno);
+}
+#else
+/* Non-debug build: no caller location is recorded (NULL/-1). */
+void mutex_unlock(struct mutex *m)
+{
+	__mutex_unlock(m, NULL, -1);
+}
+
+void mutex_lock(struct mutex *m)
+{
+	__mutex_lock(m, NULL, -1);
+}
+
+bool mutex_trylock(struct mutex *m)
+{
+	return __mutex_trylock(m, NULL, -1);
+}
+#endif
+
+
+
+/*
+ * mutex_destroy() - sanity-check @m before it goes out of use.
+ * Panics if @m is still locked or still has queued waiters.
+ */
+void mutex_destroy(struct mutex *m)
+{
+	/*
+	 * Caller guarantees that no one will try to take the mutex so
+	 * there's no need to take the spinlock before accessing it.
+	 */
+	if (m->value != MUTEX_VALUE_UNLOCKED)
+		panic();
+	if (!wq_is_empty(&m->wq))
+		panic("waitqueue not empty");
+}
+
+/* Reset @cv to the initial state: no linked mutex, no waiters. */
+void condvar_init(struct condvar *cv)
+{
+	*cv = (struct condvar)CONDVAR_INITIALIZER;
+}
+
+/*
+ * condvar_destroy() - tear down @cv.  Panics if threads are still
+ * waiting on @cv via its linked mutex, otherwise resets it to the
+ * initial state.
+ */
+void condvar_destroy(struct condvar *cv)
+{
+	if (cv->m && wq_have_condvar(&cv->m->wq, cv))
+		panic();
+
+	condvar_init(cv);
+}
+
+/*
+ * Common helper for condvar_signal()/condvar_broadcast().
+ * @only_one selects waking a single waiter (signal) vs all (broadcast).
+ * The condvar spinlock only guards the read of cv->m; the actual wakeup
+ * (wq_promote_condvar — presumably moving condvar waiters to regular
+ * mutex waiters) runs outside it.  A never-waited-on condvar has
+ * cv->m == NULL and this is a no-op.
+ */
+static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
+			int lineno)
+{
+	uint32_t old_itr_status;
+	struct mutex *m;
+
+	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+	cpu_spin_lock(&cv->spin_lock);
+	m = cv->m;
+	cpu_spin_unlock(&cv->spin_lock);
+	thread_unmask_exceptions(old_itr_status);
+
+	if (m)
+		wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
+
+}
+
+#ifdef CFG_MUTEX_DEBUG
+/* Debug entry points: record caller file/line, as for the mutex API. */
+void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
+{
+	cv_signal(cv, true /* only one */, fname, lineno);
+}
+
+void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
+{
+	cv_signal(cv, false /* all */, fname, lineno);
+}
+
+#else
+/* Non-debug entry points: no caller location (NULL/-1). */
+void condvar_signal(struct condvar *cv)
+{
+	cv_signal(cv, true /* only one */, NULL, -1);
+}
+
+void condvar_broadcast(struct condvar *cv)
+{
+	cv_signal(cv, false /* all */, NULL, -1);
+}
+#endif /*CFG_MUTEX_DEBUG*/
+
+/*
+ * Wait on @cv, releasing @m while asleep.  Sequence:
+ *  1. link @cv to @m — a condvar is bound to a single mutex until
+ *     reinitialized; a mismatch panics,
+ *  2. enqueue this thread on @m's wait queue as a condvar waiter,
+ *  3. unlock @m (panics if @m was not locked) and wake one pending
+ *     mutex waiter,
+ *  4. sleep until promoted by cv_signal(), then re-take @m before
+ *     returning (so the caller always returns holding @m).
+ */
+static void __condvar_wait(struct condvar *cv, struct mutex *m,
+			const char *fname, int lineno)
+{
+	uint32_t old_itr_status;
+	struct wait_queue_elem wqe;
+
+	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
+
+	/* Link this condvar to this mutex until reinitialized */
+	cpu_spin_lock(&cv->spin_lock);
+	if (cv->m && cv->m != m)
+		panic("invalid mutex");
+
+	cv->m = m;
+	cpu_spin_unlock(&cv->spin_lock);
+
+	cpu_spin_lock(&m->spin_lock);
+
+	/* Add to mutex wait queue as a condvar waiter */
+	wq_wait_init_condvar(&m->wq, &wqe, cv);
+
+	/* Unlock the mutex */
+	if (m->value != MUTEX_VALUE_LOCKED)
+		panic();
+
+	thread_rem_mutex(m);
+	m->value = MUTEX_VALUE_UNLOCKED;
+
+	cpu_spin_unlock(&m->spin_lock);
+
+	thread_unmask_exceptions(old_itr_status);
+
+	/* Wake eventual waiters */
+	wq_wake_one(&m->wq, m, fname, lineno);
+
+	wq_wait_final(&m->wq, &wqe, m, fname, lineno);
+
+	mutex_lock(m);
+}
+
+#ifdef CFG_MUTEX_DEBUG
+/* Debug entry point: records the caller's file/line for wait tracing. */
+void condvar_wait_debug(struct condvar *cv, struct mutex *m,
+			const char *fname, int lineno)
+{
+	__condvar_wait(cv, m, fname, lineno);
+}
+#else
+/* Non-debug entry point: no caller location (NULL/-1). */
+void condvar_wait(struct condvar *cv, struct mutex *m)
+{
+	__condvar_wait(cv, m, NULL, -1);
+}
+#endif
diff --git a/core/arch/arm/kernel/pm_stubs.c b/core/arch/arm/kernel/pm_stubs.c
new file mode 100644
index 0000000..db77e7c
--- /dev/null
+++ b/core/arch/arm/kernel/pm_stubs.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <compiler.h>
+#include <kernel/panic.h>
+#include <kernel/pm_stubs.h>
+
+/*
+ * Power-management stub for operations a platform does not support:
+ * always panics. Both arguments are ignored.
+ */
+unsigned long pm_panic(unsigned long a0 __unused, unsigned long a1 __unused)
+{
+	panic();
+}
+
+/*
+ * Power-management stub that accepts the request and does nothing.
+ * Always returns 0. Both arguments are ignored.
+ */
+unsigned long pm_do_nothing(unsigned long a0 __unused,
+			    unsigned long a1 __unused)
+{
+	return 0;
+}
diff --git a/core/arch/arm/kernel/proc_a32.S b/core/arch/arm/kernel/proc_a32.S
new file mode 100644
index 0000000..f0446a6
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a32.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <keep.h>
+#include <kernel/unwind.h>
+
+/*
+ * void cpu_mmu_enable(void) - enable MMU
+ *
+ * TLBs are invalidated before MMU is enabled.
+ * A DSB and an ISB ensure the MMU is enabled before the routine returns.
+ */
+FUNC cpu_mmu_enable , :
+UNWIND(	.fnstart)
+	/* Invalidate TLB */
+	write_tlbiall
+
+	/* Enable the MMU */
+	read_sctlr r0
+	orr	r0, r0, #SCTLR_M
+	write_sctlr r0
+
+	/* Ensure the SCTLR write has taken effect before returning */
+	dsb
+	isb
+
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC cpu_mmu_enable
+KEEP_PAGER cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) - enable instruction cache */
+/* void cpu_mmu_enable_icache(void) - enable instruction cache */
+FUNC cpu_mmu_enable_icache , :
+UNWIND(	.fnstart)
+	/* Invalidate instruction cache and branch predictor */
+	write_iciallu
+	write_bpiall
+
+	/* Enable the instruction cache */
+	read_sctlr r1
+	orr	r1, r1, #SCTLR_I
+	write_sctlr r1
+
+	/* Ensure the SCTLR write and invalidations have taken effect */
+	dsb
+	isb
+
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC cpu_mmu_enable_icache
+KEEP_PAGER cpu_mmu_enable_icache
+
+/* void cpu_mmu_enable_dcache(void) - enable data cache */
+/* void cpu_mmu_enable_dcache(void) - enable data cache */
+FUNC cpu_mmu_enable_dcache , :
+UNWIND(	.fnstart)
+	/* Set SCTLR.C to turn the data/unified cache on */
+	read_sctlr r0
+	orr	r0, r0, #SCTLR_C
+	write_sctlr r0
+
+	/* Ensure the SCTLR write has taken effect before returning */
+	dsb
+	isb
+
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC cpu_mmu_enable_dcache
+KEEP_PAGER cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/proc_a64.S b/core/arch/arm/kernel/proc_a64.S
new file mode 100644
index 0000000..5db895a
--- /dev/null
+++ b/core/arch/arm/kernel/proc_a64.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm64.h>
+#include <asm.S>
+
+/* void cpu_mmu_enable(void) */
+/* void cpu_mmu_enable(void) */
+FUNC cpu_mmu_enable , :
+	/* Invalidate TLB */
+	tlbi	vmalle1
+
+	/*
+	 * Make sure translation table writes have drained into memory and
+	 * the TLB invalidation is complete.
+	 */
+	dsb	sy
+	isb
+
+	/* Enable the MMU */
+	mrs	x0, sctlr_el1
+	orr	x0, x0, #SCTLR_M
+	msr	sctlr_el1, x0
+	isb
+
+	ret
+END_FUNC cpu_mmu_enable
+
+/* void cpu_mmu_enable_icache(void) */
+/* void cpu_mmu_enable_icache(void) */
+FUNC cpu_mmu_enable_icache , :
+	/* Invalidate instruction cache and branch predictor */
+	ic	iallu
+	isb
+	/* Set SCTLR_EL1.I to enable the instruction cache */
+	mrs	x0, sctlr_el1
+	orr	x0, x0, #SCTLR_I
+	msr	sctlr_el1, x0
+	isb
+	ret
+END_FUNC cpu_mmu_enable_icache
+
+
+/* void cpu_mmu_enable_dcache(void) */
+/* void cpu_mmu_enable_dcache(void) */
+FUNC cpu_mmu_enable_dcache , :
+	/* Set SCTLR_EL1.C to enable the data/unified cache */
+	mrs	x0, sctlr_el1
+	orr	x0, x0, #SCTLR_C
+	msr	sctlr_el1, x0
+	isb
+	ret
+END_FUNC cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/pseudo_ta.c b/core/arch/arm/kernel/pseudo_ta.c
new file mode 100644
index 0000000..6352a28
--- /dev/null
+++ b/core/arch/arm/kernel/pseudo_ta.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <initcall.h>
+#include <kernel/panic.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/tee_ta_manager.h>
+#include <mm/core_memprot.h>
+#include <mm/mobj.h>
+#include <sm/tee_mon.h>
+#include <stdlib.h>
+#include <string.h>
+#include <trace.h>
+#include <types_ext.h>
+
+/* Maps static TA params */
+/*
+ * Converts the core representation of invocation parameters
+ * (struct tee_ta_param) into the TEE_Param array a pseudo TA entry
+ * point expects. Memrefs are translated into virtual addresses;
+ * unused/unknown slots are zeroed.
+ * Returns TEE_ERROR_BAD_PARAMETERS if a memref cannot be mapped.
+ */
+static TEE_Result copy_in_param(struct tee_ta_param *param,
+				TEE_Param tee_param[TEE_NUM_PARAMS])
+{
+	size_t idx;
+
+	for (idx = 0; idx < TEE_NUM_PARAMS; idx++) {
+		uint32_t t = TEE_PARAM_TYPE_GET(param->types, idx);
+
+		if (t == TEE_PARAM_TYPE_VALUE_INPUT ||
+		    t == TEE_PARAM_TYPE_VALUE_OUTPUT ||
+		    t == TEE_PARAM_TYPE_VALUE_INOUT) {
+			tee_param[idx].value.a = param->u[idx].val.a;
+			tee_param[idx].value.b = param->u[idx].val.b;
+		} else if (t == TEE_PARAM_TYPE_MEMREF_INPUT ||
+			   t == TEE_PARAM_TYPE_MEMREF_OUTPUT ||
+			   t == TEE_PARAM_TYPE_MEMREF_INOUT) {
+			void *va = mobj_get_va(param->u[idx].mem.mobj,
+					       param->u[idx].mem.offs);
+
+			if (!va)
+				return TEE_ERROR_BAD_PARAMETERS;
+			tee_param[idx].memref.buffer = va;
+			tee_param[idx].memref.size = param->u[idx].mem.size;
+		} else {
+			memset(&tee_param[idx], 0, sizeof(TEE_Param));
+		}
+	}
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Copies output values produced by a pseudo TA back into the core
+ * parameter representation: value a/b for VALUE_OUTPUT/INOUT slots and
+ * the (possibly updated) size for MEMREF_OUTPUT/INOUT slots. Input-only
+ * and unused slots are left untouched.
+ */
+static void update_out_param(TEE_Param tee_param[TEE_NUM_PARAMS],
+			struct tee_ta_param *param)
+{
+	size_t idx;
+
+	for (idx = 0; idx < TEE_NUM_PARAMS; idx++) {
+		uint32_t t = TEE_PARAM_TYPE_GET(param->types, idx);
+
+		if (t == TEE_PARAM_TYPE_VALUE_OUTPUT ||
+		    t == TEE_PARAM_TYPE_VALUE_INOUT) {
+			param->u[idx].val.a = tee_param[idx].value.a;
+			param->u[idx].val.b = tee_param[idx].value.b;
+		} else if (t == TEE_PARAM_TYPE_MEMREF_OUTPUT ||
+			   t == TEE_PARAM_TYPE_MEMREF_INOUT) {
+			param->u[idx].mem.size = tee_param[idx].memref.size;
+		}
+	}
+}
+
+/*
+ * Opens a session towards a pseudo TA: on the first session of the
+ * context (ref_count == 1) the TA's create_entry_point is run first,
+ * then open_session_entry_point (if any) with the converted parameters.
+ * *eo reports the error origin: TEE on parameter-conversion failure,
+ * the trusted app otherwise.
+ */
+static TEE_Result pseudo_ta_enter_open_session(struct tee_ta_session *s,
+			struct tee_ta_param *param, TEE_ErrorOrigin *eo)
+{
+	TEE_Result res = TEE_SUCCESS;
+	struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+	TEE_Param tee_param[TEE_NUM_PARAMS];
+
+	tee_ta_push_current_session(s);
+	*eo = TEE_ORIGIN_TRUSTED_APP;
+
+	/* First session on this context: run the TA constructor */
+	if ((s->ctx->ref_count == 1) && stc->pseudo_ta->create_entry_point) {
+		res = stc->pseudo_ta->create_entry_point();
+		if (res != TEE_SUCCESS)
+			goto out;
+	}
+
+	if (stc->pseudo_ta->open_session_entry_point) {
+		res = copy_in_param(param, tee_param);
+		if (res != TEE_SUCCESS) {
+			*eo = TEE_ORIGIN_TEE;
+			goto out;
+		}
+
+		res = stc->pseudo_ta->open_session_entry_point(param->types,
+							       tee_param,
+							       &s->user_ctx);
+		/* Propagate output values/sizes back to the caller */
+		update_out_param(tee_param, param);
+	}
+
+out:
+	tee_ta_pop_current_session();
+	return res;
+}
+
+/*
+ * Invokes command @cmd on an open pseudo TA session: converts the
+ * parameters, calls invoke_command_entry_point (mandatory for a
+ * conformant PTA) and copies output parameters back.
+ * *eo is TEE_ORIGIN_TEE on conversion failure, TEE_ORIGIN_TRUSTED_APP
+ * otherwise.
+ */
+static TEE_Result pseudo_ta_enter_invoke_cmd(struct tee_ta_session *s,
+			uint32_t cmd, struct tee_ta_param *param,
+			TEE_ErrorOrigin *eo)
+{
+	TEE_Result res;
+	struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+	TEE_Param tee_param[TEE_NUM_PARAMS];
+
+	tee_ta_push_current_session(s);
+	res = copy_in_param(param, tee_param);
+	if (res != TEE_SUCCESS) {
+		*eo = TEE_ORIGIN_TEE;
+		goto out;
+	}
+
+	*eo = TEE_ORIGIN_TRUSTED_APP;
+	res = stc->pseudo_ta->invoke_command_entry_point(s->user_ctx, cmd,
+							 param->types,
+							 tee_param);
+	update_out_param(tee_param, param);
+out:
+	tee_ta_pop_current_session();
+	return res;
+}
+
+/*
+ * Closes a pseudo TA session: runs close_session_entry_point (if any)
+ * and, when this is the last session on the context (ref_count == 1),
+ * also runs the TA's destroy_entry_point.
+ */
+static void pseudo_ta_enter_close_session(struct tee_ta_session *s)
+{
+	struct pseudo_ta_ctx *stc = to_pseudo_ta_ctx(s->ctx);
+
+	tee_ta_push_current_session(s);
+
+	if (stc->pseudo_ta->close_session_entry_point)
+		stc->pseudo_ta->close_session_entry_point(s->user_ctx);
+
+	if ((s->ctx->ref_count == 1) && stc->pseudo_ta->destroy_entry_point)
+		stc->pseudo_ta->destroy_entry_point();
+
+	tee_ta_pop_current_session();
+}
+
+/* Frees the pseudo TA context allocated in tee_ta_init_pseudo_ta_session() */
+static void pseudo_ta_destroy(struct tee_ta_ctx *ctx)
+{
+	free(to_pseudo_ta_ctx(ctx));
+}
+
+/* Operation table the TA manager uses to drive pseudo TA sessions */
+static const struct tee_ta_ops pseudo_ta_ops = {
+	.enter_open_session = pseudo_ta_enter_open_session,
+	.enter_invoke_cmd = pseudo_ta_enter_invoke_cmd,
+	.enter_close_session = pseudo_ta_enter_close_session,
+	.destroy = pseudo_ta_destroy,
+};
+
+
+/* Defined in link script */
+extern const struct pseudo_ta_head __start_ta_head_section;
+extern const struct pseudo_ta_head __stop_ta_head_section;
+
+/* Ensures declared pseudo TAs conform with core expectations */
+static TEE_Result verify_pseudo_tas_conformance(void)
+{
+	const struct pseudo_ta_head *start = &__start_ta_head_section;
+	const struct pseudo_ta_head *end = &__stop_ta_head_section;
+	const struct pseudo_ta_head *pta;
+
+	/*
+	 * Walks every registered PTA head and panics at boot on the first
+	 * one with a duplicate UUID, a missing name, non-conformant flags
+	 * or a missing invoke_command entry point. O(n^2) UUID scan is
+	 * fine for the small number of PTAs.
+	 */
+	for (pta = start; pta < end; pta++) {
+		const struct pseudo_ta_head *pta2;
+
+		/* PTAs must all have a specific UUID */
+		for (pta2 = pta + 1; pta2 < end; pta2++)
+			if (!memcmp(&pta->uuid, &pta2->uuid, sizeof(TEE_UUID)))
+				goto err;
+
+		if (!pta->name ||
+		    (pta->flags & PTA_MANDATORY_FLAGS) != PTA_MANDATORY_FLAGS ||
+		    pta->flags & ~PTA_ALLOWED_FLAGS ||
+		    !pta->invoke_command_entry_point)
+			goto err;
+	}
+	return TEE_SUCCESS;
+err:
+	DMSG("pseudo TA error at %p", (void *)pta);
+	panic("pta");
+}
+
+service_init(verify_pseudo_tas_conformance);
+
+/*-----------------------------------------------------------------------------
+ * Initialises a session based on the UUID or ptr to the ta
+ * Returns ptr to the session (ta_session) and a TEE_Result
+ *---------------------------------------------------------------------------*/
+TEE_Result tee_ta_init_pseudo_ta_session(const TEE_UUID *uuid,
+			struct tee_ta_session *s)
+{
+	struct pseudo_ta_ctx *stc = NULL;
+	struct tee_ta_ctx *ctx;
+	const struct pseudo_ta_head *ta;
+
+	DMSG("   Lookup for Static TA %pUl", (void *)uuid);
+
+	/* Linear scan of the linker-provided PTA head section for @uuid */
+	ta = &__start_ta_head_section;
+	while (true) {
+		if (ta >= &__stop_ta_head_section)
+			return TEE_ERROR_ITEM_NOT_FOUND;
+		if (memcmp(&ta->uuid, uuid, sizeof(TEE_UUID)) == 0)
+			break;
+		ta++;
+	}
+
+	/* Load a new TA and create a session */
+	DMSG("      Open %s", ta->name);
+	stc = calloc(1, sizeof(struct pseudo_ta_ctx));
+	if (!stc)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	ctx = &stc->ctx;
+
+	/* New context starts with this single session as its only user */
+	ctx->ref_count = 1;
+	s->ctx = ctx;
+	ctx->flags = ta->flags;
+	stc->pseudo_ta = ta;
+	ctx->uuid = ta->uuid;
+	ctx->ops = &pseudo_ta_ops;
+	/* Register the context so the TA manager can find it later */
+	TAILQ_INSERT_TAIL(&tee_ctxes, ctx, link);
+
+	DMSG("      %s : %pUl", stc->pseudo_ta->name, (void *)&ctx->uuid);
+
+	return TEE_SUCCESS;
+}
diff --git a/core/arch/arm/kernel/spin_lock_a32.S b/core/arch/arm/kernel/spin_lock_a32.S
new file mode 100644
index 0000000..52d8e9f
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a32.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+#include <kernel/unwind.h>
+
+/* void __cpu_spin_lock(unsigned int *lock) */
+/* void __cpu_spin_lock(unsigned int *lock) */
+FUNC __cpu_spin_lock , :
+UNWIND(	.fnstart)
+	mov	r2, #SPINLOCK_LOCK
+1:
+	/* Exclusive load/store loop: wait (WFE) while the lock is taken */
+	ldrex	r1, [r0]
+	cmp	r1, #SPINLOCK_UNLOCK
+	wfene
+	strexeq	r1, r2, [r0]
+	cmpeq	r1, #0
+	bne	1b
+	/* Barrier so memory accesses after the lock stay inside it */
+	dmb
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC __cpu_spin_lock
+
+/* int __cpu_spin_trylock(unsigned int *lock) - return 0 on success */
+/* int __cpu_spin_trylock(unsigned int *lock) - return 0 on success */
+FUNC __cpu_spin_trylock , :
+UNWIND(	.fnstart)
+	mov	r2, #SPINLOCK_LOCK
+	mov	r1, r0
+1:
+	/* Read the lock exclusively; bail out if already taken */
+	ldrex	r0, [r1]
+	cmp	r0, #0
+	bne	1f
+	/* Try to claim it; retry only if the exclusive store failed */
+	strex	r0, r2, [r1]
+	cmp	r0, #0
+	bne	1b
+	dmb
+	bx	lr
+1:
+	/* Lock was busy: drop the exclusive monitor, return non-zero */
+	clrex
+	dmb
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock) */
+/* void __cpu_spin_unlock(unsigned int *lock) */
+FUNC __cpu_spin_unlock , :
+UNWIND(	.fnstart)
+	/* Barrier so accesses inside the critical section complete first */
+	dmb
+	mov	r1, #SPINLOCK_UNLOCK
+	str	r1, [r0]
+	dsb
+	/* Wake cores sleeping in WFE inside __cpu_spin_lock */
+	sev
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC __cpu_spin_unlock
diff --git a/core/arch/arm/kernel/spin_lock_a64.S b/core/arch/arm/kernel/spin_lock_a64.S
new file mode 100644
index 0000000..97fce42
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a64.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
+
+/* void __cpu_spin_lock(unsigned int *lock); */
+/* void __cpu_spin_lock(unsigned int *lock); */
+FUNC __cpu_spin_lock , :
+	mov	w2, #SPINLOCK_LOCK
+	/* SEVL primes the first WFE so the loop does not stall immediately */
+	sevl
+l1:	wfe
+	/* Load-acquire gives the barrier semantics on the lock path */
+l2:	ldaxr	w1, [x0]
+	cbnz	w1, l1
+	stxr	w1, w2, [x0]
+	cbnz	w1, l2
+	ret
+END_FUNC __cpu_spin_lock
+
+/* unsigned int __cpu_spin_trylock(unsigned int *lock); */
+/* unsigned int __cpu_spin_trylock(unsigned int *lock); - 0 on success */
+FUNC __cpu_spin_trylock , :
+	mov	x1, x0
+	mov	w2, #SPINLOCK_LOCK
+	/* Returns the observed lock value if busy, 0 if we claimed it */
+.loop:	ldaxr	w0, [x1]
+	cbnz	w0, .cpu_spin_trylock_out
+	stxr	w0, w2, [x1]
+	cbnz	w0, .loop
+.cpu_spin_trylock_out:
+	ret
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock); */
+/* void __cpu_spin_unlock(unsigned int *lock); */
+FUNC __cpu_spin_unlock , :
+	/* Store-release orders the critical section before the unlock */
+	stlr	wzr, [x0]
+	ret
+END_FUNC __cpu_spin_unlock
diff --git a/core/arch/arm/kernel/spin_lock_debug.c b/core/arch/arm/kernel/spin_lock_debug.c
new file mode 100644
index 0000000..2a450a5
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_debug.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <kernel/spinlock.h>
+#include "thread_private.h"
+
+/* Records that the current core took one more spinlock (debug accounting) */
+void spinlock_count_incr(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+
+	l->locked_count++;
+	/* Catch counter wrap-around */
+	assert(l->locked_count);
+}
+
+/* Records that the current core released one spinlock (debug accounting) */
+void spinlock_count_decr(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+
+	/* Catch unbalanced unlock */
+	assert(l->locked_count);
+	l->locked_count--;
+}
+
+/*
+ * Returns true if the current core holds at least one spinlock,
+ * according to the debug accounting above.
+ */
+bool have_spinlock(void)
+{
+	struct thread_core_local *l;
+
+	if (!thread_irq_disabled()) {
+		/*
+		 * Normally we can't be holding a spinlock since doing so would
+		 * imply IRQ are disabled (or the spinlock logic is flawed).
+		 */
+		return false;
+	}
+
+	l = thread_get_core_local();
+
+	return !!l->locked_count;
+}
diff --git a/core/arch/arm/kernel/ssvce_a32.S b/core/arch/arm/kernel/ssvce_a32.S
new file mode 100644
index 0000000..e2850f1
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a32.S
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+/*
+ * Variable(s)
+ */
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+
+ .section .text.ssvce
+
+
+
+/*
+ * - MMU maintenance support ----------------------------------------------
+ */
+
+
+/*
+ * void secure_mmu_unifiedtlbinvall(void);
+ */
+FUNC secure_mmu_unifiedtlbinvall , :
+UNWIND(	.fnstart)
+
+	/* Invalidate entire unified TLB, Inner Shareable */
+	write_tlbiallis
+
+	DSB
+	ISB
+
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/*
+ * void secure_mmu_unifiedtlbinvbymva(mva);
+ *
+ * Combine VA and current ASID, and invalidate matching TLB
+ */
+FUNC secure_mmu_unifiedtlbinvbymva , :
+UNWIND(	.fnstart)
+
+	/*
+	 * Deliberately disabled: the infinite loop below forces a hang so
+	 * the routine is reviewed/fixed before anyone relies on it.
+	 */
+	b	. @ Wrong code to force fix/check the routine before using it
+
+	MRC p15, 0, R1, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
+	ANDS R1, R1, #0xFF /* Get current ASID */
+	ORR R1, R1, R0 /* Combine MVA and ASID */
+
+	MCR p15, 0, R1, c8, c7, 1 /* Invalidate Unified TLB entry by MVA */
+
+	DSB
+	ISB
+
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC secure_mmu_unifiedtlbinvbymva
+
+/*
+ * void secure_mmu_unifiedtlbinv_curasid(void)
+ *
+ * Invalidate TLB matching current ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+UNWIND(	.fnstart)
+	/* Read CONTEXTIDR and keep only the 8-bit ASID field */
+	read_contextidr r0
+	and	r0, r0, #0xff		/* Get current ASID */
+	/* Invalidate unified TLB by ASID Inner Sharable */
+	write_tlbiasidis r0
+	dsb
+	isb
+	mov	pc, lr
+UNWIND(	.fnend)
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/*
+ * void secure_mmu_unifiedtlbinv_byasid(unsigned int asid)
+ *
+ * Invalidate TLB matching current ASID
+ */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+UNWIND(	.fnstart)
+	/* Mask the argument down to the 8-bit ASID field */
+	and	r0, r0, #0xff		/* Get ASID */
+	/* Invalidate unified TLB by ASID Inner Sharable */
+	write_tlbiasidis r0
+	dsb
+	isb
+	mov	pc, lr
+UNWIND(	.fnend)
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * void arm_cl1_d_cleanbysetway(void)
+ */
+FUNC arm_cl1_d_cleanbysetway , :
+UNWIND(	.fnstart)
+
+	/* Clean the whole L1 data cache by iterating all set/way values */
+	MOV R0, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+	MOV R0, #0 @ ; set way number to 0
+_cl_nextWay:
+	MOV R1, #0 @ ; set line number (=index) to 0
+_cl_nextLine:
+	ORR R2, R0, R1 @ ; construct way/index value
+	MCR p15, 0, R2, c7, c10, 2 @ ; DCCSW Clean data or unified cache line by set/way
+	ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+	CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+	BNE _cl_nextLine
+	ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+	CMP R0, #0 @ ; look for overflow out of way field
+	BNE _cl_nextWay
+
+	DSB @ ; synchronise
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_cleanbysetway
+
+FUNC arm_cl1_d_invbysetway , :
+UNWIND(	.fnstart)
+
+	/* Invalidate the whole L1 data cache by set/way (DCISW) */
+	MOV R0, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_dcache_off:
+	MOV R0, #0 @ ; set way number to 0
+_inv_nextWay:
+	MOV R1, #0 @ ; set line number (=index) to 0
+_inv_nextLine:
+	ORR R2, R0, R1 @ ; construct way/index value
+	MCR p15, 0, R2, c7, c6, 2 @ ; DCISW Invalidate data or unified cache line by set/way
+	ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+	CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+	BNE _inv_nextLine
+	ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+	CMP R0, #0 @ ; look for overflow out of way field
+	BNE _inv_nextWay
+
+	DSB @ ; synchronise
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_invbysetway
+
+FUNC arm_cl1_d_cleaninvbysetway , :
+UNWIND(	.fnstart)
+
+	/* Clean+invalidate the whole L1 data cache by set/way (DCCISW) */
+	MOV R0, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+	MOV R0, #0 @ ; set way number to 0
+_cli_nextWay:
+	MOV R1, #0 @ ; set line number (=index) to 0
+_cli_nextLine:
+	ORR R2, R0, R1 @ ; construct way/index value
+	MCR p15, 0, R2, c7, c14, 2 @ ; DCCISW Clean and Invalidate data or unified cache line by set/way
+	ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+	CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+	BNE _cli_nextLine
+	ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+	CMP R0, #0 @ ; look for overflow out of way field
+	BNE _cli_nextWay
+
+	DSB @ ; synchronise
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/*
+ * void arm_cl1_d_cleanbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleanbyva , :
+UNWIND(	.fnstart)
+
+	/* Clean D-cache lines covering [s, e] by MVA to PoC (DCCMVAC) */
+	CMP R0, R1 @ ; check that end >= start. Otherwise return.
+	BHI _cl_area_exit
+
+	MOV R2, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+	/* Align start down to a cache-line boundary */
+	BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cl_area_nextLine:
+	MCR p15, 0, R0, c7, c10, 1 @ ; Clean data or unified cache line by MVA to PoC
+	ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+	CMP R1, R0
+	BPL _cl_area_nextLine
+
+_cl_area_exit:
+
+	DSB @ ; synchronise
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_cleanbyva
+
+/*
+ * void arm_cl1_d_invbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_invbyva , :
+UNWIND(	.fnstart)
+
+	/* Invalidate D-cache lines covering [s, e] by MVA to PoC (DCIMVAC) */
+	CMP R0, R1 @ ; check that end >= start. Otherwise return.
+	BHI _inv_area_dcache_exit
+
+	MOV R2, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_area_dcache_off:
+	/* Align start down to a cache-line boundary */
+	BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_inv_area_dcache_nl:
+	MCR p15, 0, R0, c7, c6, 1 @ ; Invalidate data or unified cache line by MVA to PoC
+	ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+	CMP R1, R0
+	BPL _inv_area_dcache_nl
+
+_inv_area_dcache_exit:
+	DSB
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_invbyva
+
+/*
+ * void arm_cl1_d_cleaninvbyva(void *s, void *e);
+ */
+FUNC arm_cl1_d_cleaninvbyva , :
+UNWIND(	.fnstart)
+
+	/* Clean+invalidate D-cache lines covering [s, e] by MVA (DCCIMVAC) */
+	CMP R0, R1 @ ; check that end >= start. Otherwise return.
+	BHI _cli_area_exit
+
+	MOV R2, #0 @ ; write the Cache Size selection register to be
+	MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+	ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+	/* Align start down to a cache-line boundary */
+	BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cli_area_nextLine:
+	MCR p15, 0, R0, c7, c14, 1 @ ; Clean and Invalidate data or unified cache line by MVA to PoC
+	ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+	CMP R1, R0
+	BPL _cli_area_nextLine
+
+_cli_area_exit:
+	DSB @ ; synchronise
+	MOV PC, LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/*
+ * void arm_cl1_i_inv_all( void );
+ *
+ * Invalidates the whole instruction cache.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv_all , :
+UNWIND(	.fnstart)
+
+	/* Invalidate Entire Instruction Cache */
+	write_icialluis
+	DSB
+
+	/* Flush entire branch target cache */
+	write_bpiallis
+
+	DSB /* ensure that maintenance operations are seen */
+	ISB /* by the instructions right after the ISB */
+
+	BX LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_i_inv_all
+
+/*
+ * void arm_cl1_i_inv(void *start, void *end);
+ *
+ * Invalidates instruction cache area whose limits are given in parameters.
+ * It also invalidates the BTAC.
+ */
+FUNC arm_cl1_i_inv , :
+UNWIND(	.fnstart)
+
+	/* Invalidate I-cache lines covering [start, end] by MVA (ICIMVAU) */
+	CMP R0, R1 /* Check that end >= start. Otherwise return. */
+	BHI _inv_icache_exit
+
+	/* Align start down to a cache-line boundary */
+	BIC R0, R0, #0x1F /* Mask 5 LSBits */
+_inv_icache_nextLine:
+	MCR p15, 0, R0, c7, c5, 1 /* Invalidate ICache single entry (MVA) */
+	ADD R0, R0, #1 << LINE_FIELD_OFFSET /* Next cache line */
+	CMP R1, R0
+	BPL _inv_icache_nextLine
+	DSB
+
+	/* Flush entire branch target cache */
+	MOV R1, #0
+	MCR p15, 0, R1, c7, c5, 6 /* write to Cache operations register */
+	DSB /* ensure that maintenance operations are seen */
+	ISB /* by the instructions right after the ISB */
+
+_inv_icache_exit:
+	BX LR
+UNWIND(	.fnend)
+END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/ssvce_a64.S b/core/arch/arm/kernel/ssvce_a64.S
new file mode 100644
index 0000000..6c9bbac
--- /dev/null
+++ b/core/arch/arm/kernel/ssvce_a64.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tz_ssvce.h>
+#include <arm64.h>
+#include <asm.S>
+
+/* void secure_mmu_unifiedtlbinvall(void); */
+FUNC secure_mmu_unifiedtlbinvall , :
+ tlbi vmalle1
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinvall
+
+/* void secure_mmu_unifiedtlbinv_curasid(void) */
+FUNC secure_mmu_unifiedtlbinv_curasid , :
+ mrs x0, ttbr0_el1
+ lsr x0, x0, #TTBR_ASID_SHIFT
+ b secure_mmu_unifiedtlbinv_byasid
+END_FUNC secure_mmu_unifiedtlbinv_curasid
+
+/* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
+FUNC secure_mmu_unifiedtlbinv_byasid , :
+ and x0, x0, #TTBR_ASID_MASK
+ tlbi aside1, x0
+ isb
+ ret
+END_FUNC secure_mmu_unifiedtlbinv_byasid
+
+/*
+ * Compatibility wrappers to be used while the rest of the code stops caring
+ * about which cache level it operates on. CL1 -> Inner cache.
+ */
+
+/* void arm_cl1_d_cleanbysetway(void); */
+FUNC arm_cl1_d_cleanbysetway , :
+ mov x0, #DCCSW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleanbysetway
+
+/* void arm_cl1_d_invbysetway(void); */
+FUNC arm_cl1_d_invbysetway , :
+ mov x0, #DCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_invbysetway
+
+/* void arm_cl1_d_cleaninvbysetway(void); */
+FUNC arm_cl1_d_cleaninvbysetway , :
+ mov x0, #DCCISW
+ b dcsw_op_all
+END_FUNC arm_cl1_d_cleaninvbysetway
+
+/* void arm_cl1_d_cleanbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleanbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ /*
+ * flush_dcache_range() does Clean+Invalidate, but that shouldn't
+ * matter to the caller.
+ */
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleanbyva
+
+/* void arm_cl1_d_invbyva(void *s, void *e); */
+FUNC arm_cl1_d_invbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b inv_dcache_range
+END_FUNC arm_cl1_d_invbyva
+
+/* void arm_cl1_d_cleaninvbyva(void *s, void *e); */
+FUNC arm_cl1_d_cleaninvbyva , :
+ sub x1, x1, x0
+ add x1, x1, #1
+ b flush_dcache_range
+END_FUNC arm_cl1_d_cleaninvbyva
+
+/* void arm_cl1_i_inv_all( void ); */
+FUNC arm_cl1_i_inv_all , :
+ ic ialluis
+ isb
+ ret
+END_FUNC arm_cl1_i_inv_all
+
+/* void arm_cl1_i_inv(void *start, void *end); */
+FUNC arm_cl1_i_inv , :
+ /*
+ * Invalidate the entire icache instead, it shouldn't matter to the
+ * caller.
+ */
+ b arm_cl1_i_inv_all
+END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk
new file mode 100644
index 0000000..cee3aee
--- /dev/null
+++ b/core/arch/arm/kernel/sub.mk
@@ -0,0 +1,45 @@
+srcs-$(CFG_WITH_USER_TA) += user_ta.c
+srcs-y += pseudo_ta.c
+srcs-y += elf_load.c
+srcs-y += tee_time.c
+
+srcs-$(CFG_SECURE_TIME_SOURCE_CNTPCT) += tee_time_arm_cntpct.c
+srcs-$(CFG_SECURE_TIME_SOURCE_REE) += tee_time_ree.c
+
+srcs-$(CFG_ARM32_core) += proc_a32.S
+srcs-$(CFG_ARM32_core) += spin_lock_a32.S
+srcs-$(CFG_ARM64_core) += proc_a64.S
+srcs-$(CFG_ARM64_core) += spin_lock_a64.S
+srcs-$(CFG_TEE_CORE_DEBUG) += spin_lock_debug.c
+srcs-$(CFG_ARM32_core) += ssvce_a32.S
+srcs-$(CFG_ARM64_core) += ssvce_a64.S
+srcs-$(CFG_ARM64_core) += cache_helpers_a64.S
+srcs-$(CFG_PL310) += tz_ssvce_pl310_a32.S
+srcs-$(CFG_PL310) += tee_l2cc_mutex.c
+
+srcs-$(CFG_ARM32_core) += thread_a32.S
+srcs-$(CFG_ARM64_core) += thread_a64.S
+srcs-y += thread.c
+srcs-y += abort.c
+srcs-$(CFG_WITH_VFP) += vfp.c
+ifeq ($(CFG_WITH_VFP),y)
+srcs-$(CFG_ARM32_core) += vfp_a32.S
+srcs-$(CFG_ARM64_core) += vfp_a64.S
+endif
+srcs-y += trace_ext.c
+srcs-$(CFG_ARM32_core) += misc_a32.S
+srcs-$(CFG_ARM64_core) += misc_a64.S
+srcs-y += mutex.c
+srcs-y += wait_queue.c
+srcs-$(CFG_PM_STUBS) += pm_stubs.c
+
+srcs-$(CFG_GENERIC_BOOT) += generic_boot.c
+ifeq ($(CFG_GENERIC_BOOT),y)
+srcs-$(CFG_ARM32_core) += generic_entry_a32.S
+srcs-$(CFG_ARM64_core) += generic_entry_a64.S
+endif
+
+ifeq ($(CFG_CORE_UNWIND),y)
+srcs-$(CFG_ARM32_core) += unwind_arm32.c
+srcs-$(CFG_ARM64_core) += unwind_arm64.c
+endif
diff --git a/core/arch/arm/kernel/tee_l2cc_mutex.c b/core/arch/arm/kernel/tee_l2cc_mutex.c
new file mode 100644
index 0000000..2afda4d
--- /dev/null
+++ b/core/arch/arm/kernel/tee_l2cc_mutex.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <kernel/tee_common.h>
+#include <kernel/tee_l2cc_mutex.h>
+#include <kernel/spinlock.h>
+#include <mm/tee_mm.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <tee_api_defines.h>
+#include <trace.h>
+
+/*
+ * l2cc_mutex_va holds teecore virtual address of TZ L2CC mutex or NULL.
+ *
+ * l2cc_mutex_pa holds TZ L2CC mutex physical address. It is relevant only
+ * if 'l2cc_mutex_va' holds a non-NULL address.
+ */
+#define MUTEX_SZ sizeof(uint32_t)
+
+static uint32_t *l2cc_mutex_va;
+static uint32_t l2cc_mutex_pa;
+static uint32_t l2cc_mutex_boot_pa;
+static unsigned int *l2cc_mutex;
+
+void tee_l2cc_store_mutex_boot_pa(uint32_t pa)
+{
+ l2cc_mutex_boot_pa = pa;
+}
+
+/*
+ * Allocate public RAM to get an L2CC mutex to share with NSec.
+ * Return 0 on success.
+ */
+static int l2cc_mutex_alloc(void)
+{
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return -1;
+
+ l2cc_mutex_pa = l2cc_mutex_boot_pa;
+
+ va = phys_to_virt(l2cc_mutex_pa, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return -1;
+
+ *(uint32_t *)va = 0;
+ l2cc_mutex_va = va;
+ return 0;
+}
+
+static void l2cc_mutex_set(void *mutex)
+{
+ l2cc_mutex = (unsigned int *)mutex;
+}
+
+/*
+ * tee_xxx_l2cc_mutex(): Handle L2 mutex configuration requests from NSec
+ *
+ * Policy:
+ * - if NSec did not register a L2 mutex, default allocate it in public RAM.
+ * - if NSec disables L2 mutex, disable the current mutex and unregister it.
+ *
+ * Enable L2CC: NSec allows teecore to run safe outer maintenance
+ * with shared mutex.
+ * Disable L2CC: NSec will run outer maintenance with locking
+ * shared mutex. teecore cannot run outer maintenance.
+ * Set L2CC: NSec proposes a Shared Memory location for the outer
+ * maintenance shared mutex.
+ * Get L2CC: NSec requests the outer maintenance shared mutex
+ * location. If NSec has successfully registered one,
+ * return its location, otherwise allocate one in NSec
+ * and provide NSec with the physical location.
+ */
+TEE_Result tee_enable_l2cc_mutex(void)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ l2cc_mutex_set(l2cc_mutex_va);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_disable_l2cc_mutex(void)
+{
+ l2cc_mutex_va = NULL;
+ l2cc_mutex_set(NULL);
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_get_l2cc_mutex(paddr_t *mutex)
+{
+ int ret;
+
+ if (!l2cc_mutex_va) {
+ ret = l2cc_mutex_alloc();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ *mutex = l2cc_mutex_pa;
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_set_l2cc_mutex(paddr_t *mutex)
+{
+ uint32_t addr;
+ void *va;
+
+ if (l2cc_mutex_va != NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+ addr = *mutex;
+ if (core_pbuf_is(CORE_MEM_NSEC_SHM, addr, MUTEX_SZ) == false)
+ return TEE_ERROR_BAD_PARAMETERS;
+ va = phys_to_virt(addr, MEM_AREA_NSEC_SHM);
+ if (!va)
+ return TEE_ERROR_BAD_PARAMETERS;
+ l2cc_mutex_pa = addr;
+ l2cc_mutex_va = va;
+ return TEE_SUCCESS;
+}
+
+void tee_l2cc_mutex_lock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_lock(l2cc_mutex);
+}
+
+void tee_l2cc_mutex_unlock(void)
+{
+ if (l2cc_mutex)
+ cpu_spin_unlock(l2cc_mutex);
+}
diff --git a/core/arch/arm/kernel/tee_time.c b/core/arch/arm/kernel/tee_time.c
new file mode 100644
index 0000000..671a8e9
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/thread.h>
+#include <optee_msg.h>
+#include <mm/core_mmu.h>
+
+struct time_source _time_source;
+
+TEE_Result tee_time_get_sys_time(TEE_Time *time)
+{
+ return _time_source.get_sys_time(time);
+}
+
+uint32_t tee_time_get_sys_time_protection_level(void)
+{
+ return _time_source.protection_level;
+}
+
+void tee_time_wait(uint32_t milliseconds_delay)
+{
+ struct optee_msg_param params;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ params.u.value.a = milliseconds_delay;
+ thread_rpc_cmd(OPTEE_MSG_RPC_CMD_SUSPEND, 1, &params);
+}
+
+/*
+ * tee_time_get_ree_time(): this function implements the GP Internal API
+ * function TEE_GetREETime()
+ * Goal is to get the time of the Rich Execution Environment
+ * This is why this time is provided through the supplicant
+ */
+TEE_Result tee_time_get_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+ struct optee_msg_param params;
+
+ if (!time)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ memset(&params, 0, sizeof(params));
+ params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT;
+ res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_GET_TIME, 1, &params);
+ if (res == TEE_SUCCESS) {
+ time->seconds = params.u.value.a;
+ time->millis = params.u.value.b / 1000000;
+ }
+
+ return res;
+}
diff --git a/core/arch/arm/kernel/tee_time_arm_cntpct.c b/core/arch/arm/kernel/tee_time_arm_cntpct.c
new file mode 100644
index 0000000..90e7f20
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_arm_cntpct.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014, 2015 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/misc.h>
+#include <kernel/tee_time.h>
+#include <trace.h>
+#include <kernel/time_source.h>
+#include <mm/core_mmu.h>
+#include <utee_defines.h>
+
+#include <tee/tee_cryp_utl.h>
+
+#include <stdint.h>
+#include <mpa.h>
+#include <arm.h>
+
+static TEE_Result arm_cntpct_get_sys_time(TEE_Time *time)
+{
+ uint64_t cntpct = read_cntpct();
+ uint32_t cntfrq = read_cntfrq();
+
+ time->seconds = cntpct / cntfrq;
+ time->millis = (cntpct % cntfrq) / (cntfrq / TEE_TIME_MILLIS_BASE);
+
+ return TEE_SUCCESS;
+}
+
+static const struct time_source arm_cntpct_time_source = {
+ .name = "arm cntpct",
+ .protection_level = 1000,
+ .get_sys_time = arm_cntpct_get_sys_time,
+};
+
+REGISTER_TIME_SOURCE(arm_cntpct_time_source)
+
+/*
+ * We collect jitter using cntpct in 32- or 64-bit mode that is typically
+ * clocked at around 1MHz.
+ *
+ * The first time we are called, we add low 16 bits of the counter as entropy.
+ *
+ * Subsequently, accumulate 2 low bits each time by:
+ *
+ * - rotating the accumulator by 2 bits
+ * - XORing it in 2-bit chunks with the whole CNTPCT contents
+ *
+ * and adding one byte of entropy when we reach 8 rotated bits.
+ */
+
+void plat_prng_add_jitter_entropy(void)
+{
+ uint64_t tsc = read_cntpct();
+ int bytes = 0, n;
+ static uint8_t first, bits;
+ static uint16_t acc;
+
+ if (!first) {
+ acc = tsc;
+ bytes = 2;
+ first = 1;
+ } else {
+ acc = (acc << 2) | ((acc >> 6) & 3);
+ for (n = 0; n < 64; n += 2)
+ acc ^= (tsc >> n) & 3;
+ bits += 2;
+ if (bits >= 8) {
+ bits = 0;
+ bytes = 1;
+ }
+ }
+ if (bytes) {
+ DMSG("%s: 0x%02X\n", __func__,
+ (int)acc & ((1 << (bytes * 8)) - 1));
+ tee_prng_add_entropy((uint8_t *)&acc, bytes);
+ }
+}
diff --git a/core/arch/arm/kernel/tee_time_ree.c b/core/arch/arm/kernel/tee_time_ree.c
new file mode 100644
index 0000000..d2a9bb1
--- /dev/null
+++ b/core/arch/arm/kernel/tee_time_ree.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/tee_time.h>
+#include <kernel/time_source.h>
+#include <kernel/mutex.h>
+
+static TEE_Time prev;
+
+static struct mutex time_mu = MUTEX_INITIALIZER;
+
+static TEE_Result get_monotonic_ree_time(TEE_Time *time)
+{
+ TEE_Result res;
+
+ res = tee_time_get_ree_time(time);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ mutex_lock(&time_mu);
+ if (time->seconds < prev.seconds ||
+ (time->seconds == prev.seconds &&
+ time->millis < prev.millis))
+ *time = prev; /* REE time was rolled back */
+ else
+ prev = *time;
+ mutex_unlock(&time_mu);
+
+ return res;
+}
+
+static const struct time_source ree_time_source = {
+ .name = "ree",
+ .protection_level = 100,
+ .get_sys_time = get_monotonic_ree_time,
+};
+
+REGISTER_TIME_SOURCE(ree_time_source)
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
new file mode 100644
index 0000000..c988b65
--- /dev/null
+++ b/core/arch/arm/kernel/thread.c
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_config.h>
+
+#include <arm.h>
+#include <assert.h>
+#include <keep.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread_defs.h>
+#include <kernel/thread.h>
+#include <mm/core_memprot.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <sm/sm.h>
+#include <tee/tee_fs_rpc.h>
+#include <tee/tee_cryp_utl.h>
+#include <trace.h>
+#include <util.h>
+
+#include "thread_private.h"
+
+#ifdef CFG_WITH_ARM_TRUSTED_FW
+#define STACK_TMP_OFFS 0
+#else
+#define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE
+#endif
+
+
+#ifdef ARM32
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS)
+#else
+#define STACK_TMP_SIZE (1024 + STACK_TMP_OFFS)
+#endif
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#ifdef CFG_CORE_SANITIZE_KADDRESS
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 2048
+#endif
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+
+#endif /*ARM32*/
+
+#ifdef ARM64
+#define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS)
+#define STACK_THREAD_SIZE 8192
+
+#if TRACE_LEVEL > 0
+#define STACK_ABT_SIZE 3072
+#else
+#define STACK_ABT_SIZE 1024
+#endif
+#endif /*ARM64*/
+
+struct thread_ctx threads[CFG_NUM_THREADS];
+
+static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
+
+#ifdef CFG_WITH_STACK_CANARIES
+#ifdef ARM32
+#define STACK_CANARY_SIZE (4 * sizeof(uint32_t))
+#endif
+#ifdef ARM64
+#define STACK_CANARY_SIZE (8 * sizeof(uint32_t))
+#endif
+#define START_CANARY_VALUE 0xdededede
+#define END_CANARY_VALUE 0xabababab
+#define GET_START_CANARY(name, stack_num) name[stack_num][0]
+#define GET_END_CANARY(name, stack_num) \
+ name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
+#else
+#define STACK_CANARY_SIZE 0
+#endif
+
+#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
+linkage uint32_t name[num_stacks] \
+ [ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
+ sizeof(uint32_t)] \
+ __attribute__((section(".nozi_stack"), \
+ aligned(STACK_ALIGNMENT)))
+
+#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)
+
+#define GET_STACK(stack) \
+ ((vaddr_t)(stack) + STACK_SIZE(stack))
+
+DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, /* global */);
+DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
+#ifndef CFG_WITH_PAGER
+DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
+#endif
+
+const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);
+const uint32_t stack_tmp_offset = STACK_TMP_OFFS + STACK_CANARY_SIZE / 2;
+
+/*
+ * These stack setup info are required by secondary boot cores before they
+ * each locally enable the pager (the mmu). Hence kept in pager sections.
+ */
+KEEP_PAGER(stack_tmp);
+KEEP_PAGER(stack_tmp_stride);
+KEEP_PAGER(stack_tmp_offset);
+
+thread_smc_handler_t thread_std_smc_handler_ptr;
+static thread_smc_handler_t thread_fast_smc_handler_ptr;
+thread_fiq_handler_t thread_fiq_handler_ptr;
+thread_pm_handler_t thread_cpu_on_handler_ptr;
+thread_pm_handler_t thread_cpu_off_handler_ptr;
+thread_pm_handler_t thread_cpu_suspend_handler_ptr;
+thread_pm_handler_t thread_cpu_resume_handler_ptr;
+thread_pm_handler_t thread_system_off_handler_ptr;
+thread_pm_handler_t thread_system_reset_handler_ptr;
+
+
+static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
+static bool thread_prealloc_rpc_cache;
+
+static void init_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+#define INIT_CANARY(name) \
+ for (n = 0; n < ARRAY_SIZE(name); n++) { \
+ uint32_t *start_canary = &GET_START_CANARY(name, n); \
+ uint32_t *end_canary = &GET_END_CANARY(name, n); \
+ \
+ *start_canary = START_CANARY_VALUE; \
+ *end_canary = END_CANARY_VALUE; \
+ DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
+ #name, n, (void *)(end_canary - 1)); \
+ DMSG("watch *%p\n", (void *)end_canary); \
+ }
+
+ INIT_CANARY(stack_tmp);
+ INIT_CANARY(stack_abt);
+#ifndef CFG_WITH_PAGER
+ INIT_CANARY(stack_thread);
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+#define CANARY_DIED(stack, loc, n) \
+ do { \
+ EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
+ panic(); \
+ } while (0)
+
+void thread_check_canaries(void)
+{
+#ifdef CFG_WITH_STACK_CANARIES
+ size_t n;
+
+ for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
+ if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, start, n);
+ if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_tmp, end, n);
+ }
+
+ for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
+ if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_abt, start, n);
+ if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_abt, end, n);
+
+ }
+#ifndef CFG_WITH_PAGER
+ for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
+ if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
+ CANARY_DIED(stack_thread, start, n);
+ if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
+ CANARY_DIED(stack_thread, end, n);
+ }
+#endif
+#endif/*CFG_WITH_STACK_CANARIES*/
+}
+
+static void lock_global(void)
+{
+ cpu_spin_lock(&thread_global_lock);
+}
+
+static void unlock_global(void)
+{
+ cpu_spin_unlock(&thread_global_lock);
+}
+
+#ifdef ARM32
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t cpsr = read_cpsr();
+
+ return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t cpsr = read_cpsr();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
+ cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
+ write_cpsr(cpsr);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+uint32_t thread_get_exceptions(void)
+{
+ uint32_t daif = read_daif();
+
+ return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
+}
+
+void thread_set_exceptions(uint32_t exceptions)
+{
+ uint32_t daif = read_daif();
+
+ /* IRQ must not be unmasked while holding a spinlock */
+ if (!(exceptions & THREAD_EXCP_IRQ))
+ assert_have_no_spinlock();
+
+ daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
+ daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
+ write_daif(daif);
+}
+#endif /*ARM64*/
+
+uint32_t thread_mask_exceptions(uint32_t exceptions)
+{
+ uint32_t state = thread_get_exceptions();
+
+ thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
+ return state;
+}
+
+void thread_unmask_exceptions(uint32_t state)
+{
+ thread_set_exceptions(state & THREAD_EXCP_ALL);
+}
+
+
+struct thread_core_local *thread_get_core_local(void)
+{
+ uint32_t cpu_id = get_core_pos();
+
+ /*
+ * IRQs must be disabled before playing with core_local since
+ * we otherwise may be rescheduled to a different core in the
+ * middle of this function.
+ */
+ assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+
+ assert(cpu_id < CFG_TEE_CORE_NB_CORE);
+ return &thread_core_local[cpu_id];
+}
+
+static void thread_lazy_save_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ thr->vfp_state.ns_saved = false;
+#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
+ /*
+ * ARM TF saves and restores CPACR_EL1, so we must assume NS world
+ * uses VFP and always preserve the register file when secure world
+ * is about to use it
+ */
+ thr->vfp_state.ns.force_save = true;
+#endif
+ vfp_lazy_save_state_init(&thr->vfp_state.ns);
+#endif /*CFG_WITH_VFP*/
+}
+
+static void thread_lazy_restore_ns_vfp(void)
+{
+#ifdef CFG_WITH_VFP
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+ assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
+
+ if (tuv && tuv->lazy_saved && !tuv->saved) {
+ vfp_lazy_save_state_final(&tuv->vfp);
+ tuv->saved = true;
+ }
+
+ vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
+ thr->vfp_state.ns_saved = false;
+#endif /*CFG_WITH_VFP*/
+}
+
+#ifdef ARM32
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint32_t)thread_std_smc_entry;
+
+ /*
+ * Stdcalls start in SVC mode with masked IRQ, masked Asynchronous
+ * abort and unmasked FIQ.
+ */
+ thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
+ thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
+ /* Enable thumb mode if it's a thumb instruction */
+ if (thread->regs.pc & 1)
+ thread->regs.cpsr |= CPSR_T;
+ /* Reinitialize stack pointer */
+ thread->regs.svc_sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in r0-r7 when thread is started.
+ */
+ thread->regs.r0 = args->a0;
+ thread->regs.r1 = args->a1;
+ thread->regs.r2 = args->a2;
+ thread->regs.r3 = args->a3;
+ thread->regs.r4 = args->a4;
+ thread->regs.r5 = args->a5;
+ thread->regs.r6 = args->a6;
+ thread->regs.r7 = args->a7;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void init_regs(struct thread_ctx *thread,
+ struct thread_smc_args *args)
+{
+ thread->regs.pc = (uint64_t)thread_std_smc_entry;
+
+ /*
+ * Stdcalls start in SVC mode with masked IRQ, masked Asynchronous
+ * abort and unmasked FIQ.
+ */
+ thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
+ DAIFBIT_IRQ | DAIFBIT_ABT);
+ /* Reinitialize stack pointer */
+ thread->regs.sp = thread->stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in x0-x7 when thread is started.
+ */
+ thread->regs.x[0] = args->a0;
+ thread->regs.x[1] = args->a1;
+ thread->regs.x[2] = args->a2;
+ thread->regs.x[3] = args->a3;
+ thread->regs.x[4] = args->a4;
+ thread->regs.x[5] = args->a5;
+ thread->regs.x[6] = args->a6;
+ thread->regs.x[7] = args->a7;
+
+ /* Set up frame pointer as per the Aarch64 AAPCS */
+ thread->regs.x[29] = 0;
+}
+#endif /*ARM64*/
+
+void thread_init_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+ size_t n;
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ TAILQ_INIT(&threads[n].mutexes);
+ TAILQ_INIT(&threads[n].tsd.sess_stack);
+#ifdef CFG_SMALL_PAGE_USER_TA
+ SLIST_INIT(&threads[n].tsd.pgt_cache);
+#endif
+ }
+
+ for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
+ thread_core_local[n].curr_thread = -1;
+
+ l->curr_thread = 0;
+ threads[0].state = THREAD_STATE_ACTIVE;
+}
+
+void thread_clr_boot_thread(void)
+{
+ struct thread_core_local *l = thread_get_core_local();
+
+ assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
+ assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
+ assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
+ threads[l->curr_thread].state = THREAD_STATE_FREE;
+ l->curr_thread = -1;
+}
+
+static void thread_alloc_and_run(struct thread_smc_args *args)
+{
+ size_t n;
+ struct thread_core_local *l = thread_get_core_local();
+ bool found_thread = false;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ for (n = 0; n < CFG_NUM_THREADS; n++) {
+ if (threads[n].state == THREAD_STATE_FREE) {
+ threads[n].state = THREAD_STATE_ACTIVE;
+ found_thread = true;
+ break;
+ }
+ }
+
+ unlock_global();
+
+ if (!found_thread) {
+ args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ threads[n].flags = 0;
+ init_regs(threads + n, args);
+
+ /* Save Hypervisor Client ID */
+ threads[n].hyp_clnt_id = args->a7;
+
+ thread_lazy_save_ns_vfp();
+ thread_resume(&threads[n].regs);
+}
+
+#ifdef ARM32
+/* Copy SMC args a0..a5 into the suspended thread's GP registers */
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+		struct thread_smc_args *args)
+{
+	/*
+	 * Update returned values from RPC, values will appear in
+	 * r0-r3 when thread is resumed.
+	 */
+	regs->r0 = args->a0;
+	regs->r1 = args->a1;
+	regs->r2 = args->a2;
+	regs->r3 = args->a3;
+	regs->r4 = args->a4;
+	regs->r5 = args->a5;
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+/* Copy SMC args a0..a5 into the suspended thread's GP registers */
+static void copy_a0_to_a5(struct thread_ctx_regs *regs,
+		struct thread_smc_args *args)
+{
+	/*
+	 * Update returned values from RPC, values will appear in
+	 * x0-x3 when thread is resumed.
+	 */
+	regs->x[0] = args->a0;
+	regs->x[1] = args->a1;
+	regs->x[2] = args->a2;
+	regs->x[3] = args->a3;
+	regs->x[4] = args->a4;
+	regs->x[5] = args->a5;
+}
+#endif /*ARM64*/
+
+#ifdef ARM32
+/* True if the saved CPSR indicates execution was in user (USR) mode */
+static bool is_from_user(uint32_t cpsr)
+{
+	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
+}
+#endif
+
+#ifdef ARM64
+/*
+ * True if the saved SPSR indicates execution was in user mode: either
+ * AArch32 state (all AArch32 TAs run in user mode) or AArch64 EL0.
+ */
+static bool is_from_user(uint32_t cpsr)
+{
+	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
+		return true;
+	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
+	     SPSR_64_MODE_EL0)
+		return true;
+	return false;
+}
+#endif
+
+/* True if the thread was suspended while executing in user mode */
+static bool is_user_mode(struct thread_ctx_regs *regs)
+{
+	return is_from_user((uint32_t)regs->cpsr);
+}
+
+/*
+ * Resumes a thread previously suspended for an RPC.  The thread id comes
+ * from normal world in a3 and is validated (in range, suspended, and
+ * issued by the same hypervisor client) before resuming; on failure
+ * returns with a0 = OPTEE_SMC_RETURN_ERESUME.  Does not return on
+ * success.
+ */
+static void thread_resume_from_rpc(struct thread_smc_args *args)
+{
+	size_t n = args->a3; /* thread id */
+	struct thread_core_local *l = thread_get_core_local();
+	uint32_t rv = 0;
+
+	assert(l->curr_thread == -1);
+
+	lock_global();
+
+	if (n < CFG_NUM_THREADS &&
+	    threads[n].state == THREAD_STATE_SUSPENDED &&
+	    args->a7 == threads[n].hyp_clnt_id)
+		threads[n].state = THREAD_STATE_ACTIVE;
+	else
+		rv = OPTEE_SMC_RETURN_ERESUME;
+
+	unlock_global();
+
+	if (rv) {
+		args->a0 = rv;
+		return;
+	}
+
+	l->curr_thread = n;
+
+	/* Resuming into user mode: restart TA user-time accounting */
+	if (is_user_mode(&threads[n].regs))
+		tee_ta_update_session_utime_resume();
+
+	/* Reinstate the user mapping saved at suspend, if any */
+	if (threads[n].have_user_map)
+		core_mmu_set_user_map(&threads[n].user_map);
+
+	/*
+	 * Return from RPC to request service of an IRQ must not
+	 * get parameters from non-secure world.
+	 */
+	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
+		copy_a0_to_a5(&threads[n].regs, args);
+		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+	}
+
+	thread_lazy_save_ns_vfp();
+	/* Switches stack and context; does not return here */
+	thread_resume(&threads[n].regs);
+}
+
+/*
+ * Entry for fast SMCs: dispatched directly on the temporary stack via
+ * the registered fast-SMC handler, no thread is allocated.
+ */
+void thread_handle_fast_smc(struct thread_smc_args *args)
+{
+	thread_check_canaries();
+	thread_fast_smc_handler_ptr(args);
+	/* Fast handlers must not unmask any exceptions */
+	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
+}
+
+/*
+ * Entry for standard (yielding) SMCs: either resumes a thread suspended
+ * on RPC or allocates a fresh thread for a new call.
+ */
+void thread_handle_std_smc(struct thread_smc_args *args)
+{
+	thread_check_canaries();
+
+	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
+		thread_resume_from_rpc(args);
+	else
+		thread_alloc_and_run(args);
+}
+
+/* Helper routine for the assembly function thread_std_smc_entry() */
+void __thread_std_smc_entry(struct thread_smc_args *args)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+
+	/*
+	 * Lazily allocate the per-thread RPC argument buffer in non-secure
+	 * shared memory on first use; kept cached between calls unless the
+	 * preallocated RPC cache has been disabled.
+	 */
+	if (!thr->rpc_arg) {
+		paddr_t parg;
+		uint64_t carg;
+		void *arg;
+
+		thread_rpc_alloc_arg(
+			OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS),
+			&parg, &carg);
+		/* The buffer must be aligned and mappable as NS shm */
+		if (!parg || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) ||
+		    !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) {
+			thread_rpc_free_arg(carg);
+			args->a0 = OPTEE_SMC_RETURN_ENOMEM;
+			return;
+		}
+
+		thr->rpc_arg = arg;
+		thr->rpc_carg = carg;
+	}
+
+	thread_std_smc_handler_ptr(args);
+
+	tee_fs_rpc_cache_clear(&thr->tsd);
+	/* Without the prealloc cache, give the buffer back after each call */
+	if (!thread_prealloc_rpc_cache) {
+		thread_rpc_free_arg(thr->rpc_carg);
+		thr->rpc_carg = 0;
+		thr->rpc_arg = 0;
+	}
+}
+
+/* Returns the end (top) of this core's temporary stack */
+void *thread_get_tmp_sp(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+
+	return (void *)l->tmp_stack_va_end;
+}
+
+#ifdef ARM64
+/* Returns the saved kernel SP of the thread running on this core */
+vaddr_t thread_get_saved_thread_sp(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	assert(ct != -1);
+	return threads[ct].kern_sp;
+}
+#endif /*ARM64*/
+
+/*
+ * True if @va lies within the current thread's kernel stack; false when
+ * no thread is assigned to this core.
+ */
+bool thread_addr_is_in_stack(vaddr_t va)
+{
+	struct thread_ctx *thr;
+	int ct = thread_get_id_may_fail();
+
+	if (ct == -1)
+		return false;
+
+	thr = threads + ct;
+	/* stack_va_end is the exclusive top; the stack grows downwards */
+	return va < thr->stack_va_end &&
+	       va >= (thr->stack_va_end - STACK_THREAD_SIZE);
+}
+
+/*
+ * Returns the current thread to the free pool when a standard call is
+ * done: restores the lazily saved non-secure VFP state, lets the pager
+ * reclaim the thread stack's physical pages, and detaches the thread
+ * from this core.  The thread must hold no mutexes.
+ */
+void thread_state_free(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	assert(ct != -1);
+	assert(TAILQ_EMPTY(&threads[ct].mutexes));
+
+	thread_lazy_restore_ns_vfp();
+	tee_pager_release_phys(
+		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
+		STACK_THREAD_SIZE);
+
+	lock_global();
+
+	assert(threads[ct].state == THREAD_STATE_ACTIVE);
+	threads[ct].state = THREAD_STATE_FREE;
+	threads[ct].flags = 0;
+	l->curr_thread = -1;
+
+	unlock_global();
+}
+
+#ifdef CFG_WITH_PAGER
+/*
+ * With the pager enabled, hand back the physical pages backing the
+ * unused part of the kernel stack (from stack base up to the saved SP)
+ * while the thread is suspended.
+ */
+static void release_unused_kernel_stack(struct thread_ctx *thr)
+{
+	vaddr_t sp = thr->regs.svc_sp;
+	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
+	size_t len = sp - base;
+
+	tee_pager_release_phys((void *)base, len);
+}
+#else
+/* Without the pager the stack stays resident; nothing to release */
+static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
+{
+}
+#endif
+
+/*
+ * Suspends the current thread (typically to perform an RPC or service a
+ * foreign interrupt): records the resume CPSR/PC and the given flags,
+ * saves and detaches any active user mapping, and frees the core.
+ * Returns the suspended thread's id, to be handed to normal world so it
+ * can resume the right thread later.
+ */
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
+{
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	assert(ct != -1);
+
+	thread_check_canaries();
+
+	release_unused_kernel_stack(threads + ct);
+
+	/* Suspended out of user mode: save user VFP and account TA time */
+	if (is_from_user(cpsr)) {
+		thread_user_save_vfp();
+		tee_ta_update_session_utime_suspend();
+		tee_ta_gprof_sample_pc(pc);
+	}
+	thread_lazy_restore_ns_vfp();
+
+	lock_global();
+
+	assert(threads[ct].state == THREAD_STATE_ACTIVE);
+	threads[ct].flags |= flags;
+	threads[ct].regs.cpsr = cpsr;
+	threads[ct].regs.pc = pc;
+	threads[ct].state = THREAD_STATE_SUSPENDED;
+
+	/* Stash the user mapping so thread_resume_from_rpc() can restore it */
+	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
+	if (threads[ct].have_user_map) {
+		core_mmu_get_user_map(&threads[ct].user_map);
+		core_mmu_set_user_map(NULL);
+	}
+
+	l->curr_thread = -1;
+
+	unlock_global();
+
+	return ct;
+}
+
+#ifdef ARM32
+/*
+ * Records the temporary stack and assigns it to the IRQ and FIQ banked
+ * stack pointers, which share it on AArch32.
+ */
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+	l->tmp_stack_va_end = sp;
+	thread_set_irq_sp(sp);
+	thread_set_fiq_sp(sp);
+}
+
+/* Assigns the abort-mode banked stack pointer */
+static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
+{
+	thread_set_abt_sp(sp);
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
+{
+	/*
+	 * We're already using the tmp stack when this function is called
+	 * so there's no need to assign it to any stack pointer. However,
+	 * we'll need to restore it at different times so store it here.
+	 */
+	l->tmp_stack_va_end = sp;
+}
+
+/* AArch64 keeps the abort stack in core-local state instead of a bank */
+static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
+{
+	l->abt_stack_va_end = sp;
+}
+#endif /*ARM64*/
+
+/*
+ * Assigns the kernel stack top for @thread_id; false if the id is out
+ * of range.
+ */
+bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
+{
+	if (thread_id >= CFG_NUM_THREADS)
+		return false;
+	threads[thread_id].stack_va_end = sp;
+	return true;
+}
+
+/* Returns the current thread id, or -1 if no thread runs on this core */
+int thread_get_id_may_fail(void)
+{
+	/* thread_get_core_local() requires IRQs to be disabled */
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	thread_unmask_exceptions(exceptions);
+	return ct;
+}
+
+/* Returns the current thread id; asserts that a thread is running */
+int thread_get_id(void)
+{
+	int ct = thread_get_id_may_fail();
+
+	assert(ct >= 0 && ct < CFG_NUM_THREADS);
+	return ct;
+}
+
+/*
+ * Installs the platform-supplied SMC/FIQ/PSCI handlers into the function
+ * pointers consulted by the vector entry code.
+ */
+static void init_handlers(const struct thread_handlers *handlers)
+{
+	thread_std_smc_handler_ptr = handlers->std_smc;
+	thread_fast_smc_handler_ptr = handlers->fast_smc;
+	thread_fiq_handler_ptr = handlers->fiq;
+	thread_cpu_on_handler_ptr = handlers->cpu_on;
+	thread_cpu_off_handler_ptr = handlers->cpu_off;
+	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
+	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
+	thread_system_off_handler_ptr = handlers->system_off;
+	thread_system_reset_handler_ptr = handlers->system_reset;
+}
+
+#ifdef CFG_WITH_PAGER
+/*
+ * With the pager, each thread stack is carved out of pageable vcore
+ * memory with a leading guard page, so stack overflow faults instead of
+ * silently corrupting the neighbour.
+ */
+static void init_thread_stacks(void)
+{
+	size_t n;
+
+	/*
+	 * Allocate virtual memory for thread stacks.
+	 */
+	for (n = 0; n < CFG_NUM_THREADS; n++) {
+		tee_mm_entry_t *mm;
+		vaddr_t sp;
+
+		/* Find vmem for thread stack and its protection gap */
+		mm = tee_mm_alloc(&tee_mm_vcore,
+				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
+		assert(mm);
+
+		/* Claim eventual physical page */
+		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
+				    true);
+
+		/* Add the area to the pager */
+		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
+					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
+					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
+					NULL, NULL);
+
+		/* init effective stack */
+		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
+		if (!thread_init_stack(n, sp))
+			panic("init stack failed");
+	}
+}
+#else
+/* Without the pager, stacks live in the statically linked stack_thread[] */
+static void init_thread_stacks(void)
+{
+	size_t n;
+
+	/* Assign the thread stacks */
+	for (n = 0; n < CFG_NUM_THREADS; n++) {
+		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
+			panic("thread_init_stack failed");
+	}
+}
+#endif /*CFG_WITH_PAGER*/
+
+/*
+ * Once-per-boot initialization run on the primary CPU: installs the
+ * handler table, seeds the stack canaries, sets up thread stacks and
+ * the page-table allocator.
+ */
+void thread_init_primary(const struct thread_handlers *handlers)
+{
+	init_handlers(handlers);
+
+	/* Initialize canaries around the stacks */
+	init_canaries();
+
+	init_thread_stacks();
+	pgt_init();
+}
+
+/* Starts the built-in secure monitor unless ARM Trusted Firmware owns it */
+static void init_sec_mon(size_t pos __maybe_unused)
+{
+#if !defined(CFG_WITH_ARM_TRUSTED_FW)
+	/* Initialize secure monitor */
+	sm_init(GET_STACK(stack_tmp[pos]));
+#endif
+}
+
+/*
+ * Per-CPU initialization: monitor, temporary and abort stacks, and the
+ * exception vector base register.
+ */
+void thread_init_per_cpu(void)
+{
+	size_t pos = get_core_pos();
+	struct thread_core_local *l = thread_get_core_local();
+
+	init_sec_mon(pos);
+
+	/* Top of tmp stack is offset to leave room for core-local data */
+	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
+	set_abt_stack(l, GET_STACK(stack_abt[pos]));
+
+	thread_init_vbar();
+}
+
+/* Returns the current thread's thread-specific data; asserts a thread */
+struct thread_specific_data *thread_get_tsd(void)
+{
+	return &threads[thread_get_id()].tsd;
+}
+
+/* Returns the register save area of the thread running on this core */
+struct thread_ctx_regs *thread_get_ctx_regs(void)
+{
+	struct thread_core_local *l = thread_get_core_local();
+
+	assert(l->curr_thread != -1);
+	return &threads[l->curr_thread].regs;
+}
+
+/*
+ * Enables or disables IRQ for the current thread and records the choice
+ * in the thread flags so thread_restore_irq() can reinstate it.
+ */
+void thread_set_irq(bool enable)
+{
+	/* thread_get_core_local() requires IRQs to be disabled */
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+	struct thread_core_local *l;
+
+	l = thread_get_core_local();
+
+	assert(l->curr_thread != -1);
+
+	if (enable) {
+		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
+		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+	} else {
+		/*
+		 * No need to disable IRQ here since it's already disabled
+		 * above.
+		 */
+		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
+	}
+}
+
+/* Re-enables IRQ if the current thread had it enabled via thread_set_irq() */
+void thread_restore_irq(void)
+{
+	/* thread_get_core_local() requires IRQs to be disabled */
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+	struct thread_core_local *l;
+
+	l = thread_get_core_local();
+
+	assert(l->curr_thread != -1);
+
+	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
+		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
+}
+
+#ifdef CFG_WITH_VFP
+/*
+ * Enables VFP for kernel use.  Any lazily saved VFP state (non-secure
+ * world, an interrupted secure kernel user, or a user TA) is finalized
+ * first so enabling the unit cannot clobber registers that have not yet
+ * been written to memory.  Returns the previous exception mask, to be
+ * passed to thread_kernel_disable_vfp().
+ */
+uint32_t thread_kernel_enable_vfp(void)
+{
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+	assert(!vfp_is_enabled());
+
+	if (!thr->vfp_state.ns_saved) {
+		vfp_lazy_save_state_final(&thr->vfp_state.ns);
+		thr->vfp_state.ns_saved = true;
+	} else if (thr->vfp_state.sec_lazy_saved &&
+		   !thr->vfp_state.sec_saved) {
+		/*
+		 * This happens when we're handling an abort while the
+		 * thread was using the VFP state.
+		 */
+		vfp_lazy_save_state_final(&thr->vfp_state.sec);
+		thr->vfp_state.sec_saved = true;
+	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
+		/*
+		 * This can happen either during syscall or abort
+		 * processing (while processing a syscall).
+		 */
+		vfp_lazy_save_state_final(&tuv->vfp);
+		tuv->saved = true;
+	}
+
+	vfp_enable();
+	return exceptions;
+}
+
+/*
+ * Disables kernel VFP use and restores the IRQ mask bit captured by
+ * thread_kernel_enable_vfp() (@state); other exception bits are kept.
+ */
+void thread_kernel_disable_vfp(uint32_t state)
+{
+	uint32_t exceptions;
+
+	assert(vfp_is_enabled());
+
+	vfp_disable();
+	exceptions = thread_get_exceptions();
+	assert(exceptions & THREAD_EXCP_IRQ);
+	exceptions &= ~THREAD_EXCP_IRQ;
+	exceptions |= state & THREAD_EXCP_IRQ;
+	thread_set_exceptions(exceptions);
+}
+
+/*
+ * Marks the secure kernel VFP state as lazily saved if the VFP unit was
+ * in use; the registers are only written out later if someone else
+ * needs the unit (see thread_kernel_enable_vfp()).
+ */
+void thread_kernel_save_vfp(void)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+
+	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+	if (vfp_is_enabled()) {
+		vfp_lazy_save_state_init(&thr->vfp_state.sec);
+		thr->vfp_state.sec_lazy_saved = true;
+	}
+}
+
+/* Restores secure kernel VFP state saved by thread_kernel_save_vfp() */
+void thread_kernel_restore_vfp(void)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+
+	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+	assert(!vfp_is_enabled());
+	if (thr->vfp_state.sec_lazy_saved) {
+		vfp_lazy_restore_state(&thr->vfp_state.sec,
+				       thr->vfp_state.sec_saved);
+		thr->vfp_state.sec_saved = false;
+		thr->vfp_state.sec_lazy_saved = false;
+	}
+}
+
+/*
+ * Enables VFP for a user TA whose state lives in @uvfp.  Pending lazy
+ * saves (non-secure world, or a different TA's state) are finalized
+ * first, then @uvfp is restored if it had been saved, and becomes the
+ * thread's active user VFP state.
+ */
+void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+	assert(!vfp_is_enabled());
+
+	if (!thr->vfp_state.ns_saved) {
+		vfp_lazy_save_state_final(&thr->vfp_state.ns);
+		thr->vfp_state.ns_saved = true;
+	} else if (tuv && uvfp != tuv) {
+		/* Another TA's state still owns the registers; flush it */
+		if (tuv->lazy_saved && !tuv->saved) {
+			vfp_lazy_save_state_final(&tuv->vfp);
+			tuv->saved = true;
+		}
+	}
+
+	if (uvfp->lazy_saved)
+		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
+	uvfp->lazy_saved = false;
+	uvfp->saved = false;
+
+	thr->vfp_state.uvfp = uvfp;
+	vfp_enable();
+}
+
+/*
+ * Marks the active user VFP state as lazily saved if the unit is in
+ * use, e.g. when the TA is suspended; a no-op if VFP is disabled.
+ */
+void thread_user_save_vfp(void)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
+
+	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);
+	if (!vfp_is_enabled())
+		return;
+
+	assert(tuv && !tuv->lazy_saved && !tuv->saved);
+	vfp_lazy_save_state_init(&tuv->vfp);
+	tuv->lazy_saved = true;
+}
+
+/*
+ * Discards @uvfp, e.g. when its TA session ends; detaches it from the
+ * thread if it was the active user VFP state.
+ */
+void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
+{
+	struct thread_ctx *thr = threads + thread_get_id();
+
+	if (uvfp == thr->vfp_state.uvfp)
+		thr->vfp_state.uvfp = NULL;
+	uvfp->lazy_saved = false;
+	uvfp->saved = false;
+}
+#endif /*CFG_WITH_VFP*/
+
+#ifdef ARM32
+/*
+ * Builds the SPSR to enter user mode at @entry_func.  Only 32-bit entry
+ * is possible on AArch32; bit 0 of the entry address selects Thumb
+ * state per the usual interworking convention.
+ */
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+	uint32_t s;
+
+	if (!is_32bit)
+		return false;
+
+	s = read_spsr();
+	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
+	s |= CPSR_MODE_USR;
+	if (entry_func & 1)
+		s |= CPSR_T;
+	*spsr = s;
+	return true;
+}
+#endif
+
+#ifdef ARM64
+/*
+ * Builds the SPSR to enter user mode at @entry_func, either AArch32
+ * user mode or AArch64 EL0, inheriting the current interrupt masks.
+ */
+static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
+{
+	uint32_t s;
+
+	if (is_32bit) {
+		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
+		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
+		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
+	} else {
+		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
+	}
+
+	*spsr = s;
+	return true;
+}
+#endif
+
+/*
+ * Transfers control to a user TA at @entry_func with arguments a0..a3
+ * and stack @user_sp.  Returns the TA's exit value via the assembly
+ * helper; on an impossible SPSR request it reports a panic through
+ * @exit_status0/@exit_status1 instead of entering user mode.
+ */
+uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
+		unsigned long a2, unsigned long a3, unsigned long user_sp,
+		unsigned long entry_func, bool is_32bit,
+		uint32_t *exit_status0, uint32_t *exit_status1)
+{
+	uint32_t spsr;
+
+	tee_ta_update_session_utime_resume();
+
+	if (!get_spsr(is_32bit, entry_func, &spsr)) {
+		*exit_status0 = 1; /* panic */
+		*exit_status1 = 0xbadbadba;
+		return 0;
+	}
+	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
+					spsr, exit_status0, exit_status1);
+}
+
+/*
+ * Records that the current thread now owns mutex @m; used to assert
+ * that all mutexes are released before the thread is freed.
+ */
+void thread_add_mutex(struct mutex *m)
+{
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+	assert(m->owner_id == -1);
+	m->owner_id = ct;
+	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
+}
+
+/* Removes mutex @m from the current thread's owned-mutex list */
+void thread_rem_mutex(struct mutex *m)
+{
+	struct thread_core_local *l = thread_get_core_local();
+	int ct = l->curr_thread;
+
+	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
+	assert(m->owner_id == ct);
+	m->owner_id = -1;
+	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
+}
+
+/*
+ * Disables the preallocated RPC-argument cache.  Only possible when all
+ * threads are free; returns false otherwise.  Cached buffers are handed
+ * back one per call: *cookie receives the cookie of a released buffer,
+ * or 0 once no thread holds one and the cache is finally disabled, so
+ * the caller should repeat until *cookie == 0.
+ */
+bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
+{
+	bool rv;
+	size_t n;
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+	lock_global();
+
+	for (n = 0; n < CFG_NUM_THREADS; n++) {
+		if (threads[n].state != THREAD_STATE_FREE) {
+			rv = false;
+			goto out;
+		}
+	}
+
+	rv = true;
+	for (n = 0; n < CFG_NUM_THREADS; n++) {
+		if (threads[n].rpc_arg) {
+			*cookie = threads[n].rpc_carg;
+			threads[n].rpc_carg = 0;
+			threads[n].rpc_arg = NULL;
+			goto out;
+		}
+	}
+
+	*cookie = 0;
+	thread_prealloc_rpc_cache = false;
+out:
+	unlock_global();
+	thread_unmask_exceptions(exceptions);
+	return rv;
+}
+
+/*
+ * Enables the preallocated RPC-argument cache; only possible when all
+ * threads are free, returns false otherwise.
+ */
+bool thread_enable_prealloc_rpc_cache(void)
+{
+	bool rv;
+	size_t n;
+	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
+
+	lock_global();
+
+	for (n = 0; n < CFG_NUM_THREADS; n++) {
+		if (threads[n].state != THREAD_STATE_FREE) {
+			rv = false;
+			goto out;
+		}
+	}
+
+	rv = true;
+	thread_prealloc_rpc_cache = true;
+out:
+	unlock_global();
+	thread_unmask_exceptions(exceptions);
+	return rv;
+}
+
+/*
+ * Issues RPC command @cmd to normal world using this thread's cached
+ * argument buffer: marshals @params in, performs the RPC, then copies
+ * back only output-capable parameters so normal world cannot overwrite
+ * inputs.  Returns the command's TEE_Result-style return code.
+ */
+static uint32_t rpc_cmd_nolock(uint32_t cmd, size_t num_params,
+		struct optee_msg_param *params)
+{
+	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct optee_msg_arg *arg = thr->rpc_arg;
+	uint64_t carg = thr->rpc_carg;
+	const size_t params_size = sizeof(struct optee_msg_param) * num_params;
+	size_t n;
+
+	assert(arg && carg && num_params <= THREAD_RPC_MAX_NUM_PARAMS);
+
+	/* RPC round trips have unpredictable timing: feed the PRNG */
+	plat_prng_add_jitter_entropy();
+
+	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS));
+	arg->cmd = cmd;
+	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+	arg->num_params = num_params;
+	memcpy(OPTEE_MSG_GET_PARAMS(arg), params, params_size);
+
+	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+	thread_rpc(rpc_args);
+	for (n = 0; n < num_params; n++) {
+		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
+		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+			memcpy(params + n, OPTEE_MSG_GET_PARAMS(arg) + n,
+			       sizeof(struct optee_msg_param));
+			break;
+		default:
+			break;
+		}
+	}
+	return arg->ret;
+}
+
+/* Public wrapper around rpc_cmd_nolock() */
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+		struct optee_msg_param *params)
+{
+	uint32_t ret;
+
+	ret = rpc_cmd_nolock(cmd, num_params, params);
+
+	return ret;
+}
+
+/*
+ * Validates a buffer returned by normal world: correctly aligned and
+ * fully contained in non-secure shared memory.
+ */
+static bool check_alloced_shm(paddr_t pa, size_t len, size_t align)
+{
+	if (pa & (align - 1))
+		return false;
+	return core_pbuf_is(CORE_MEM_NSEC_SHM, pa, len);
+}
+
+/* Asks normal world to free an RPC argument buffer; no-op for cookie 0 */
+void thread_rpc_free_arg(uint64_t cookie)
+{
+	if (cookie) {
+		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+			OPTEE_SMC_RETURN_RPC_FREE
+		};
+
+		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
+		thread_rpc(rpc_args);
+	}
+}
+
+/*
+ * Asks normal world for an RPC argument buffer of @size bytes.  On
+ * success *arg is the validated physical address and *cookie the handle
+ * for freeing it; both are 0 on failure (invalid buffers are returned
+ * to normal world immediately).
+ */
+void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie)
+{
+	paddr_t pa;
+	uint64_t co;
+	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+		OPTEE_SMC_RETURN_RPC_ALLOC, size
+	};
+
+	thread_rpc(rpc_args);
+
+	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
+	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);
+	/* Never trust a normal-world pointer without validation */
+	if (!check_alloced_shm(pa, size, sizeof(uint64_t))) {
+		thread_rpc_free_arg(co);
+		pa = 0;
+		co = 0;
+	}
+
+	*arg = pa;
+	*cookie = co;
+}
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc()
+ *
+ * @cookie:	cookie received when allocating the buffer
+ * @bt:		must be the same as supplied when allocating
+ */
+static void thread_rpc_free(unsigned int bt, uint64_t cookie)
+{
+	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct optee_msg_arg *arg = thr->rpc_arg;
+	uint64_t carg = thr->rpc_carg;
+	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+	/* Hand-rolled OPTEE_MSG_RPC_CMD_SHM_FREE with a single value param */
+	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE;
+	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+	arg->num_params = 1;
+
+	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+	params[0].u.value.a = bt;
+	params[0].u.value.b = cookie;
+	params[0].u.value.c = 0;
+
+	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+	thread_rpc(rpc_args);
+}
+
+/**
+ * Allocates shared memory buffer via RPC
+ *
+ * @size:	size in bytes of shared memory buffer
+ * @align:	required alignment of buffer
+ * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
+ * @payload:	returned physical pointer to buffer, 0 if allocation
+ *		failed.
+ * @cookie:	returned cookie used when freeing the buffer
+ */
+static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
+			paddr_t *payload, uint64_t *cookie)
+{
+	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
+	struct thread_ctx *thr = threads + thread_get_id();
+	struct optee_msg_arg *arg = thr->rpc_arg;
+	uint64_t carg = thr->rpc_carg;
+	struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg);
+
+	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+	arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC;
+	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+	arg->num_params = 1;
+
+	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+	params[0].u.value.a = bt;
+	params[0].u.value.b = size;
+	params[0].u.value.c = align;
+
+	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
+	thread_rpc(rpc_args);
+	/* Distrust everything normal world returned before using it */
+	if (arg->ret != TEE_SUCCESS)
+		goto fail;
+
+	if (arg->num_params != 1)
+		goto fail;
+
+	if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
+		goto fail;
+
+	if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) {
+		/* Invalid buffer: give it straight back */
+		thread_rpc_free(bt, params[0].u.tmem.shm_ref);
+		goto fail;
+	}
+
+	*payload = params[0].u.tmem.buf_ptr;
+	*cookie = params[0].u.tmem.shm_ref;
+	return;
+fail:
+	*payload = 0;
+	*cookie = 0;
+}
+
+/* Allocates 8-byte aligned application shared memory via RPC */
+void thread_rpc_alloc_payload(size_t size, paddr_t *payload, uint64_t *cookie)
+{
+	thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, payload, cookie);
+}
+
+/* Frees application shared memory allocated by thread_rpc_alloc_payload() */
+void thread_rpc_free_payload(uint64_t cookie)
+{
+	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
+}
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
new file mode 100644
index 0000000..6d3ac35
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <kernel/abort.h>
+#include <kernel/thread_defs.h>
+#include <kernel/unwind.h>
+
+ .section .text.thread_asm
+
+/*
+ * Entered from the monitor/ARM-TF on a standard (yielding) SMC.
+ * r0-r7 carry the SMC arguments; they are passed on the stack as a
+ * struct thread_smc_args to the C handler.
+ */
+LOCAL_FUNC vector_std_smc_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	push	{r0-r7}
+	mov	r0, sp
+	bl	thread_handle_std_smc
+	/*
+	 * Normally thread_handle_std_smc() should return via
+	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
+	 * hasn't switched stack (error detected) it will do a normal "C"
+	 * return.
+	 */
+	pop	{r1-r8}
+	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_std_smc_entry
+
+/*
+ * Entered on a fast SMC: handled on the current (tmp) stack and
+ * answered immediately, no thread allocation.
+ */
+LOCAL_FUNC vector_fast_smc_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	push	{r0-r7}
+	mov	r0, sp
+	bl	thread_handle_fast_smc
+	pop	{r1-r8}
+	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+ 	/* Secure Monitor received a FIQ and passed control to us. */
+	bl	thread_check_canaries
+	ldr	lr, =thread_fiq_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_fiq_entry
+
+/*
+ * PSCI/system event entries below all follow the same pattern: call the
+ * registered handler through its function pointer and report completion
+ * to the monitor with the matching TEESMC_OPTEED_RETURN_* code in r0
+ * and the handler's result in r1.
+ */
+LOCAL_FUNC vector_cpu_on_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_cpu_on_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_cpu_off_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_cpu_suspend_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_cpu_resume_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_system_off_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	ldr	lr, =thread_system_reset_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	mov	r1, r0
+	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization. Also used when compiled with the internal monitor, but
+ * the cpu_*_entry and system_*_entry are not used then.
+ *
+ * Note that ARM-TF depends on the layout of this vector table, any change
+ * in layout has to be synced with ARM-TF.
+ */
+FUNC thread_vector_table , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	b	vector_std_smc_entry
+	b	vector_fast_smc_entry
+	b	vector_cpu_on_entry
+	b	vector_cpu_off_entry
+	b	vector_cpu_resume_entry
+	b	vector_cpu_suspend_entry
+	b	vector_fiq_entry
+	b	vector_system_off_entry
+	b	vector_system_reset_entry
+UNWIND(	.fnend)
+END_FUNC thread_vector_table
+
+/*
+ * The three helpers below set a banked stack pointer (ABT/IRQ/FIQ mode)
+ * by briefly switching into that mode with CPSR saved in r1 and
+ * restored afterwards.  r0 = new stack pointer.
+ */
+FUNC thread_set_abt_sp , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	mrs	r1, cpsr
+	cps	#CPSR_MODE_ABT
+	mov	sp, r0
+	msr	cpsr, r1
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC thread_set_abt_sp
+
+FUNC thread_set_irq_sp , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	mrs	r1, cpsr
+	cps	#CPSR_MODE_IRQ
+	mov	sp, r0
+	msr	cpsr, r1
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC thread_set_irq_sp
+
+FUNC thread_set_fiq_sp , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	mrs	r1, cpsr
+	cps	#CPSR_MODE_FIQ
+	mov	sp, r0
+	msr	cpsr, r1
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC thread_set_fiq_sp
+
+/*
+ * void thread_resume(struct thread_ctx_regs *regs)
+ *
+ * Restores a full thread context saved by thread_save_state() and
+ * resumes execution at the saved PC/CPSR via RFE; does not return.
+ */
+FUNC thread_resume , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
+
+	/* User/system banked sp and lr */
+	cps	#CPSR_MODE_SYS
+	ldm	r12!, {sp, lr}
+
+	/* Supervisor spsr, sp and lr */
+	cps	#CPSR_MODE_SVC
+	ldm	r12!, {r1, sp, lr}
+	msr	spsr_fsxc, r1
+
+	cps	#CPSR_MODE_SVC
+	/* Push saved {cpsr, pc} for the rfefd below */
+	ldm	r12, {r1, r2}
+	push	{r1, r2}
+
+	ldm	r0, {r0-r12}
+
+	/* Restore CPSR and jump to the instruction to resume at */
+	rfefd	sp!
+UNWIND(	.fnend)
+END_FUNC thread_resume
+
+/*
+ * Disables IRQ and FIQ and saves state of thread, returns original
+ * CPSR.
+ *
+ * Fills in the current thread's thread_ctx_regs (r0-r12, user sp/lr,
+ * svc spsr/sp/lr) so thread_resume() can restore it later.
+ */
+LOCAL_FUNC thread_save_state , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	push	{r12, lr}
+	/*
+	 * Uses stack for temporary storage, while storing needed
+	 * context in the thread context struct.
+	 */
+
+	mrs	r12, cpsr
+
+	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
+
+	push	{r4-r7}
+	push	{r0-r3}
+
+	mov	r5, r12			/* Save CPSR in a preserved register */
+	mrs	r6, cpsr		/* Save current CPSR */
+
+	bl	thread_get_ctx_regs
+
+	pop	{r1-r4}			/* r0-r3 pushed above */
+	stm	r0!, {r1-r4}
+	pop	{r1-r4}			/* r4-r7 pushed above */
+	stm	r0!, {r1-r4}
+	stm	r0!, {r8-r11}
+
+	pop	{r12, lr}
+	stm	r0!, {r12}
+
+	/* Save user/system banked sp and lr */
+	cps	#CPSR_MODE_SYS
+	stm	r0!, {sp, lr}
+
+	/* Save supervisor spsr, sp and lr */
+	cps	#CPSR_MODE_SVC
+	mrs	r1, spsr
+	stm	r0!, {r1, sp, lr}
+
+	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
+	msr	cpsr, r6		/* Restore mode */
+
+	mov	r0, r5			/* Return original CPSR */
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC thread_save_state
+
+/*
+ * First code executed on a freshly allocated thread stack: runs the
+ * C-level standard SMC handler, then frees the thread and reports the
+ * call as done to the monitor.  Does not return.
+ */
+FUNC thread_std_smc_entry , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/* Pass r0-r7 in a struct thread_smc_args */
+	push	{r0-r7}
+	mov	r0, sp
+	bl	__thread_std_smc_entry
+	/*
+	 * Load the returned r0-r3 into preserved registers and skip the
+	 * "returned" r4-r7 since they will not be returned to normal
+	 * world.
+	 */
+	pop	{r4-r7}
+	add	sp, #(4 * 4)
+
+	/* Disable interrupts before switching to temporary stack */
+	cpsid	aif
+	bl	thread_get_tmp_sp
+	mov	sp, r0
+
+	bl	thread_state_free
+
+	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	mov	r1, r4
+	mov	r2, r5
+	mov	r3, r6
+	mov	r4, r7
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC thread_std_smc_entry
+
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+/*
+ * r0-r2 are used to pass parameters to normal world
+ * r0-r5 are used to pass return value back from normal world
+ *
+ * note that r3 is used to pass "resume information", that is, which
+ * thread it is that should resume.
+ *
+ * Since this function is following AAPCS we need to preserve r4-r5
+ * which are otherwise modified when returning back from normal world.
+ */
+UNWIND(	.fnstart)
+	push	{r4-r5, lr}
+UNWIND(	.save	{r4-r5, lr})
+	push	{r0}
+UNWIND(	.save	{r0})
+
+	bl	thread_save_state
+	mov	r4, r0			/* Save original CPSR */
+
+	/*
+ 	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
+	 */
+	bl	thread_get_tmp_sp
+	ldr	r5, [sp]		/* Get pointer to rv[] */
+	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
+	mov	sp, r0			/* Switch to tmp stack */
+
+	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+	mov	r1, r4			/* CPSR to restore */
+	ldr	r2, =.thread_rpc_return	/* PC to resume at */
+	bl	thread_state_suspend
+	mov	r4, r0			/* Supply thread index */
+	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	ldm	r5, {r1-r3}		/* Load rv[] into r0-r2 */
+	smc	#0
+	b	.			/* SMC should not return */
+
+.thread_rpc_return:
+	/*
+	 * At this point the stack pointer has been restored to the value
+	 * it had when thread_save_state() was called above.
+	 *
+	 * Jumps here from thread_resume above when RPC has returned. The
+	 * IRQ and FIQ bits are restored to what they were when this
+	 * function was originally entered.
+	 */
+	pop	{r12}			/* Get pointer to rv[] */
+	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
+	pop	{r4-r5, pc}
+UNWIND(	.fnend)
+END_FUNC thread_rpc
+
+/*
+ * FIQ (secure interrupt) exception handler: runs the registered FIQ
+ * handler and returns to the interrupted context.
+ */
+LOCAL_FUNC thread_fiq_handler , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/* FIQ has a +4 offset for lr compared to preferred return address */
+	sub	lr, lr, #4
+	/*
+	 * We're saving {r0-r3} and the banked fiq registers {r8-r12}. The
+	 * banked fiq registers need to be saved because the secure monitor
+	 * doesn't save those. The treatment of the banked fiq registers is
+	 * somewhat analogous to the lazy save of VFP registers.
+	 */
+	push	{r0-r3, r8-r12, lr}
+	bl	thread_check_canaries
+	ldr	lr, =thread_fiq_handler_ptr
+	ldr	lr, [lr]
+	blx	lr
+	pop	{r0-r3, r8-r12, lr}
+	movs	pc, lr
+UNWIND(	.fnend)
+END_FUNC thread_fiq_handler
+
+LOCAL_FUNC thread_irq_handler , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/*
+	 * IRQ mode is set up to use tmp stack so FIQ has to be
+	 * disabled before touching the stack. We can also assign
+	 * SVC sp from IRQ sp to get SVC mode into the state we
+	 * need when doing the SMC below.
+	 */
+	cpsid	f			/* Disable FIQ also */
+	sub	lr, lr, #4		/* Preferred return address */
+	push	{lr}
+	push	{r12}
+
+	bl	thread_save_state
+
+	/* Suspend the thread: r0 = flags, r1 = CPSR, r2 = PC to resume at */
+	mov	r0, #THREAD_FLAGS_EXIT_ON_IRQ
+	mrs	r1, spsr
+	pop	{r12}
+	pop	{r2}
+	blx	thread_state_suspend
+	mov	r4, r0		/* Supply thread index */
+
+	/*
+	 * Switch to SVC mode and copy current stack pointer as it already
+	 * is the tmp stack.
+	 */
+	mov	r0, sp
+	cps	#CPSR_MODE_SVC
+	mov	sp, r0
+
+	/* Tell normal world an RPC (IRQ) is pending */
+	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	ldr	r1, =OPTEE_SMC_RETURN_RPC_IRQ
+	mov	r2, #0
+	mov	r3, #0
+	/* r4 is already filled in above */
+	smc	#0
+	b	.	/* SMC should not return */
+UNWIND(	.fnend)
+END_FUNC thread_irq_handler
+
+/* Install the exception vector table for the current CPU */
+FUNC thread_init_vbar , :
+UNWIND(	.fnstart)
+	/* Set vector (VBAR) */
+	ldr	r0, =thread_vect_table
+	write_vbar r0
+	bx	lr
+UNWIND(	.fnend)
+END_FUNC thread_init_vbar
+
+/*
+ * Below are low level routines handling entry and return from user mode.
+ *
+ * thread_enter_user_mode() saves all that registers user mode can change
+ * so kernel mode can restore needed registers when resuming execution
+ * after the call to thread_enter_user_mode() has returned.
+ * thread_enter_user_mode() doesn't return directly since it enters user
+ * mode instead, it's thread_unwind_user_mode() that does the
+ * returning by restoring the registers saved by thread_enter_user_mode().
+ *
+ * There are three ways for thread_enter_user_mode() to return to its caller,
+ * user TA calls utee_return, user TA calls utee_panic or through an abort.
+ *
+ * Calls to utee_return or utee_panic are handled as:
+ * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
+ * calls syscall_return() or syscall_panic().
+ *
+ * These function calls return normally except thread_svc_handler(),
+ * which is an exception handling routine so it reads return address and
+ * SPSR to restore from the stack. syscall_return() and syscall_panic()
+ * changes return address and SPSR used by thread_svc_handler() to instead of
+ * returning into user mode as with other syscalls it returns into
+ * thread_unwind_user_mode() in kernel mode instead. When
+ * thread_svc_handler() returns the stack pointer at the point where
+ * thread_enter_user_mode() left it so this is where
+ * thread_unwind_user_mode() can operate.
+ *
+ * Aborts are handled in a similar way but by thread_abort_handler()
+ * instead, when the pager sees that it's an abort from user mode that
+ * can't be handled it updates SPSR and return address used by
+ * thread_abort_handler() to return into thread_unwind_user_mode()
+ * instead.
+ */
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ *               unsigned long a2, unsigned long a3, unsigned long user_sp,
+ *               unsigned long user_func, unsigned long spsr,
+ *               uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/*
+	 * Save all registers to allow syscall_return() to resume execution
+	 * as if this function would have returned. This is also used in
+	 * syscall_panic().
+	 *
+	 * If stack usage of this function is changed
+	 * thread_unwind_user_mode() has to be updated.
+	 */
+	push	{r4-r12,lr}
+
+	/* Stack arguments, offsets account for the 10 registers pushed above */
+	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
+	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
+	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */
+
+	/*
+	 * Set the saved Processor Status Register to user mode to allow
+	 * entry of user mode through movs below.
+	 */
+	msr	spsr_cxsf, r6
+
+	/*
+	 * Save old user sp and set new user sp.
+	 */
+	cps	#CPSR_MODE_SYS
+	mov	r6, sp
+	mov	sp, r4
+	cps	#CPSR_MODE_SVC
+	/* r7 is pushed too, presumably for 8-byte stack alignment — TODO confirm */
+	push	{r6,r7}
+
+	/*
+	 * Don't allow return from this function, return is done through
+	 * thread_unwind_user_mode() below.
+	 */
+	mov	lr, #0
+	/* Call the user function with its arguments */
+	movs	pc, r5
+UNWIND(	.fnend)
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * 		uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/* Store the exit status through the pointers saved on the stack */
+	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
+	str	r1, [ip]
+	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
+	str	r2, [ip]
+
+	/* Restore old user sp, matches push {r6,r7} in __thread_enter_user_mode() */
+	pop	{r4,r7}
+	cps	#CPSR_MODE_SYS
+	mov	sp, r4
+	cps	#CPSR_MODE_SVC
+
+	pop	{r4-r12,pc}	/* Match the push in thread_enter_user_mode()*/
+UNWIND(	.fnend)
+END_FUNC thread_unwind_user_mode
+
+LOCAL_FUNC thread_abort_handler , :
+thread_abort_handler:
+thread_und_handler:
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/*
+	 * Undefined instruction entry (UND mode).
+	 * Switch to abort mode to use that stack instead.
+	 */
+	cps	#CPSR_MODE_ABT
+	push	{r0-r11, ip}
+	cps	#CPSR_MODE_UND
+	mrs	r0, spsr
+	/* Thumb state? Then the faulting instruction is 2 bytes, else 4 */
+	tst	r0, #CPSR_T
+	subne	r1, lr, #2
+	subeq	r1, lr, #4
+	cps	#CPSR_MODE_ABT
+	push	{r0, r1}
+	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
+	mov	r0, #ABORT_TYPE_UNDEF
+	b	.thread_abort_generic
+
+thread_dabort_handler:
+	push	{r0-r11, ip}
+	sub	r1, lr, #8	/* Address of aborted instruction */
+	mrs	r0, spsr
+	push	{r0, r1}
+	mov	r0, #ABORT_TYPE_DATA
+	b	.thread_abort_generic
+
+thread_pabort_handler:
+	push	{r0-r11, ip}
+	sub	r1, lr, #4	/* Address of aborted instruction */
+	mrs	r0, spsr
+	push	{r0, r1}
+	mov	r0, #ABORT_TYPE_PREFETCH
+	b	.thread_abort_generic
+
+.thread_abort_generic:
+	/* Save system mode sp and lr so abort_handler() can inspect them */
+	cps	#CPSR_MODE_SYS
+	mov	r1, sp
+	mov	r2, lr
+	cps	#CPSR_MODE_ABT
+	push	{r1-r3}
+	mov	r1, sp		/* r1 = pointer to saved register frame */
+	bl	abort_handler
+	pop	{r1-r3}
+	/* Restore (possibly updated) system mode sp and lr */
+	cps	#CPSR_MODE_SYS
+	mov	sp, r1
+	mov	lr, r2
+	cps	#CPSR_MODE_ABT
+	pop	{r0, r1}
+	mov	lr, r1		/* Return address (possibly updated) */
+	msr	spsr_fsxc, r0
+	pop	{r0-r11, ip}
+	/* Return from exception, also restores CPSR from SPSR */
+	movs	pc, lr
+UNWIND(	.fnend)
+END_FUNC thread_abort_handler
+
+LOCAL_FUNC thread_svc_handler , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	/* Save registers and SPSR, then pass the frame to tee_svc_handler() */
+	push	{r0-r7, lr}
+	mrs	r0, spsr
+	push	{r0}
+	mov	r0, sp		/* r0 = pointer to saved registers on stack */
+	bl	tee_svc_handler
+	/* Restore possibly updated SPSR/registers and return from exception */
+	pop	{r0}
+	msr	spsr_fsxc, r0
+	pop	{r0-r7, lr}
+	movs	pc, lr
+UNWIND(	.fnend)
+END_FUNC thread_svc_handler
+
+	/*
+	 * Exception vector table, installed into VBAR by thread_init_vbar().
+	 * Must be 32-byte aligned per the architecture.
+	 */
+	.align	5
+LOCAL_FUNC thread_vect_table , :
+UNWIND(	.fnstart)
+UNWIND(	.cantunwind)
+	b	.			/* Reset			*/
+	b	thread_und_handler	/* Undefined instruction	*/
+	b	thread_svc_handler	/* System call			*/
+	b	thread_pabort_handler	/* Prefetch abort		*/
+	b	thread_dabort_handler	/* Data abort			*/
+	b	.			/* Reserved			*/
+	b	thread_irq_handler	/* IRQ				*/
+	b	thread_fiq_handler	/* FIQ				*/
+UNWIND(	.fnend)
+END_FUNC thread_vect_table
diff --git a/core/arch/arm/kernel/thread_a64.S b/core/arch/arm/kernel/thread_a64.S
new file mode 100644
index 0000000..abd482b
--- /dev/null
+++ b/core/arch/arm/kernel/thread_a64.S
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <arm64_macros.S>
+#include <arm64.h>
+#include <sm/optee_smc.h>
+#include <sm/teesmc_opteed_macros.h>
+#include <sm/teesmc_opteed.h>
+#include <asm-defines.h>
+#include <kernel/thread_defs.h>
+#include "thread_private.h"
+
+	/*
+	 * Computes a pointer to the struct thread_ctx of the thread
+	 * currently running on this core:
+	 *   x<res> = threads + curr_thread * THREAD_CTX_SIZE
+	 * \core_local points to this core's struct thread_core_local.
+	 */
+	.macro get_thread_ctx core_local, res, tmp0, tmp1
+		ldr	w\tmp0, [\core_local, \
+				#THREAD_CORE_LOCAL_CURR_THREAD]
+		adr	x\res, threads
+		mov	x\tmp1, #THREAD_CTX_SIZE
+		madd	x\res, x\tmp0, x\tmp1, x\res
+	.endm
+
+ .section .text.thread_asm
+LOCAL_FUNC vector_std_smc_entry , :
+	/* Pass x0-x7 to the handler in a struct thread_smc_args on stack */
+	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
+	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+	mov	x0, sp
+	bl	thread_handle_std_smc
+	/*
+	 * Normally thread_handle_std_smc() should return via
+	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
+	 * hasn't switched stack (error detected) it will do a normal "C"
+	 * return.
+	 */
+	/* Load returned a0..a7 into x1..x8; x0 carries the SMC id below */
+	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+	add	sp, sp, #THREAD_SMC_ARGS_SIZE
+	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_std_smc_entry
+
+LOCAL_FUNC vector_fast_smc_entry , :
+	/* Pass x0-x7 to the handler in a struct thread_smc_args on stack */
+	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
+	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+	mov	x0, sp
+	bl	thread_handle_fast_smc
+	/* Load returned a0..a7 into x1..x8; x0 carries the SMC id below */
+	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
+	add	sp, sp, #THREAD_SMC_ARGS_SIZE
+	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_fast_smc_entry
+
+LOCAL_FUNC vector_fiq_entry , :
+	/* Secure Monitor received a FIQ and passed control to us. */
+	bl	thread_check_canaries
+	/* Call the registered FIQ handler through thread_fiq_handler_ptr */
+	adr	x16, thread_fiq_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_fiq_entry
+
+/*
+ * Power management entries below all follow the same pattern: call the
+ * registered handler through its function pointer, pass the handler's
+ * return value in x1 and report completion to the secure monitor with an
+ * SMC that should not return.
+ */
+LOCAL_FUNC vector_cpu_on_entry , :
+	adr	x16, thread_cpu_on_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_cpu_on_entry
+
+LOCAL_FUNC vector_cpu_off_entry , :
+	adr	x16, thread_cpu_off_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_cpu_off_entry
+
+LOCAL_FUNC vector_cpu_suspend_entry , :
+	adr	x16, thread_cpu_suspend_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_cpu_suspend_entry
+
+LOCAL_FUNC vector_cpu_resume_entry , :
+	adr	x16, thread_cpu_resume_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_cpu_resume_entry
+
+LOCAL_FUNC vector_system_off_entry , :
+	adr	x16, thread_system_off_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_system_off_entry
+
+LOCAL_FUNC vector_system_reset_entry , :
+	adr	x16, thread_system_reset_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+	mov	x1, x0
+	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC vector_system_reset_entry
+
+/*
+ * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
+ * initialization.
+ *
+ * Note that ARM-TF depends on the layout of this vector table, any change
+ * in layout has to be synced with ARM-TF. Each entry is a single branch
+ * instruction so the entry points stay at fixed offsets.
+ */
+FUNC thread_vector_table , :
+	b	vector_std_smc_entry
+	b	vector_fast_smc_entry
+	b	vector_cpu_on_entry
+	b	vector_cpu_off_entry
+	b	vector_cpu_resume_entry
+	b	vector_cpu_suspend_entry
+	b	vector_fiq_entry
+	b	vector_system_off_entry
+	b	vector_system_reset_entry
+END_FUNC thread_vector_table
+
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+	/* Restore sp, return address (elr) and processor state (spsr) */
+	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
+	mov	sp, x1
+	msr	elr_el1, x2
+	msr	spsr_el1, x3
+	/* Restore x1..x30 first, x0 last since it holds the regs pointer */
+	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
+	ldr	x0, [x0, THREAD_CTX_REGS_X0]
+	eret
+END_FUNC thread_resume
+
+FUNC thread_std_smc_entry , :
+	/* pass x0-x7 in a struct thread_smc_args */
+	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
+	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+	mov	x0, sp
+
+	/* Call the registered handler */
+	bl	__thread_std_smc_entry
+
+	/*
+	 * Load the returned x0-x3 into preserved registers and skip the
+	 * "returned" x4-x7 since they will not be returned to normal
+	 * world.
+	 */
+	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
+	add	sp, sp, #THREAD_SMC_ARGS_SIZE
+
+	/* Mask all maskable exceptions before switching to temporary stack */
+	msr	daifset, #DAIFBIT_ALL
+	bl	thread_get_tmp_sp
+	mov	sp, x0
+
+	bl	thread_state_free
+
+	/* Report the result (saved in x20-x23 above) to the secure monitor */
+	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	mov	x1, x20
+	mov	x2, x21
+	mov	x3, x22
+	mov	x4, x23
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC thread_std_smc_entry
+
+/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
+FUNC thread_rpc , :
+	/* Read daif and create an SPSR */
+	mrs	x1, daif
+	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
+
+	/* Mask all maskable exceptions before switching to temporary stack */
+	msr	daifset, #DAIFBIT_ALL
+	push	x0, xzr		/* Save pointer to rv[] */
+	push	x1, x30
+	bl	thread_get_ctx_regs
+	ldr	x30, [sp, #8]
+	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
+	mov	x19, x0
+
+	bl	thread_get_tmp_sp
+	pop	x1, xzr		/* Match "push x1, x30" above */
+	mov	x2, sp
+	str	x2, [x19, #THREAD_CTX_REGS_SP]
+	ldr	x20, [sp]	/* Get pointer to rv[] */
+	mov	sp, x0		/* Switch to tmp stack */
+
+	adr	x2, .thread_rpc_return
+	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+	bl	thread_state_suspend
+	mov	x4, x0		/* Supply thread index */
+	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
+	smc	#0
+	b	.	/* SMC should not return */
+
+.thread_rpc_return:
+	/*
+	 * At this point the stack pointer has been restored to the value
+	 * stored in THREAD_CTX above.
+	 *
+	 * Jumps here from thread_resume above when RPC has returned. The
+	 * IRQ and FIQ bits are restored to what they were when this
+	 * function was originally entered.
+	 */
+	pop	x16, xzr	/* Get pointer to rv[] */
+	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
+	ret
+END_FUNC thread_rpc
+
+/* Install the exception vector table for the current CPU */
+FUNC thread_init_vbar , :
+	adr	x0, thread_vect_table
+	msr	vbar_el1, x0
+	ret
+END_FUNC thread_init_vbar
+
+/*
+ * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+ *               unsigned long a2, unsigned long a3, unsigned long user_sp,
+ *               unsigned long user_func, unsigned long spsr,
+ *               uint32_t *exit_status0, uint32_t *exit_status1)
+ *
+ */
+FUNC __thread_enter_user_mode , :
+	ldr	x8, [sp]	/* exit_status1 (9th argument, on stack) */
+	/*
+	 * Create and fill in the struct thread_user_mode_rec
+	 */
+	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
+	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
+	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+
+	/*
+	 * Switch to SP_EL1
+	 * Disable exceptions
+	 * Save kern sp in x19
+	 */
+	msr	daifset, #DAIFBIT_ALL
+	mov	x19, sp
+	msr	spsel, #1
+
+	/*
+	 * Save the kernel stack pointer in the thread context
+	 */
+	/* get pointer to current thread context */
+	get_thread_ctx sp, 21, 20, 22
+	/*
+	 * Save kernel stack pointer to ensure that el0_svc() uses
+	 * correct stack pointer
+	 */
+	str	x19, [x21, #THREAD_CTX_KERN_SP]
+
+	/*
+	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
+	 */
+	msr	spsr_el1, x6
+	/* Set user sp */
+	mov	x13, x4		/* Used when running TA in Aarch32 */
+	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
+	/* Set user function */
+	msr	elr_el1, x5
+
+	/* Jump into user mode */
+	eret
+END_FUNC __thread_enter_user_mode
+
+/*
+ * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
+ * 		uint32_t exit_status1);
+ * See description in thread.h
+ */
+FUNC thread_unwind_user_mode , :
+	/* Store the exit status through the saved pointers */
+	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
+	str	w1, [x3]
+	str	w2, [x4]
+	/* Restore x19..x30 */
+	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
+	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
+	/* Return from the call of thread_enter_user_mode() */
+	ret
+END_FUNC thread_unwind_user_mode
+
+ /*
+ * This macro verifies that the a given vector doesn't exceed the
+ * architectural limit of 32 instructions. This is meant to be placed
+ * immedately after the last instruction in the vector. It takes the
+ * vector entry as the parameter
+ */
+ .macro check_vector_size since
+ .if (. - \since) > (32 * 4)
+ .error "Vector exceeds 32 instructions"
+ .endif
+ .endm
+
+
+	.align	11
+LOCAL_FUNC thread_vect_table , :
+	/* -----------------------------------------------------
+	 * EL1 with SP0 : 0x0 - 0x180
+	 * -----------------------------------------------------
+	 */
+	.align	7
+sync_el1_sp0:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	el1_sync_abort
+	check_vector_size sync_el1_sp0
+
+	.align	7
+irq_el1_sp0:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_irq
+	check_vector_size irq_el1_sp0
+
+	.align	7
+fiq_el1_sp0:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_fiq
+	check_vector_size fiq_el1_sp0
+
+	.align	7
+SErrorSP0:
+	b	SErrorSP0
+	check_vector_size SErrorSP0
+
+	/* -----------------------------------------------------
+	 * Current EL with SPx: 0x200 - 0x380
+	 * -----------------------------------------------------
+	 */
+	.align	7
+SynchronousExceptionSPx:
+	b	SynchronousExceptionSPx
+	check_vector_size SynchronousExceptionSPx
+
+	.align	7
+IrqSPx:
+	b	IrqSPx
+	check_vector_size IrqSPx
+
+	.align	7
+FiqSPx:
+	b	FiqSPx
+	check_vector_size FiqSPx
+
+	.align	7
+SErrorSPx:
+	b	SErrorSPx
+	check_vector_size SErrorSPx
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x580
+	 * -----------------------------------------------------
+	 */
+	.align	7
+el0_sync_a64:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	mrs	x2, esr_el1
+	mrs	x3, sp_el0
+	lsr	x2, x2, #ESR_EC_SHIFT
+	cmp	x2, #ESR_EC_AARCH64_SVC
+	b.eq	el0_svc
+	b	el0_sync_abort
+	check_vector_size el0_sync_a64
+
+	.align	7
+el0_irq_a64:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_irq
+	check_vector_size el0_irq_a64
+
+	.align	7
+el0_fiq_a64:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_fiq
+	check_vector_size el0_fiq_a64
+
+	.align	7
+SErrorA64:
+	b   	SErrorA64
+	check_vector_size SErrorA64
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x780
+	 * -----------------------------------------------------
+	 */
+	.align	7
+el0_sync_a32:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	mrs	x2, esr_el1
+	mrs	x3, sp_el0
+	lsr	x2, x2, #ESR_EC_SHIFT
+	cmp	x2, #ESR_EC_AARCH32_SVC
+	b.eq	el0_svc
+	b	el0_sync_abort
+	check_vector_size el0_sync_a32
+
+	.align	7
+el0_irq_a32:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_irq
+	check_vector_size el0_irq_a32
+
+	.align	7
+el0_fiq_a32:
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	b	elx_fiq
+	check_vector_size el0_fiq_a32
+
+	.align	7
+SErrorA32:
+	b	SErrorA32
+	check_vector_size SErrorA32
+
+END_FUNC thread_vect_table
+
+LOCAL_FUNC el0_svc , :
+	/* get pointer to current thread context in x0 */
+	get_thread_ctx sp, 0, 1, 2
+	/* load saved kernel sp */
+	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
+	/* Keep pointer to initial record in x1 */
+	mov	x1, sp
+	/* Switch to SP_EL0 and restore kernel sp */
+	msr	spsel, #0
+	mov	x2, sp	/* Save SP_EL0 */
+	mov	sp, x0
+
+	/* Make room for struct thread_svc_regs */
+	sub	sp, sp, #THREAD_SVC_REG_SIZE
+	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
+
+	/* Restore x0-x3 */
+	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
+	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
+
+	/* Prepare the argument for the handler */
+	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
+	mrs	x0, elr_el1
+	mrs	x1, spsr_el1
+	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+	mov	x0, sp
+
+	/*
+	 * Unmask FIQ, Serror, and debug exceptions since we have nothing
+	 * left in sp_el1. Note that the SVC handler is expected to
+	 * re-enable IRQs by itself.
+	 */
+	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
+
+	/* Call the handler */
+	bl	tee_svc_handler
+
+	/* Mask all maskable exceptions since we're switching back to sp_el1 */
+	msr	daifset, #DAIFBIT_ALL
+
+	/*
+	 * Save the kernel sp we had at the beginning of this function.
+	 * This is needed when this TA has called another TA because
+	 * __thread_enter_user_mode() also saves the stack pointer in this
+	 * field.
+	 */
+	msr	spsel, #1
+	get_thread_ctx sp, 0, 1, 2
+	msr	spsel, #0
+	add	x1, sp, #THREAD_SVC_REG_SIZE
+	str	x1, [x0, #THREAD_CTX_KERN_SP]
+
+	/* Restore registers to the required state and return*/
+	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
+	msr	elr_el1, x0
+	msr	spsr_el1, x1
+	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
+	mov	x30, sp
+	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
+	mov	sp, x0
+	ldr	x0, [x30, THREAD_SVC_REG_X0]
+	ldr	x30, [x30, #THREAD_SVC_REG_X30]
+
+	eret
+END_FUNC el0_svc
+
+LOCAL_FUNC el1_sync_abort , :
+	/* Entered with sp_el1 pointing to this core's thread_core_local */
+	mov	x0, sp
+	msr	spsel, #0
+	mov	x3, sp		/* Save original sp */
+
+	/*
+	 * Update core local flags.
+	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
+	 */
+	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
+	orr	w1, w1, #THREAD_CLF_ABORT
+	/* Nested abort? Then use the tmp stack instead of the abort stack */
+	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
+			.Lsel_tmp_sp
+
+	/* Select abort stack */
+	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+	b	.Lset_sp
+
+.Lsel_tmp_sp:
+	/* Select tmp stack */
+	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
+
+.Lset_sp:
+	mov	sp, x2
+	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
+
+	/*
+	 * Save state on stack
+	 */
+	sub	sp, sp, #THREAD_ABT_REGS_SIZE
+	mrs	x2, spsr_el1
+	/* Store spsr, sp_el0 */
+	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+	/* Store original x0, x1 */
+	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
+	/* Store original x2, x3 and x4 to x29 */
+	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+	/* Store x30, elr_el1 */
+	mrs	x0, elr_el1
+	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+	/*
+	 * Call handler: x0 = abort type, x1 = saved register frame
+	 */
+	mov	x0, #0
+	mov	x1, sp
+	bl	abort_handler
+
+	/*
+	 * Restore state from stack
+	 */
+	/* Load x30, elr_el1 */
+	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
+	msr	elr_el1, x0
+	/* Load x0 to x29 */
+	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+	/* Switch to SP_EL1 */
+	msr	spsel, #1
+	/* Save x0 to x3 in CORE_LOCAL */
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	/* Restore spsr_el1 and sp_el0 */
+	mrs	x3, sp_el0
+	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+	msr	spsr_el1, x0
+	msr	sp_el0, x1
+
+	/* Update core local flags */
+	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
+	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/* Restore x0 to x3 */
+	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+	/* Return from exception */
+	eret
+END_FUNC el1_sync_abort
+
+	/* sp_el0 in x3 */
+LOCAL_FUNC el0_sync_abort , :
+	/* Entered with sp_el1 pointing to this core's thread_core_local */
+	/*
+	 * Update core local flags
+	 */
+	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
+	orr	w1, w1, #THREAD_CLF_ABORT
+	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/*
+	 * Save state on stack
+	 */
+
+	/* load abt_stack_va_end */
+	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
+	/* Keep pointer to initial record in x0 */
+	mov	x0, sp
+	/* Switch to SP_EL0 */
+	msr	spsel, #0
+	mov	sp, x1
+	sub	sp, sp, #THREAD_ABT_REGS_SIZE
+	mrs	x2, spsr_el1
+	/* Store spsr, sp_el0 */
+	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
+	/* Store original x0, x1 */
+	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
+	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
+	/* Store original x2, x3 and x4 to x29 */
+	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
+	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
+	/* Store x30, elr_el1 */
+	mrs	x0, elr_el1
+	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
+
+	/*
+	 * Call handler: x0 = abort type, x1 = saved register frame
+	 */
+	mov	x0, #0
+	mov	x1, sp
+	bl	abort_handler
+
+	/*
+	 * Restore state from stack
+	 */
+
+	/* Load x30, elr_el1 */
+	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
+	msr	elr_el1, x0
+	/* Load x0 to x29 */
+	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
+	/* Switch to SP_EL1 */
+	msr	spsel, #1
+	/* Save x0 to x3 in EL1_REC */
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+	/* Restore spsr_el1 and sp_el0 */
+	mrs	x3, sp_el0
+	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
+	msr	spsr_el1, x0
+	msr	sp_el0, x1
+
+	/* Update core local flags */
+	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
+	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/* Restore x0 to x3 */
+	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+	/* Return from exception */
+	eret
+END_FUNC el0_sync_abort
+
+LOCAL_FUNC elx_irq , :
+	/* Entered with sp_el1 pointing to this core's thread_core_local */
+	/*
+	 * Update core local flags
+	 */
+	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
+	orr	w1, w1, #THREAD_CLF_TMP
+	orr	w1, w1, #THREAD_CLF_IRQ
+	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/* get pointer to current thread context in x0 */
+	get_thread_ctx sp, 0, 1, 2
+	/* Keep original SP_EL0 */
+	mrs	x2, sp_el0
+
+	/* Store original sp_el0 */
+	str	x2, [x0, #THREAD_CTX_REGS_SP]
+	/* store x4..x30 */
+	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
+	/* Load original x0..x3 into x10..x13 */
+	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
+	/* Save original x0..x3 */
+	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
+
+	/* load tmp_stack_va_end */
+	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+	/* Switch to SP_EL0 */
+	msr	spsel, #0
+	mov	sp, x1
+
+	/*
+	 * Mark current thread as suspended
+	 * w0 = flags, x1 = SPSR, x2 = PC to resume at
+	 */
+	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
+	mrs	x1, spsr_el1
+	mrs	x2, elr_el1
+	bl	thread_state_suspend
+	mov	w4, w0		/* Supply thread index */
+
+	/* Update core local flags */
+	/* Switch to SP_EL1 */
+	msr	spsel, #1
+	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
+	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	msr	spsel, #0
+
+	/*
+	 * Note that we're exiting with SP_EL0 selected since the entry
+	 * functions expects to have SP_EL0 selected with the tmp stack
+	 * set.
+	 */
+
+	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
+	ldr	w1, =OPTEE_SMC_RETURN_RPC_IRQ
+	mov	w2, #0
+	mov	w3, #0
+	/* w4 is already filled in above */
+	smc	#0
+	b	.	/* SMC should not return */
+END_FUNC elx_irq
+
+/*
+ * This struct is never used from C it's only here to visualize the
+ * layout.
+ *
+ * struct elx_fiq_rec {
+ * 	uint64_t x[19 - 4]; x4..x18
+ * 	uint64_t lr;
+ * 	uint64_t sp_el0;
+ * };
+ */
+#define ELX_FIQ_REC_X(x)	(8 * ((x) - 4))
+#define ELX_FIQ_REC_LR		(8 + ELX_FIQ_REC_X(19))
+#define ELX_FIQ_REC_SP_EL0	(8 + ELX_FIQ_REC_LR)
+#define ELX_FIQ_REC_SIZE	(8 + ELX_FIQ_REC_SP_EL0)
+
+LOCAL_FUNC elx_fiq , :
+	/* Entered with sp_el1 pointing to this core's thread_core_local */
+	/*
+	 * Update core local flags
+	 */
+	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
+	orr	w1, w1, #THREAD_CLF_FIQ
+	orr	w1, w1, #THREAD_CLF_TMP
+	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/* load tmp_stack_va_end */
+	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
+	/* Keep original SP_EL0 */
+	mrs	x2, sp_el0
+	/* Switch to SP_EL0 */
+	msr	spsel, #0
+	mov	sp, x1
+
+	/*
+	 * Save registers on stack that can be corrupted by a call to
+	 * a C function
+	 */
+	/* Make room for struct elx_fiq_rec */
+	sub	sp, sp, #ELX_FIQ_REC_SIZE
+	/* Store x4..x18 */
+	store_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+	/* Store lr and original sp_el0 */
+	stp	x30, x2, [sp, #ELX_FIQ_REC_LR]
+
+	bl	thread_check_canaries
+	/* Call the registered FIQ handler through thread_fiq_handler_ptr */
+	adr	x16, thread_fiq_handler_ptr
+	ldr	x16, [x16]
+	blr	x16
+
+	/*
+	 * Restore registers
+	 */
+	/* Restore x4..x18 */
+	load_xregs sp, ELX_FIQ_REC_X(4), 4, 18
+	/* Load lr and original sp_el0 */
+	ldp	x30, x2, [sp, #ELX_FIQ_REC_LR]
+	/* Restore SP_El0 */
+	mov	sp, x2
+	/* Switch back to SP_EL1 */
+	msr	spsel, #1
+
+	/* Update core local flags */
+	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
+	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
+
+	/* Restore x0..x3 */
+	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
+
+	/* Return from exception */
+	eret
+END_FUNC elx_fiq
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
new file mode 100644
index 0000000..3d87c88
--- /dev/null
+++ b/core/arch/arm/kernel/thread_private.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_PRIVATE_H
+#define THREAD_PRIVATE_H
+
+#ifndef ASM
+
+#include <mm/core_mmu.h>
+#include <mm/pgt_cache.h>
+#include <kernel/vfp.h>
+#include <kernel/mutex.h>
+#include <kernel/thread.h>
+
+/* Life-cycle state of a struct thread_ctx */
+enum thread_state {
+	THREAD_STATE_FREE,
+	THREAD_STATE_SUSPENDED,
+	THREAD_STATE_ACTIVE,
+};
+
+#ifdef ARM32
+/*
+ * Saved register state of a suspended thread. Layout is accessed from
+ * assembly via THREAD_CTX_REGS_* offsets — see thread_a32.S; keep in sync.
+ */
+struct thread_ctx_regs {
+	uint32_t r0;
+	uint32_t r1;
+	uint32_t r2;
+	uint32_t r3;
+	uint32_t r4;
+	uint32_t r5;
+	uint32_t r6;
+	uint32_t r7;
+	uint32_t r8;
+	uint32_t r9;
+	uint32_t r10;
+	uint32_t r11;
+	uint32_t r12;
+	uint32_t usr_sp;
+	uint32_t usr_lr;
+	uint32_t svc_spsr;
+	uint32_t svc_sp;
+	uint32_t svc_lr;
+	uint32_t pc;
+	uint32_t cpsr;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+/*
+ * Saved register state of a suspended thread. Layout is accessed from
+ * assembly via THREAD_CTX_REGS_* offsets — see thread_a64.S; keep in sync.
+ */
+struct thread_ctx_regs {
+	uint64_t sp;
+	uint64_t pc;
+	uint64_t cpsr;
+	uint64_t x[31];
+};
+#endif /*ARM64*/
+
+#ifdef ARM64
+/*
+ * Record pushed on the kernel stack by __thread_enter_user_mode() and
+ * consumed by thread_unwind_user_mode(). Accessed from assembly via
+ * THREAD_USER_MODE_REC_* offsets — keep in sync with thread_a64.S.
+ */
+struct thread_user_mode_rec {
+	uint64_t exit_status0_ptr;
+	uint64_t exit_status1_ptr;
+	uint64_t x[31 - 19]; /* x19..x30 */
+};
+#endif /*ARM64*/
+
+#ifdef CFG_WITH_VFP
+/* Lazily saved VFP state for normal world (ns) and secure world (sec) */
+struct thread_vfp_state {
+	bool ns_saved;
+	bool sec_saved;
+	bool sec_lazy_saved;
+	struct vfp_state ns;
+	struct vfp_state sec;
+	struct thread_user_vfp_state *uvfp;
+};
+
+#endif /*CFG_WITH_VFP*/
+
+/* Per-thread context, one entry per entry in the threads[] array */
+struct thread_ctx {
+	struct thread_ctx_regs regs;
+	enum thread_state state;
+	vaddr_t stack_va_end;
+	uint32_t hyp_clnt_id;
+	uint32_t flags;
+	struct core_mmu_user_map user_map;
+	bool have_user_map;
+#ifdef ARM64
+	vaddr_t kern_sp;	/* Saved kernel SP during user TA execution */
+#endif
+#ifdef CFG_WITH_VFP
+	struct thread_vfp_state vfp_state;
+#endif
+	void *rpc_arg;
+	uint64_t rpc_carg;
+	struct mutex_head mutexes;
+	struct thread_specific_data tsd;
+};
+
+#ifdef ARM64
+/*
+ * struct thread_core_local need to have alignment suitable for a stack
+ * pointer since SP_EL1 points to this
+ */
+#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
+#else
+#define THREAD_CORE_LOCAL_ALIGNED
+#endif
+
+/* Per-CPU state; on ARM64 SP_EL1 points at this structure */
+struct thread_core_local {
+	vaddr_t tmp_stack_va_end;
+	int curr_thread;
+#ifdef ARM64
+	uint32_t flags;
+	vaddr_t abt_stack_va_end;
+	uint64_t x[4];	/* Scratch save area for x0-x3 on exception entry */
+#endif
+#ifdef CFG_TEE_CORE_DEBUG
+	unsigned int locked_count; /* Number of spinlocks held */
+#endif
+} THREAD_CORE_LOCAL_ALIGNED;
+
+#endif /*ASM*/
+
+#ifdef ARM64
+#ifdef CFG_WITH_VFP
+/*
+ * Assembly-visible size reserved for saved VFP state — presumably sized
+ * to hold struct thread_vfp_state; TODO confirm against that layout.
+ */
+#define THREAD_VFP_STATE_SIZE				\
+	(16 + (16 * 32 + 16) * 2 + 16)
+#else
+#define THREAD_VFP_STATE_SIZE				0
+#endif
+
+/* Describes the flags field of struct thread_core_local */
+#define THREAD_CLF_SAVED_SHIFT			4
+#define THREAD_CLF_CURR_SHIFT			0
+#define THREAD_CLF_MASK				0xf
+#define THREAD_CLF_TMP_SHIFT			0
+#define THREAD_CLF_ABORT_SHIFT			1
+#define THREAD_CLF_IRQ_SHIFT			2
+#define THREAD_CLF_FIQ_SHIFT			3
+
+#define THREAD_CLF_TMP				(1 << THREAD_CLF_TMP_SHIFT)
+#define THREAD_CLF_ABORT			(1 << THREAD_CLF_ABORT_SHIFT)
+#define THREAD_CLF_IRQ				(1 << THREAD_CLF_IRQ_SHIFT)
+#define THREAD_CLF_FIQ				(1 << THREAD_CLF_FIQ_SHIFT)
+
+#endif /*ARM64*/
+
+#ifndef ASM
+/*
+ * Initializes VBAR for current CPU (called by thread_init_per_cpu())
+ */
+void thread_init_vbar(void);
+
+/* Handles a stdcall, r0-r7 holds the parameters */
+void thread_std_smc_entry(void);
+
+/* Returns this CPU's struct thread_core_local */
+struct thread_core_local *thread_get_core_local(void);
+
+/*
+ * Resumes execution of currently active thread by restoring context and
+ * jumping to the instruction where to continue execution.
+ *
+ * Arguments supplied by non-secure world will be copied into the saved
+ * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
+ * in the flags field in the thread context.
+ */
+void thread_resume(struct thread_ctx_regs *regs);
+
+uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
+		unsigned long a2, unsigned long a3, unsigned long user_sp,
+		unsigned long user_func, unsigned long spsr,
+		uint32_t *exit_status0, uint32_t *exit_status1);
+
+/*
+ * Private functions made available for thread_asm.S
+ */
+
+/* Returns the temp stack for current CPU */
+void *thread_get_tmp_sp(void);
+
+/*
+ * Marks the current thread as suspended and updates the flags
+ * for the thread context (see thread_resume() for use of flags).
+ * Returns thread index of the thread that was suspended.
+ */
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
+
+/*
+ * Marks the current thread as free.
+ */
+void thread_state_free(void);
+
+/* Returns a pointer to the saved registers in current thread context. */
+struct thread_ctx_regs *thread_get_ctx_regs(void);
+
+#ifdef ARM32
+/* Sets sp for abort mode */
+void thread_set_abt_sp(vaddr_t sp);
+
+/* Sets sp for irq mode */
+void thread_set_irq_sp(vaddr_t sp);
+
+/* Sets sp for fiq mode */
+void thread_set_fiq_sp(vaddr_t sp);
+#endif /*ARM32*/
+
+/* Handles a fast SMC by dispatching it to the registered fast SMC handler */
+void thread_handle_fast_smc(struct thread_smc_args *args);
+
+/* Handles a std SMC by dispatching it to the registered std SMC handler */
+void thread_handle_std_smc(struct thread_smc_args *args);
+
+/*
+ * Suspends current thread and temporarily exits to non-secure world.
+ * This function returns later when non-secure world returns.
+ *
+ * The purpose of this function is to request services from non-secure
+ * world.
+ */
+#define THREAD_RPC_NUM_ARGS	6
+void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
+
+/* Checks stack canaries */
+void thread_check_canaries(void);
+
+/* Entry point called from assembly for a std SMC on the thread stack */
+void __thread_std_smc_entry(struct thread_smc_args *args);
+
+#endif /*ASM*/
+
+#endif /*THREAD_PRIVATE_H*/
diff --git a/core/arch/arm/kernel/trace_ext.c b/core/arch/arm/kernel/trace_ext.c
new file mode 100644
index 0000000..8b8454c
--- /dev/null
+++ b/core/arch/arm/kernel/trace_ext.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdbool.h>
+#include <trace.h>
+#include <console.h>
+#include <kernel/thread.h>
+
+const char trace_ext_prefix[] = "TEE-CORE";
+int trace_level = TRACE_LEVEL;
+
+/*
+ * Write a NUL-terminated string to the console one character at a time.
+ * The console is flushed both before and after the write so the trace
+ * text is not interleaved with previously buffered output.
+ */
+void trace_ext_puts(const char *str)
+{
+ const char *p;
+
+ /* Drain anything already queued before emitting the new text */
+ console_flush();
+
+ for (p = str; *p; p++)
+ console_putc(*p);
+
+ /* Push the freshly written characters out to the device */
+ console_flush();
+}
+
+/*
+ * Trace hook: return an id for the calling thread for use in trace
+ * output. Forwards to thread_get_id_may_fail(); the "_may_fail" variant
+ * presumably returns an error value instead of panicking when no thread
+ * context is active (e.g. early boot) -- confirm in kernel/thread.c.
+ */
+int trace_ext_get_thread_id(void)
+{
+ return thread_get_id_may_fail();
+}
diff --git a/core/arch/arm/kernel/tz_ssvce_pl310_a32.S b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
new file mode 100644
index 0000000..184e936
--- /dev/null
+++ b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
+#include <platform_config.h>
+
+#define PL310_LOCKDOWN_NBREGS 8
+#define PL310_LOCKDOWN_SZREG 4
+
+#define PL310_8WAYS_MASK 0x00FF
+#define PL310_16WAYS_UPPERMASK 0xFF00
+
+/*
+ * void arm_cl2_lockallways(vaddr_t base)
+ *
+ * lock all L2 caches ways for data and instruction
+ */
+FUNC arm_cl2_lockallways , :
+UNWIND( .fnstart)
+ /* r1 = address of the first D-cache lockdown register */
+ add r1, r0, #PL310_DCACHE_LOCKDOWN_BASE
+ /* Build the way mask: 8 ways, extended to 16 when AUX_CTRL says so */
+ ldr r2, [r0, #PL310_AUX_CTRL]
+ tst r2, #PL310_AUX_16WAY_BIT
+ mov r2, #PL310_8WAYS_MASK
+ orrne r2, #PL310_16WAYS_UPPERMASK
+ /* Walk all lockdown register pairs (D then I at consecutive offsets) */
+ mov r0, #PL310_LOCKDOWN_NBREGS
+1: /* lock Dcache and Icache */
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ str r2, [r1], #PL310_LOCKDOWN_SZREG
+ subs r0, r0, #1
+ bne 1b
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_lockallways
+
+/*
+ * Set sync operation mask according to ways associativity.
+ * Preserve r0 = pl310 iomem base address
+ */
+.macro syncbyway_set_mask reg
+ /* Read associativity from the auxiliary control register */
+ ldr \reg, [r0, #PL310_AUX_CTRL]
+ tst \reg, #PL310_AUX_16WAY_BIT
+ /* \reg = way mask: low 8 bits always, high 8 bits only for 16 ways */
+ mov \reg, #PL310_8WAYS_MASK
+ orrne \reg, \reg, #PL310_16WAYS_UPPERMASK
+.endm
+
+/*
+ * void arm_cl2_cleaninvbyway(vaddr_t base)
+ * clean & invalidate the whole L2 cache.
+ */
+FUNC arm_cl2_cleaninvbyway , :
+UNWIND( .fnstart)
+
+ /* Trigger clean+invalidate on every way at once */
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_FLUSH_BY_WAY]
+
+ /* Wait for all cache ways to be cleaned and invalidated */
+loop_cli_way_done:
+ ldr r2, [r0, #PL310_FLUSH_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cli_way_done
+
+ /* Cache Sync */
+
+ /* Wait for writing cache sync */
+loop_cli_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync
+
+ /* Drain the controller's buffers by writing the cache sync register */
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+ /* Wait for the sync operation itself to complete */
+loop_cli_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cli_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbyway
+
+/* void arm_cl2_invbyway(vaddr_t base) */
+FUNC arm_cl2_invbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_INV_BY_WAY]
+
+loop_inv_way_done:
+ ldr r2, [r0, #PL310_INV_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_inv_way_done
+
+loop_inv_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_inv_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_inv_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbyway
+
+/* void arm_cl2_cleanbyway(vaddr_t base) */
+FUNC arm_cl2_cleanbyway , :
+UNWIND( .fnstart)
+
+ syncbyway_set_mask r1
+ str r1, [r0, #PL310_CLEAN_BY_WAY]
+
+loop_cl_way_done:
+ ldr r2, [r0, #PL310_CLEAN_BY_WAY]
+ and r2, r2, r1
+ cmp r2, #0
+ bne loop_cl_way_done
+
+loop_cl_way_sync:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync
+
+ mov r1, #1
+ str r1, [r0, #PL310_SYNC]
+
+loop_cl_way_sync_done:
+ ldr r1, [r0, #PL310_SYNC]
+ cmp r1, #0
+ bne loop_cl_way_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbyway
+
+/*
+ * void _arm_cl2_xxxbypa(vaddr_t pl310_base, paddr_t start, paddr_t end,
+ * int pl310value);
+ * pl310value is one of PL310_CLEAN_BY_PA, PL310_INV_BY_PA or PL310_FLUSH_BY_PA
+ */
+LOCAL_FUNC _arm_cl2_xxxbypa , :
+UNWIND( .fnstart)
+ /* Align start address on PL310 line size */
+ and r1, #(~(PL310_LINE_SIZE - 1))
+
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ * NOTE(review): the register constant is named SCU_ERRATA744369 while
+ * the errata number is 764369 -- looks like a transposed digit in the
+ * constant name; confirm against tz_ssvce_def.h.
+ */
+ /*
+ * NOTE:
+ * We're assuming that if mmu is enabled PL310_BASE and SCU_BASE
+ * still have the same relative offsets from each other.
+ */
+ sub r0, r0, #(PL310_BASE - SCU_BASE)
+ mov r12, #1
+ str r12, [r0, #SCU_ERRATA744369]
+ dsb
+ add r0, r0, #(PL310_BASE - SCU_BASE)
+
+ /* One maintenance operation (selected by r3) per cache line */
+loop_cl2_xxxbypa:
+ str r1, [r0, r3]
+
+ /* Poll until the controller has finished this line */
+loop_xxx_pa_done:
+ ldr r12, [r0, r3]
+ and r12, r12, r1
+ cmp r12, #0
+ bne loop_xxx_pa_done
+
+ /* Advance to the next line; loop while r1 <= end (r2) */
+ add r1, r1, #PL310_LINE_SIZE
+ cmp r2, r1
+ bpl loop_cl2_xxxbypa
+
+ /* Wait until the cache sync register is idle */
+loop_xxx_pa_sync:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync
+
+ /* Issue a cache sync to drain the controller's buffers */
+ mov r12, #1
+ str r12, [r0, #PL310_SYNC]
+
+ /* Wait for the sync to complete */
+loop_xxx_pa_sync_done:
+ ldr r12, [r0, #PL310_SYNC]
+ cmp r12, #0
+ bne loop_xxx_pa_sync_done
+
+ mov pc, lr
+UNWIND( .fnend)
+END_FUNC _arm_cl2_xxxbypa
+
+/*
+ * void arm_cl2_cleanbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleanbypa , :
+UNWIND( .fnstart)
+ /* Select the "clean by PA" operation register */
+ mov r3, #PL310_CLEAN_BY_PA
+ /* Tail call; _arm_cl2_xxxbypa returns directly to our caller */
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleanbypa
+
+/*
+ * void arm_cl2_invbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_invbypa , :
+UNWIND( .fnstart)
+ /* Select the "invalidate by PA" operation register */
+ mov r3, #PL310_INV_BY_PA
+ /* Tail call; _arm_cl2_xxxbypa returns directly to our caller */
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_invbypa
+
+/*
+ * void arm_cl2_cleaninvbypa(vaddr_t pl310_base, paddr_t start, paddr_t end);
+ * clean and invalidate L2 cache by physical address range.
+ */
+FUNC arm_cl2_cleaninvbypa , :
+UNWIND( .fnstart)
+ /* Select the "clean and invalidate by PA" operation register */
+ mov r3, #PL310_FLUSH_BY_PA
+ /* Tail call; _arm_cl2_xxxbypa returns directly to our caller */
+ b _arm_cl2_xxxbypa
+UNWIND( .fnend)
+END_FUNC arm_cl2_cleaninvbypa
+
diff --git a/core/arch/arm/kernel/unwind_arm32.c b/core/arch/arm/kernel/unwind_arm32.c
new file mode 100644
index 0000000..7efe94b
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm32.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2015 Linaro Limited
+ * Copyright 2013-2014 Andrew Turner.
+ * Copyright 2013-2014 Ian Lepore.
+ * Copyright 2013-2014 Rui Paulo.
+ * Copyright 2013 Eitan Adler.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/misc.h>
+#include <kernel/unwind.h>
+#include <string.h>
+#include <trace.h>
+
+/* The register names */
+#define FP 11
+#define SP 13
+#define LR 14
+#define PC 15
+
+/*
+ * Definitions for the instruction interpreter.
+ *
+ * The ARM EABI specifies how to perform the frame unwinding in the
+ * Exception Handling ABI for the ARM Architecture document. To perform
+ * the unwind we need to know the initial frame pointer, stack pointer,
+ * link register and program counter. We then find the entry within the
+ * index table that points to the function the program counter is within.
+ * This gives us either a list of three instructions to process, a 31-bit
+ * relative offset to a table of instructions, or a value telling us
+ * we can't unwind any further.
+ *
+ * When we have the instructions to process we need to decode them
+ * following table 4 in section 9.3. This describes a collection of bit
+ * patterns to encode that steps to take to update the stack pointer and
+ * link register to the correct values at the start of the function.
+ */
+
+/* A special case when we are unable to unwind past this function */
+#define EXIDX_CANTUNWIND 1
+
+/*
+ * Entry types.
+ * These are the only entry types that have been seen in the kernel.
+ */
+#define ENTRY_MASK 0xff000000
+#define ENTRY_ARM_SU16 0x80000000
+#define ENTRY_ARM_LU16 0x81000000
+
+/* Instruction masks. */
+#define INSN_VSP_MASK 0xc0
+#define INSN_VSP_SIZE_MASK 0x3f
+#define INSN_STD_MASK 0xf0
+#define INSN_STD_DATA_MASK 0x0f
+#define INSN_POP_TYPE_MASK 0x08
+#define INSN_POP_COUNT_MASK 0x07
+#define INSN_VSP_LARGE_INC_MASK 0xff
+
+/* Instruction definitions */
+#define INSN_VSP_INC 0x00
+#define INSN_VSP_DEC 0x40
+#define INSN_POP_MASKED 0x80
+#define INSN_VSP_REG 0x90
+#define INSN_POP_COUNT 0xa0
+#define INSN_FINISH 0xb0
+#define INSN_POP_REGS 0xb1
+#define INSN_VSP_LARGE_INC 0xb2
+
+/* An item in the exception index table */
+struct unwind_idx {
+ uint32_t offset;
+ uint32_t insn;
+};
+
+/*
+ * These are set in the linker script. Their addresses will be
+ * either the start or end of the exception table or index.
+ */
+extern struct unwind_idx __exidx_start;
+extern struct unwind_idx __exidx_end;
+
+/* Expand a 31-bit signed value to a 32-bit signed value */
+static int32_t expand_prel31(uint32_t prel31)
+{
+
+ return ((int32_t)(prel31 & 0x7fffffffu) << 1) / 2;
+}
+
+/*
+ * Perform a binary search of the index table to find the function
+ * with the largest address that doesn't exceed addr.
+ */
+/*
+ * Perform a binary search of the index table to find the function
+ * with the largest address that doesn't exceed addr.
+ *
+ * Each index entry's "offset" field is a prel31 offset from the field's
+ * own address to the start of the function it covers, so the table is
+ * sorted by function address.
+ */
+static struct unwind_idx *find_index(uint32_t addr)
+{
+ vaddr_t idx_start, idx_end;
+ unsigned int min, mid, max;
+ struct unwind_idx *start;
+ struct unwind_idx *item;
+ int32_t prel31_addr;
+ uint32_t func_addr;
+
+ start = &__exidx_start;
+ idx_start = (vaddr_t)&__exidx_start;
+ idx_end = (vaddr_t)&__exidx_end;
+
+ min = 0;
+ /*
+ * NOTE(review): max is the entry count, i.e. one past the last valid
+ * index. If addr is at or beyond the last entry's function address the
+ * loop below can evaluate start[count], one entry past the end of the
+ * table -- confirm callers only pass addresses covered by the table.
+ */
+ max = (idx_end - idx_start) / sizeof(struct unwind_idx);
+
+ while (min != max) {
+ mid = min + (max - min + 1) / 2;
+
+ item = &start[mid];
+
+ /* Recover the absolute start address of the candidate function */
+ prel31_addr = expand_prel31(item->offset);
+ func_addr = (uint32_t)&item->offset + prel31_addr;
+
+ if (func_addr <= addr) {
+ min = mid;
+ } else {
+ max = mid - 1;
+ }
+ }
+
+ return &start[min];
+}
+
+/* Reads the next byte from the instruction list */
+static uint8_t unwind_exec_read_byte(struct unwind_state *state)
+{
+ uint8_t insn;
+
+ /* Read the unwind instruction */
+ insn = (*state->insn) >> (state->byte * 8);
+
+ /* Update the location of the next instruction */
+ if (state->byte == 0) {
+ state->byte = 3;
+ state->insn++;
+ state->entries--;
+ } else
+ state->byte--;
+
+ return insn;
+}
+
+/* Executes the next instruction on the list */
+static bool unwind_exec_insn(struct unwind_state *state)
+{
+ unsigned int insn;
+ uint32_t *vsp = (uint32_t *)state->registers[SP];
+ int update_vsp = 0;
+
+ /* This should never happen */
+ if (state->entries == 0)
+ return false;
+
+ /* Read the next instruction */
+ insn = unwind_exec_read_byte(state);
+
+ if ((insn & INSN_VSP_MASK) == INSN_VSP_INC) {
+ state->registers[SP] += ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_VSP_MASK) == INSN_VSP_DEC) {
+ state->registers[SP] -= ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_MASKED) {
+ unsigned int mask, reg;
+
+ /* Load the mask */
+ mask = unwind_exec_read_byte(state);
+ mask |= (insn & INSN_STD_DATA_MASK) << 8;
+
+ /* We have a refuse to unwind instruction */
+ if (mask == 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 4; mask && reg < 16; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+
+ /* If we have updated SP kep its value */
+ if (reg == SP)
+ update_vsp = 0;
+ }
+ }
+
+ } else if ((insn & INSN_STD_MASK) == INSN_VSP_REG &&
+ ((insn & INSN_STD_DATA_MASK) != 13) &&
+ ((insn & INSN_STD_DATA_MASK) != 15)) {
+ /* sp = register */
+ state->registers[SP] =
+ state->registers[insn & INSN_STD_DATA_MASK];
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_COUNT) {
+ unsigned int count, reg;
+
+ /* Read how many registers to load */
+ count = insn & INSN_POP_COUNT_MASK;
+
+ /* Update sp */
+ update_vsp = 1;
+
+ /* Pop the registers */
+ for (reg = 4; reg <= 4 + count; reg++) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+
+ /* Check if we are in the pop r14 version */
+ if ((insn & INSN_POP_TYPE_MASK) != 0) {
+ state->registers[14] = *vsp++;
+ }
+
+ } else if (insn == INSN_FINISH) {
+ /* Stop processing */
+ state->entries = 0;
+
+ } else if (insn == INSN_POP_REGS) {
+ unsigned int mask, reg;
+
+ mask = unwind_exec_read_byte(state);
+ if (mask == 0 || (mask & 0xf0) != 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 0; mask && reg < 4; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+ }
+
+ } else if ((insn & INSN_VSP_LARGE_INC_MASK) == INSN_VSP_LARGE_INC) {
+ unsigned int uleb128;
+
+ /* Read the increment value */
+ uleb128 = unwind_exec_read_byte(state);
+
+ state->registers[SP] += 0x204 + (uleb128 << 2);
+
+ } else {
+ /* We hit a new instruction that needs to be implemented */
+ DMSG("Unhandled instruction %.2x\n", insn);
+ return false;
+ }
+
+ if (update_vsp) {
+ state->registers[SP] = (uint32_t)vsp;
+ }
+
+ return true;
+}
+
+/* Performs the unwind of a function */
+static bool unwind_tab(struct unwind_state *state)
+{
+ uint32_t entry;
+
+ /* Set PC to a known value */
+ state->registers[PC] = 0;
+
+ /* Read the personality */
+ entry = *state->insn & ENTRY_MASK;
+
+ if (entry == ENTRY_ARM_SU16) {
+ state->byte = 2;
+ state->entries = 1;
+ } else if (entry == ENTRY_ARM_LU16) {
+ state->byte = 1;
+ state->entries = ((*state->insn >> 16) & 0xFF) + 1;
+ } else {
+ DMSG("Unknown entry: %x\n", entry);
+ return true;
+ }
+
+ while (state->entries > 0) {
+ if (!unwind_exec_insn(state))
+ return true;
+ }
+
+ /*
+ * The program counter was not updated, load it from the link register.
+ */
+ if (state->registers[PC] == 0) {
+ state->registers[PC] = state->registers[LR];
+
+ /*
+ * If the program counter changed, flag it in the update mask.
+ */
+ if (state->start_pc != state->registers[PC])
+ state->update_mask |= 1 << PC;
+ }
+
+ return false;
+}
+
+/*
+ * Unwind one stack frame: look up the exception index entry covering the
+ * current PC and execute its unwind instructions, updating the register
+ * state to that of the caller. Returns true while more frames remain,
+ * false when the top of the stack (EXIDX_CANTUNWIND) is reached or the
+ * entry cannot be processed.
+ */
+bool unwind_stack(struct unwind_state *state)
+{
+ struct unwind_idx *index;
+ bool finished;
+
+ /* Reset the mask of updated registers */
+ state->update_mask = 0;
+
+ /* The pc value is correct and will be overwritten, save it */
+ state->start_pc = state->registers[PC];
+
+ /* Find the item to run */
+ index = find_index(state->start_pc);
+
+ finished = false;
+ if (index->insn != EXIDX_CANTUNWIND) {
+ /* Bit 31 set: compact entry, instructions are inline */
+ if (index->insn & (1U << 31)) {
+ /* The data is within the instruction */
+ state->insn = &index->insn;
+ } else {
+ /* A prel31 offset to the unwind table */
+ state->insn = (uint32_t *)
+ ((uintptr_t)&index->insn +
+ expand_prel31(index->insn));
+ }
+ /* Run the unwind function */
+ finished = unwind_tab(state);
+ }
+
+ /* This is the top of the stack, finish */
+ if (index->insn == EXIDX_CANTUNWIND)
+ finished = true;
+
+ return !finished;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+/*
+ * Print a backtrace of the current call stack at the given trace level.
+ * The unwind state is seeded from the live registers, with PC set to
+ * this function's own address so it forms the first reported frame.
+ */
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(state.registers, 0, sizeof(state.registers));
+ /* r7: Thumb-style frame pointer */
+ state.registers[7] = read_r7();
+ /* r11: ARM-style frame pointer */
+ state.registers[FP] = read_fp();
+ state.registers[SP] = read_sp();
+ state.registers[LR] = read_lr();
+ state.registers[PC] = (uint32_t)print_stack;
+
+ /* Emit one line per frame at the requested severity */
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%08" PRIx32, state.registers[PC]);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
+
+/*
+ * These functions are referenced but never used
+ */
+/* EHABI compact-model personality routine 0 ("su16") -- empty stub */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+/* EHABI compact-model personality routine 1 ("lu16") -- empty stub */
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+/* EHABI compact-model personality routine 2 ("lu32") -- empty stub */
+void __aeabi_unwind_cpp_pr2(void);
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
diff --git a/core/arch/arm/kernel/unwind_arm64.c b/core/arch/arm/kernel/unwind_arm64.c
new file mode 100644
index 0000000..10b70ef
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm64.c
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arm.h>
+#include <kernel/unwind.h>
+#include <kernel/thread.h>
+#include <string.h>
+#include <trace.h>
+
+/*
+ * Step one frame up the call stack using the AArch64 frame-pointer
+ * chain. Each frame record is a pair of 64-bit values at the frame
+ * pointer: the saved FP (x29) at fp+0 and the saved LR (x30) at fp+8.
+ * The reported pc is LR minus 4 so it points at the call instruction
+ * rather than the return address.
+ *
+ * Returns false (stop unwinding) when the frame pointer no longer lies
+ * within the current thread's stack.
+ */
+bool unwind_stack(struct unwind_state *frame)
+{
+ uint64_t fp;
+
+ fp = frame->fp;
+ if (!thread_addr_is_in_stack(fp))
+ return false;
+
+ /* The frame record occupies 16 bytes at the bottom of the frame */
+ frame->sp = fp + 0x10;
+ /* FP to previous frame (X29) */
+ frame->fp = *(uint64_t *)(fp);
+ /* LR (X30) */
+ frame->pc = *(uint64_t *)(fp + 8) - 4;
+
+ return true;
+}
+
+#if defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0)
+
+/*
+ * Print a backtrace of the current call stack at the given trace level.
+ * The unwind state is seeded from the live PC and frame pointer, so the
+ * first reported frame is this function itself.
+ */
+void print_stack(int level)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = read_pc();
+ state.fp = read_fp();
+
+ /* Emit one line per frame at the requested severity */
+ do {
+ switch (level) {
+ case TRACE_FLOW:
+ FMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_DEBUG:
+ DMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_INFO:
+ IMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ case TRACE_ERROR:
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ break;
+ default:
+ break;
+ }
+ } while (unwind_stack(&state));
+}
+
+#endif /* defined(CFG_CORE_UNWIND) && (TRACE_LEVEL > 0) */
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
new file mode 100644
index 0000000..a63fb22
--- /dev/null
+++ b/core/arch/arm/kernel/user_ta.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015-2017 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <compiler.h>
+#include <keep.h>
+#include <kernel/panic.h>
+#include <kernel/tee_ta_manager.h>
+#include <kernel/thread.h>
+#include <kernel/user_ta.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/pgt_cache.h>
+#include <mm/tee_mm.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg_supplicant.h>
+#include <signed_hdr.h>
+#include <stdlib.h>
+#include <ta_pub_key.h>
+#include <tee/tee_cryp_provider.h>
+#include <tee/tee_cryp_utl.h>
+#include <tee/tee_obj.h>
+#include <tee/tee_svc_cryp.h>
+#include <tee/tee_svc.h>
+#include <tee/tee_svc_storage.h>
+#include <tee/uuid.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <utee_defines.h>
+#include <util.h>
+
+#include "elf_load.h"
+#include "elf_common.h"
+
+#define STACK_ALIGNMENT (sizeof(long) * 2)
+
+/*
+ * Copy the signed header of a TA binary from non-secure shared memory
+ * into a freshly allocated secure-world buffer.
+ *
+ * Both the fixed-size part and the full header (length from
+ * SHDR_GET_SIZE()) must lie entirely in non-secure memory, otherwise
+ * TEE_ERROR_SECURITY is returned. Copying into secure memory guards
+ * against the normal world modifying the header after validation
+ * (time-of-check/time-of-use).
+ *
+ * On success *sec_shdr points to a malloc'd copy the caller must free.
+ */
+static TEE_Result load_header(const struct shdr *signed_ta,
+ struct shdr **sec_shdr)
+{
+ size_t s;
+
+ if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
+ return TEE_ERROR_SECURITY;
+
+ s = SHDR_GET_SIZE(signed_ta);
+ if (!tee_vbuf_is_non_sec(signed_ta, s))
+ return TEE_ERROR_SECURITY;
+
+ /* Copy signed header into secure memory */
+ *sec_shdr = malloc(s);
+ if (!*sec_shdr)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ memcpy(*sec_shdr, signed_ta, s);
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result check_shdr(struct shdr *shdr)
+{
+ struct rsa_public_key key;
+ TEE_Result res;
+ uint32_t e = TEE_U32_TO_BIG_ENDIAN(ta_pub_key_exponent);
+ size_t hash_size;
+
+ if (shdr->magic != SHDR_MAGIC || shdr->img_type != SHDR_TA)
+ return TEE_ERROR_SECURITY;
+
+ if (TEE_ALG_GET_MAIN_ALG(shdr->algo) != TEE_MAIN_ALGO_RSA)
+ return TEE_ERROR_SECURITY;
+
+ res = tee_hash_get_digest_size(TEE_DIGEST_HASH_TO_ALGO(shdr->algo),
+ &hash_size);
+ if (res != TEE_SUCCESS)
+ return res;
+ if (hash_size != shdr->hash_size)
+ return TEE_ERROR_SECURITY;
+
+ if (!crypto_ops.acipher.alloc_rsa_public_key ||
+ !crypto_ops.acipher.free_rsa_public_key ||
+ !crypto_ops.acipher.rsassa_verify ||
+ !crypto_ops.bignum.bin2bn)
+ return TEE_ERROR_NOT_SUPPORTED;
+
+ res = crypto_ops.acipher.alloc_rsa_public_key(&key, shdr->sig_size);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ res = crypto_ops.bignum.bin2bn((uint8_t *)&e, sizeof(e), key.e);
+ if (res != TEE_SUCCESS)
+ goto out;
+ res = crypto_ops.bignum.bin2bn(ta_pub_key_modulus,
+ ta_pub_key_modulus_size, key.n);
+ if (res != TEE_SUCCESS)
+ goto out;
+
+ res = crypto_ops.acipher.rsassa_verify(shdr->algo, &key, -1,
+ SHDR_GET_HASH(shdr), shdr->hash_size,
+ SHDR_GET_SIG(shdr), shdr->sig_size);
+out: