author		Patrick Delaunay <patrick.delaunay@foss.st.com>	2021-05-07 14:50:35 +0200
committer	Tom Rini <trini@konsulko.com>			2021-06-07 10:48:40 -0400
commit		ade4e0428f4d85454fdb3818702facc7728a274a (patch)
tree		09d104dbed9bf79b7e4d05a799e18cc9485836ff
parent		7dc6068fc10d699558176da364a3e7a6cfccdaa1 (diff)
stm32mp: don't map the reserved region with no-map property
Stop mapping the reserved regions carrying the "no-map" property: mark the corresponding TLB entries as invalid entries (=0) to avoid speculative access.

The device tree parsing done in lmb_init_and_reserve() takes a long time when it is executed without the data cache, so it is called in enable_caches() before the cache is deactivated.

This patch fixes an issue where predictive read accesses on the secure DDR OP-TEE reserved area were caught by the firewall.

Series-cc: marex
Series-cc: pch
Series-cc: marek.bykowski@gmail.com
Series-cc: Ard Biesheuvel <ardb@kernel.org>
Series-cc: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Patrick Delaunay <patrick.delaunay@foss.st.com>
-rw-r--r--	arch/arm/mach-stm32mp/cpu.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
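Stripped of the U-Boot specifics, the change walks a DDR bank in MMU-section-sized steps and leaves every section that falls inside a "no-map" reserved range unmapped (invalid TLB entry) instead of cacheable. The standalone C sketch below illustrates only that check: the 2 MiB section size, the example carve-out addresses and the is_nomap() helper are assumptions made for the illustration; the actual patch that follows relies on lmb_is_reserved_flags() and set_section_dcache() instead.

/* Standalone sketch of the "skip no-map sections" idea; not U-Boot code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MMU_SECTION_SHIFT	21	/* 2 MiB sections, assumed for the example */
#define DCACHE_DEFAULT_OPTION	1	/* stand-in for the "cacheable" attributes */

struct nomap_region {
	uint64_t base;
	uint64_t size;
};

/* Example carve-out standing in for an OP-TEE "no-map" reserved-memory node */
static const struct nomap_region nomap[] = {
	{ .base = 0xde000000, .size = 0x02000000 },
};

static bool is_nomap(uint64_t addr)
{
	for (size_t i = 0; i < sizeof(nomap) / sizeof(nomap[0]); i++)
		if (addr >= nomap[i].base && addr - nomap[i].base < nomap[i].size)
			return true;
	return false;
}

int main(void)
{
	uint64_t start = 0xc0000000, size = 0x40000000;	/* example 1 GiB DDR bank */
	uint64_t i;

	for (i = start >> MMU_SECTION_SHIFT;
	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT); i++) {
		unsigned int option = DCACHE_DEFAULT_OPTION;

		if (is_nomap(i << MMU_SECTION_SHIFT))
			option = 0;	/* invalid entry: no speculative access */
		/* in U-Boot this is where set_section_dcache(i, option) runs */
		if (option == 0)
			printf("section 0x%llx left unmapped\n",
			       (unsigned long long)(i << MMU_SECTION_SHIFT));
	}
	return 0;
}

Marking the section invalid, rather than merely non-cacheable, is what keeps the CPU from issuing speculative reads into the firewalled OP-TEE region.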
diff --git a/arch/arm/mach-stm32mp/cpu.c b/arch/arm/mach-stm32mp/cpu.c
index 8115d58b19..592bfd413d 100644
--- a/arch/arm/mach-stm32mp/cpu.c
+++ b/arch/arm/mach-stm32mp/cpu.c
@@ -12,6 +12,7 @@
 #include <env.h>
 #include <init.h>
 #include <log.h>
+#include <lmb.h>
 #include <misc.h>
 #include <net.h>
 #include <asm/io.h>
@@ -90,6 +91,8 @@
  */
 u8 early_tlb[PGTABLE_SIZE] __section(".data") __aligned(0x4000);
+struct lmb lmb;
+
 #if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
 #ifndef CONFIG_TFABOOT
 static void security_init(void)
@@ -221,6 +224,8 @@ void dram_bank_mmu_setup(int bank)
 	int i;
 	phys_addr_t start;
 	phys_size_t size;
+	bool use_lmb = false;
+	enum dcache_option option;
 	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
 		start = ALIGN_DOWN(STM32_SYSRAM_BASE, MMU_SECTION_SIZE);
@@ -229,6 +234,7 @@ void dram_bank_mmu_setup(int bank)
 		/* bd->bi_dram is available only after relocation */
 		start = bd->bi_dram[bank].start;
 		size = bd->bi_dram[bank].size;
+		use_lmb = true;
 	} else {
 		/* mark cacheable and executable the beggining of the DDR */
 		start = STM32_DDR_BASE;
@@ -237,8 +243,12 @@ void dram_bank_mmu_setup(int bank)
 	for (i = start >> MMU_SECTION_SHIFT;
 	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
-	     i++)
-		set_section_dcache(i, DCACHE_DEFAULT_OPTION);
+	     i++) {
+		option = DCACHE_DEFAULT_OPTION;
+		if (use_lmb && lmb_is_reserved_flags(&lmb, i << MMU_SECTION_SHIFT, LMB_NOMAP))
+			option = 0; /* INVALID ENTRY in TLB */
+		set_section_dcache(i, option);
+	}
 }
 /*
  * initialize the MMU and activate cache in SPL or in U-Boot pre-reloc stage
@@ -302,6 +312,9 @@ int arch_cpu_init(void)
 void enable_caches(void)
 {
+	/* parse device tree when data cache is still activated */
+	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
+
 	/* I-cache is already enabled in start.S: icache_enable() not needed */
 	/* deactivate the data cache, early enabled in arch_cpu_init() */