AArch32: Disable Secure Cycle Counter
[project/bcm63xx/atf.git]
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index e145511d1b8a5026963316b5195e6d49b23a38f0..0a684754cef8b643ee7f94b794d9c74308198689 100644
@@ -1,33 +1,56 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
-#include <bl_common.h>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
 #include <context.h>
 #include <el3_common_macros.S>
-#include <runtime_svc.h>
-#include <smcc_helpers.h>
-#include <smcc_macros.S>
-#include <xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
 
        .globl  sp_min_vector_table
        .globl  sp_min_entrypoint
        .globl  sp_min_warm_entrypoint
+       .globl  sp_min_handle_smc
+       .globl  sp_min_handle_fiq
 
+       .macro route_fiq_to_sp_min reg
+               /* -----------------------------------------------------
+                * FIQs are secure interrupts trapped by the Monitor:
+                * setting SCR.FIQ routes FIQs to Monitor mode, and
+                * clearing SCR.FW stops the Non-secure world from
+                * masking them via CPSR.F.
+                * -----------------------------------------------------
+                */
+               ldcopr  \reg, SCR
+               orr     \reg, \reg, #SCR_FIQ_BIT
+               bic     \reg, \reg, #SCR_FW_BIT
+               stcopr  \reg, SCR
+       .endm
+
+       .macro clrex_on_monitor_entry
+#if (ARM_ARCH_MAJOR == 7)
+       /*
+        * ARMv7 architectures need to clear the exclusive monitor when
+        * entering Monitor mode, so that a reservation left by the
+        * interrupted context cannot make a later STREX succeed spuriously.
+        */
+       clrex
+#endif
+       .endm
 
 vector_base sp_min_vector_table
        b       sp_min_entrypoint
        b       plat_panic_handler      /* Undef */
-       b       handle_smc              /* Syscall */
+       b       sp_min_handle_smc       /* Syscall */
        b       plat_panic_handler      /* Prefetch abort */
        b       plat_panic_handler      /* Data abort */
        b       plat_panic_handler      /* Reserved */
        b       plat_panic_handler      /* IRQ */
-       b       plat_panic_handler      /* FIQ */
+       b       sp_min_handle_fiq       /* FIQ */
 
 
 /*
@@ -41,20 +64,22 @@ func sp_min_entrypoint
         * specific structure
         * ---------------------------------------------------------------
         */
-       mov     r11, r0
-       mov     r12, r1
+       mov     r9, r0
+       mov     r10, r1
+       mov     r11, r2
+       mov     r12, r3
 
        /* ---------------------------------------------------------------------
         * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
         * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
         * and primary/secondary CPU logic should not be executed in this case.
         *
-        * Also, assume that the previous bootloader has already set up the CPU
-        * endianness and has initialised the memory.
+        * Also, assume that the previous bootloader has already initialised the
+        * SCTLR, including the CPU endianness, and has initialised the memory.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
-               _set_endian=0                                   \
+               _init_sctlr=0                                   \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
@@ -65,8 +90,6 @@ func sp_min_entrypoint
         * Relay the previous bootloader's arguments to the platform layer
         * ---------------------------------------------------------------------
         */
-       mov     r0, r11
-       mov     r1, r12
 #else
        /* ---------------------------------------------------------------------
         * For RESET_TO_SP_MIN systems which have a programmable reset address,
@@ -75,7 +98,7 @@ func sp_min_entrypoint
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
-               _set_endian=1                                   \
+               _init_sctlr=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
@@ -88,11 +111,22 @@ func sp_min_entrypoint
         * Zero the arguments passed to the platform layer to reflect that.
         * ---------------------------------------------------------------------
         */
-       mov     r0, #0
-       mov     r1, #0
+       mov     r9, #0
+       mov     r10, #0
+       mov     r11, #0
+       mov     r12, #0
+
 #endif /* RESET_TO_SP_MIN */
 
-       bl      sp_min_early_platform_setup
+#if SP_MIN_WITH_SECURE_FIQ
+       route_fiq_to_sp_min r4
+#endif
+
+       mov     r0, r9
+       mov     r1, r10
+       mov     r2, r11
+       mov     r3, r12
+       bl      sp_min_early_platform_setup2
        bl      sp_min_plat_arch_setup
 
        /* Jump to the main function */
@@ -126,11 +160,13 @@ endfunc sp_min_entrypoint
 /*
  * SMC handling function for SP_MIN.
  */
-func handle_smc
+func sp_min_handle_smc
        /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
        str     lr, [sp, #SMC_CTX_LR_MON]
 
-       smcc_save_gp_mode_regs
+       smccc_save_gp_mode_regs
+
+       clrex_on_monitor_entry
 
        /*
         * `sp` still points to `smc_ctx_t`. Save it to a register
@@ -163,7 +199,41 @@ func handle_smc
 
        /* `r0` points to `smc_ctx_t` */
        b       sp_min_exit
-endfunc handle_smc
+endfunc sp_min_handle_smc
+
+/*
+ * Secure Interrupts handling function for SP_MIN.
+ */
+func sp_min_handle_fiq
+#if !SP_MIN_WITH_SECURE_FIQ
+       b plat_panic_handler
+#else
+       /* On FIQ entry, lr holds the preferred return address + 4; adjust it */
+       sub     lr, lr, #4
+       /* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+       str     lr, [sp, #SMC_CTX_LR_MON]
+
+       smccc_save_gp_mode_regs
+
+       clrex_on_monitor_entry
+
+       /* load run-time stack */
+       mov     r2, sp
+       ldr     sp, [r2, #SMC_CTX_SP_MON]
+
+       /* Switch to the Secure world: restore the saved SCR with NS cleared */
+       ldr     r0, [r2, #SMC_CTX_SCR]
+       bic     r0, #SCR_NS_BIT
+       stcopr  r0, SCR
+       isb
+
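+       /*
+        * Keep the pointer to `smc_ctx_t` across the C call: it is popped
+        * back into r0 below and handed to sp_min_exit. r3 is pushed as
+        * well only to keep the stack 8-byte aligned across the call.
+        */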
+       push    {r2, r3}
+       bl      sp_min_fiq
+       pop     {r0, r3}
+
+       b       sp_min_exit
+#endif
+endfunc sp_min_handle_fiq
 
 /*
  * The Warm boot entrypoint for SP_MIN.
@@ -174,7 +244,7 @@ func sp_min_warm_entrypoint
         * 'el3_entrypoint_common' must be skipped:
         *
         *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
-        *    programming the reset address do we need to set the CPU endianness.
+        *    programming the reset address do we need to initialise the SCTLR.
         *    In other cases, we assume this has been taken care by the
         *    entrypoint code.
         *
@@ -187,7 +257,7 @@ func sp_min_warm_entrypoint
         *    it has been done once and for all on the cold boot path.
         */
        el3_entrypoint_common                                   \
-               _set_endian=PROGRAMMABLE_RESET_ADDRESS          \
+               _init_sctlr=PROGRAMMABLE_RESET_ADDRESS          \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
@@ -210,14 +280,15 @@ func sp_min_warm_entrypoint
         * enter coherency (as CPUs already are); and there's no reason to have
         * caches disabled either.
         */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+       mov     r0, #0
+#else
        mov     r0, #DISABLE_DCACHE
+#endif
        bl      bl32_plat_enable_mmu
 
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-       ldcopr  r0, SCTLR
-       orr     r0, r0, #SCTLR_C_BIT
-       stcopr  r0, SCTLR
-       isb
+#if SP_MIN_WITH_SECURE_FIQ
+       route_fiq_to_sp_min r0
 #endif
 
        bl      sp_min_warm_boot
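
For reference, the route_fiq_to_sp_min macro added above is a read-modify-write of the Secure Configuration Register. The C-level equivalent below uses the architectural SCR.FIQ (bit 2) and SCR.FW (bit 4) positions; read_scr()/write_scr() stand in for the ldcopr/stcopr coprocessor accessors and are assumed helpers, not part of this diff.

#include <stdint.h>

#define SCR_FIQ_BIT	(1u << 2)	/* FIQs are taken to Monitor mode */
#define SCR_FW_BIT	(1u << 4)	/* CPSR.F writable from Non-secure state */

/* Assumed accessors for the cp15 SCR register (MRC/MCR p15, 0, <r>, c1, c1, 0). */
extern uint32_t read_scr(void);
extern void write_scr(uint32_t scr);

/* C-level equivalent of the route_fiq_to_sp_min assembly macro (sketch). */
static inline void route_fiq_to_monitor(void)
{
	uint32_t scr = read_scr();

	scr |= SCR_FIQ_BIT;	/* route FIQs to the Monitor */
	scr &= ~SCR_FW_BIT;	/* Non-secure cannot mask FIQs via CPSR.F */
	write_scr(scr);
}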
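
The entry code also now stashes all four registers handed over by the previous bootloader (r0-r3) in r9-r12 and relays them to sp_min_early_platform_setup2, replacing the old two-argument sp_min_early_platform_setup. Below is a minimal sketch of a platform-side consumer, assuming the generic TF-A prototype with four u_register_t arguments; the plat_boot_args layout and the meaning assigned to each argument are hypothetical, since their interpretation is platform-defined.

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t u_register_t;	/* stand-in for TF-A's definition */

/* Hypothetical handover structure populated by the previous bootloader. */
struct plat_boot_args {
	u_register_t bl33_entrypoint;
	u_register_t dtb_base;
};

static struct plat_boot_args boot_args;

void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				  u_register_t arg2, u_register_t arg3)
{
	/*
	 * arg0..arg3 carry the r0-r3 values staged in r9-r12 by
	 * sp_min_entrypoint, or zero on RESET_TO_SP_MIN builds.
	 */
	const struct plat_boot_args *args = (const struct plat_boot_args *)arg0;

	if (args != NULL)
		boot_args = *args;

	/* arg1-arg3 might carry a config address or be unused on this platform. */
	(void)arg1;
	(void)arg2;
	(void)arg3;
}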