AArch32: Disable Secure Cycle Counter
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index ebbee5acf0833a5b7e9f64c7856bd126fc1aff65..0a684754cef8b643ee7f94b794d9c74308198689 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -1,33 +1,56 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
-#include <bl_common.h>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
 #include <context.h>
 #include <el3_common_macros.S>
-#include <runtime_svc.h>
-#include <smcc_helpers.h>
-#include <smcc_macros.S>
-#include <xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
 
        .globl  sp_min_vector_table
        .globl  sp_min_entrypoint
        .globl  sp_min_warm_entrypoint
-
+       .globl  sp_min_handle_smc
+       .globl  sp_min_handle_fiq
+
+       .macro route_fiq_to_sp_min reg
+               /* -----------------------------------------------------
+                * FIQs are secure interrupts trapped by the Monitor. The
+                * Non-secure world is not allowed to mask the FIQs.
+                * -----------------------------------------------------
+                */
+               ldcopr  \reg, SCR
+               orr     \reg, \reg, #SCR_FIQ_BIT
+               bic     \reg, \reg, #SCR_FW_BIT
+               stcopr  \reg, SCR
+       .endm
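
A minimal C sketch of what the macro does to SCR, assuming the ARMv7-A
SCR bit layout (FIQ is bit 2, FW is bit 4; the helper name is
illustrative, not part of the source):

    #include <stdint.h>

    #define SCR_FIQ_BIT (1u << 2) /* FIQs taken to Monitor mode when set */
    #define SCR_FW_BIT  (1u << 4) /* CPSR.F writable from Non-secure when set */

    /* Route FIQs to the Monitor and stop Non-secure from masking them. */
    static inline uint32_t route_fiq_to_monitor(uint32_t scr)
    {
            scr |= SCR_FIQ_BIT;  /* trap FIQs in Monitor mode */
            scr &= ~SCR_FW_BIT;  /* Non-secure may not write CPSR.F */
            return scr;
    }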
+
+       .macro clrex_on_monitor_entry
+#if (ARM_ARCH_MAJOR == 7)
+       /*
+        * ARMv7 architectures need to clear the local exclusive monitor
+        * when entering Monitor mode.
+        */
+       clrex
+#endif
+       .endm
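
The guard exists because an interrupted LDREX/STREX sequence in the
other world can leave the local exclusive monitor open, and on ARMv7 a
later STREX in Monitor mode could then wrongly succeed against the
stale reservation. A hedged sketch (the helper name is mine; the macro
above is the authoritative form):

    /* Drop any open exclusive-monitor reservation on Monitor entry. */
    static inline void clear_exclusive_monitor(void)
    {
            __asm__ volatile("clrex" : : : "memory");
    }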
 
 vector_base sp_min_vector_table
        b       sp_min_entrypoint
        b       plat_panic_handler      /* Undef */
-       b       handle_smc              /* Syscall */
+       b       sp_min_handle_smc       /* Syscall */
        b       plat_panic_handler      /* Prefetch abort */
        b       plat_panic_handler      /* Data abort */
        b       plat_panic_handler      /* Reserved */
        b       plat_panic_handler      /* IRQ */
-       b       plat_panic_handler      /* FIQ */
+       b       sp_min_handle_fiq       /* FIQ */
 
 
 /*
@@ -41,20 +64,22 @@ func sp_min_entrypoint
         * specific structure
         * ---------------------------------------------------------------
         */
-       mov     r11, r0
-       mov     r12, r1
+       mov     r9, r0
+       mov     r10, r1
+       mov     r11, r2
+       mov     r12, r3
 
        /* ---------------------------------------------------------------------
         * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
         * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
         * and primary/secondary CPU logic should not be executed in this case.
         *
-        * Also, assume that the previous bootloader has already set up the CPU
-        * endianness and has initialised the memory.
+        * Also, assume that the previous bootloader has already initialised the
+        * SCTLR, including the CPU endianness, and has initialised the memory.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
-               _set_endian=0                                   \
+               _init_sctlr=0                                   \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
@@ -65,8 +90,6 @@ func sp_min_entrypoint
         * Relay the previous bootloader's arguments to the platform layer
         * ---------------------------------------------------------------------
         */
-       mov     r0, r11
-       mov     r1, r12
 #else
        /* ---------------------------------------------------------------------
         * For RESET_TO_SP_MIN systems which have a programmable reset address,
@@ -75,7 +98,7 @@ func sp_min_entrypoint
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
-               _set_endian=1                                   \
+               _init_sctlr=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
@@ -88,11 +111,22 @@ func sp_min_entrypoint
         * Zero the arguments passed to the platform layer to reflect that.
         * ---------------------------------------------------------------------
         */
-       mov     r0, #0
-       mov     r1, #0
+       mov     r9, #0
+       mov     r10, #0
+       mov     r11, #0
+       mov     r12, #0
+
 #endif /* RESET_TO_SP_MIN */
 
-       bl      sp_min_early_platform_setup
+#if SP_MIN_WITH_SECURE_FIQ
+       route_fiq_to_sp_min r4
+#endif
+
+       mov     r0, r9
+       mov     r1, r10
+       mov     r2, r11
+       mov     r3, r12
+       bl      sp_min_early_platform_setup2
        bl      sp_min_plat_arch_setup
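
The shuffle through r9-r12 keeps the bootloader arguments alive across
el3_entrypoint_common, then hands all four to the v2 platform hook. A
sketch of the assumed C-side prototype (verify against TF-A's platform
porting headers):

    #include <stdint.h>

    typedef uint32_t u_register_t; /* TF-A's native register type on AArch32 */

    /* Assumed prototype of the hook that receives r0-r3 from the
     * previous bootloader (or zeros on RESET_TO_SP_MIN systems). */
    void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1,
                                      u_register_t arg2, u_register_t arg3);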
 
        /* Jump to the main function */
@@ -115,21 +149,10 @@ func sp_min_entrypoint
        sub     r1, r1, r0
        bl      clean_dcache_range
 
-       /* Program the registers in cpu_context and exit monitor mode */
-       mov     r0, #NON_SECURE
-       bl      cm_get_context
-
-       /* Restore the SCR */
-       ldr     r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-       stcopr  r2, SCR
-       isb
-
-       /* Restore the SCTLR  */
-       ldr     r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-       stcopr  r2, SCTLR
-
        bl      smc_get_next_ctx
-       /* The other cpu_context registers have been copied to smc context */
+
+       /* r0 points to `smc_ctx_t` */
+       /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
        b       sp_min_exit
 endfunc sp_min_entrypoint
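
Both the cold and warm paths now end with r0 pointing at an
`smc_ctx_t`. An abbreviated, illustrative-only sketch of the fields
this file references via SMC_CTX_* offsets (the real layout and the
full register set are defined in TF-A's smccc_helpers header, not
here):

    #include <stdint.h>

    typedef uint32_t u_register_t; /* AArch32 */

    /* Field order is illustrative; only fields used in this file are shown. */
    typedef struct {
            u_register_t r0;      /* SMC_CTX_GPREG_R0: smc_fid / return value */
            /* ... r1-r12 plus the banked sp/lr/spsr of each mode ... */
            u_register_t scr;     /* SMC_CTX_SCR: saved SCR, NS bit -> flags */
            u_register_t lr_mon;  /* SMC_CTX_LR_MON: saved return address */
            u_register_t sp_mon;  /* SMC_CTX_SP_MON: C runtime stack pointer */
    } smc_ctx_sketch_t;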
 
@@ -137,46 +160,80 @@ endfunc sp_min_entrypoint
 /*
  * SMC handling function for SP_MIN.
  */
-func handle_smc
-       smcc_save_gp_mode_regs
+func sp_min_handle_smc
+       /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+       str     lr, [sp, #SMC_CTX_LR_MON]
+
+       smccc_save_gp_mode_regs
 
-       /* r0 points to smc_context */
-       mov     r2, r0                          /* handle */
-       ldcopr  r0, SCR
+       clrex_on_monitor_entry
 
        /*
-        * Save SCR in stack. r1 is pushed to meet the 8 byte
-        * stack alignment requirement.
+        * `sp` still points to `smc_ctx_t`. Save it to a register
+        * and restore the C runtime stack pointer to `sp`.
         */
-       push    {r0, r1}
+       mov     r2, sp                          /* handle */
+       ldr     sp, [r2, #SMC_CTX_SP_MON]
+
+       ldr     r0, [r2, #SMC_CTX_SCR]
        and     r3, r0, #SCR_NS_BIT             /* flags */
 
        /* Switch to Secure Mode */
        bic     r0, #SCR_NS_BIT
        stcopr  r0, SCR
        isb
+
        ldr     r0, [r2, #SMC_CTX_GPREG_R0]     /* smc_fid */
        /* Check whether an SMC64 is issued */
        tst     r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
-       beq     1f      /* SMC32 is detected */
+       beq     1f
+       /* SMC32 is not detected; return an error to the caller */
        mov     r0, #SMC_UNK
        str     r0, [r2, #SMC_CTX_GPREG_R0]
        mov     r0, r2
-       b       2f      /* Skip handling the SMC */
+       b       sp_min_exit
 1:
+       /* SMC32 is detected */
        mov     r1, #0                          /* cookie */
        bl      handle_runtime_svc
-2:
-       /* r0 points to smc context */
 
-       /* Restore SCR from stack */
-       pop     {r1, r2}
-       stcopr  r1, SCR
+       /* `r0` points to `smc_ctx_t` */
+       b       sp_min_exit
+endfunc sp_min_handle_smc
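
The SMC32/SMC64 test keys off bit 30 of the function ID, the
calling-convention bit defined by the SMC Calling Convention; SP_MIN is
AArch32-only, so SMC64 function IDs are answered with SMC_UNK. The same
check in C (constant values as in the SMCCC spec, names following
TF-A's):

    #include <stdbool.h>
    #include <stdint.h>

    #define FUNCID_CC_SHIFT 30
    #define FUNCID_CC_MASK  0x1u

    /* True for an SMC64 function ID, which SP_MIN cannot service. */
    static bool is_smc64(uint32_t smc_fid)
    {
            return ((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) != 0;
    }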
+
+/*
+ * Secure Interrupts handling function for SP_MIN.
+ */
+func sp_min_handle_fiq
+#if !SP_MIN_WITH_SECURE_FIQ
+       b plat_panic_handler
+#else
+       /* On FIQ entry, lr is 4 bytes past the preferred return address */
+       sub     lr, lr, #4
+       /* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+       str     lr, [sp, #SMC_CTX_LR_MON]
+
+       smccc_save_gp_mode_regs
+
+       clrex_on_monitor_entry
+
+       /* load run-time stack */
+       mov     r2, sp
+       ldr     sp, [r2, #SMC_CTX_SP_MON]
+
+       /* Switch to Secure Mode */
+       ldr     r0, [r2, #SMC_CTX_SCR]
+       bic     r0, #SCR_NS_BIT
+       stcopr  r0, SCR
        isb
 
-       b       sp_min_exit
-endfunc handle_smc
+       push    {r2, r3}
+       bl      sp_min_fiq
+       pop     {r0, r3}
 
+       b       sp_min_exit
+#endif
+endfunc sp_min_handle_fiq
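
The lr rewind follows the ARMv7 exception model: for an FIQ (or IRQ)
the banked lr holds the preferred return address plus 4, so the handler
subtracts 4 before saving it. Expressed in C:

    #include <stdint.h>

    /* ARMv7: banked lr on FIQ entry = preferred return address + 4. */
    static inline uint32_t fiq_preferred_return(uint32_t lr_fiq)
    {
            return lr_fiq - 4u;
    }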
 
 /*
  * The Warm boot entrypoint for SP_MIN.
@@ -187,7 +244,7 @@ func sp_min_warm_entrypoint
         * 'el3_entrypoint_common' must be skipped:
         *
         *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
-        *    programming the reset address do we need to set the CPU endianness.
+        *    programming the reset address do we need to initialise the SCTLR.
         *    In other cases, we assume this has been taken care of by the
         *    entrypoint code.
         *
@@ -200,7 +257,7 @@ func sp_min_warm_entrypoint
         *    it has been done once and for all on the cold boot path.
         */
        el3_entrypoint_common                                   \
-               _set_endian=PROGRAMMABLE_RESET_ADDRESS          \
+               _init_sctlr=PROGRAMMABLE_RESET_ADDRESS          \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
@@ -223,34 +280,21 @@ func sp_min_warm_entrypoint
         * enter coherency (as CPUs already are); and there's no reason to have
         * caches disabled either.
         */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+       mov     r0, #0
+#else
        mov     r0, #DISABLE_DCACHE
+#endif
        bl      bl32_plat_enable_mmu
 
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-       ldcopr  r0, SCTLR
-       orr     r0, r0, #SCTLR_C_BIT
-       stcopr  r0, SCTLR
-       isb
+#if SP_MIN_WITH_SECURE_FIQ
+       route_fiq_to_sp_min r0
 #endif
 
        bl      sp_min_warm_boot
-
-       /* Program the registers in cpu_context and exit monitor mode */
-       mov     r0, #NON_SECURE
-       bl      cm_get_context
-
-       /* Restore the SCR */
-       ldr     r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-       stcopr  r2, SCR
-       isb
-
-       /* Restore the SCTLR  */
-       ldr     r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-       stcopr  r2, SCTLR
-
        bl      smc_get_next_ctx
-
-       /* The other cpu_context registers have been copied to smc context */
+       /* r0 points to `smc_ctx_t` */
+       /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
        b       sp_min_exit
 endfunc sp_min_warm_entrypoint
 
@@ -261,6 +305,5 @@ endfunc sp_min_warm_entrypoint
  * Arguments : r0 must point to the SMC context to restore from.
  */
 func sp_min_exit
-       smcc_restore_gp_mode_regs
-       eret
+       monitor_exit
 endfunc sp_min_exit