AArch32: Disable Secure Cycle Counter
[project/bcm63xx/atf.git] bl32/sp_min/aarch32/entrypoint.S
index d868c53db156e88876f931f894982a20906e1e31..0a684754cef8b643ee7f94b794d9c74308198689 100644
@@ -1,22 +1,24 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
-#include <bl_common.h>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
 #include <context.h>
 #include <el3_common_macros.S>
-#include <runtime_svc.h>
-#include <smcc_helpers.h>
-#include <smcc_macros.S>
-#include <xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
 
        .globl  sp_min_vector_table
        .globl  sp_min_entrypoint
        .globl  sp_min_warm_entrypoint
+       .globl  sp_min_handle_smc
+       .globl  sp_min_handle_fiq
 
        .macro route_fiq_to_sp_min reg
                /* -----------------------------------------------------
                stcopr  \reg, SCR
        .endm
 
+       .macro clrex_on_monitor_entry
+#if (ARM_ARCH_MAJOR == 7)
+       /*
+        * ARMv7 architectures need to clear the exclusive access when
+        * entering Monitor mode.
+        */
+       clrex
+#endif
+       .endm
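Note on the new helper: it only expands to an instruction when ARM_ARCH_MAJOR is 7, since (per its comment) Armv7 does not clear the local exclusive monitor on entry to Monitor mode. Clearing it here means a reservation opened by an LDREX in the interrupted context cannot pair with a STREX executed after the trap; the interrupted sequence simply fails its STREX and retries. A minimal sketch of the kind of LDREX/STREX pairing this guards against (illustrative only, not part of the patch; label and registers are arbitrary):

	/*
	 * Sketch: classic exclusive-access try-lock. A trap to Monitor mode
	 * between the LDREX and the STREX would leave the reservation open
	 * unless the monitor is cleared on entry.
	 */
spin_trylock:
	mov	r1, #1
	ldrex	r0, [r2]		/* read lock word, open a reservation */
	cmp	r0, #0			/* already held? */
	strexeq	r0, r1, [r2]		/* claim it; r0 == 0 only on success */
	bx	lr			/* (acquire barrier omitted for brevity) */
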
+
 vector_base sp_min_vector_table
        b       sp_min_entrypoint
        b       plat_panic_handler      /* Undef */
-       b       handle_smc              /* Syscall */
+       b       sp_min_handle_smc       /* Syscall */
        b       plat_panic_handler      /* Prefetch abort */
        b       plat_panic_handler      /* Data abort */
        b       plat_panic_handler      /* Reserved */
        b       plat_panic_handler      /* IRQ */
-       b       handle_fiq              /* FIQ */
+       b       sp_min_handle_fiq       /* FIQ */
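The renamed handlers are reached through this table once its base address is installed in MVBAR; in TF-A that is done by the common AArch32 entrypoint code, roughly as below (sketch of the existing mechanism, not part of this patch):

	ldr	r0, =sp_min_vector_table	/* Monitor vector table above */
	stcopr	r0, MVBAR			/* install as the Monitor vectors */
	isb
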
 
 
 /*
@@ -52,8 +64,10 @@ func sp_min_entrypoint
         * specific structure
         * ---------------------------------------------------------------
         */
-       mov     r11, r0
-       mov     r12, r1
+       mov     r9, r0
+       mov     r10, r1
+       mov     r11, r2
+       mov     r12, r3
 
        /* ---------------------------------------------------------------------
         * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
@@ -76,8 +90,6 @@ func sp_min_entrypoint
         * Relay the previous bootloader's arguments to the platform layer
         * ---------------------------------------------------------------------
         */
-       mov     r0, r11
-       mov     r1, r12
 #else
        /* ---------------------------------------------------------------------
         * For RESET_TO_SP_MIN systems which have a programmable reset address,
@@ -99,15 +111,22 @@ func sp_min_entrypoint
         * Zero the arguments passed to the platform layer to reflect that.
         * ---------------------------------------------------------------------
         */
-       mov     r0, #0
-       mov     r1, #0
+       mov     r9, #0
+       mov     r10, #0
+       mov     r11, #0
+       mov     r12, #0
+
 #endif /* RESET_TO_SP_MIN */
 
 #if SP_MIN_WITH_SECURE_FIQ
        route_fiq_to_sp_min r4
 #endif
 
-       bl      sp_min_early_platform_setup
+       mov     r0, r9
+       mov     r1, r10
+       mov     r2, r11
+       mov     r3, r12
+       bl      sp_min_early_platform_setup2
        bl      sp_min_plat_arch_setup
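Both paths now leave the four boot arguments in r9-r12 (real values from the previous bootloader, or zeroes for RESET_TO_SP_MIN), so a single forwarding sequence feeds the renamed hook. An annotated copy of that sequence, for clarity only (the arg0-arg3 naming follows the generic TF-A platform interface and is not part of the patch):

	mov	r0, r9				/* arg0 from the previous bootloader */
	mov	r1, r10				/* arg1 */
	mov	r2, r11				/* arg2 */
	mov	r3, r12				/* arg3 */
	bl	sp_min_early_platform_setup2	/* replaces the two-argument
						 * sp_min_early_platform_setup() */
	bl	sp_min_plat_arch_setup
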
 
        /* Jump to the main function */
@@ -141,11 +160,13 @@ endfunc sp_min_entrypoint
 /*
  * SMC handling function for SP_MIN.
  */
-func handle_smc
+func sp_min_handle_smc
        /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
        str     lr, [sp, #SMC_CTX_LR_MON]
 
-       smcc_save_gp_mode_regs
+       smccc_save_gp_mode_regs
+
+       clrex_on_monitor_entry
 
        /*
         * `sp` still points to `smc_ctx_t`. Save it to a register
@@ -178,12 +199,12 @@ func handle_smc
 
        /* `r0` points to `smc_ctx_t` */
        b       sp_min_exit
-endfunc handle_smc
+endfunc sp_min_handle_smc
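Throughout the handler the saved state lives in an smc_ctx_t, and the code above branches to sp_min_exit with r0 pointing at it. Given such a pointer, the incoming SMC function ID and arguments can be read back with the generic SMC_CTX_GPREG_* offsets from smccc_helpers.h; a short sketch (illustrative only, destination registers arbitrary):

	ldr	r4, [r0, #SMC_CTX_GPREG_R0]	/* SMC function ID */
	ldr	r5, [r0, #SMC_CTX_GPREG_R1]	/* first SMC argument */
	ldr	r6, [r0, #SMC_CTX_GPREG_R2]	/* second SMC argument */
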
 
 /*
  * Secure Interrupts handling function for SP_MIN.
  */
-func handle_fiq
+func sp_min_handle_fiq
 #if !SP_MIN_WITH_SECURE_FIQ
        b plat_panic_handler
 #else
@@ -192,13 +213,9 @@ func handle_fiq
        /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
        str     lr, [sp, #SMC_CTX_LR_MON]
 
-       smcc_save_gp_mode_regs
+       smccc_save_gp_mode_regs
 
-       /*
-        * AArch32 architectures need to clear the exclusive access when
-        * entering Monitor mode.
-        */
-       clrex
+       clrex_on_monitor_entry
 
        /* load run-time stack */
        mov     r2, sp
@@ -216,7 +233,7 @@ func handle_fiq
 
        b       sp_min_exit
 #endif
-endfunc handle_fiq
+endfunc sp_min_handle_fiq
 
 /*
  * The Warm boot entrypoint for SP_MIN.
@@ -263,20 +280,17 @@ func sp_min_warm_entrypoint
         * enter coherency (as CPUs already are); and there's no reason to have
         * caches disabled either.
         */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+       mov     r0, #0
+#else
        mov     r0, #DISABLE_DCACHE
+#endif
        bl      bl32_plat_enable_mmu
 
 #if SP_MIN_WITH_SECURE_FIQ
        route_fiq_to_sp_min r0
 #endif
 
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-       ldcopr  r0, SCTLR
-       orr     r0, r0, #SCTLR_C_BIT
-       stcopr  r0, SCTLR
-       isb
-#endif
-
        bl      sp_min_warm_boot
        bl      smc_get_next_ctx
        /* r0 points to `smc_ctx_t` */
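
On the warm-boot hunk above: DISABLE_DCACHE is the flag that bl32_plat_enable_mmu() normally hands down to the MMU-enable code so that SCTLR.C stays clear while the MMU is turned on. Passing 0 when HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY is set lets the data cache come up together with the MMU, which is what made the separate SCTLR read-modify-write removable. A sketch of the effect of the flag (not the actual library code):

	ldcopr	r1, SCTLR
	orr	r1, r1, #SCTLR_M_BIT		/* MMU on in either case */
	tst	r0, #DISABLE_DCACHE		/* flag received in r0 */
	orreq	r1, r1, #SCTLR_C_BIT		/* flag clear: D-cache on as well */
	stcopr	r1, SCTLR
	isb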