/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
-#include <bl_common.h>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
-#include <runtime_svc.h>
-#include <smcc_helpers.h>
-#include <smcc_macros.S>
-#include <xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
.globl sp_min_vector_table
.globl sp_min_entrypoint
* Preceding bootloader has populated r0 with a pointer to a
* 'bl_params_t' structure & r1 with a pointer to platform
* specific structure
* ---------------------------------------------------------------
*/
- mov r11, r0
- mov r12, r1
+ mov r9, r0
+ mov r10, r1
+ mov r11, r2
+ mov r12, r3
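+ /*
+ * r0-r3 carry the previous bootloader's arguments; they are parked in
+ * r9-r12 because the early init code that runs before the platform
+ * setup call is assumed to clobber r0-r3.
+ */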
/* ---------------------------------------------------------------------
* For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
* sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
* and primary/secondary CPU logic should not be executed in this case.
*
* Relay the previous bootloader's arguments to the platform layer
* ---------------------------------------------------------------------
*/
- mov r0, r11
- mov r1, r12
#else
/* ---------------------------------------------------------------------
* For RESET_TO_SP_MIN systems which have a programmable reset address,
* sp_min_entrypoint() is executed only on the cold boot path. There is
* no previous bootloader and hence no arguments to relay.
* Zero the arguments passed to the platform layer to reflect that.
* ---------------------------------------------------------------------
*/
- mov r0, #0
- mov r1, #0
+ mov r9, #0
+ mov r10, #0
+ mov r11, #0
+ mov r12, #0
+
#endif /* RESET_TO_SP_MIN */
#if SP_MIN_WITH_SECURE_FIQ
route_fiq_to_sp_min r4
#endif
- bl sp_min_early_platform_setup
+ mov r0, r9
+ mov r1, r10
+ mov r2, r11
+ mov r3, r12
+ bl sp_min_early_platform_setup2
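+ /*
+ * sp_min_early_platform_setup2 receives the four relayed arguments in
+ * r0-r3; per the platform porting API its expected C prototype is
+ * (argument names illustrative):
+ * void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ * u_register_t arg2, u_register_t arg3);
+ */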
bl sp_min_plat_arch_setup
/* Jump to the main function */
/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
str lr, [sp, #SMC_CTX_LR_MON]
- smcc_save_gp_mode_regs
+ smccc_save_gp_mode_regs
clrex_on_monitor_entry
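/*
 * smccc_save_gp_mode_regs saves the general purpose registers along
 * with the banked mode registers into the smc_ctx_t that sp points to;
 * clrex_on_monitor_entry then clears the local exclusive monitor so no
 * LDREX/STREX reservation leaks across the world switch.
 */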
stcopr r0, SCR
isb
- /*
- * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
- * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
- * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
- */
- ldcopr r0, PMCR
- orr r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
- stcopr r0, PMCR
-
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
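/*
 * A set calling convention bit marks the function ID as SMC64, which
 * cannot be serviced from AArch32; in that case the surrounding code
 * is expected to write SMC_UNK back to the caller's context.
 */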
/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
str lr, [sp, #SMC_CTX_LR_MON]
- smcc_save_gp_mode_regs
+ smccc_save_gp_mode_regs
clrex_on_monitor_entry
stcopr r0, SCR
isb
- /*
- * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
- * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
- * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
- */
- ldcopr r0, PMCR
- orr r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
- stcopr r0, PMCR
-
push {r2, r3}
bl sp_min_fiq
pop {r0, r3}
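/*
 * The push/pop pair is deliberately asymmetric: r2 (the smc_ctx_t
 * pointer saved above) is popped into r0, where the exit path expects
 * the context pointer.
 */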
* On systems with hardware-assisted coherency, or on single cluster
* platforms, such platform specific programming is not required to
* enter coherency (as CPUs already are); and there's no reason to have
* caches disabled either.
*/
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+ mov r0, #0
+#else
mov r0, #DISABLE_DCACHE
+#endif
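+ /*
+ * With hardware-assisted coherency (or WARMBOOT_ENABLE_DCACHE_EARLY) a
+ * zero flags value lets bl32_plat_enable_mmu turn the MMU on with the
+ * data cache already enabled, so no separate SCTLR.C enable step is
+ * needed afterwards.
+ */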
bl bl32_plat_enable_mmu
#if SP_MIN_WITH_SECURE_FIQ
route_fiq_to_sp_min r0
#endif
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
- ldcopr r0, SCTLR
- orr r0, r0, #SCTLR_C_BIT
- stcopr r0, SCTLR
- isb
-#endif
-
bl sp_min_warm_boot
bl smc_get_next_ctx
/* r0 points to `smc_ctx_t` */