ARMv7 requires clearing the exclusive access monitor at Monitor entry
[project/bcm63xx/atf.git] bl32/sp_min/aarch32/entrypoint.S
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor.
	 * Set SCR.FIQ so that FIQs are routed to Monitor mode
	 * and clear SCR.FW so that the Non-secure world cannot
	 * mask FIQs through CPSR.F.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access when
	 * entering Monitor mode. Otherwise, a LDREX/STREX pair that was
	 * interrupted by this world switch could spuriously succeed on
	 * stale exclusive monitor state.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	handle_fiq		/* FIQ */
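
	/*
	 * Note: this follows the standard AArch32 exception vector layout:
	 * eight 4-byte slots (Reset, Undef, SVC/SMC, Prefetch abort, Data
	 * abort, Reserved, IRQ, FIQ), each holding a single branch
	 * instruction. el3_entrypoint_common installs this table as the
	 * Monitor vector base (MVBAR) via the _exception_vectors argument
	 * used below.
	 */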


/*
 * The Cold boot/Reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a
	 * platform-specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
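	/* Note: clean_dcache_range takes the base address in r0 and the
	 * size in bytes in r1, hence the subtraction above. */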
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs
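	/*
	 * Note: smcc_save_gp_mode_regs (from smcc_macros.S) saves r0-r12
	 * together with the banked SP and LR of the other processor modes
	 * into the `smc_ctx_t` that `sp` points to.
	 */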

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */
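	/*
	 * Note: r2 (handle) and r3 (flags) are set up here for the call to
	 * handle_runtime_svc further down, whose C prototype (assumed from
	 * runtime_svc.h) is roughly:
	 *   uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie,
	 *                                void *handle, unsigned int flags);
	 * r0 (smc_fid) and r1 (cookie) are filled in just before the call.
	 */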

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
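
	/*
	 * Clearing SCR.NS ensures that, while in Monitor mode, accesses to
	 * banked CP15 registers target the Secure copies.
	 */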

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on
	 * reset, so set it to 1 since ARM has deprecated the use of
	 * PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
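	/*
	 * Note: bit 30 of the function ID is the SMC Calling Convention
	 * width bit: 0 denotes SMC32, 1 denotes SMC64, which is invalid
	 * for an AArch32 implementation.
	 */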
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 is not supported; return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* load run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on
	 * reset, so set it to 1 since ARM has deprecated the use of
	 * PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

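	/*
	 * Note: r2 (the context handle) is preserved across the call below
	 * and popped back into r0 for sp_min_exit; r3 is pushed alongside
	 * only to keep the Monitor stack 8-byte aligned.
	 */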
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
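	/*
	 * Coherency is either hardware-managed or explicitly requested
	 * early, so the data cache can be enabled here by setting SCTLR.C.
	 */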
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context and return to the mode
 * indicated by the SPSR saved in that context.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
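	/*
	 * Note: monitor_exit is expected to be provided by smcc_macros.S;
	 * it restores the general purpose and mode registers from the
	 * `smc_ctx_t` and performs the exception return.
	 */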
	monitor_exit
endfunc sp_min_exit