lib/aarch32/misc_helpers.S
/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * For AArch32, only r0-r3 are passed in registers; the remaining
	 * arguments (r4-r6) are pushed onto the stack by the caller, so they
	 * have to be loaded from the stack into r4-r6 explicitly here.
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc
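
/*
 * Usage sketch: assuming an AAPCS C-side declaration along the lines of the
 * hypothetical prototype below, the first four arguments arrive in r0-r3 and
 * the remaining three on the caller's stack, which is why the ldm above
 * reloads them into r4-r6 before issuing the SMC.
 *
 *	void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
 *		 uint32_t r4, uint32_t r5, uint32_t r6);
 */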

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0 /* Start address and then current address */
	length       .req r1 /* Length in bytes of the region to zero out */
	/*
	 * r1 is reused as stop_address because length is only needed at the
	 * beginning of the function.
	 */
	stop_address .req r1 /* Address past the last zeroed byte */
	zeroreg1     .req r2 /* Source register filled with 0 */
	zeroreg2     .req r3 /* Source register filled with 0 */
	tmp          .req r12 /* Temporary scratch register */

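	/*
	 * Overall flow: zero single bytes until cursor reaches an 8-byte
	 * boundary (or fall back to a pure byte loop if the region is too
	 * small), zero 8 bytes at a time with stmia, then zero any remaining
	 * tail bytes individually.
	 */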
	mov	zeroreg1, #0

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Length cannot be used anymore as it shares the same register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/* Calculate the next address aligned to 8 bytes */
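	/*
	 * Setting the three low bits with ORR and then adding 1 rounds cursor
	 * up to the next multiple of 8, e.g. 0x1003 -> 0x1007 -> 0x1008. If
	 * cursor lies in the last 8 bytes of the address space the addition
	 * wraps to 0; ADDS exposes this so the BEQ below can fall back to the
	 * byte-by-byte path.
	 */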
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
	/* If it overflows, fall back to byte-by-byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte by byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8 bytes aligned address. */
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte by byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 does not have special ways of zeroing normal memory as AArch64 does
 * using the DC ZVA instruction, so we just alias zero_normalmem to zeromem.
 */
.equ	zero_normalmem, zeromem

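/*
 * Usage sketch from C, assuming the prototype given in the zeromem header
 * comment above (the buffer name and size below are hypothetical):
 *
 *	extern void zero_normalmem(void *mem, unsigned int length);
 *	static unsigned char scratch[64];
 *	zero_normalmem(scratch, sizeof(scratch));
 *
 * Since zero_normalmem is an alias of zeromem, the same call works for either
 * symbol.
 */
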
/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	sub	r2, r2, #4
	b	m_loop4
/* copy byte by byte */
m_loop1:
	cmp	r2, #0
	beq	m_end
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
m_end:
	bx	lr
endfunc memcpy4
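
/*
 * Usage sketch from C, assuming the prototype in the header comment above
 * (the buffer names and size are hypothetical); both pointers must be 4-byte
 * aligned and the regions must not overlap, but the length itself does not
 * have to be a multiple of 4 since trailing bytes are copied one at a time:
 *
 *	extern void memcpy4(void *dest, const void *src, unsigned int length);
 *	uint32_t src_buf[4], dst_buf[4];
 *	memcpy4(dst_buf, src_buf, sizeof(src_buf));
 */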

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
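	/*
	 * When ERRATA_A9_794073 is enabled at build time, the block below
	 * applies the Cortex-A9 erratum 794073 workaround: invalidate the
	 * entire branch predictor (BPIALL) and synchronise before the MMU is
	 * turned off.
	 */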
#if ERRATA_A9_794073
	stcopr	r0, BPIALL
	dsb
#endif
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure


func disable_mmu_icache_secure
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
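
/*
 * Usage sketch from C (assumed declarations; both helpers take no arguments
 * and return nothing):
 *
 *	void disable_mmu_secure(void);
 *	void disable_mmu_icache_secure(void);
 *
 * disable_mmu_icache_secure reuses do_disable_mmu above with a mask that also
 * includes SCTLR_I_BIT, so it disables the I-cache in addition to the MMU and
 * the data cache.
 */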