2 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
4 * SPDX-License-Identifier: BSD-3-Clause
8 #include <asm_macros.S>
9 #include <assert_macros.S>
15 .globl disable_mmu_icache_secure
16 .globl disable_mmu_secure
20 * For AArch32, only the first four arguments are passed in registers
21 * r0-r3; the remaining ones are pushed onto the stack. So here we
22 * have to load them from the stack into registers r4-r6 explicitly.
29 /* -----------------------------------------------------------------------
30 * void zeromem(void *mem, unsigned int length)
32 * Initialise a region in normal memory to 0. This function complies with the
33 * AAPCS and can be called from C code.
35 * -----------------------------------------------------------------------
/*
 * NOTE(review): this excerpt is gappy -- the embedded original line numbers
 * (29, 30, 32, ...) skip values, so parts of the function (including the
 * "func zeromem" entry and several ALU/compare instructions that feed the
 * conditional branches below) are not visible here. The comments added
 * below describe only what the visible lines establish; confirm anything
 * hedged against the full file.
 */
39 * Readable names for registers
41 * Registers r0, r1 and r2 are also set by zeromem which
42 * branches into the fallback path directly, so cursor, length and
43 * stop_address should not be retargeted to other registers.
45 cursor .req r0 /* Start address and then current address */
46 length .req r1 /* Length in bytes of the region to zero out */
48 * Reusing the r1 register as length is only used at the beginning of
51 stop_address .req r1 /* Address past the last zeroed byte */
52 zeroreg1 .req r2 /* Source register filled with 0 */
53 zeroreg2 .req r3 /* Source register filled with 0 */
54 tmp .req r12 /* Temporary scratch register */
58 /* stop_address is the address past the last to zero */
/* stop_address = mem + length (one past the last byte to clear) */
59 add stop_address, cursor, length
62 * Length cannot be used anymore as it shares the same register with
68 * If the start address is already aligned to 8 bytes, skip this loop.
/* The flag-setting instruction feeding this beq is not visible here */
71 beq .Lzeromem_8bytes_aligned
73 /* Calculate the next address aligned to 8 bytes */
/*
 * tmp = cursor with the low 3 bits set; presumably followed by an
 * "adds tmp, tmp, #1" (not visible) to round up to the next 8-byte
 * boundary and set flags for the overflow check -- TODO confirm.
 */
74 orr tmp, cursor, #(8-1)
76 /* If it overflows, fallback to byte per byte zeroing */
77 beq .Lzeromem_1byte_aligned
78 /* If the next aligned address is after the stop address, fall back */
80 bhs .Lzeromem_1byte_aligned
82 /* zero byte per byte */
/* Head loop: store one zero byte, post-incrementing cursor */
84 strb zeroreg1, [cursor], #1
88 /* zero 8 bytes at a time */
89 .Lzeromem_8bytes_aligned:
91 /* Calculate the last 8 bytes aligned address. */
/* tmp = stop_address rounded down to an 8-byte boundary */
92 bic tmp, stop_address, #(8-1)
/* Main loop body: store 8 zero bytes (r2 and r3) and advance cursor */
99 stmia cursor!, {zeroreg1, zeroreg2}
104 /* zero byte per byte */
105 .Lzeromem_1byte_aligned:
/* Tail loop: clear any remaining bytes one at a time until stop_address */
106 cmp cursor, stop_address
109 strb zeroreg1, [cursor], #1
110 cmp cursor, stop_address
117 * length is already unreq'ed to reuse the register for another
127 * AArch32 does not have special ways of zeroing normal memory as AArch64 does
128 * using the DC ZVA instruction, so we just alias zero_normalmem to zeromem.
/* zero_normalmem(mem, length) resolves to exactly the same code as zeromem */
130 .equ zero_normalmem, zeromem
132 /* --------------------------------------------------------------------------
133 * void memcpy4(void *dest, const void *src, unsigned int length)
135 * Copy length bytes from memory area src to memory area dest.
136 * The memory areas should not overlap.
137 * Destination and source addresses must be 4-byte aligned.
138 * --------------------------------------------------------------------------
/*
 * NOTE(review): the body of memcpy4 is almost entirely missing from this
 * excerpt (original lines 139-165 are only sparsely visible), including
 * the "func memcpy4" entry, the alignment assertions guarded below, and
 * the matching #endif for ENABLE_ASSERTIONS. Only the loop-marker
 * comments survive -- confirm against the full file.
 */
141 #if ENABLE_ASSERTIONS
146 /* copy 4 bytes at a time */
154 /* copy byte per byte */
166 /* ---------------------------------------------------------------------------
167 * Disable the MMU in Secure State
168 * ---------------------------------------------------------------------------
171 func disable_mmu_secure
/* r1 = SCTLR bits to clear: M (MMU enable) and C (data cache enable) */
172 mov r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
/*
 * NOTE(review): original lines 173-180 are missing from this excerpt --
 * presumably the SCTLR read-modify-write (clearing the bits in r1) and
 * the data-synchronization barrier live there; confirm in the full file.
 */
181 isb // ensure MMU is off
184 endfunc disable_mmu_secure
/* Disable the MMU, data cache and instruction cache in Secure state. */
187 func disable_mmu_icache_secure
/* r1 = SCTLR bits to clear: M (MMU), C (data cache) and I (icache) */
188 ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
/*
 * NOTE(review): original line 189 is missing from this excerpt --
 * presumably a tail-branch into the shared disable path used by
 * disable_mmu_secure above; confirm against the full file.
 */
190 endfunc disable_mmu_icache_secure