Adding new optional PSCI hook pwr_domain_on_finish_late
[project/bcm63xx/atf.git] / lib / psci / psci_on.c
/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * Helper functions for the CPU level spinlocks
 */
static inline void psci_spin_lock_cpu(int idx)
{
	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

static inline void psci_spin_unlock_cpu(int idx)
{
	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management to power on the target cpu, e.g. it will ensure that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as it can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep)
{
	int rc;
	aff_info_state_t target_aff_state;
	int target_idx = plat_core_pos_by_mpidr(target_cpu);

	/* Calling function must supply valid input arguments */
	assert(target_idx >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off before it is
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in another cluster from the current
	 * - the target CPU was the last CPU to shutdown on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster. And
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index((unsigned int)target_idx,
				psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index((unsigned int)target_idx,
				psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index((unsigned int)target_idx,
					psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index((unsigned int)target_idx, ep);
	else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index((unsigned int)target_idx,
					psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
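
/*
 * Illustrative sketch (not part of the original file): a minimal platform
 * `pwr_domain_on` hook of the kind psci_cpu_on_start() invokes above,
 * assuming a mailbox-based release mechanism. The mailbox array
 * `plat_mbox[]` and the helper `plat_power_up_core()` are hypothetical;
 * a real port would program its own power controller instead.
 */
#if 0
static uintptr_t plat_mbox[PLATFORM_CORE_COUNT];

static int plat_pwr_domain_on(u_register_t mpidr)
{
	int pos = plat_core_pos_by_mpidr(mpidr);

	if (pos < 0)
		return PSCI_E_INTERN_FAIL;

	/* Publish the warm-boot entrypoint for the core being woken. */
	plat_mbox[pos] = (uintptr_t)bl31_warm_entrypoint;
	flush_dcache_range((uintptr_t)&plat_mbox[pos], sizeof(plat_mbox[pos]));

	/* Hypothetical power-controller call that powers the core up. */
	plat_power_up_core(pos);

	return PSCI_E_SUCCESS;
}
#endif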

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL)
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
		psci_spd_pm->svc_on_finish(0);

	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array.
	 * This needs to be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
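
/*
 * Illustrative sketch (not part of the original file): how a platform might
 * provide the optional `pwr_domain_on_finish_late` hook invoked above. It
 * runs after psci_do_pwrup_cache_maintenance(), i.e. with the data cache
 * enabled and the cpu/cluster guaranteed coherent. The handler names and
 * `plat_coherent_late_setup()` are hypothetical.
 */
#if 0
static void plat_pwr_domain_on_finish_late(const psci_power_state_t *state_info)
{
	/* Safe to touch data that relies on coherency, e.g. shared stats. */
	plat_coherent_late_setup(state_info);
}

static const plat_psci_ops_t plat_psci_pm_ops = {
	.pwr_domain_on			= plat_pwr_domain_on,
	.pwr_domain_on_finish		= plat_pwr_domain_on_finish,
	/* Optional hook: platforms that do not need it leave this NULL. */
	.pwr_domain_on_finish_late	= plat_pwr_domain_on_finish_late,
	/* ... remaining handlers elided ... */
};
#endif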