#ifndef __ARM_DEF_H__
#define __ARM_DEF_H__
+#include <arch.h>
#include <common_def.h>
#include <platform_def.h>
#include <tbbr_img_def.h>
#define ARM_CACHE_WRITEBACK_SHIFT 6
+/*
+ * Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
+ * power levels have a 1:1 mapping with the MPIDR affinity levels.
+ */
+#define ARM_PWR_LVL0 MPIDR_AFFLVL0
+#define ARM_PWR_LVL1 MPIDR_AFFLVL1
+
+/*
+ * Macros for local power states in ARM platforms encoded by State-ID field
+ * within the power-state parameter.
+ */
+/* Local power state for power domains in Run state. */
+#define ARM_LOCAL_STATE_RUN 0
+/* Local power state for retention. Valid only for CPU power domains */
+#define ARM_LOCAL_STATE_RET 1
+/* Local power state for OFF/power-down. Valid for CPU and cluster power
+ domains */
+#define ARM_LOCAL_STATE_OFF 2
+
/* Memory location options for TSP */
#define ARM_TRUSTED_SRAM_ID 0
#define ARM_TRUSTED_DRAM_ID 1
#define ADDR_SPACE_SIZE (1ull << 32)
-#define PLATFORM_NUM_AFFS (ARM_CLUSTER_COUNT + \
+#define PLAT_NUM_PWR_DOMAINS (ARM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
-#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
+#define PLAT_MAX_PWR_LVL ARM_PWR_LVL1
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE ARM_LOCAL_STATE_RET
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE ARM_LOCAL_STATE_OFF
+
#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER0_CORE_COUNT + \
PLAT_ARM_CLUSTER1_CORE_COUNT)
void arm_tzc_setup(void);
/* PM utility functions */
-int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state);
-int arm_validate_power_state(unsigned int power_state);
+int arm_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state);
+
+/* Topology utility function */
+int arm_check_mpidr(u_register_t mpidr);
/* BL1 utility functions */
void arm_bl1_early_platform_setup(void);
unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
-void plat_arm_topology_setup(void);
+unsigned int plat_arm_calc_core_pos(u_register_t mpidr);
#endif /* __PLAT_ARM_H__ */
#include "../fvp_def.h"
.globl plat_secondary_cold_boot_setup
- .globl platform_get_entrypoint
+ .globl plat_get_my_entrypoint
.globl platform_mem_init
- .globl platform_is_primary_cpu
+ .globl plat_is_my_cpu_primary
.macro fvp_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =V2M_SYSREGS_BASE + V2M_SYS_ID
/* -----------------------------------------------------
- * void platform_get_entrypoint (unsigned int mpid);
+ * unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between
- * a cold and warm boot.
+ * a cold and warm boot on the current CPU.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* reset all cpus will read the same WK field
* -----------------------------------------------------
*/
-func platform_get_entrypoint
+func plat_get_my_entrypoint
mov x9, x30 // lr
- mov x2, x0
+ mrs x2, mpidr_el1
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
* ---------------------------------------------
*/
ldr x10, =MBOX_BASE
- bl platform_get_core_pos
+ bl plat_my_core_pos
lsl x0, x0, #ARM_CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
-endfunc platform_get_entrypoint
+endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
endfunc platform_mem_init
-func platform_is_primary_cpu
+func plat_is_my_cpu_primary
+	/* Read this CPU's own MPIDR; no argument is taken any more. */
+ mrs x0, mpidr_el1
 and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
 cmp x0, #FVP_PRIMARY_CPU
+	/* x0 = 1 iff the calling CPU's cluster/cpu fields match FVP_PRIMARY_CPU. */
 cset x0, eq
 ret
-endfunc platform_is_primary_cpu
+endfunc plat_is_my_cpu_primary
#include "fvp_def.h"
#include "fvp_private.h"
+unsigned long wakeup_address;
typedef volatile struct mailbox {
unsigned long value __aligned(CACHE_WRITEBACK_GRANULE);
uint64_t linear_id;
mailbox_t *fvp_mboxes;
- linear_id = platform_get_core_pos(mpidr);
+ linear_id = plat_arm_calc_core_pos(mpidr);
fvp_mboxes = (mailbox_t *)MBOX_BASE;
fvp_mboxes[linear_id].value = address;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
}
/*******************************************************************************
- * FVP handler called when an affinity instance is about to enter standby.
+ * FVP handler called when a CPU is about to enter standby.
******************************************************************************/
-void fvp_affinst_standby(unsigned int power_state)
+void fvp_cpu_standby(plat_local_state_t cpu_state)
{
+
+	/* The FVP supports only retention at the CPU level, enforced here. */
+ assert(cpu_state == ARM_LOCAL_STATE_RET);
+
 /*
 * Enter standby state
 * dsb is good practice before using wfi to enter low power states
}
/*******************************************************************************
- * FVP handler called when an affinity instance is about to be turned on. The
- * level and mpidr determine the affinity instance.
+ * FVP handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
******************************************************************************/
-int fvp_affinst_on(unsigned long mpidr,
- unsigned long sec_entrypoint,
- unsigned int afflvl,
- unsigned int state)
+int fvp_pwr_domain_on(u_register_t mpidr)
{
 int rc = PSCI_E_SUCCESS;
 unsigned int psysr;
- /*
- * It's possible to turn on only affinity level 0 i.e. a cpu
- * on the FVP. Ignore any other affinity level.
- */
- if (afflvl != MPIDR_AFFLVL0)
- return rc;
-
 /*
 * Ensure that we do not cancel an inflight power off request
 * for the target cpu. That would leave it in a zombie wfi.
 psysr = fvp_pwrc_read_psysr(mpidr);
 } while (psysr & PSYSR_AFF_L0);
+	/* wakeup_address holds the warm-boot entrypoint saved by plat_setup_psci_ops(). */
- fvp_program_mailbox(mpidr, sec_entrypoint);
+ fvp_program_mailbox(mpidr, wakeup_address);
+	/* Program the FVP power controller to power the target CPU on. */
 fvp_pwrc_write_pponr(mpidr);
 return rc;
}
/*******************************************************************************
- * FVP handler called when an affinity instance is about to be turned off. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It will be wise to do flush a write to the
- * global to prevent unpredictable results.
+ * FVP handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-void fvp_affinst_off(unsigned int afflvl,
- unsigned int state)
+void fvp_pwr_domain_off(const psci_power_state_t *target_state)
{
- /* Determine if any platform actions need to be executed */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
- return;
+	/* This handler is only invoked for a full CPU power-down, never retention. */
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
 /*
- * If execution reaches this stage then this affinity level will be
- * suspended. Perform at least the cpu specific actions followed the
- * cluster specific operations if applicable.
+ * If execution reaches this stage then this power domain will be
+ * suspended. Perform at least the cpu specific actions followed
+ * by the cluster specific operations if applicable.
 */
 fvp_cpu_pwrdwn_common();
- if (afflvl != MPIDR_AFFLVL0)
+	/* The cluster-level entry tells us whether the whole cluster goes down too. */
+ if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+ ARM_LOCAL_STATE_OFF)
 fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
- * FVP handler called when an affinity instance is about to be suspended. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It will be wise to do flush a write to the
- * global to prevent unpredictable results.
+ * FVP handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-void fvp_affinst_suspend(unsigned long sec_entrypoint,
- unsigned int afflvl,
- unsigned int state)
+void fvp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
 unsigned long mpidr;
- /* Determine if any platform actions need to be executed. */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
+ /*
+ * FVP has retention only at cpu level. Just return
+ * as nothing is to be done for retention.
+ */
+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_RET)
 return;
+	/* Anything else must be a full power-down request at the CPU level. */
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
+
 /* Get the mpidr for this cpu */
 mpidr = read_mpidr_el1();
 /* Program the jump address for this cpu */
- fvp_program_mailbox(mpidr, sec_entrypoint);
+ fvp_program_mailbox(mpidr, wakeup_address);
 /* Program the power controller to enable wakeup interrupts. */
 fvp_pwrc_set_wen(mpidr);
+	/* Perform the common cpu specific power-down operations. */
 fvp_cpu_pwrdwn_common();
 /* Perform the common cluster specific operations */
- if (afflvl != MPIDR_AFFLVL0)
+ if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+ ARM_LOCAL_STATE_OFF)
 fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
- * FVP handler called when an affinity instance has just been powered on after
- * being turned off earlier. The level and mpidr determine the affinity
- * instance. The 'state' arg. allows the platform to decide whether the cluster
- * was turned off prior to wakeup and do what's necessary to setup it up
- * correctly.
+ * FVP handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
******************************************************************************/
-void fvp_affinst_on_finish(unsigned int afflvl,
- unsigned int state)
+void fvp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
 unsigned long mpidr;
- /* Determine if any platform actions need to be executed. */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
- return;
+	/* The finisher only runs after a genuine power-down of this CPU. */
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
 /* Get the mpidr for this cpu */
 mpidr = read_mpidr_el1();
 /* Perform the common cluster specific operations */
- if (afflvl != MPIDR_AFFLVL0) {
+ if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+ ARM_LOCAL_STATE_OFF) {
 /*
 * This CPU might have woken up whilst the cluster was
 * attempting to power down. In this case the FVP power
}
/*******************************************************************************
- * FVP handler called when an affinity instance has just been powered on after
- * having been suspended earlier. The level and mpidr determine the affinity
- * instance.
+ * FVP handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
-void fvp_affinst_suspend_finish(unsigned int afflvl,
- unsigned int state)
+void fvp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
+ /*
+ * Nothing to be done on waking up from retention from CPU level.
+ */
+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_RET)
+ return;
+
+	/* Full power-down wakeup reuses the ON finisher (see TODO in the banner). */
+ fvp_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
-static const plat_pm_ops_t fvp_plat_pm_ops = {
- .affinst_standby = fvp_affinst_standby,
- .affinst_on = fvp_affinst_on,
- .affinst_off = fvp_affinst_off,
- .affinst_suspend = fvp_affinst_suspend,
- .affinst_on_finish = fvp_affinst_on_finish,
- .affinst_suspend_finish = fvp_affinst_suspend_finish,
+static const plat_psci_ops_t fvp_plat_psci_ops = {
+ .cpu_standby = fvp_cpu_standby,
+ .pwr_domain_on = fvp_pwr_domain_on,
+ .pwr_domain_off = fvp_pwr_domain_off,
+ .pwr_domain_suspend = fvp_pwr_domain_suspend,
+ .pwr_domain_on_finish = fvp_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = fvp_pwr_domain_suspend_finish,
+	/* validate_power_state is the common ARM handler (arm_pm.c), not FVP-specific. */
 .system_off = fvp_system_off,
 .system_reset = fvp_system_reset,
 .validate_power_state = arm_validate_power_state
};
/*******************************************************************************
- * Export the platform specific power ops & initialize the fvp power controller
+ * Export the platform specific psci ops & initialize the fvp power controller
******************************************************************************/
-int platform_setup_pm(const plat_pm_ops_t **plat_ops)
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
{
- *plat_ops = &fvp_plat_pm_ops;
+ *psci_ops = &fvp_plat_psci_ops;
+	/* Stash the warm-boot entrypoint for the ON/SUSPEND handlers above. */
+ wakeup_address = sec_entrypoint;
+
+	/* NOTE(review): flushed presumably so CPUs reading it with caches
+	 * disabled observe the stored value — confirm against the warm-boot path. */
+ flush_dcache_range((unsigned long)&wakeup_address,
+ sizeof(wakeup_address));
 return 0;
}
*/
#include <arch.h>
-#include <assert.h>
+#include <plat_arm.h>
#include <platform_def.h>
-/* TODO: Reusing psci error codes & state information. Get our own! */
-#include <psci.h>
#include "drivers/pwrc/fvp_pwrc.h"
-#include "fvp_def.h"
-/* We treat '255' as an invalid affinity instance */
-#define AFFINST_INVAL 0xff
-
-/*******************************************************************************
- * We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
- * flavour has a different topology. The common bit is that there can be a max.
- * of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
- * a tree like data structure which caters to these maximum bounds. It simply
- * marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
- * cluster 1 on the Foundation FVP. The 'data' field is currently unused.
- ******************************************************************************/
-typedef struct affinity_info {
- unsigned char sibling;
- unsigned char child;
- unsigned char state;
- unsigned int data;
-} affinity_info_t;
-
-/*******************************************************************************
- * The following two data structures store the topology tree for the fvp. There
- * is a separate array for each affinity level i.e. cpus and clusters. The child
- * and sibling references allow traversal inside and in between the two arrays.
- ******************************************************************************/
-static affinity_info_t fvp_aff1_topology_map[ARM_CLUSTER_COUNT];
-static affinity_info_t fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
-
-/* Simple global variable to safeguard us from stupidity */
-static unsigned int topology_setup_done;
-
-/*******************************************************************************
- * This function implements a part of the critical interface between the psci
- * generic layer and the platform to allow the former to detect the platform
- * topology. psci queries the platform to determine how many affinity instances
- * are present at a particular level for a given mpidr e.g. consider a dual
- * cluster platform where each cluster has 4 cpus. A call to this function with
- * (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
- * Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
- * This is 'cause we are effectively asking how many affinity level 1 instances
- * are implemented under affinity level 2 instance 0.
- ******************************************************************************/
-unsigned int plat_get_aff_count(unsigned int aff_lvl,
- unsigned long mpidr)
-{
- unsigned int aff_count = 1, ctr;
- unsigned char parent_aff_id;
-
- assert(topology_setup_done == 1);
-
- switch (aff_lvl) {
- case 3:
- case 2:
- /*
- * Assert if the parent affinity instance is not 0.
- * This also takes care of level 3 in an obfuscated way
- */
- parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
- assert(parent_aff_id == 0);
-
- /*
- * Report that we implement a single instance of
- * affinity levels 2 & 3 which are AFF_ABSENT
- */
- break;
- case 1:
- /* Assert if the parent affinity instance is not 0. */
- parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
- assert(parent_aff_id == 0);
-
- /* Fetch the starting index in the aff1 array */
- for (ctr = 0;
- fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
- ctr = fvp_aff1_topology_map[ctr].sibling) {
- aff_count++;
- }
-
- break;
- case 0:
- /* Assert if the cluster id is anything apart from 0 or 1 */
- parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
- assert(parent_aff_id < ARM_CLUSTER_COUNT);
-
- /* Fetch the starting index in the aff0 array */
- for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
- fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
- ctr = fvp_aff0_topology_map[ctr].sibling) {
- aff_count++;
- }
-
- break;
- default:
- assert(0);
- }
-
- return aff_count;
-}
+/*
+ * The FVP power domain tree does not have a single system level power domain
+ * i.e. a single root node. The first entry in the power domain descriptor
+ * specifies the number of power domains at the highest power level. For the FVP
+ * this is 2 i.e. the number of cluster power domains.
+ */
+#define FVP_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
+
+/* The FVP power domain tree descriptor */
+/* Returned to the PSCI framework via plat_get_power_domain_tree_desc(). */
+const unsigned char arm_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ FVP_PWR_DOMAINS_AT_MAX_PWR_LVL,
+ /* No of children for the first node */
+ PLAT_ARM_CLUSTER0_CORE_COUNT,
+ /* No of children for the second node */
+ PLAT_ARM_CLUSTER1_CORE_COUNT
+};
/*******************************************************************************
* This function implements a part of the critical interface between the psci
- * generic layer and the platform to allow the former to detect the state of a
- * affinity instance in the platform topology. psci queries the platform to
- * determine whether an affinity instance is present or absent. This caters for
- * topologies where an intermediate affinity level instance is missing e.g.
- * consider a platform which implements a single cluster with 4 cpus and there
- * is another cpu sitting directly on the interconnect along with the cluster.
- * The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
- * cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
- * 1 is however missing but needs to be accounted to reach this single cpu in
- * the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
- * applicable to the FVP but depicted as an example.
- ******************************************************************************/
-unsigned int plat_get_aff_state(unsigned int aff_lvl,
- unsigned long mpidr)
-{
- unsigned int aff_state = PSCI_AFF_ABSENT, idx;
- idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-
- assert(topology_setup_done == 1);
-
- switch (aff_lvl) {
- case 3:
- case 2:
- /* Report affinity levels 2 & 3 as absent */
- break;
- case 1:
- aff_state = fvp_aff1_topology_map[idx].state;
- break;
- case 0:
- /*
- * First get start index of the aff0 in its array & then add
- * to it the affinity id that we want the state of
- */
- idx = fvp_aff1_topology_map[idx].child;
- idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
- aff_state = fvp_aff0_topology_map[idx].state;
- break;
- default:
- assert(0);
- }
-
- return aff_state;
-}
-
-/*******************************************************************************
- * This function populates the FVP specific topology information depending upon
- * the FVP flavour its running on. We construct all the mpidrs we can handle
- * and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
******************************************************************************/
-void plat_arm_topology_setup(void)
+int plat_core_pos_by_mpidr(u_register_t mpidr)
{
- unsigned char aff0, aff1, aff_state, aff0_offset = 0;
- unsigned long mpidr;
-
- topology_setup_done = 0;
-
- for (aff1 = 0; aff1 < ARM_CLUSTER_COUNT; aff1++) {
-
- fvp_aff1_topology_map[aff1].child = aff0_offset;
- fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
-
- for (aff0 = 0; aff0 < FVP_MAX_CPUS_PER_CLUSTER; aff0++) {
-
- mpidr = aff1 << MPIDR_AFF1_SHIFT;
- mpidr |= aff0 << MPIDR_AFF0_SHIFT;
-
- if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
- /*
- * Presence of even a single aff0 indicates
- * presence of parent aff1 on the FVP.
- */
- aff_state = PSCI_AFF_PRESENT;
- fvp_aff1_topology_map[aff1].state =
- PSCI_AFF_PRESENT;
- } else {
- aff_state = PSCI_AFF_ABSENT;
- }
-
- fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
- fvp_aff0_topology_map[aff0_offset].state = aff_state;
- fvp_aff0_topology_map[aff0_offset].sibling =
- aff0_offset + 1;
-
- /* Increment the absolute number of aff0s traversed */
- aff0_offset++;
- }
-
- /* Tie-off the last aff0 sibling to -1 to avoid overflow */
- fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
- }
+	/* Reject MPIDRs outside the static cluster/cpu bounds. */
+ if (arm_check_mpidr(mpidr) == -1)
+ return -1;
- /* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
- fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
+	/* NOTE(review): PSYSR_INVALID presumably flags CPUs absent on this
+	 * FVP model (Foundation vs Base) — confirm against fvp_pwrc. */
+ if (fvp_pwrc_read_psysr(mpidr) == PSYSR_INVALID)
+ return -1;
- topology_setup_done = 1;
+ return plat_arm_calc_core_pos(mpidr);
}
# POSSIBILITY OF SUCH DAMAGE.
#
-
PLAT_INCLUDES := -Iplat/arm/board/fvp/include
plat/arm/board/fvp/aarch64/fvp_helpers.S \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT := 0
+
include plat/arm/board/common/board_common.mk
include plat/arm/common/arm_common.mk
#
# TSP source files specific to FVP platform
-BL32_SOURCES += plat/arm/board/fvp/tsp/fvp_tsp_setup.c
+BL32_SOURCES += plat/arm/board/fvp/fvp_topology.c \
+ plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c \
+ plat/arm/board/fvp/tsp/fvp_tsp_setup.c
include plat/arm/common/tsp/arm_tsp.mk
# power down sequence
SKIP_A57_L1_FLUSH_PWR_DWN := 1
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT := 0
+
include plat/arm/board/common/board_css.mk
include plat/arm/common/arm_common.mk
include plat/arm/soc/common/soc_css.mk
# POSSIBILITY OF SUCH DAMAGE.
#
+BL32_SOURCES += plat/arm/css/common/css_topology.c
+
include plat/arm/common/tsp/arm_tsp.mk
#include <asm_macros.S>
#include <platform_def.h>
-
+ .weak plat_arm_calc_core_pos
+ .weak plat_my_core_pos
.globl plat_crash_console_init
.globl plat_crash_console_putc
+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void)
+ * This function uses the plat_arm_calc_core_pos()
+ * definition to get the index of the calling CPU.
+ * -----------------------------------------------------
+ */
+func plat_my_core_pos
+	/* x0 = this CPU's MPIDR; tail-call into the shared position helper. */
+ mrs x0, mpidr_el1
+ b plat_arm_calc_core_pos
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
+ * Helper function to calculate the core position.
+ * With this function: CorePos = (ClusterId * 4) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func plat_arm_calc_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+	/* Cluster field >> 6 == ClusterId * 4, giving CorePos = ClusterId*4 + CoreId. */
+ add x0, x1, x0, LSR #6
+ ret
+endfunc plat_arm_calc_core_pos
+
/* ---------------------------------------------
* int plat_crash_console_init(void)
* Function to initialize the crash console
/* Initialize power controller before setting up topology */
plat_arm_pwrc_setup();
-
- /* Topologies are best known to the platform. */
- plat_arm_topology_setup();
}
void bl31_platform_setup(void)
plat/arm/common/arm_security.c \
plat/arm/common/arm_topology.c \
plat/common/plat_gic.c \
- plat/common/aarch64/platform_mp_stack.S
+ plat/common/aarch64/platform_mp_stack.S \
+ plat/common/aarch64/plat_psci_common.c
ifneq (${TRUSTED_BOARD_BOOT},0)
#include <errno.h>
#include <psci.h>
-
/*******************************************************************************
- * ARM standard platform utility function which is used to determine if any
- * platform actions should be performed for the specified affinity instance
- * given its state. Nothing needs to be done if the 'state' is not off or if
- * this is not the highest affinity level which will enter the 'state'.
+ * ARM standard platform handler called to check the validity of the power state
+ * parameter.
******************************************************************************/
-int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state)
+int arm_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
{
- unsigned int max_phys_off_afflvl;
-
- assert(afflvl <= MPIDR_AFFLVL1);
+	/* Decode the type (standby/power-down) and target level from power_state. */
+ int pstate = psci_get_pstate_type(power_state);
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int i;
- if (state != PSCI_STATE_OFF)
- return -EAGAIN;
+ assert(req_state);
- /*
- * Find the highest affinity level which will be suspended and postpone
- * all the platform specific actions until that level is hit.
- */
- max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
- assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
- if (afflvl != max_phys_off_afflvl)
- return -EAGAIN;
-
- return 0;
-}
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
-/*******************************************************************************
- * ARM standard platform handler called to check the validity of the power state
- * parameter.
- ******************************************************************************/
-int arm_validate_power_state(unsigned int power_state)
-{
 /* Sanity check the requested state */
- if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
+ if (pstate == PSTATE_TYPE_STANDBY) {
 /*
- * It's possible to enter standby only on affinity level 0
- * (i.e. a CPU on ARM standard platforms).
- * Ignore any other affinity level.
+ * It's possible to enter standby only on power level 0
+ * Ignore any other power level.
 */
- if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
+ if (pwr_lvl != ARM_PWR_LVL0)
 return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[ARM_PWR_LVL0] =
+ ARM_LOCAL_STATE_RET;
+ } else {
+	/* Power-down: every level up to the target enters the OFF state. */
+ for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] =
+ ARM_LOCAL_STATE_OFF;
 }
/*
#include <arch.h>
#include <psci.h>
+#include <plat_arm.h>
#include <platform_def.h>
-/*
- * Weak definitions use fixed topology. Strong definitions could make topology
- * configurable
- */
-#pragma weak plat_get_aff_count
-#pragma weak plat_get_aff_state
-#pragma weak plat_arm_topology_setup
-
+#define get_arm_cluster_core_count(mpidr)\
+ (((mpidr) & 0x100) ? PLAT_ARM_CLUSTER1_CORE_COUNT :\
+ PLAT_ARM_CLUSTER0_CORE_COUNT)
-unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
-{
- /* Report 1 (absent) instance at levels higher that the cluster level */
- if (aff_lvl > MPIDR_AFFLVL1)
- return 1;
-
- if (aff_lvl == MPIDR_AFFLVL1)
- return ARM_CLUSTER_COUNT;
+/* The power domain tree descriptor which needs to be exported by ARM platforms */
+extern const unsigned char arm_power_domain_tree_desc[];
- return mpidr & 0x100 ? PLAT_ARM_CLUSTER1_CORE_COUNT :
- PLAT_ARM_CLUSTER0_CORE_COUNT;
-}
-unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
+/*******************************************************************************
+ * This function returns the ARM default topology tree information
+ * (the arm_power_domain_tree_desc array exported by each ARM platform).
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
{
- return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
+ return arm_power_domain_tree_desc;
}
-void plat_arm_topology_setup(void)
+/*******************************************************************************
+ * This function validates an MPIDR by checking whether it falls within the
+ * acceptable bounds. An error code (-1) is returned if an incorrect mpidr
+ * is passed.
+ ******************************************************************************/
+int arm_check_mpidr(u_register_t mpidr)
{
+ unsigned int cluster_id, cpu_id;
+
+	/* Keep only the affinity fields of the MPIDR. */
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+	/* Reject any MPIDR with bits set outside the Aff0/Aff1 (cpu/cluster) fields. */
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+ return -1;
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ if (cluster_id >= ARM_CLUSTER_COUNT)
+ return -1;
+
+ /* Validate cpu_id by checking whether it represents a CPU in
+ one of the two clusters present on the platform. */
+ if (cpu_id >= get_arm_cluster_core_count(mpidr))
+ return -1;
+
+ return 0;
+}
# TSP source files common to ARM standard platforms
BL32_SOURCES += drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \
+ plat/arm/common/arm_topology.c \
plat/arm/common/tsp/arm_tsp_setup.c \
plat/common/aarch64/platform_mp_stack.S \
plat/common/plat_gic.c
#include <css_def.h>
.weak plat_secondary_cold_boot_setup
- .weak platform_get_entrypoint
+ .weak plat_get_my_entrypoint
.weak platform_mem_init
- .globl platform_get_core_pos
- .weak platform_is_primary_cpu
-
+ .globl plat_arm_calc_core_pos
+ .weak plat_is_my_cpu_primary
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
endfunc plat_secondary_cold_boot_setup
/* -----------------------------------------------------
- * void platform_get_entrypoint (unsigned int mpid);
+ * unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between
- * a cold and warm boot.
+ * a cold and warm boot on the current CPU.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* TODO: Not a good idea to save lr in a temp reg
* -----------------------------------------------------
*/
-func platform_get_entrypoint
+func plat_get_my_entrypoint
mov x9, x30 // lr
- bl platform_get_core_pos
+ bl plat_my_core_pos
ldr x1, =TRUSTED_MAILBOXES_BASE
lsl x0, x0, #TRUSTED_MAILBOX_SHIFT
ldr x0, [x1, x0]
ret x9
-endfunc platform_get_entrypoint
+endfunc plat_get_my_entrypoint
- /*
- * Override the default implementation to swap the cluster order.
- * This is necessary in order to match the format of the boot
- * information passed by the SCP and read in platform_is_primary_cpu
- * below.
+ /* -----------------------------------------------------------
+ * unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
+ * Function to calculate the core position by
+ * swapping the cluster order. This is necessary in order to
+ * match the format of the boot information passed by the SCP
+ * and read in plat_is_my_cpu_primary below.
+ * -----------------------------------------------------------
*/
-func platform_get_core_pos
+func plat_arm_calc_core_pos
 and x1, x0, #MPIDR_CPU_MASK
 and x0, x0, #MPIDR_CLUSTER_MASK
 eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order
+	/* Result: CorePos = (swapped ClusterId * 4) + CoreId */
 add x0, x1, x0, LSR #6
 ret
-endfunc platform_get_core_pos
+endfunc plat_arm_calc_core_pos
/* -----------------------------------------------------
* void platform_mem_init(void);
endfunc platform_mem_init
/* -----------------------------------------------------
- * unsigned int platform_is_primary_cpu (unsigned int mpid);
+ * unsigned int plat_is_my_cpu_primary (void);
*
- * Given the mpidr say whether this cpu is the primary
+ * Find out whether the current cpu is the primary
 * cpu (applicable only after a cold boot)
* -----------------------------------------------------
*/
-func platform_is_primary_cpu
+func plat_is_my_cpu_primary
 mov x9, x30
- bl platform_get_core_pos
+ bl plat_my_core_pos
 ldr x1, =SCP_BOOT_CFG_ADDR
 ldr x1, [x1]
+	/* x1 = primary CPU index published by the SCP in the boot config word. */
 ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH
 cmp x0, x1
 cset x0, eq
 ret x9
-endfunc platform_is_primary_cpu
+endfunc plat_is_my_cpu_primary
BL31_SOURCES += plat/arm/css/common/css_mhu.c \
plat/arm/css/common/css_pm.c \
- plat/arm/css/common/css_scpi.c
+ plat/arm/css/common/css_scpi.c \
+ plat/arm/css/common/css_topology.c
ifneq (${RESET_TO_BL31},0)
#include <psci.h>
#include "css_scpi.h"
+/*
+ * Warm-boot entry point handed over by the PSCI generic layer in
+ * plat_setup_psci_ops() and programmed into the trusted mailbox on the
+ * power-on/suspend paths. Only referenced within this file, so give it
+ * internal linkage.
+ */
+static unsigned long wakeup_address;
+
/*******************************************************************************
* Private function to program the mailbox for a cpu before it is released
* from reset.
uint64_t linear_id;
uint64_t mbox;
- linear_id = platform_get_core_pos(mpidr);
+ linear_id = plat_arm_calc_core_pos(mpidr);
mbox = TRUSTED_MAILBOXES_BASE + (linear_id << TRUSTED_MAILBOX_SHIFT);
*((uint64_t *) mbox) = address;
flush_dcache_range(mbox, sizeof(mbox));
}
/*******************************************************************************
- * Handler called when an affinity instance is about to be turned on. The
- * level and mpidr determine the affinity instance.
+ * Handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
******************************************************************************/
-int32_t css_affinst_on(uint64_t mpidr,
- uint64_t sec_entrypoint,
- uint32_t afflvl,
- uint32_t state)
+int css_pwr_domain_on(u_register_t mpidr)
{
/*
- * SCP takes care of powering up higher affinity levels so we
+ * SCP takes care of powering up parent power domains so we
* only need to care about level 0
*/
- if (afflvl != MPIDR_AFFLVL0)
- return PSCI_E_SUCCESS;
/*
* Setup mailbox with address for CPU entrypoint when it next powers up
*/
- css_program_mailbox(mpidr, sec_entrypoint);
+ css_program_mailbox(mpidr, wakeup_address);
scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
scpi_power_on);
}
/*******************************************************************************
- * Handler called when an affinity instance has just been powered on after
- * being turned off earlier. The level and mpidr determine the affinity
- * instance. The 'state' arg. allows the platform to decide whether the cluster
- * was turned off prior to wakeup and do what's necessary to setup it up
- * correctly.
+ * Handler called when a power level has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
******************************************************************************/
-void css_affinst_on_finish(uint32_t afflvl, uint32_t state)
+void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
- unsigned long mpidr;
-
- /* Determine if any platform actions need to be executed. */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
- return;
-
- /* Get the mpidr for this cpu */
- mpidr = read_mpidr_el1();
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
/*
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
- if (afflvl != MPIDR_AFFLVL0)
- cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+ if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+ ARM_LOCAL_STATE_OFF)
+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
/* Enable the gic cpu interface */
arm_gic_cpuif_setup();
arm_gic_pcpu_distif_setup();
/* Clear the mailbox for this cpu. */
- css_program_mailbox(mpidr, 0);
+ css_program_mailbox(read_mpidr_el1(), 0);
}
/*******************************************************************************
* Common function called while turning a cpu off or suspending it. It is called
* from css_off() or css_suspend() when these functions in turn are called for
- * the highest affinity level which will be powered down. It performs the
- * actions common to the OFF and SUSPEND calls.
+ * power domain at the highest power level which will be powered down. It
+ * performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/
-static void css_power_down_common(uint32_t afflvl)
+static void css_power_down_common(const psci_power_state_t *target_state)
{
uint32_t cluster_state = scpi_power_on;
arm_gic_cpuif_deactivate();
/* Cluster is to be turned off, so disable coherency */
- if (afflvl > MPIDR_AFFLVL0) {
+ if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+ ARM_LOCAL_STATE_OFF) {
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
cluster_state = scpi_power_off;
}
}
/*******************************************************************************
- * Handler called when an affinity instance is about to be turned off. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take
- * appropriate actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It will be wise to do flush a write to the
- * global to prevent unpredictable results.
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-static void css_affinst_off(uint32_t afflvl, uint32_t state)
+static void css_pwr_domain_off(const psci_power_state_t *target_state)
{
- /* Determine if any platform actions need to be executed */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
- return;
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
- css_power_down_common(afflvl);
+ css_power_down_common(target_state);
}
/*******************************************************************************
- * Handler called when an affinity instance is about to be suspended. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions. The 'sec_entrypoint' determines the address in BL3-1 from where
- * execution should resume.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It will be wise to do flush a write to the
- * global to prevent unpredictable results.
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-static void css_affinst_suspend(uint64_t sec_entrypoint,
- uint32_t afflvl,
- uint32_t state)
+static void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
- /* Determine if any platform actions need to be executed */
- if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
+	/*
+	 * CSS platforms support retention only at the CPU power level. Just
+	 * return, as nothing is to be done for retention.
+	 */
+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_RET)
return;
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
+
/*
* Setup mailbox with address for CPU entrypoint when it next powers up.
*/
- css_program_mailbox(read_mpidr_el1(), sec_entrypoint);
+ css_program_mailbox(read_mpidr_el1(), wakeup_address);
- css_power_down_common(afflvl);
+ css_power_down_common(target_state);
}
/*******************************************************************************
- * Handler called when an affinity instance has just been powered on after
- * having been suspended earlier. The level and mpidr determine the affinity
- * instance.
+ * Handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
-static void css_affinst_suspend_finish(uint32_t afflvl,
- uint32_t state)
+static void css_pwr_domain_suspend_finish(
+ const psci_power_state_t *target_state)
{
- css_affinst_on_finish(afflvl, state);
+ /*
+ * Return as nothing is to be done on waking up from retention.
+ */
+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_RET)
+ return;
+
+ css_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
}
/*******************************************************************************
- * Handler called when an affinity instance is about to enter standby.
+ * Handler called when the CPU power domain is about to enter standby.
******************************************************************************/
-void css_affinst_standby(unsigned int power_state)
+void css_cpu_standby(plat_local_state_t cpu_state)
{
unsigned int scr;
+ assert(cpu_state == ARM_LOCAL_STATE_RET);
+
scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT);
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
-static const plat_pm_ops_t css_ops = {
- .affinst_on = css_affinst_on,
- .affinst_on_finish = css_affinst_on_finish,
- .affinst_off = css_affinst_off,
- .affinst_standby = css_affinst_standby,
- .affinst_suspend = css_affinst_suspend,
- .affinst_suspend_finish = css_affinst_suspend_finish,
+static const plat_psci_ops_t css_ops = {
+ .pwr_domain_on = css_pwr_domain_on,
+ .pwr_domain_on_finish = css_pwr_domain_on_finish,
+ .pwr_domain_off = css_pwr_domain_off,
+ .cpu_standby = css_cpu_standby,
+ .pwr_domain_suspend = css_pwr_domain_suspend,
+ .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = arm_validate_power_state
};
/*******************************************************************************
- * Export the platform specific power ops.
+ * Export the platform specific psci ops.
******************************************************************************/
-int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops)
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
{
-	*plat_ops = &css_ops;
+	*psci_ops = &css_ops;
+
+	/* Record the warm-boot entry point for later mailbox programming. */
+	wakeup_address = sec_entrypoint;
+
+	/*
+	 * Flush wakeup_address to memory: it is read on the power-on path,
+	 * presumably by CPUs whose caches are not yet enabled — confirm
+	 * against css_pwr_domain_on()/css_pwr_domain_suspend().
+	 */
+	flush_dcache_range((unsigned long)&wakeup_address,
+			sizeof(wakeup_address));
	return 0;
}
--- /dev/null
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <plat_arm.h>
+
+/*
+ * On ARM platforms, by default the cluster power level is treated as the
+ * highest. The first entry in the power domain descriptor specifies the
+ * number of cluster power domains i.e. 2.
+ */
+#define CSS_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
+
+/*
+ * The CSS power domain tree descriptor. The cluster power domains are
+ * arranged so that when the PSCI generic code creates the power domain tree,
+ * the indices of the CPU power domain nodes it allocates match the linear
+ * indices returned by plat_core_pos_by_mpidr() i.e.
+ * CLUSTER1 CPUs are allocated indices from 0 to
+ * (PLAT_ARM_CLUSTER1_CORE_COUNT - 1) and the higher indices go to
+ * CLUSTER0 CPUs.
+ */
+const unsigned char arm_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ CSS_PWR_DOMAINS_AT_MAX_PWR_LVL,
+ /* No of children for the first node */
+ PLAT_ARM_CLUSTER1_CORE_COUNT,
+ /* No of children for the second node */
+ PLAT_ARM_CLUSTER0_CORE_COUNT
+};
+
+
+/******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is
+ * returned in case the MPIDR is invalid.
+ *****************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	/* Reject invalid MPIDRs up front, then compute the linear index. */
+	if (arm_check_mpidr(mpidr) != 0)
+		return -1;
+
+	return plat_arm_calc_core_pos(mpidr);
+}