Diffstat (limited to 'plat/rockchip/common')
-rw-r--r--  plat/rockchip/common/aarch64/plat_helpers.S | 114
-rw-r--r--  plat/rockchip/common/drivers/pmu/pmu_com.h  |   2
-rw-r--r--  plat/rockchip/common/include/plat_private.h |  20
-rw-r--r--  plat/rockchip/common/plat_pm.c              | 115
-rw-r--r--  plat/rockchip/common/plat_topology.c        |  10
5 files changed, 137 insertions, 124 deletions
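The headline change is the set of higher-level ("hlvl", i.e. cluster/system) power-domain callbacks added to struct rockchip_pm_ops_cb and iterated over in plat_pm.c, shown in the hunks below. As a rough sketch of how an SoC port might consume them, assuming the registration flow used by the existing ports: the soc_hlvl_pwr_dm_off name and its body are hypothetical; only the ops struct, plat_setup_rockchip_pm_ops() and plat_rockchip_pmu_init() come from the tree.

    #include <plat_private.h>
    #include <psci.h>

    /* Hypothetical cluster/system-level hook: plat_pm.c invokes it once per
     * power level, from MPIDR_AFFLVL1 up to PLAT_MAX_PWR_LVL, passing that
     * level's requested local state. */
    static int soc_hlvl_pwr_dm_off(uint32_t lvl, plat_local_state_t lvl_state)
    {
        if (lvl == MPIDR_AFFLVL1 && lvl_state == PLAT_MAX_OFF_STATE) {
            /* cluster-level power-down sequence would go here */
        }
        return 0;
    }

    static struct rockchip_pm_ops_cb pm_ops = {
        .hlvl_pwr_dm_off = soc_hlvl_pwr_dm_off,
        /* core- and system-level callbacks elided */
    };

    void plat_rockchip_pmu_init(void)
    {
        plat_setup_rockchip_pm_ops(&pm_ops);
    }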
diff --git a/plat/rockchip/common/aarch64/plat_helpers.S b/plat/rockchip/common/aarch64/plat_helpers.S
index 1bbb6145..d06d4cba 100644
--- a/plat/rockchip/common/aarch64/plat_helpers.S
+++ b/plat/rockchip/common/aarch64/plat_helpers.S
@@ -35,6 +35,7 @@
 #include <cortex_a72.h>
 #include <plat_private.h>
 #include <platform_def.h>
+#include <plat_pmu_macros.S>
 
 	.globl	cpuson_entry_point
 	.globl	cpuson_flags
@@ -47,68 +48,6 @@
 	.globl	plat_my_core_pos
 	.globl	plat_reset_handler
 
-
-#define RK_REVISION(rev) RK_PLAT_CFG##rev
-#define RK_HANDLER(rev) plat_reset_handler_juno_r##rev
-#define JUMP_TO_HANDLER_IF_RK_R(revision)	\
-	jump_to_handler RK_REVISION(revision), RK_HANDLER(revision)
-
-	/*
-	 * Helper macro to jump to the given handler if the board revision
-	 * matches.
-	 * Expects the Juno board revision in x0.
-	 *
-	 */
-	.macro jump_to_handler _revision, _handler
-	cmp	x0, #\_revision
-	b.eq	\_handler
-	.endm
-
-	/*
-	 * Helper macro that reads the part number of the current CPU and jumps
-	 * to the given label if it matches the CPU MIDR provided.
-	 */
-	.macro jump_if_cpu_midr _cpu_midr, _label
-	mrs	x0, midr_el1
-	ubfx	x0, x0, MIDR_PN_SHIFT, #12
-	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
-	b.eq	\_label
-	.endm
-
-	/*
-	 * Platform reset handler for rockchip.
-	 * only A53 cores
-	 */
-func RK_HANDLER(0)
-	ret
-endfunc RK_HANDLER(0)
-
-	/*
-	 * Platform reset handler for rockchip.
-	 * - Cortex-A53 processor cluster;
-	 * - Cortex-A72 processor cluster.
-	 *
-	 * This handler does the following:
-	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
-	 * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
-	 */
-func RK_HANDLER(1)
-	/*
-	 * Nothing to do on Cortex-A53.
-	 *
-	 */
-	jump_if_cpu_midr CORTEX_A72_MIDR, A72
-	ret
-
-A72:
-	/* Cortex-A72 specific settings */
-	mov	x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
-		(0x1 << 5))
-	msr	L2CTLR_EL1, x0
-	isb
-	ret
-endfunc RK_HANDLER(1)
-
 	/*
 	 * void plat_reset_handler(void);
 	 *
@@ -117,22 +56,30 @@ endfunc RK_HANDLER(1)
 	 *
 	 */
 func plat_reset_handler
-
-	mov	x0, RK_PLAT_AARCH_CFG
-
-	JUMP_TO_HANDLER_IF_RK_R(0)
-	JUMP_TO_HANDLER_IF_RK_R(1)
-
-	/* SOC type is not supported */
-not_supported:
-	b	not_supported
+	mrs	x0, midr_el1
+	ubfx	x0, x0, MIDR_PN_SHIFT, #12
+	cmp	w0, #((CORTEX_A72_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	b.eq	handler_a72
+	b	handler_end
+handler_a72:
+	/*
+	 * This handler does the following:
+	 * Set the L2 Data RAM latency for Cortex-A72.
+	 * Set the L2 Tag RAM latency for Cortex-A72.
+	 */
+	mov	x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+		(0x1 << 5))
+	msr	L2CTLR_EL1, x0
+	isb
+handler_end:
+	ret
 endfunc plat_reset_handler
 
 func plat_my_core_pos
 	mrs	x0, mpidr_el1
 	and	x1, x0, #MPIDR_CPU_MASK
 	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
+	add	x0, x1, x0, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT
 	ret
 endfunc plat_my_core_pos
 
@@ -192,30 +139,30 @@ endfunc plat_crash_console_putc
 	.align	16
 func platform_cpu_warmboot
 	mrs	x0, MPIDR_EL1
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
+	and	x19, x0, #MPIDR_CPU_MASK
+	and	x20, x0, #MPIDR_CLUSTER_MASK
+	mov	x0, x20
+	func_rockchip_clst_warmboot
 	/* --------------------------------------------------------------------
 	 * big cluster id is 1
 	 * big cores id is from 0-3, little cores id 4-7
 	 * --------------------------------------------------------------------
 	 */
-	add	x0, x1, x0, lsr #6
+	add	x21, x19, x20, lsr #PLAT_RK_CLST_TO_CPUID_SHIFT
 	/* --------------------------------------------------------------------
 	 * get per cpuup flag
 	 * --------------------------------------------------------------------
 	 */
 	adr	x4, cpuson_flags
-	add	x4, x4, x0, lsl #2
+	add	x4, x4, x21, lsl #2
 	ldr	w1, [x4]
 	/* --------------------------------------------------------------------
 	 * check cpuon reason
 	 * --------------------------------------------------------------------
 	 */
-	ldr	w3, =PMU_CPU_AUTO_PWRDN
-	cmp	w1, w3
+	cmp	w1, PMU_CPU_AUTO_PWRDN
 	b.eq	boot_entry
-	ldr	w3, =PMU_CPU_HOTPLUG
-	cmp	w1, w3
+	cmp	w1, PMU_CPU_HOTPLUG
 	b.eq	boot_entry
 	/* --------------------------------------------------------------------
 	 * If the boot core cpuson_flags or cpuson_entry_point is not
@@ -226,15 +173,13 @@ wfe_loop:
 	wfe
 	b	wfe_loop
 boot_entry:
-	mov	w1, #0
-	str	w1, [x4]
+	str	wzr, [x4]
 	/* --------------------------------------------------------------------
 	 * get per cpuup boot addr
 	 * --------------------------------------------------------------------
 	 */
 	adr	x5, cpuson_entry_point
-	ldr	x2, [x5, x0, lsl #3]
-
+	ldr	x2, [x5, x21, lsl #3]
 	br	x2
 endfunc platform_cpu_warmboot
 
@@ -252,3 +197,4 @@ cpuson_flags:
 	.rept	PLATFORM_CORE_COUNT
 	.word	0
 	.endr
+rockchip_clst_warmboot_data
diff --git a/plat/rockchip/common/drivers/pmu/pmu_com.h b/plat/rockchip/common/drivers/pmu/pmu_com.h
index 4cffb614..a6d3186f 100644
--- a/plat/rockchip/common/drivers/pmu/pmu_com.h
+++ b/plat/rockchip/common/drivers/pmu/pmu_com.h
@@ -31,7 +31,7 @@
  * Use this macro to instantiate lock before it is used in below
  * rockchip_pd_lock_xxx() macros
  */
-DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+DECLARE_BAKERY_LOCK(rockchip_pd_lock);
 
 /*
  * These are wrapper macros to the powe domain Bakery Lock API.
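The pmu_com.h one-liner fixes a linkage problem: the header is included by more than one translation unit, and DEFINE_BAKERY_LOCK emits storage for the lock, so every includer got its own copy. Declaring it instead leaves the single definition to one C file. A simplified sketch of the distinction, with flattened stand-ins for TF-A's real macros (which also place the lock in a dedicated linker section):

    /* Stand-in for the real bakery lock type; fields simplified. */
    typedef struct {
        volatile unsigned int lock_data;
    } bakery_lock_t;

    #define DECLARE_BAKERY_LOCK(_name)  extern bakery_lock_t _name
    #define DEFINE_BAKERY_LOCK(_name)   bakery_lock_t _name

    /* In a shared header such as pmu_com.h: only a declaration, so every
     * includer refers to one object instead of each defining its own. */
    DECLARE_BAKERY_LOCK(rockchip_pd_lock);

    /* In exactly one translation unit (e.g. the SoC's pmu driver): */
    DEFINE_BAKERY_LOCK(rockchip_pd_lock);

With the declaration in the header, all pmu code takes and releases the same rockchip_pd_lock object.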
diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h
index 031a3413..71998998 100644
--- a/plat/rockchip/common/include/plat_private.h
+++ b/plat/rockchip/common/include/plat_private.h
@@ -35,6 +35,7 @@
 #include <mmio.h>
 #include <stdint.h>
 #include <xlat_tables.h>
+#include <psci.h>
 
 /******************************************************************************
  * For rockchip socs pm ops
@@ -45,6 +46,12 @@ struct rockchip_pm_ops_cb {
 	int (*cores_pwr_dm_on_finish)(void);
 	int (*cores_pwr_dm_suspend)(void);
 	int (*cores_pwr_dm_resume)(void);
+	/* hlvl is used for clusters or system level */
+	int (*hlvl_pwr_dm_suspend)(uint32_t lvl, plat_local_state_t lvl_state);
+	int (*hlvl_pwr_dm_resume)(uint32_t lvl, plat_local_state_t lvl_state);
+	int (*hlvl_pwr_dm_off)(uint32_t lvl, plat_local_state_t lvl_state);
+	int (*hlvl_pwr_dm_on_finish)(uint32_t lvl,
+				     plat_local_state_t lvl_state);
 	int (*sys_pwr_dm_suspend)(void);
 	int (*sys_pwr_dm_resume)(void);
 	void (*sys_gbl_soft_reset)(void) __dead2;
@@ -109,6 +116,7 @@ void plat_rockchip_pmusram_prepare(void);
 void plat_rockchip_pmu_init(void);
 void plat_rockchip_soc_init(void);
 void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops);
+uintptr_t plat_get_sec_entrypoint(void);
 
 void platform_cpu_warmboot(void);
 
@@ -126,10 +134,12 @@ extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
 extern const mmap_region_t plat_rk_mmap[];
 #endif /* __ASSEMBLY__ */
 
-/* only Cortex-A53 */
-#define RK_PLAT_CFG0 0
-
-/* include Cortex-A72 */
-#define RK_PLAT_CFG1 1
+/******************************************************************************
+ * cpu up status
+ * The bits of macro value is not more than 12 bits for cmp instruction!
+ ******************************************************************************/
+#define PMU_CPU_HOTPLUG		0xf00
+#define PMU_CPU_AUTO_PWRDN	0xf0
+#define PMU_CLST_RET		0xa5
 
 #endif /* __PLAT_PRIVATE_H__ */
diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c
index d20a683b..b6291bbf 100644
--- a/plat/rockchip/common/plat_pm.c
+++ b/plat/rockchip/common/plat_pm.c
@@ -50,21 +50,6 @@
 static uintptr_t rockchip_sec_entrypoint;
 
 static struct rockchip_pm_ops_cb *rockchip_ops;
-
-static void plat_rockchip_sys_pwr_domain_resume(void)
-{
-	if (rockchip_ops && rockchip_ops->sys_pwr_dm_resume)
-		rockchip_ops->sys_pwr_dm_resume();
-}
-
-static void plat_rockchip_cores_pwr_domain_resume(void)
-{
-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_resume)
-		rockchip_ops->cores_pwr_dm_resume();
-
-	/* Program the gic per-cpu distributor or re-distributor interface */
-	plat_rockchip_gic_cpuif_enable();
-}
 
 /*******************************************************************************
  * Rockchip standard platform handler called to check the validity of the power
 * state parameter.
@@ -96,6 +81,10 @@ int rockchip_validate_power_state(unsigned int power_state,
 		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
 			req_state->pwr_domain_state[i] =
 						PLAT_MAX_OFF_STATE;
+
+		for (i = (pwr_lvl + 1); i <= PLAT_MAX_PWR_LVL; i++)
+			req_state->pwr_domain_state[i] =
+						PLAT_MAX_RET_STATE;
 	}
 
 	/* We expect the 'state id' to be zero */
@@ -154,14 +143,28 @@ int rockchip_pwr_domain_on(u_register_t mpidr)
  ******************************************************************************/
 void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
 {
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
 	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
 
 	plat_rockchip_gic_cpuif_disable();
 
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
 		plat_cci_disable();
-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_off)
-		rockchip_ops->cores_pwr_dm_off();
+
+	if (!rockchip_ops || !rockchip_ops->cores_pwr_dm_off)
+		return;
+
+	rockchip_ops->cores_pwr_dm_off();
+
+	if (!rockchip_ops->hlvl_pwr_dm_off)
+		return;
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		rockchip_ops->hlvl_pwr_dm_off(lvl, lvl_state);
+	}
 }
 
 /*******************************************************************************
@@ -170,17 +173,19 @@ void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
  ******************************************************************************/
 void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
 {
-	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
-		return;
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
 
-	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
 
-	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
-		if (rockchip_ops && rockchip_ops->sys_pwr_dm_suspend)
+	if (rockchip_ops) {
+		if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE &&
+		    rockchip_ops->sys_pwr_dm_suspend) {
 			rockchip_ops->sys_pwr_dm_suspend();
-	} else {
-		if (rockchip_ops && rockchip_ops->cores_pwr_dm_suspend)
+		} else if (rockchip_ops->cores_pwr_dm_suspend) {
 			rockchip_ops->cores_pwr_dm_suspend();
+		}
 	}
 
 	/* Prevent interrupts from spuriously waking up this cpu */
@@ -189,6 +194,14 @@ void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
 	/* Perform the common cluster specific operations */
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
 		plat_cci_disable();
+
+	if (!rockchip_ops || !rockchip_ops->hlvl_pwr_dm_suspend)
+		return;
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		rockchip_ops->hlvl_pwr_dm_suspend(lvl, lvl_state);
+	}
 }
 
 /*******************************************************************************
@@ -198,10 +211,24 @@ void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
  ******************************************************************************/
 void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
 {
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
 	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
 
-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_on_finish)
+	if (!rockchip_ops)
+		goto comm_finish;
+
+	if (rockchip_ops->hlvl_pwr_dm_on_finish) {
+		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+			lvl_state = target_state->pwr_domain_state[lvl];
+			rockchip_ops->hlvl_pwr_dm_on_finish(lvl, lvl_state);
+		}
+	}
+
+	if (rockchip_ops->cores_pwr_dm_on_finish)
 		rockchip_ops->cores_pwr_dm_on_finish();
+comm_finish:
 
 	/* Perform the common cluster specific operations */
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
@@ -225,15 +252,37 @@ void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
  ******************************************************************************/
 void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
 {
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
 	/* Nothing to be done on waking up from retention from CPU level */
-	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
 		return;
 
 	/* Perform system domain restore if woken up from system suspend */
-	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
-		plat_rockchip_sys_pwr_domain_resume();
-	else
-		plat_rockchip_cores_pwr_domain_resume();
+	if (!rockchip_ops)
+		goto comm_finish;
+
+	if (rockchip_ops->hlvl_pwr_dm_resume) {
+		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+			lvl_state = target_state->pwr_domain_state[lvl];
+			rockchip_ops->hlvl_pwr_dm_resume(lvl, lvl_state);
+		}
+	}
+
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE &&
+	    rockchip_ops->sys_pwr_dm_resume) {
+		rockchip_ops->sys_pwr_dm_resume();
+	} else if (rockchip_ops->cores_pwr_dm_resume) {
+		rockchip_ops->cores_pwr_dm_resume();
+	}
+
+comm_finish:
+	/*
+	 * Program the gic per-cpu distributor
+	 * or re-distributor interface
+	 */
+	plat_rockchip_gic_cpuif_enable();
 
 	/* Perform the common cluster specific operations */
 	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
@@ -288,6 +337,12 @@ int plat_setup_psci_ops(uintptr_t sec_entrypoint,
 	return 0;
 }
 
+uintptr_t plat_get_sec_entrypoint(void)
+{
+	assert(rockchip_sec_entrypoint);
+	return rockchip_sec_entrypoint;
+}
+
 void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops)
 {
 	rockchip_ops = ops;
diff --git a/plat/rockchip/common/plat_topology.c b/plat/rockchip/common/plat_topology.c
index 911978a0..8a139455 100644
--- a/plat/rockchip/common/plat_topology.c
+++ b/plat/rockchip/common/plat_topology.c
@@ -45,11 +45,13 @@ int plat_core_pos_by_mpidr(u_register_t mpidr)
 {
 	unsigned int cluster_id, cpu_id;
 
-	cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
-	cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+	cpu_id = mpidr & MPIDR_AFFLVL_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
 
-	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+	cpu_id += (cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT);
+
+	if (cpu_id >= PLATFORM_CORE_COUNT)
 		return -1;
 
-	return ((cluster_id * PLATFORM_CLUSTER0_CORE_COUNT) + cpu_id);
+	return cpu_id;
 }
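The plat_topology.c rewrite drops the multiply/add mapping in favour of the same fold used by plat_my_core_pos in plat_helpers.S: mask out the MPIDR cluster field, shift it down onto the linear core index, and bounds-check the result against PLATFORM_CORE_COUNT. A standalone sketch of the arithmetic, with constant values assumed here from the existing layout (the old code hard-coded the shift as 6, so cluster 1 at MPIDR bit 8 lands on core index 4):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values: affinity level 0 in MPIDR bits [7:0], cluster in
     * bits [15:8]; PLAT_RK_CLST_TO_CPUID_SHIFT = 6 reproduces the old
     * "lsr #6" (cluster 1 -> 0x100 >> 6 == 4). */
    #define MPIDR_AFFLVL_MASK           0xffu
    #define MPIDR_CLUSTER_MASK          0xff00u
    #define PLAT_RK_CLST_TO_CPUID_SHIFT 6
    #define PLATFORM_CORE_COUNT         8

    static int plat_core_pos_by_mpidr(uint64_t mpidr)
    {
        unsigned int cpu_id = mpidr & MPIDR_AFFLVL_MASK;
        unsigned int cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        /* Fold the cluster field into a linear core index. */
        cpu_id += cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT;

        return (cpu_id >= PLATFORM_CORE_COUNT) ? -1 : (int)cpu_id;
    }

    int main(void)
    {
        printf("%d\n", plat_core_pos_by_mpidr(0x002)); /* cluster 0, cpu 2 -> 2 */
        printf("%d\n", plat_core_pos_by_mpidr(0x102)); /* cluster 1, cpu 2 -> 6 */
        return 0;
    }

Checking the folded cpu_id rather than cluster_id alone also rejects any MPIDR whose CPU field exceeds the per-cluster core count, which the old code let through.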