diff options
Diffstat (limited to 'lib')
-rw-r--r-- | lib/cpus/aarch32/aem_generic.S | 4 | ||||
-rw-r--r-- | lib/cpus/aarch32/cortex_a32.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch32/cpu_helpers.S | 59 | ||||
-rw-r--r-- | lib/cpus/aarch64/aem_generic.S | 8 | ||||
-rw-r--r-- | lib/cpus/aarch64/cortex_a35.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch64/cortex_a53.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch64/cortex_a57.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch64/cortex_a72.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch64/cortex_a73.S | 5 | ||||
-rw-r--r-- | lib/cpus/aarch64/cpu_helpers.S | 44 | ||||
-rw-r--r-- | lib/cpus/aarch64/denver.S | 5 | ||||
-rw-r--r-- | lib/psci/aarch32/psci_helpers.S | 19 | ||||
-rw-r--r-- | lib/psci/aarch64/psci_helpers.S | 20 |
13 files changed, 88 insertions, 101 deletions
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S index 10ea4e47..3d6064c9 100644 --- a/lib/cpus/aarch32/aem_generic.S +++ b/lib/cpus/aarch32/aem_generic.S @@ -65,4 +65,6 @@ func aem_generic_cluster_pwr_dwn endfunc aem_generic_cluster_pwr_dwn /* cpu_ops for Base AEM FVP */ -declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 +declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \ + aem_generic_core_pwr_dwn, \ + aem_generic_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S index f2b85a31..f631c4cf 100644 --- a/lib/cpus/aarch32/cortex_a32.S +++ b/lib/cpus/aarch32/cortex_a32.S @@ -141,4 +141,7 @@ func cortex_a32_cluster_pwr_dwn b cortex_a32_disable_smp endfunc cortex_a32_cluster_pwr_dwn -declare_cpu_ops cortex_a32, CORTEX_A32_MIDR +declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \ + cortex_a32_reset_func, \ + cortex_a32_core_pwr_dwn, \ + cortex_a32_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S index a4dfe5f2..900d158c 100644 --- a/lib/cpus/aarch32/cpu_helpers.S +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -70,50 +70,39 @@ endfunc reset_handler #if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */ /* - * The prepare core power down function for all platforms. After - * the cpu_ops pointer is retrieved from cpu_data, the corresponding - * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS. + * void prepare_cpu_pwr_dwn(unsigned int power_level) + * + * Prepare CPU power down function for all platforms. The function takes + * a domain level to be powered down as its parameter. After the cpu_ops + * pointer is retrieved from cpu_data, the handler for requested power + * level is called. 
*/ - .globl prepare_core_pwr_dwn -func prepare_core_pwr_dwn - /* r12 is pushed to meet the 8 byte stack alignment requirement */ - push {r12, lr} - bl _cpu_data - pop {r12, lr} - - ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] -#if ASM_ASSERTION - cmp r1, #0 - ASM_ASSERT(ne) -#endif - - /* Get the cpu_ops core_pwr_dwn handler */ - ldr r0, [r1, #CPU_PWR_DWN_CORE] - bx r0 -endfunc prepare_core_pwr_dwn - + .globl prepare_cpu_pwr_dwn +func prepare_cpu_pwr_dwn /* - * The prepare cluster power down function for all platforms. After - * the cpu_ops pointer is retrieved from cpu_data, the corresponding - * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS. + * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the + * power down handler for the last power level */ - .globl prepare_cluster_pwr_dwn -func prepare_cluster_pwr_dwn - /* r12 is pushed to meet the 8 byte stack alignment requirement */ - push {r12, lr} + mov r2, #(CPU_MAX_PWR_DWN_OPS - 1) + cmp r0, r2 + movhi r0, r2 + + push {r0, lr} bl _cpu_data - pop {r12, lr} + pop {r2, lr} - ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] + ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR] #if ASM_ASSERTION - cmp r1, #0 + cmp r0, #0 ASM_ASSERT(ne) #endif - /* Get the cpu_ops cluster_pwr_dwn handler */ - ldr r0, [r1, #CPU_PWR_DWN_CLUSTER] - bx r0 -endfunc prepare_cluster_pwr_dwn + /* Get the appropriate power down handler */ + mov r1, #CPU_PWR_DWN_OPS + add r1, r1, r2, lsl #2 + ldr r1, [r0, r1] + bx r1 +endfunc prepare_cpu_pwr_dwn /* * Initializes the cpu_ops_ptr if not already initialized diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S index 0ab5253e..0cedd855 100644 --- a/lib/cpus/aarch64/aem_generic.S +++ b/lib/cpus/aarch64/aem_generic.S @@ -90,7 +90,11 @@ endfunc aem_generic_cpu_reg_dump /* cpu_ops for Base AEM FVP */ -declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 +declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \ + aem_generic_core_pwr_dwn, \ + aem_generic_cluster_pwr_dwn /* cpu_ops for Foundation 
FVP */ -declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1 +declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \ + aem_generic_core_pwr_dwn, \ + aem_generic_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S index ba29d6d4..c17c8f17 100644 --- a/lib/cpus/aarch64/cortex_a35.S +++ b/lib/cpus/aarch64/cortex_a35.S @@ -157,4 +157,7 @@ func cortex_a35_cpu_reg_dump ret endfunc cortex_a35_cpu_reg_dump -declare_cpu_ops cortex_a35, CORTEX_A35_MIDR +declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \ + cortex_a35_reset_func, \ + cortex_a35_core_pwr_dwn, \ + cortex_a35_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S index ed546e7e..06be9ce6 100644 --- a/lib/cpus/aarch64/cortex_a53.S +++ b/lib/cpus/aarch64/cortex_a53.S @@ -244,4 +244,7 @@ func cortex_a53_cpu_reg_dump ret endfunc cortex_a53_cpu_reg_dump -declare_cpu_ops cortex_a53, CORTEX_A53_MIDR +declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \ + cortex_a53_reset_func, \ + cortex_a53_core_pwr_dwn, \ + cortex_a53_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index d6b181d0..e531b1e3 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -488,4 +488,7 @@ func cortex_a57_cpu_reg_dump endfunc cortex_a57_cpu_reg_dump -declare_cpu_ops cortex_a57, CORTEX_A57_MIDR +declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \ + cortex_a57_reset_func, \ + cortex_a57_core_pwr_dwn, \ + cortex_a57_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index 9f04fb72..fffc99f5 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -242,4 +242,7 @@ func cortex_a72_cpu_reg_dump endfunc cortex_a72_cpu_reg_dump -declare_cpu_ops cortex_a72, CORTEX_A72_MIDR +declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \ + cortex_a72_reset_func, \ + cortex_a72_core_pwr_dwn, \ + cortex_a72_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a73.S 
b/lib/cpus/aarch64/cortex_a73.S index e1615dbd..49d5449a 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -153,4 +153,7 @@ func cortex_a73_cpu_reg_dump ret endfunc cortex_a73_cpu_reg_dump -declare_cpu_ops cortex_a73, CORTEX_A73_MIDR +declare_cpu_ops cortex_a73, CORTEX_A73_MIDR, \ + cortex_a73_reset_func, \ + cortex_a73_core_pwr_dwn, \ + cortex_a73_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index dab933c7..ec7f1dde 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -74,31 +74,23 @@ endfunc reset_handler #if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */ /* - * The prepare core power down function for all platforms. After - * the cpu_ops pointer is retrieved from cpu_data, the corresponding - * pwr_dwn_core in the cpu_ops is invoked. + * void prepare_cpu_pwr_dwn(unsigned int power_level) + * + * Prepare CPU power down function for all platforms. The function takes + * a domain level to be powered down as its parameter. After the cpu_ops + * pointer is retrieved from cpu_data, the handler for requested power + * level is called. */ - .globl prepare_core_pwr_dwn -func prepare_core_pwr_dwn - mrs x1, tpidr_el3 - ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR] -#if ASM_ASSERTION - cmp x0, #0 - ASM_ASSERT(ne) -#endif - - /* Get the cpu_ops core_pwr_dwn handler */ - ldr x1, [x0, #CPU_PWR_DWN_CORE] - br x1 -endfunc prepare_core_pwr_dwn - + .globl prepare_cpu_pwr_dwn +func prepare_cpu_pwr_dwn /* - * The prepare cluster power down function for all platforms. After - * the cpu_ops pointer is retrieved from cpu_data, the corresponding - * pwr_dwn_cluster in the cpu_ops is invoked. 
+ * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the + * power down handler for the last power level */ - .globl prepare_cluster_pwr_dwn -func prepare_cluster_pwr_dwn + mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1) + cmp x0, x2 + csel x2, x2, x0, hi + mrs x1, tpidr_el3 ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR] #if ASM_ASSERTION @@ -106,10 +98,12 @@ func prepare_cluster_pwr_dwn ASM_ASSERT(ne) #endif - /* Get the cpu_ops cluster_pwr_dwn handler */ - ldr x1, [x0, #CPU_PWR_DWN_CLUSTER] + /* Get the appropriate power down handler */ + mov x1, #CPU_PWR_DWN_OPS + add x1, x1, x2, lsl #3 + ldr x1, [x0, x1] br x1 -endfunc prepare_cluster_pwr_dwn +endfunc prepare_cpu_pwr_dwn /* diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S index bce05737..0b61440d 100644 --- a/lib/cpus/aarch64/denver.S +++ b/lib/cpus/aarch64/denver.S @@ -163,4 +163,7 @@ func denver_cpu_reg_dump ret endfunc denver_cpu_reg_dump -declare_cpu_ops denver, DENVER_1_0_MIDR +declare_cpu_ops denver, DENVER_1_0_MIDR, \ + denver_reset_func, \ + denver_core_pwr_dwn, \ + denver_cluster_pwr_dwn diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S index 5a41ff31..9f991dfe 100644 --- a/lib/psci/aarch32/psci_helpers.S +++ b/lib/psci/aarch32/psci_helpers.S @@ -65,22 +65,13 @@ func psci_do_pwrdown_cache_maintenance bl do_stack_maintenance /* --------------------------------------------- - * Determine how many levels of cache will be - * subject to cache maintenance. Power level - * 0 implies that only the cpu is being powered - * down. Only the L1 data cache needs to be - * flushed to the PoU in this case. For a higher - * power level we are assuming that a flush - * of L1 data and L2 unified cache is enough. - * This information should be provided by the - * platform. 
+ * Invoke CPU-specific power down operations for + * the appropriate level * --------------------------------------------- */ - cmp r4, #PSCI_CPU_PWR_LVL - pop {r4,lr} - - beq prepare_core_pwr_dwn - b prepare_cluster_pwr_dwn + mov r0, r4 + pop {r4, lr} + b prepare_cpu_pwr_dwn endfunc psci_do_pwrdown_cache_maintenance diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S index eaa17c72..108f0687 100644 --- a/lib/psci/aarch64/psci_helpers.S +++ b/lib/psci/aarch64/psci_helpers.S @@ -59,24 +59,11 @@ func psci_do_pwrdown_cache_maintenance stp x19, x20, [sp,#-16]! /* --------------------------------------------- - * Determine to how many levels of cache will be - * subject to cache maintenance. Power level - * 0 implies that only the cpu is being powered - * down. Only the L1 data cache needs to be - * flushed to the PoU in this case. For a higher - * power level we are assuming that a flush - * of L1 data and L2 unified cache is enough. - * This information should be provided by the - * platform. + * Invoke CPU-specific power down operations for + * the appropriate level * --------------------------------------------- */ - cmp w0, #PSCI_CPU_PWR_LVL - b.eq do_core_pwr_dwn - bl prepare_cluster_pwr_dwn - b do_stack_maintenance - -do_core_pwr_dwn: - bl prepare_core_pwr_dwn + bl prepare_cpu_pwr_dwn /* --------------------------------------------- * Do stack maintenance by flushing the used @@ -84,7 +71,6 @@ do_core_pwr_dwn: * remainder. * --------------------------------------------- */ -do_stack_maintenance: bl plat_get_my_stack /* --------------------------------------------- |