summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authortkasivajhula <tkasivajhula@nvidia.com>2010-04-23 17:17:14 -0700
committerGary King <gking@nvidia.com>2010-04-23 17:20:30 -0700
commit0785ea5fe3a61b0ef9011b81fa593d3553a21f9e (patch)
treec2d5bb39ed2cd2f3582bb659695638cdfa562bc8
parentfeddf1576d157d1a688ca4c5e09bfacc79bccdb9 (diff)
tegra power: Clean low power implementations (LP2, LP1, LP0)
Get rid of some unneeded variables, indented and cleaned things up. Change-Id: I06bf8df81de7f1e68f7175f5f507836a2a88a608 Reviewed-on: http://git-master/r/1145 Reviewed-by: Trivikram Kasivajhula <tkasivajhula@nvidia.com> Tested-by: Trivikram Kasivajhula <tkasivajhula@nvidia.com> Reviewed-by: Gary King <gking@nvidia.com>
-rw-r--r--arch/arm/mach-tegra/idle-t2.c3
-rw-r--r--arch/arm/mach-tegra/power-lp.S1837
-rw-r--r--arch/arm/mach-tegra/power-t2.c8
3 files changed, 917 insertions, 931 deletions
diff --git a/arch/arm/mach-tegra/idle-t2.c b/arch/arm/mach-tegra/idle-t2.c
index 33625294df9f..c9aeb8b6c7a7 100644
--- a/arch/arm/mach-tegra/idle-t2.c
+++ b/arch/arm/mach-tegra/idle-t2.c
@@ -39,7 +39,7 @@ extern void cpu_ap20_do_lp0(void);
extern void resume(unsigned int state);
extern uintptr_t g_resume, g_contextSavePA, g_contextSaveVA;
extern uintptr_t g_iramContextSaveVA;
-extern NvU32 g_NumActiveCPUs, g_ArmPerif;
+extern NvU32 g_ArmPerif;
extern NvU32 g_enterLP2PA;
extern volatile void *g_pPMC, *g_pAHB, *g_pCLK_RST_CONTROLLER, *g_pRtc;
extern volatile void *g_pEMC, *g_pMC, *g_pAPB_MISC, *g_pIRAM, *g_pTimerus;
@@ -251,7 +251,6 @@ void __init NvAp20InitFlowController(void)
g_iramContextSaveVA =
(uintptr_t)kmalloc(AVP_CONTEXT_SAVE_AREA_SIZE, GFP_ATOMIC);
g_contextSavePA = virt_to_phys((void*)g_contextSaveVA);
- g_NumActiveCPUs = num_online_cpus();
g_enterLP2PA = virt_to_phys((void*)enter_lp2);
NvOsBootArgGet(NvBootArgKey_WarmBoot,
diff --git a/arch/arm/mach-tegra/power-lp.S b/arch/arm/mach-tegra/power-lp.S
index 3706ef1cf68a..8b6ecf79ce32 100644
--- a/arch/arm/mach-tegra/power-lp.S
+++ b/arch/arm/mach-tegra/power-lp.S
@@ -41,6 +41,7 @@
#define EVP_PA_BASE 0x6000f000
#define CSITE_PA_BASE 0x70040000
#define APB_MISC_BASE 0x70000000
+#define RTC_PA_BASE 0x7000e000
#define TEMP_RESET_VECTOR 8
#define TEMP_SCLK_BURST_POLICY 16
#define TEMP_CCLK_BURST_POLICY 20
@@ -58,70 +59,70 @@
ENTRY(enter_power_state)
//We should be in SVC mode
//with IRQs turned off
- mrs r2, CPSR
+ mrs r2, CPSR
stmfd sp!, {r0-r12, lr}
stmfd sp!, {r0}
- cmp r1, #0
- bne save_arm_state
+ cmp r1, #0
+ bne save_arm_state
//Wait for everyone else to spin down
wait_for_other_cores:
stmfd sp!, {r1}
- bl check_for_cpu1_reset
+ bl check_for_cpu1_reset
ldmfd sp!, {r1}
- cmp r0, #0
- beq finish_power_state
+ cmp r0, #0
+ beq finish_power_state
//Save the local timers
stmfd sp!, {r1}
- bl save_local_timers
+ bl save_local_timers
ldmfd sp!, {r1}
//Ok we can save state for core0 now
save_arm_state:
//Get the context save pointer for the core
- ldr r0, =g_contextSaveVA
- ldr r0, [r0]
- mov r2, #0x800
+ ldr r0, =g_contextSaveVA
+ ldr r0, [r0]
+ mov r2, #0x800
//r0 = r0 + r1 * r2
smlabb r0, r1, r2, r0
- ldr r1, =g_ArmPerif
- ldr r1, [r1]
+ ldr r1, =g_ArmPerif
+ ldr r1, [r1]
//We need r0 = virtual context save
//We need R1 = SCU VA
- b ArmCortexA9Save
+ b ArmCortexA9Save
ArmCortexA9Saved:
//All cores but core 0 must be reset
- mrc p15, 0, r2, c0, c0, 5
+ mrc p15, 0, r2, c0, c0, 5
ands r2, r2, #0x3
- bne reset_slave
+ bne reset_slave
//Check which power state we want to enter
ldmfd sp!, {r0}
//Is it LP2?
- cmp r0, #0
- ldreq r2, =g_enterLP2PA
- ldreq r2, [r2]
- beq transition_to_state
+ cmp r0, #0
+ ldreq r2, =g_enterLP2PA
+ ldreq r2, [r2]
+ beq transition_to_state
- ldr r4, =g_pIRAM
- ldr r4, [r4]
+ ldr r4, =g_pIRAM
+ ldr r4, [r4]
//Is it LP1?
- cmp r0, #1
- ldreq r5, =exit_lp1_end
- ldreq r6, =enter_lp1
- beq copy_to_iram
+ cmp r0, #1
+ ldreq r5, =exit_lp1_end
+ ldreq r6, =enter_lp1
+ beq copy_to_iram
//Is it LP0?
- cmp r0, #2
- ldr r5, =enter_lp0_end
- ldr r6, =enter_lp0
+ cmp r0, #2
+ ldr r5, =enter_lp0_end
+ ldr r6, =enter_lp0
//For LP0, the AVP stores its continuation address at the first
//location in IRAM. Before we overwrite IRAM with the LP0 entry
@@ -129,72 +130,72 @@ ArmCortexA9Saved:
//register dedicated for this purpose.
//R1 = *g_pIRAM
- ldr r1, [r4]
+ ldr r1, [r4]
//R3 = &(g_pPMC)
- ldr r3, =g_pPMC
+ ldr r3, =g_pPMC
//R3 = g_pPMC
- ldr r3, [r3]
+ ldr r3, [r3]
//Store in scratch39
- str r1, [r3, #APBDEV_PMC_SCRATCH39_0]
+ str r1, [r3, #APBDEV_PMC_SCRATCH39_0]
//Flush L2 rams for LP0
- ldr r1, =g_pPL310
- ldr r1, [r1]
+ ldr r1, =g_pPL310
+ ldr r1, [r1]
- ldr r2, [r1, #PL310_CONTROL_0]
- ands r2, r2, #0x1
- beq copy_to_iram
+ ldr r2, [r1, #PL310_CONTROL_0]
+ ands r2, r2, #0x1
+ beq copy_to_iram
//Clear L2 restoration ptr
- ldr r3, =g_contextSaveVA
- ldr r3, [r3]
+ ldr r3, =g_contextSaveVA
+ ldr r3, [r3]
- mov r2, #0
- str r2, [r3, #0x30]
+ mov r2, #0
+ str r2, [r3, #0x30]
dsb
//Lock all ways
- add r10, r1, #PL310_DATA_LOCKDOWN0_0
+ add r10, r1, #PL310_DATA_LOCKDOWN0_0
//16 lockdown registers => 64 bytes register space
- add r2, r10, #64
+ add r2, r10, #64
//r3 = 0xff = ALL_WAYS
- mov r3, #0xff
+ mov r3, #0xff
lock_all_ways:
- str r3, [r10]
- add r10, r10, #4
+ str r3, [r10]
+ add r10, r10, #4
- cmp r10, r2
- bne lock_all_ways
+ cmp r10, r2
+ bne lock_all_ways
//Clean all ways
- mov r2, #0xff
- str r2, [r1, #PL310_CLEAN_BY_WAY_0]
+ mov r2, #0xff
+ str r2, [r1, #PL310_CLEAN_BY_WAY_0]
wait_for_l2_flush:
- ldr r2, [r1, #PL310_CLEAN_BY_WAY_0]
- cmp r2, #0
- bne wait_for_l2_flush
+ ldr r2, [r1, #PL310_CLEAN_BY_WAY_0]
+ cmp r2, #0
+ bne wait_for_l2_flush
//Issue a cache sync
- mov r2, #0
- str r2, [r1, #PL310_CACHE_SYNC_0]
+ mov r2, #0
+ str r2, [r1, #PL310_CACHE_SYNC_0]
dsb
//Unlock all ways
- add r10, r1, #PL310_DATA_LOCKDOWN0_0
+ add r10, r1, #PL310_DATA_LOCKDOWN0_0
//16 lockdown registers => 64 bytes register space
- add r2, r10, #64
+ add r2, r10, #64
//r3 = 0 = BITMAP_NO_WAYS
- mov r3, #0
+ mov r3, #0
unlock_all_ways:
- str r3, [r10]
- add r10, r10, #4
+ str r3, [r10]
+ add r10, r10, #4
- cmp r10, r2
- bne unlock_all_ways
+ cmp r10, r2
+ bne unlock_all_ways
copy_to_iram:
//Copy the enter_lp0 function to IRAM using 8x4 block moves.
@@ -205,23 +206,23 @@ copy_to_iram:
//R6 = source address to copy code from
//r2 is the source address
- cpy r2, r6
+ cpy r2, r6
//r3 is the size to copy
- sub r3, r5, r6
+ sub r3, r5, r6
copy_code:
//Load source
- ldmia r2!, {r5-r12}
+ ldmia r2!, {r5-r12}
//Store at destination
- stmia r4!, {r5-r12}
+ stmia r4!, {r5-r12}
//Decrement count
- subs r3, r3, #32
- bgt copy_code
+ subs r3, r3, #32
+ bgt copy_code
//Get the physical address of IRAM
//This is where we will jump to start LP0
- ldr r2, =g_IramPA
- ldr r2, [r2]
+ ldr r2, =g_IramPA
+ ldr r2, [r2]
//We are the master. We should
//turn off MMUs and caches.
@@ -229,111 +230,94 @@ copy_code:
transition_to_state:
//Turn off caches and MMU
- mrc p15, 0, r3, c1, c0, 0
- bic r3, r3, #(1<<12) //I-Cache
- bic r3, r3, #(1<<11) //Branch-pred
- bic r3, r3, #(1<<2) //D-Cache
- bic r3, r3, #(1<<0) //MMU
-
- ldr r0, =g_modifiedPlls
- ldr r0, [r0]
- ldr r1, =g_wakeupCcbp
- ldr r1, [r1]
-
- mov r10, #0
- mcr p15, 0, r10, c8, c7, 0 // invalidate TLB
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, r3, #(1<<12) //I-Cache
+ bic r3, r3, #(1<<11) //Branch-pred
+ bic r3, r3, #(1<<2) //D-Cache
+ bic r3, r3, #(1<<0) //MMU
+
+ ldr r0, =g_modifiedPlls
+ ldr r0, [r0]
+ ldr r1, =g_wakeupCcbp
+ ldr r1, [r1]
+
+ mov r10, #0
+ mcr p15, 0, r10, c8, c7, 0 // invalidate TLB
dsb
.align 5
//Disable L1 caches and MMU
- mcr p15, 0, r3, c1, c0, 0
+ mcr p15, 0, r3, c1, c0, 0
//Jump to the appropriate LPx function
//bl enter_lp2
- bx r2
-
- ldr r2, =g_Sync
- mov r3, #1
- str r3, [r2]
-
- b finish_power_state
+ bx r2
reset_slave:
//Reset the slave cores
- mov r0, #1
- mov r1, #1
- bl reset_cpu
- b .
-
- ldr r2, =g_Sync
-wait_for_master:
- ldr r3, [r2]
- cmp r3, #1
- bne wait_for_master
-
- //Reset the sync variable
- //and increment g_ActiveCpus.
- mov r3, #0
- str r3, [r2]
+ mov r0, #1
+ mov r1, #1
+ bl reset_cpu
+ b .
finish_power_state:
ldmfd sp!, {r0}
ldmfd sp!, {r0-r12, lr}
- bx lr
+ bx lr
.ltorg
ENDPROC(EnterPowerState)
ENTRY(enter_lp2)
- ldr r5, =PMC_PA_BASE //R5 = PMC PA base address
- ldr r6, =FLOW_PA_BASE //R6 = FLOW PA base address
- ldr r7, =TIMERUS_PA_BASE //R7 = TIMERUS PA base address
- ldr r8, =CLK_RST_PA_BASE //R8 = CLK PA base address
- ldr r9, =EVP_PA_BASE //R9 = EVP PA base address
+ ldr r5, =PMC_PA_BASE //R5 = PMC PA base address
+ ldr r6, =FLOW_PA_BASE //R6 = FLOW PA base address
+ ldr r7, =TIMERUS_PA_BASE //R7 = TIMERUS PA base address
+ ldr r8, =CLK_RST_PA_BASE //R8 = CLK PA base address
+ ldr r9, =EVP_PA_BASE //R9 = EVP PA base address
//This funny little instruction obtains a piece of memory
//that is relative to the PC. We can't use literals
//as the MMU has been turned off.
- add r12, pc, #TempStoreArea-(.+8)
+ add r12, pc, #TempStoreArea-(.+8)
//Save the input parameters in temp region
stmia r12, {r0-r1}
//Save old reset vector
- ldr r2, [r9, #EVP_CPU_RESET_VECTOR_0]
- str r2, [r12, #TEMP_RESET_VECTOR]
+ ldr r2, [r9, #EVP_CPU_RESET_VECTOR_0]
+ str r2, [r12, #TEMP_RESET_VECTOR]
//Save pllx base
- ldr r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
- str r2, [r12, #TEMP_PLLX_BASE]
+ ldr r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
+ str r2, [r12, #TEMP_PLLX_BASE]
//Store WB2 entry point in reset
- add r2, pc, #exit_lp2-(.+8)
- str r2, [r9, #EVP_CPU_RESET_VECTOR_0]
+ add r2, pc, #exit_lp2-(.+8)
+ str r2, [r9, #EVP_CPU_RESET_VECTOR_0]
//Make sure SIDE_EFFECT_LP0 is not set
- ldr r2, [r5, #APBDEV_PMC_CNTRL_0]
+ ldr r2, [r5, #APBDEV_PMC_CNTRL_0]
//Unset the SIDE_EFFECT bit
- bic r2, r2, #(1<<14)
- str r2, [r5, #APBDEV_PMC_CNTRL_0]
+ bic r2, r2, #(1<<14)
+ str r2, [r5, #APBDEV_PMC_CNTRL_0]
//Powergate the cpu by setting the ENABLE bit
- ldr r2, [r6, #FLOW_CTLR_CPU_CSR_0]
- orr r2, r2, #(1<<0)
- str r2, [r6, #FLOW_CTLR_CPU_CSR_0]
+ ldr r2, [r6, #FLOW_CTLR_CPU_CSR_0]
+ orr r2, r2, #(1<<0)
+ str r2, [r6, #FLOW_CTLR_CPU_CSR_0]
//Put the CPU on the desired clock source for wakeup,
// wait 2us for switch to complete, and disable PLLX
- str r1, [r8, #CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0]
- ldr r11, [r7]
+ str r1, [r8, #CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0]
+ ldr r11, [r7]
cclk_delay:
- ldr r2, [r7]
- sub r2, r2, r11
- cmp r2, #2
- ble cclk_delay
-
- ldr r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
- bic r2, r2, #(1<<30) //Clear PllX ENABLE
- str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
+ ldr r2, [r7]
+ sub r2, r2, r11
+ cmp r2, #2
+ ble cclk_delay
+
+ ldr r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
+ bic r2, r2, #(1<<30) //Clear PllX ENABLE
+ str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
dmb
//Get the microsecond count before LP2
@@ -341,11 +325,11 @@ cclk_delay:
str r2, [r5, #APBDEV_PMC_SCRATCH38_0]
//Finally, halt the CPU
- mov r2, #0
- orr r2, r2, #(4<<29) //STOP_UNTIL_IRQ
- orr r2, r2, #(1<<10) //IRQ_0 event
- orr r2, r2, #(1<<8) //FIQ_0 event
- str r2, [r6, #FLOW_CTLR_HALT_CPU_EVENTS_0]
+ mov r2, #0
+ orr r2, r2, #(4<<29) //STOP_UNTIL_IRQ
+ orr r2, r2, #(1<<10) //IRQ_0 event
+ orr r2, r2, #(1<<8) //FIQ_0 event
+ str r2, [r6, #FLOW_CTLR_HALT_CPU_EVENTS_0]
DoWFI:
dsb
@@ -355,36 +339,36 @@ ENDPROC(enter_lp2)
ENTRY(exit_lp2)
//R5 = PMC PA base address
- add r5, pc, #lp1_literals-(.+4)
+ add r5, pc, #lp_literals-(.+4)
ldr r5, [r5]
//R6 = FLOW PA base address
- add r6, pc, #lp1_literals-(.+0)
+ add r6, pc, #lp_literals-(.+0)
ldr r6, [r6]
//R7 = TIMERUS PA base address
- add r7, pc, #lp1_literals-(.-4)
+ add r7, pc, #lp_literals-(.-4)
ldr r7, [r7]
//R8 = CLK PA base address
- add r8, pc, #lp1_literals-(.-8)
+ add r8, pc, #lp_literals-(.-8)
ldr r8, [r8]
//R9 = EVP PA base address
- add r9, pc, #lp1_literals-(.-12)
+ add r9, pc, #lp_literals-(.-12)
ldr r9, [r9]
//R10 = CSITE PA base address
- add r10, pc, #lp1_literals-(.-12)
+ add r10, pc, #lp_literals-(.-12)
ldr r10, [r10]
//Check which core we are by checking the MPIDR
- mrc p15, 0, r2, c0, c0, 5
+ mrc p15, 0, r2, c0, c0, 5
ands r2, r2, #0x3
- bne skip_cpu0_restore
+ bne skip_cpu0_restore
//This funny little instruction obtains a piece of memory
//that is relative to the PC. We can't use literals
//as the MMU has been turned off.
- add r12, pc, #TempStoreArea-(.+8)
+ add r12, pc, #TempStoreArea-(.+8)
//Get the current microsecond count
- ldr r11, [r7, #0]
+ ldr r11, [r7, #0]
//NOTE: Any low-power state that cuts power to the CPU power island
//but not to the island containing the APB bus on which
@@ -403,67 +387,66 @@ ENTRY(exit_lp2)
//functional until after CPU state restoration.
//Assert CoreSight reset.
- ldr r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
- orr r0, r0, #(1<<9)
- str r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
+ ldr r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
+ orr r0, r0, #(1<<9)
+ str r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
//Hold CoreSight reset for 2us.
- add r1, r11, #2
+ add r1, r11, #2
reset_poll:
- ldr r2, [r7, #0]
- cmp r2, r1
- ble reset_poll
+ ldr r2, [r7, #0]
+ cmp r2, r1
+ ble reset_poll
//De-assert CoreSight reset.
- bic r0, r0, #(1<<9)
- str r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
+ bic r0, r0, #(1<<9)
+ str r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
//Unlock debugger access by writing special "CSACCESS"
- add r0, pc, #lp1_literals-(.-20)
- ldr r0, [r0]
- add r1, pc, #lp1_literals-(.-24) //R1 = CPU0 lock offset
- ldr r1, [r1]
- add r2, pc, #lp1_literals-(.-28) //R2 = CPU1 lock offset
- ldr r2, [r2]
- str r0, [r10, r1] //Unlock CPU0
- str r0, [r10, r2] //Unlock CPU1
+ add r0, pc, #lp_literals-(.-20)
+ ldr r0, [r0]
+ add r1, pc, #lp_literals-(.-24) //R1 = CPU0 lock offset
+ ldr r1, [r1]
+ add r2, pc, #lp_literals-(.-28) //R2 = CPU1 lock offset
+ ldr r2, [r2]
+ str r0, [r10, r1] //Unlock CPU0
+ str r0, [r10, r2] //Unlock CPU1
//Make sure we no longer powergate the CPU island when halting.
- ldr r1, [r6, #FLOW_CTLR_CPU_CSR_0]
- bic r1, r1, #(1<<0)
- str r1, [r6, #FLOW_CTLR_CPU_CSR_0]
+ ldr r1, [r6, #FLOW_CTLR_CPU_CSR_0]
+ bic r1, r1, #(1<<0)
+ str r1, [r6, #FLOW_CTLR_CPU_CSR_0]
//Restore the input parameters passed to enter_lp2
- ldm r12, {r0-r1}
+ ldm r12, {r0-r1}
//Put the CPU on the desired clock source for wakeup
- str r1, [r8, #CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0]
-
+ str r1, [r8, #CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0]
//Enable PLL-X and restore the values
- ldr r2, [r12, #TEMP_PLLX_BASE]
- mov r3, #(1<<30) //PllX ENABLE
- orr r2, r2, r3
- str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
+ ldr r2, [r12, #TEMP_PLLX_BASE]
+ mov r3, #(1<<30) //PllX ENABLE
+ orr r2, r2, r3
+ str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
//Restore the reset vector target.
- ldr r1, [r12, #TEMP_RESET_VECTOR]
- str r1, [r9, #EVP_CPU_RESET_VECTOR_0]
+ ldr r1, [r12, #TEMP_RESET_VECTOR]
+ str r1, [r9, #EVP_CPU_RESET_VECTOR_0]
skip_cpu0_restore:
- mrc p15, 0, r2, c0, c0, 5
+ mrc p15, 0, r2, c0, c0, 5
ands r2, r2, #0x3
//Write to reset vector to allow platsmp to continue
- str r2, [r9, #EVP_CPU_RESET_VECTOR_0]
+ str r2, [r9, #EVP_CPU_RESET_VECTOR_0]
//Only get the timer for CPU0
- bne skip_lp2_time
+ bne skip_lp2_time
//Get the microsecond count after LP2
- str r11, [r5, #APBDEV_PMC_SCRATCH39_0]
+ str r11, [r5, #APBDEV_PMC_SCRATCH39_0]
skip_lp2_time:
//Set lr to the resume function
- ldr lr, [r5, #APBDEV_PMC_SCRATCH1_0]
- bx lr
+ ldr lr, [r5, #APBDEV_PMC_SCRATCH1_0]
+ bx lr
TempStoreArea:
//Create some empty space. We can't use literals
@@ -474,17 +457,17 @@ TempStoreArea:
ENDPROC(exit_lp2)
ENTRY(enter_lp1)
- add r4, pc, #lp1_literals-(.+8)
+ add r4, pc, #lp_literals-(.+8)
ldr r4, [r4]
- add r5, pc, #lp1_literals-(.+4)
+ add r5, pc, #lp_literals-(.+4)
ldr r5, [r5]
- add r6, pc, #lp1_literals-(.+0)
+ add r6, pc, #lp_literals-(.+0)
ldr r6, [r6]
- add r7, pc, #lp1_literals-(.-4)
+ add r7, pc, #lp_literals-(.-4)
ldr r7, [r7]
- add r8, pc, #lp1_literals-(.-8)
+ add r8, pc, #lp_literals-(.-8)
ldr r8, [r8]
- add r9, pc, #lp1_literals-(.-12)
+ add r9, pc, #lp_literals-(.-12)
ldr r9, [r9]
add r12, pc, #TemporaryStore-(.+8)
@@ -495,7 +478,7 @@ ENTRY(enter_lp1)
ldr r2, [r8, #CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0]
ldr r3, [r8, #CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0]
stmia r12, {r0 - r3}
- sub r12, r12, #8
+ sub r12, r12, #8
add r2, pc, #exit_lp1-(.+8)
str r2, [r9, #EVP_CPU_RESET_VECTOR_0]
@@ -516,9 +499,9 @@ is_idle1:
mov r2, #1
str r2, [r4, #0xE0]
ldr r2, [r4, #0x10]
- ands r2, r2, #3, 8
- moveq r0, #1, 24
- movne r0, #3, 24
+ ands r2, r2, #0x3000000
+ moveq r0, #0x100
+ movne r0, #0x300
//Poll until all devices are in self refresh
is_self1:
@@ -609,19 +592,26 @@ enter_lp1_end:
ENDPROC(enter_lp1)
ENTRY(exit_lp1)
- add r4, pc, #lp1_literals-(.+8)
+ //R4 = EMC_PA_BASE
+ add r4, pc, #lp_literals-(.+8)
ldr r4, [r4]
- add r5, pc, #lp1_literals-(.+4)
+ //R5 = PMC_PA_BASE
+ add r5, pc, #lp_literals-(.+4)
ldr r5, [r5]
- add r6, pc, #lp1_literals-(.+0)
+ //R6 = FLOW_PA_BASE
+ add r6, pc, #lp_literals-(.+0)
ldr r6, [r6]
- add r7, pc, #lp1_literals-(.-4)
+ //R7 = TIMERUS_PA_BASE
+ add r7, pc, #lp_literals-(.-4)
ldr r7, [r7]
- add r8, pc, #lp1_literals-(.-8)
+ //R8 = CLK_RST_PA_BASE
+ add r8, pc, #lp_literals-(.-8)
ldr r8, [r8]
- add r9, pc, #lp1_literals-(.-12)
+ //R9 = EVP_PA_BASE
+ add r9, pc, #lp_literals-(.-12)
ldr r9, [r9]
- add r10, pc, #lp1_literals-(.-16)
+ //R10 = CSITE_PA_BASE
+ add r10, pc, #lp_literals-(.-16)
ldr r10, [r10]
//R12 = Temporary iram store
@@ -647,11 +637,11 @@ reset_poll1:
str r0, [r8, #CLK_RST_CONTROLLER_RST_DEVICES_U_0]
//Unlock debugger access by writing special "CSACCESS"
- add r0, pc, #lp1_literals-(.-20)
+ add r0, pc, #lp_literals-(.-20)
ldr r0, [r0]
- add r1, pc, #lp1_literals-(.-24) //R1 = CPU0 lock offset
+ add r1, pc, #lp_literals-(.-24) //R1 = CPU0 lock offset
ldr r1, [r1]
- add r2, pc, #lp1_literals-(.-28) //R2 = CPU1 lock offset
+ add r2, pc, #lp_literals-(.-28) //R2 = CPU1 lock offset
ldr r2, [r2]
str r0, [r10, r1] //Unlock CPU0
str r0, [r10, r2] //Unlock CPU1
@@ -665,21 +655,21 @@ reset_poll1:
ldr r2, [r8, #CLK_RST_CONTROLLER_PLLM_BASE_0]
mov r3, #(1<<30) //PllM ENABLE
tst r2, r3
- orreq r2, r2, r3
- streq r2, [r8, #CLK_RST_CONTROLLER_PLLM_BASE_0]
+ orreq r2, r2, r3
+ streq r2, [r8, #CLK_RST_CONTROLLER_PLLM_BASE_0]
//Enable PLL-P
ldr r2, [r8, #CLK_RST_CONTROLLER_PLLP_BASE_0]
mov r3, #(1<<30) //PllP ENABLE
tst r2, r3
- orreq r2, r2, r3
- streq r2, [r8, #CLK_RST_CONTROLLER_PLLP_BASE_0]
+ orreq r2, r2, r3
+ streq r2, [r8, #CLK_RST_CONTROLLER_PLLP_BASE_0]
//Enable PLL-X with restored values
- ldr r2, [r12, #TEMP_PLLX_BASE]
- mov r3, #(1<<30) //PllX ENABLE
- orr r2, r2, r3
- str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
+ ldr r2, [r12, #TEMP_PLLX_BASE]
+ mov r3, #(1<<30) //PllX ENABLE
+ orr r2, r2, r3
+ str r2, [r8, #CLK_RST_CONTROLLER_PLLX_BASE_0]
//Configure CPU island to not be power gated
ldr r2, [r6, #8]
@@ -738,9 +728,9 @@ pll_delay:
//Confirm that all chips have exited self-refresh
ldr r1, [r4, #EMC_ADR_CFG_0]
- ands r1, r1, #3, 8
- moveq r0, #1, 24
- movne r0, #3, 24
+ ands r1, r1, #3, 8
+ moveq r0, #1, 24
+ movne r0, #3, 24
//Poll until all chips have exited self-refresh
is_auto:
@@ -757,14 +747,14 @@ is_auto:
ldr lr, [r5, #APBDEV_PMC_SCRATCH1_0]
str r11, [r5, #APBDEV_PMC_SCRATCH39_0]
bx lr
-lp1_literals:
- .word 0x7000f400
- .word 0x7000e400
- .word 0x60007000
- .word 0x60005010
- .word 0x60006000
- .word 0x6000f000
- .word 0x70040000
+lp_literals:
+ .word EMC_PA_BASE
+ .word PMC_PA_BASE
+ .word FLOW_PA_BASE
+ .word TIMERUS_PA_BASE
+ .word CLK_RST_PA_BASE
+ .word EVP_PA_BASE
+ .word CSITE_PA_BASE
.word 0xC5ACCE55
.word CSITE_CPUDBG0_LAR_0
.word CSITE_CPUDBG1_LAR_0
@@ -794,14 +784,19 @@ exit_lp1_end:
ENDPROC(exit_lp1)
ENTRY(enter_lp0)
+ //R4 = EMC_PA_BASE
add r4, pc, #lp0_literals-(.+8) //EMC base
ldr r4, [r4]
+ //R5 = PMC_PA_BASE
add r5, pc, #lp0_literals-(.+4) //PMC base
ldr r5, [r5]
+ //R6 = FLOW_PA_BASE
add r6, pc, #lp0_literals-(.+0) //FLOW base
ldr r6, [r6]
+ //R7 = TIMERUS_PA_BASE
add r7, pc, #lp0_literals-(.-4) //TIMERUS base
ldr r7, [r7]
+ //R8 = RTC_PA_BASE
add r8, pc, #lp0_literals-(.-8) //RTC base
ldr r8, [r8]
@@ -822,9 +817,9 @@ is_idle:
mov r2, #1
str r2, [r4, #0xE0]
ldr r2, [r4, #0x10]
- ands r2, r2, #3, 8
- moveq r0, #1, 24
- movne r0, #3, 24
+ ands r2, r2, #0x3000000
+ moveq r0, #0x100
+ movne r0, #0x300
//Poll until all devices are in self refresh
is_self:
@@ -847,28 +842,28 @@ is_self:
#if DEBUG_FORCE_RTC_WAKEUP_SEC
//r0 = RTC_BASE
- mov r0, r8
+ mov r0, r8
//setup rtc wake
- ldr r2, [r0, #0x10] //milli
- ldr r2, [r0, #0x8] //shadow
+ ldr r2, [r0, #0x10] //milli
+ ldr r2, [r0, #0x8] //shadow
- add r2, r2, #DEBUG_FORCE_RTC_WAKEUP_SEC
+ add r2, r2, #DEBUG_FORCE_RTC_WAKEUP_SEC
rtc_idle1:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle1
- str r2, [r0, #0x14]
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle1
+ str r2, [r0, #0x14]
rtc_idle2:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle2
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle2
//intr mask alarm0
- mov r2, #1
- str r2, [r0, #0x28]
+ mov r2, #1
+ str r2, [r0, #0x28]
rtc_idle3:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle3
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle3
#endif
//Save the microsecond count before LP0 in SCRATCH38
ldr r2, [r7]
@@ -882,43 +877,43 @@ do_wfi:
wfe
b do_wfi
lp0_literals:
- .word 0x7000f400
- .word 0x7000e400
- .word 0x60007000
- .word 0x60005010
- .word 0x7000e000
+ .word EMC_PA_BASE
+ .word PMC_PA_BASE
+ .word FLOW_PA_BASE
+ .word TIMERUS_PA_BASE
+ .word RTC_PA_BASE
enter_lp0_end:
ENDPROC(enter_lp0)
ENTRY(exit_power_state)
//Switch to SVC state
- cpsid if, #0x13
+ cpsid if, #0x13
add r5, pc, #lp0_literals-(.+4) //PMC base
ldr r5, [r5]
//Check which core we are by checking the MPIDR
- mrc p15, 0, r2, c0, c0, 5
+ mrc p15, 0, r2, c0, c0, 5
ands r2, r2, #0x3
- bne restore_slave
+ bne restore_slave
//Disable DPD sample
- mov r3, #0
- str r3, [r5, #APBDEV_PMC_DPD_SAMPLE_0]
+ mov r3, #0
+ str r3, [r5, #APBDEV_PMC_DPD_SAMPLE_0]
//Disable DPD enable
- str r3, [r5, #APBDEV_PMC_DPD_ENABLE_0]
+ str r3, [r5, #APBDEV_PMC_DPD_ENABLE_0]
restore_slave:
//Get the physical pointer to cpu context save area
- ldr r0, [r5, #APBDEV_PMC_SCRATCH37_0]
- mov r3, #0x800
+ ldr r0, [r5, #APBDEV_PMC_SCRATCH37_0]
+ mov r3, #0x800
//r0 = r0 + r2 * r3
smlabb r0, r2, r3, r0
//Perform ARM restore (r0 = context save ptr)
- b ArmCortexA9PhysicalRestore
+ b ArmCortexA9PhysicalRestore
ArmCortexA9PhysicalRestored:
@@ -951,46 +946,40 @@ pll_wait:
#if DEBUG_FORCE_RTC_WAKEUP_SEC
//Clear the pending rtc interrupt
- ldr r0, =g_pRtc
- ldr r0, [r0]
+ ldr r0, =g_pRtc
+ ldr r0, [r0]
rtc_idle4:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle4
- mov r2, #0
- str r2, [r0, #0x28]
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle4
+ mov r2, #0
+ str r2, [r0, #0x28]
rtc_idle5:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle5
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle5
//clear interrupt
- mov r2, #1
- str r2, [r0, #0x2c]
+ mov r2, #1
+ str r2, [r0, #0x2c]
rtc_idle6:
- ldr r1, [r0, #0x4]
- tst r1, #0x1
- bne rtc_idle6
+ ldr r1, [r0, #0x4]
+ tst r1, #0x1
+ bne rtc_idle6
#endif
skip_pll:
//Restore the cpu virtual context
- b ArmCortexA9VirtualRestore
+ b ArmCortexA9VirtualRestore
ArmCortexA9VirtualRestored:
- //Everything should be restored now. Increment active CPUs
- ldr r1, =g_NumActiveCPUs
- ldr r2, [r1]
- adds r2, r2, #1
- str r2, [r1]
- dmb
-
+ //Everything should be restored now
//Check which core we are by checking the MPIDR
- mrc p15, 0, r2, c0, c0, 5
+ mrc p15, 0, r2, c0, c0, 5
ands r2, r2, #0x3
- bne skip_local_timer_restore
+ bne skip_local_timer_restore
//Restore the local timers
- bl restore_local_timers
+ bl restore_local_timers
skip_local_timer_restore:
//Restore the stack registers
@@ -998,648 +987,648 @@ skip_local_timer_restore:
ldmfd sp!, {r0-r12, lr}
//Restore the CPSR stored in r2
- msr CPSR_fsxc, r2
- mov r0, #1
- bx lr
+ msr CPSR_fsxc, r2
+ mov r0, #1
+ bx lr
ENDPROC(exit_power_state)
ENTRY(ArmCortexA9Save)
- add r0, r0, #0x44
- str sp, [r0], #4
+ add r0, r0, #0x44
+ str sp, [r0], #4
stmia r0!, {r1 - r12, lr}
- sub r11, r0, #0x7C
- mrs r4, cpsr
- mrs r5, spsr
+ sub r11, r0, #0x7C
+ mrs r4, cpsr
+ mrs r5, spsr
stmia r0!, {r4, r5}
movs r4, #0x44
movs r5, #0
movs r6, #0
loop:
- str r5, [r11, r6]
- add r6, r6, #4
- cmp r6, r4
- bne loop
- str r11, [r11, #0x34]
- str r1, [r11, #0x38]
- mov r4, r1
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOffDMsave
- mov r6, #0x1000
- sub r6, r6, #1
- bic r5, r4, r6
+ str r5, [r11, r6]
+ add r6, r6, #4
+ cmp r6, r4
+ bne loop
+ str r11, [r11, #0x34]
+ str r1, [r11, #0x38]
+ mov r4, r1
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOffDMsave
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r4, r6
//VA to PA translation register in r5
- mcr p15, 0, r5, c7, c8, 1
+ mcr p15, 0, r5, c7, c8, 1
isb
//r5 = PA register
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
//Translation failed!
- bne .
- bic r5, r5, r6
- and r4, r4, r6
- orr r4, r4, r5
+ bne .
+ bic r5, r5, r6
+ and r4, r4, r6
+ orr r4, r4, r5
MmuOffDMsave:
- str r4, [r11, #0x3C]
- str r0, [r11, #0x18]
- mrc p15, 0, r8, c9, c12, 0
- bic r1, r8, #1
- mcr p15, 0, r1, c9, c12, 0
+ str r4, [r11, #0x3C]
+ str r0, [r11, #0x18]
+ mrc p15, 0, r8, c9, c12, 0
+ bic r1, r8, #1
+ mcr p15, 0, r1, c9, c12, 0
isb
- mrc p15, 0, r9, c9, c12, 3
- mrc p15, 0, r10, c9, c12, 5
+ mrc p15, 0, r9, c9, c12, 3
+ mrc p15, 0, r10, c9, c12, 5
stmia r0!, {r8 - r10}
ubfx r9, r8, #11, #5
- tst r9, r9
- beq PMonsavecontinue
+ tst r9, r9
+ beq PMonsavecontinue
PMonsaveloop:
subs r9, r9, #1
- mcr p15, 0, r9, c9, c12, 5
+ mcr p15, 0, r9, c9, c12, 5
isb
- mrc p15, 0, r3, c9, c13, 1
- mrc p15, 0, r4, c9, c13, 2
+ mrc p15, 0, r3, c9, c13, 1
+ mrc p15, 0, r4, c9, c13, 2
stmia r0!, {r3, r4}
- bne PMonsaveloop
+ bne PMonsaveloop
PMonsavecontinue:
- mrc p15, 0, r1, c9, c13, 0
- mrc p15, 0, r2, c9, c14, 0
- mrc p15, 0, r3, c9, c14, 1
- mrc p15, 0, r4, c9, c12, 1
+ mrc p15, 0, r1, c9, c13, 0
+ mrc p15, 0, r2, c9, c14, 0
+ mrc p15, 0, r3, c9, c14, 1
+ mrc p15, 0, r4, c9, c12, 1
stmia r0!, {r1 - r4}
- str r0, [r11, #0x10]
- cps 0x1f //sys
- str sp, [r0], #4
- str lr, [r0], #4
- cps 0x17 //abt
- str sp, [r0], #4
- mrs r4, spsr
+ str r0, [r11, #0x10]
+ cps 0x1f //sys
+ str sp, [r0], #4
+ str lr, [r0], #4
+ cps 0x17 //abt
+ str sp, [r0], #4
+ mrs r4, spsr
stmia r0!, {r4, lr}
- cps 0x1b //und
- str sp, [r0], #4
- mrs r4, spsr
+ cps 0x1b //und
+ str sp, [r0], #4
+ mrs r4, spsr
stmia r0!, {r4, lr}
- cps 0x12 //irq
- str sp, [r0], #4
- mrs r4, spsr
+ cps 0x12 //irq
+ str sp, [r0], #4
+ mrs r4, spsr
stmia r0!, {r4, lr}
- cps 0x11 //fiq
- str sp, [r0], #4
- mrs r4, spsr
+ cps 0x11 //fiq
+ str sp, [r0], #4
+ mrs r4, spsr
stmia r0!, {r4, r8 - r12, lr}
- cps 0x13 //svc
- mrc p15, 2, r3, c0, c0, 0
- str r3, [r0], #4
- mrc p15, 0, r4, c1, c0, 1
- mrc p15, 0, r5, c1, c0, 0
- mrc p15, 0, r6, c1, c0, 2
- str r4, [r11, #4]
- str r5, [r11, #8]
- str r6, [r11, #0xC] //VFPSave
- str r0, [r11, #0x1C]
- mrc p15, 0, r9, c1, c0, 2
- orr r2, r9, #0xf00000
- mcr p15, 0, r2, c1, c0, 2
+ cps 0x13 //svc
+ mrc p15, 2, r3, c0, c0, 0
+ str r3, [r0], #4
+ mrc p15, 0, r4, c1, c0, 1
+ mrc p15, 0, r5, c1, c0, 0
+ mrc p15, 0, r6, c1, c0, 2
+ str r4, [r11, #4]
+ str r5, [r11, #8]
+ str r6, [r11, #0xC] //VFPSave
+ str r0, [r11, #0x1C]
+ mrc p15, 0, r9, c1, c0, 2
+ orr r2, r9, #0xf00000
+ mcr p15, 0, r2, c1, c0, 2
isb
- mrc p15, 0, r2, c1, c0, 2
- and r2, r2, #0xf00000
- cmp r2, #0xf00000
- beq do_fpu_saveVFPsave
+ mrc p15, 0, r2, c1, c0, 2
+ and r2, r2, #0xf00000
+ cmp r2, #0xf00000
+ beq do_fpu_saveVFPsave
movs r2, #0
- str r2, [r11, #0x1C]
- b exit_fpu_saveVFPsave
+ str r2, [r11, #0x1C]
+ b exit_fpu_saveVFPsave
do_fpu_saveVFPsave:
VFPFMRX r10,FPEXC
- str r10, [r0], #4
- ldr r2, =0x40000000
+ str r10, [r0], #4
+ ldr r2, =0x40000000
VFPFMXR FPEXC,r2
VFPFMRX r2,FPSCR
- str r2, [r0], #4
+ str r2, [r0], #4
VFPFSTMIA r0, r1 @ fstmiad r0!, {d0-d15}. clobbers r1
VFPFMRX r2, MVFR0
- and r2, r2, #0xF
- cmp r2, #2
- blt exit_fpu_saveVFPsave
+ and r2, r2, #0xF
+ cmp r2, #2
+ blt exit_fpu_saveVFPsave
//fstmiad r0!, {d16-d31} //??
VFPFMXR FPEXC,r10
exit_fpu_saveVFPsave:
- mcr p15, 0, r9, c1, c0, 2
- str r0, [r11, #0x28]
- ldr r1, [r11, #0x38]
- mov r6, #1, 24
- add r1, r1, r6
- ldr r2, [r1, #4]
- ldr r3, [r1, #8]
- ldr r4, [r1]
+ mcr p15, 0, r9, c1, c0, 2
+ str r0, [r11, #0x28]
+ ldr r1, [r11, #0x38]
+ mov r6, #1, 24
+ add r1, r1, r6
+ ldr r2, [r1, #4]
+ ldr r3, [r1, #8]
+ ldr r4, [r1]
stmia r0!, {r2 - r4}
- mov r2, #0
- str r2, [r1, #4]
- mrc p15, 0, r7, c0, c0, 5
+ mov r2, #0
+ str r2, [r1, #4]
+ mrc p15, 0, r7, c0, c0, 5
ubfx r7, r7, #0, #2
- ldr r1, [r11, #0x38]
- mov r6, #0x1000
- add r1, r1, r6
- cmp r7, #0
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000
+ add r1, r1, r6
+ cmp r7, #0
movne r2, #1
- bne next1CA9GICsave
- ldr r2, [r1, #4]
+ bne next1CA9GICsave
+ ldr r2, [r1, #4]
ubfx r2, r2, #0, #5
- add r2, r2, #1
+ add r2, r2, #1
next1CA9GICsave:
- mov r3, r2
+ mov r3, r2
loop1CA9GICsave:
- ldr r5, [r1, #0x100]
- str r5, [r0], #4
- add r1, r1, #4
+ ldr r5, [r1, #0x100]
+ str r5, [r0], #4
+ add r1, r1, #4
subs r3, r3, #1
- bne loop1CA9GICsave
- ldr r1, [r11, #0x38]
- mov r6, #0x1000 //
- add r1, r1, r6
- mov r3, r2, lsl #3
+ bne loop1CA9GICsave
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000 //
+ add r1, r1, r6
+ mov r3, r2, lsl #3
loop2CA9GICsave:
- ldr r4, [r1, #0x400]
- ldr r5, [r1, #0x800]
+ ldr r4, [r1, #0x400]
+ ldr r5, [r1, #0x800]
stmia r0!, {r4, r5}
- add r1, r1, #4
+ add r1, r1, #4
subs r3, r3, #1
- bne loop2CA9GICsave
- ldr r1, [r11, #0x38]
- mov r6, #0x1000 //
- add r1, r1, r6
- mov r3, r2, lsl #1
+ bne loop2CA9GICsave
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000 //
+ add r1, r1, r6
+ mov r3, r2, lsl #1
loop3CA9GICsave:
- ldr r4, [r1, #0xC00]
- str r4, [r0], #4
- add r1, r1, #4
+ ldr r4, [r1, #0xC00]
+ str r4, [r0], #4
+ add r1, r1, #4
subs r3, r3, #1
- bne loop3CA9GICsave
- cmp r7, #0
- bne continueCA9GICsave
- ldr r1, [r11, #0x38]
- mov r6, #0x1000 //
- add r1, r1, r6
- ldr r2, [r1] //distributor control
- str r2, [r0], #4
+ bne loop3CA9GICsave
+ cmp r7, #0
+ bne continueCA9GICsave
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000
+ add r1, r1, r6
+ ldr r2, [r1] //distributor control
+ str r2, [r0], #4
continueCA9GICsave:
- mov r4, r0
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOffDMmmu
- mov r6, #0x1000 //
- sub r6, r6, #1
- bic r5, r4, r6
- mcr p15, 0, r5, c7, c8, 1
+ mov r4, r0
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOffDMmmu
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r4, r6
+ mcr p15, 0, r5, c7, c8, 1
isb
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
- bne .
- bic r5, r5, r6
- and r4, r4, r6
- orr r4, r4, r5
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
+ bne .
+ bic r5, r5, r6
+ and r4, r4, r6
+ orr r4, r4, r5
MmuOffDMmmu:
- str r4, [r11, #0x20]
- mrc p15, 0, r5, c2, c0, 0
- mrc p15, 0, r6, c2, c0, 1
- mrc p15, 0, r7, c2, c0, 2
+ str r4, [r11, #0x20]
+ mrc p15, 0, r5, c2, c0, 0
+ mrc p15, 0, r6, c2, c0, 1
+ mrc p15, 0, r7, c2, c0, 2
stmia r0!, {r5 - r7}
- mrc p15, 0, r4, c3, c0, 0
- mrc p15, 0, r5, c7, c4, 0
- mrc p15, 0, r6, c10, c2, 0
- mrc p15, 0, r7, c10, c2, 1
+ mrc p15, 0, r4, c3, c0, 0
+ mrc p15, 0, r5, c7, c4, 0
+ mrc p15, 0, r6, c10, c2, 0
+ mrc p15, 0, r7, c10, c2, 1
stmia r0!, {r4 - r7}
- mrc p15, 0, r4, c12, c0, 0
- str r4, [r0], #4
- mrc p15, 0, r4, c13, c0, 1
- mrc p15, 0, r5, c13, c0, 2
- mrc p15, 0, r6, c13, c0, 3
- mrc p15, 0, r7, c13, c0, 4
+ mrc p15, 0, r4, c12, c0, 0
+ str r4, [r0], #4
+ mrc p15, 0, r4, c13, c0, 1
+ mrc p15, 0, r5, c13, c0, 2
+ mrc p15, 0, r6, c13, c0, 3
+ mrc p15, 0, r7, c13, c0, 4
stmia r0!, {r4 - r7}
//Disable the D-Cache
- mrc p15, 0, r4, c1, c0, 0
- bic r4, r4, #4
- mcr p15, 0, r4, c1, c0, 0
+ mrc p15, 0, r4, c1, c0, 0
+ bic r4, r4, #4
+ mcr p15, 0, r4, c1, c0, 0
isb
- mov r1, #0
- mcr p15, 2, r1, c0, c0, 0
+ mov r1, #0
+ mcr p15, 2, r1, c0, c0, 0
isb
- mrc p15, 1, r7, c0, c0, 0
+ mrc p15, 1, r7, c0, c0, 0
ubfx r3, r7, #0, #3
- add r3, r3, #4
+ add r3, r3, #4
ubfx r4, r7, #13, #15
ubfx r5, r7, #3, #10
- cmp r5, #3
+ cmp r5, #3
cmpeq r3, #5
- bne PWRDNcleangeneric
- mov r1, r4, lsl #5
- add r2, r1, #0x40000000
- add r3, r1, #0x80000000
- add r4, r1, #0xc0000000
+ bne PWRDNcleangeneric
+ mov r1, r4, lsl #5
+ add r2, r1, #0x40000000
+ add r3, r1, #0x80000000
+ add r4, r1, #0xc0000000
PWRDNcleanoptloop:
- mcr p15, 0, r1, c7, c14, 2
- mcr p15, 0, r2, c7, c14, 2
- mcr p15, 0, r3, c7, c14, 2
- mcr p15, 0, r4, c7, c14, 2
+ mcr p15, 0, r1, c7, c14, 2
+ mcr p15, 0, r2, c7, c14, 2
+ mcr p15, 0, r3, c7, c14, 2
+ mcr p15, 0, r4, c7, c14, 2
subs r1, r1, #0x20
- sub r2, r2, #0x20
- sub r3, r3, #0x20
- sub r4, r4, #0x20
- bpl PWRDNcleanoptloop
- b PWRDNcleanend
+ sub r2, r2, #0x20
+ sub r3, r3, #0x20
+ sub r4, r4, #0x20
+ bpl PWRDNcleanoptloop
+ b PWRDNcleanend
PWRDNcleangeneric:
- clz r6, r5
+ clz r6, r5
PWRDNcleanloopl:
- mov r2, r5
+ mov r2, r5
PWRDNcleanloop2:
- mov r7, r2, lsl r6
- mov r1, r4, lsl r3
- orr r7, r7, r1
- mcr p15, 0, r7, c7, c14, 2
+ mov r7, r2, lsl r6
+ mov r1, r4, lsl r3
+ orr r7, r7, r1
+ mcr p15, 0, r7, c7, c14, 2
subs r2, r2, #1
- bge PWRDNcleanloop2
+ bge PWRDNcleanloop2
subs r4, r4, #1
- bge PWRDNcleanloopl
+ bge PWRDNcleanloopl
PWRDNcleanend:
dsb
- mrc p15, 0, r4, c1, c0, 1
- bic r4, r4, #0x40
- mcr p15, 0, r4, c1, c0, 1
+ mrc p15, 0, r4, c1, c0, 1
+ bic r4, r4, #0x40
+ mcr p15, 0, r4, c1, c0, 1
isb
- mrc p15, 0, r7, c0, c0, 5
+ mrc p15, 0, r7, c0, c0, 5
ubfx r7, r7, #0, #2
- cmp r7, #0
- bne NotCPU0save
- mov r4, r0
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOffDMscu
- mov r6, #0x1000
- sub r6, r6, #1
- bic r5, r4, r6
- mcr p15, 0, r5, c7, c8, 1
+ cmp r7, #0
+ bne NotCPU0save
+ mov r4, r0
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOffDMscu
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r4, r6
+ mcr p15, 0, r5, c7, c8, 1
isb
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
- bne .
- bic r5, r5, r6
- and r4, r4, r6
- orr r4, r4, r5
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
+ bne .
+ bic r5, r5, r6
+ and r4, r4, r6
+ orr r4, r4, r5
MmuOffDMscu:
- str r4, [r11, #0x24]
- ldr r1, [r11, #0x38]
- ldr r2, [r1, #0x40]
- ldr r3, [r1, #0x44]
- ldr r4, [r1, #0x50]
- ldr r5, [r1, #0x54]
- ldr r6, [r1]
+ str r4, [r11, #0x24]
+ ldr r1, [r11, #0x38]
+ ldr r2, [r1, #0x40]
+ ldr r3, [r1, #0x44]
+ ldr r4, [r1, #0x50]
+ ldr r5, [r1, #0x54]
+ ldr r6, [r1]
stmia r0!, {r2 - r6}
NotCPU0save:
- mov r4, r0
+ mov r4, r0
DMpl310:
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOff
- mov r6, #0x1000
- sub r6, r6, #1
- bic r5, r4, r6
- mcr p15, 0, r5, c7, c8, 1
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOff
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r4, r6
+ mcr p15, 0, r5, c7, c8, 1
isb
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
- bne .
- bic r5, r5, r6
- and r4, r4, r6
- orr r4, r4, r5
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
+ bne .
+ bic r5, r5, r6
+ and r4, r4, r6
+ orr r4, r4, r5
MmuOff:
- str r4, [r11, #0x30]
- ldr r1, [r11, #0x38]
- mov r2, #0x3200 //
- add r1, r1, r2
- ldr r2, [r1]
- ldr r3, [r1, #4]
- ldr r4, [r1, #8]
- ldr r5, [r1, #0xC]
- ldr r6, [r1, #0x10]
- ldr r7, [r1, #0x14]
+ str r4, [r11, #0x30]
+ ldr r1, [r11, #0x38]
+ mov r2, #0x3200 //
+ add r1, r1, r2
+ ldr r2, [r1]
+ ldr r3, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r5, [r1, #0xC]
+ ldr r6, [r1, #0x10]
+ ldr r7, [r1, #0x14]
stmia r0!, {r2 - r7}
- add r1, r1, #0x700
- ldr r2, [r1], #4
- ldr r3, [r1], #4
- ldr r4, [r1], #4
- ldr r5, [r1], #4
+ add r1, r1, #0x700
+ ldr r2, [r1], #4
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
stmia r0!, {r2 - r5}
- ldr r2, [r1], #4
- ldr r3, [r1], #4
- ldr r4, [r1], #4
- ldr r5, [r1], #4
+ ldr r2, [r1], #4
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
stmia r0!, {r2 - r5}
- ldr r2, [r1], #4
- ldr r3, [r1], #4
- ldr r4, [r1], #4
- ldr r5, [r1], #4
+ ldr r2, [r1], #4
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
stmia r0!, {r2 - r5}
- ldr r2, [r1], #4
- ldr r3, [r1], #4
- ldr r4, [r1], #4
- ldr r5, [r1], #4
+ ldr r2, [r1], #4
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
stmia r0!, {r2 - r5}
- add r1, r1, #0x10
- ldr r2, [r1]
- ldr r3, [r1, #4]
+ add r1, r1, #0x10
+ ldr r2, [r1]
+ ldr r3, [r1, #4]
stmia r0!, {r2, r3}
- add r1, r1, #0x2b0
- ldr r2, [r1]
- ldr r3, [r1, #4]
+ add r1, r1, #0x2b0
+ ldr r2, [r1]
+ ldr r3, [r1, #4]
stmia r0!, {r2, r3}
- ldr r2, [r1, #0x340]
- str r2, [r0], #4
- sub r1, r1, #0xB00
- ldr r2, [r1]
- ldr r3, [r1, #4]
- ldr r4, [r1, #8]
- ldr r5, [r1, #0xC]
+ ldr r2, [r1, #0x340]
+ str r2, [r0], #4
+ sub r1, r1, #0xB00
+ ldr r2, [r1]
+ ldr r3, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r5, [r1, #0xC]
stmia r0!, {r2 - r5}
CheckWayOperationsSYSCACHEsave:
- ldr r2, [r1, #0x6FC]
- cmp r2, #0
- bne CheckWayOperationsSYSCACHEsave
+ ldr r2, [r1, #0x6FC]
+ cmp r2, #0
+ bne CheckWayOperationsSYSCACHEsave
CheckUnlockOperationSYSCACHEsave:
- ldr r2, [r1, #0x854]
- cmp r2, #0
- bne CheckUnlockOperationSYSCACHEsave
- ldr r4, [r11, #0x20]
- mov r2, #0x1F
- bic r4, r4, r2
- ldr r1, [r11, #0x38]
- mov r2, #0x3700
- add r1, r1, r2
- mov r3, r0
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOffDMsyscache1
- mov r6, #0x1000
- sub r6, r6, #1
- bic r5, r3, r6
- mcr p15, 0, r5, c7, c8, 1
+ ldr r2, [r1, #0x854]
+ cmp r2, #0
+ bne CheckUnlockOperationSYSCACHEsave
+ ldr r4, [r11, #0x20]
+ mov r2, #0x1F
+ bic r4, r4, r2
+ ldr r1, [r11, #0x38]
+ mov r2, #0x3700
+ add r1, r1, r2
+ mov r3, r0
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOffDMsyscache1
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r3, r6
+ mcr p15, 0, r5, c7, c8, 1
isb
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
- bne .
- bic r5, r5, r6
- and r3, r3, r6
- orr r3, r3, r5
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
+ bne .
+ bic r5, r5, r6
+ and r3, r3, r6
+ orr r3, r3, r5
MmuOffDMsyscache1:
- str r4, [r1, #0xB0]
- add r4, r4, #0x20
- cmp r4, r3
- blt MmuOffDMsyscache1
- mov r4, r11
- mrc p15, 0, r5, c1, c0, 0
- tst r5, #1
- beq MmuOffDMsyscache2
- mov r6, #0x1000
- sub r6, r6, #1
- bic r5, r4, r6
- mcr p15, 0, r5, c7, c8, 1
+ str r4, [r1, #0xB0]
+ add r4, r4, #0x20
+ cmp r4, r3
+ blt MmuOffDMsyscache1
+ mov r4, r11
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #1
+ beq MmuOffDMsyscache2
+ mov r6, #0x1000
+ sub r6, r6, #1
+ bic r5, r4, r6
+ mcr p15, 0, r5, c7, c8, 1
isb
- mrc p15, 0, r5, c7, c4, 0
- tst r5, #1
- bne .
- bic r5, r5, r6
- and r4, r4, r6
- orr r4, r4, r5
+ mrc p15, 0, r5, c7, c4, 0
+ tst r5, #1
+ bne .
+ bic r5, r5, r6
+ and r4, r4, r6
+ orr r4, r4, r5
MmuOffDMsyscache2:
- mov r2, #0x44
- add r2, r2, r4
+ mov r2, #0x44
+ add r2, r2, r4
SYSCACHEclean2:
- str r4, [r1, #0xB0]
- add r4, r4, #0x20
- cmp r4, r2
- blt SYSCACHEclean2
+ str r4, [r1, #0xB0]
+ add r4, r4, #0x20
+ cmp r4, r2
+ blt SYSCACHEclean2
NotCPU0savex:
- str r0, [r11, #0x14]
-_lc010_006156_:
- mrc p14, 0, r1, c0, c1, 0
- str r1, [r0], #4
- mrc p14, 0, r1, c0, c6, 0
- mrc p14, 0, r2, c0, c7, 0
- mrc p14, 0, r3, c7, c9, 6
+ str r0, [r11, #0x14]
+_lc010_006156_:
+ mrc p14, 0, r1, c0, c1, 0
+ str r1, [r0], #4
+ mrc p14, 0, r1, c0, c6, 0
+ mrc p14, 0, r2, c0, c7, 0
+ mrc p14, 0, r3, c7, c9, 6
stmia r0!, {r1 - r3}
- mrc p14, 0, r2, c0, c0, 4
- mrc p14, 0, r3, c0, c0, 5
+ mrc p14, 0, r2, c0, c0, 4
+ mrc p14, 0, r3, c0, c0, 5
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c1, 4
- mrc p14, 0, r3, c0, c1, 5
+ mrc p14, 0, r2, c0, c1, 4
+ mrc p14, 0, r3, c0, c1, 5
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c2, 4
- mrc p14, 0, r3, c0, c2, 5
+ mrc p14, 0, r2, c0, c2, 4
+ mrc p14, 0, r3, c0, c2, 5
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c3, 4
- mrc p14, 0, r3, c0, c3, 5
+ mrc p14, 0, r2, c0, c3, 4
+ mrc p14, 0, r3, c0, c3, 5
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c4, 4
- mrc p14, 0, r3, c0, c4, 5
+ mrc p14, 0, r2, c0, c4, 4
+ mrc p14, 0, r3, c0, c4, 5
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c5, 4
- mrc p14, 0, r3, c0, c5, 5
+ mrc p14, 0, r2, c0, c5, 4
+ mrc p14, 0, r3, c0, c5, 5
stmia r0!, {r2, r3}
_lc025_006469_:
- mrc p14, 0, r2, c0, c0, 6
- mrc p14, 0, r3, c0, c0, 7
+ mrc p14, 0, r2, c0, c0, 6
+ mrc p14, 0, r3, c0, c0, 7
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c1, 6
- mrc p14, 0, r3, c0, c1, 7
+ mrc p14, 0, r2, c0, c1, 6
+ mrc p14, 0, r3, c0, c1, 7
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c2, 6
- mrc p14, 0, r3, c0, c2, 7
+ mrc p14, 0, r2, c0, c2, 6
+ mrc p14, 0, r3, c0, c2, 7
stmia r0!, {r2, r3}
- mrc p14, 0, r2, c0, c3, 6
- mrc p14, 0, r3, c0, c3, 7
+ mrc p14, 0, r2, c0, c3, 6
+ mrc p14, 0, r3, c0, c3, 7
stmia r0!, {r2, r3}
_lc099_006768_:
- mov r0, r11
- b ArmCortexA9Saved
+ mov r0, r11
+ b ArmCortexA9Saved
ENDPROC(ArmCortexA9Save)
ENTRY(ArmCortexA9PhysicalRestore)
cpsid aif, #0x13 //
- mov r11, r0
- mcr p15, 0, r0, c7, c5, 0
- mcr p15, 0, r0, c7, c5, 6
- mov r1, #0x1800 //
- mcr p15, 0, r1, c1, c0, 0
+ mov r11, r0
+ mcr p15, 0, r0, c7, c5, 0
+ mcr p15, 0, r0, c7, c5, 6
+ mov r1, #0x1800 //
+ mcr p15, 0, r1, c1, c0, 0
isb
- mov r1, #0
- mcr p15, 2, r1, c0, c0, 0
+ mov r1, #0
+ mcr p15, 2, r1, c0, c0, 0
isb
- mrc p15, 1, r7, c0, c0, 0
+ mrc p15, 1, r7, c0, c0, 0
ubfx r3, r7, #0, #3
- add r3, r3, #4
+ add r3, r3, #4
ubfx r4, r7, #13, #15
ubfx r5, r7, #3, #10
- cmp r5, #3
+ cmp r5, #3
cmpeq r3, #5
- bne PWRUPinvgeneric
- mov r1, r4, lsl #5
- add r2, r1, #0x40000000
- add r3, r1, #0x80000000
- add r4, r1, #0xc0000000
+ bne PWRUPinvgeneric
+ mov r1, r4, lsl #5
+ add r2, r1, #0x40000000
+ add r3, r1, #0x80000000
+ add r4, r1, #0xc0000000
PWRUPinvoptloop:
- mcr p15, 0, r1, c7, c6, 2
- mcr p15, 0, r2, c7, c6, 2
- mcr p15, 0, r3, c7, c6, 2
- mcr p15, 0, r4, c7, c6, 2
+ mcr p15, 0, r1, c7, c6, 2
+ mcr p15, 0, r2, c7, c6, 2
+ mcr p15, 0, r3, c7, c6, 2
+ mcr p15, 0, r4, c7, c6, 2
subs r1, r1, #0x20
- sub r2, r2, #0x20
- sub r3, r3, #0x20
- sub r4, r4, #0x20
- bpl PWRUPinvoptloop
- b PWRUPinvend
+ sub r2, r2, #0x20
+ sub r3, r3, #0x20
+ sub r4, r4, #0x20
+ bpl PWRUPinvoptloop
+ b PWRUPinvend
PWRUPinvgeneric:
- clz r6, r5
+ clz r6, r5
PWRUPinvloopl:
- mov r2, r5
+ mov r2, r5
PWRUPinvloop2:
- mov r7, r2, lsl r6
- mov r1, r4, lsl r3
- orr r7, r7, r1
- mcr p15, 0, r7, c7, c6, 2
+ mov r7, r2, lsl r6
+ mov r1, r4, lsl r3
+ orr r7, r7, r1
+ mcr p15, 0, r7, c7, c6, 2
subs r2, r2, #1
- bge PWRUPinvloop2
+ bge PWRUPinvloop2
subs r4, r4, #1
- bge PWRUPinvloopl
+ bge PWRUPinvloopl
PWRUPinvend:
dsb
- mrc p15, 0, r7, c0, c0, 5
+ mrc p15, 0, r7, c0, c0, 5
ubfx r7, r7, #0, #2
- cmp r7, #0
- bne NotCPU0restore
+ cmp r7, #0
+ bne NotCPU0restore
CPU0restore:
- ldr r0, [r11, #0x24]
- cmp r0, #0
- beq CA9SCUrestorecontinue
- ldr r1, [r11, #0x3C]
+ ldr r0, [r11, #0x24]
+ cmp r0, #0
+ beq CA9SCUrestorecontinue
+ ldr r1, [r11, #0x3C]
ldmia r0!, {r2 - r6}
- str r2, [r1, #0x40]
- str r3, [r1, #0x44]
- str r4, [r1, #0x50]
- str r5, [r1, #0x54]
- mov r7, #0x10000
- sub r7, r7, #1
- str r7, [r1, #0xC]
- str r6, [r1]
+ str r2, [r1, #0x40]
+ str r3, [r1, #0x44]
+ str r4, [r1, #0x50]
+ str r5, [r1, #0x54]
+ mov r7, #0x10000
+ sub r7, r7, #1
+ str r7, [r1, #0xC]
+ str r6, [r1]
CA9SCUrestorecontinue:
- ldr r0, [r11, #0x30]
- cmp r0, #0
- beq SYSCACHErestorecontinue
- ldr r1, [r11, #0x3C]
- mov r2, #0x3100
- add r1, r1, r2
+ ldr r0, [r11, #0x30]
+ cmp r0, #0
+ beq SYSCACHErestorecontinue
+ ldr r1, [r11, #0x3C]
+ mov r2, #0x3100
+ add r1, r1, r2
movs r2, #0
SYSCACHErestorel2_cache_disable_loop:
- ldr r3, [r1]
- and r3, r3, #1
- cmp r3, #0
- beq SYSCACHErestorel2_cache_disable_skip
- str r2, [r1]
+ ldr r3, [r1]
+ and r3, r3, #1
+ cmp r3, #0
+ beq SYSCACHErestorel2_cache_disable_skip
+ str r2, [r1]
dsb
- b SYSCACHErestorel2_cache_disable_loop
+ b SYSCACHErestorel2_cache_disable_loop
SYSCACHErestorel2_cache_disable_skip:
- ldr r0, [r11, #0x30]
+ ldr r0, [r11, #0x30]
ldmia r0!, {r2 - r7}
- add r1, r1, #0x100
- str r2, [r1]
- str r3, [r1, #4]
- str r4, [r1, #8]
- str r5, [r1, #0xC]
- str r6, [r1, #0x10]
- str r7, [r1, #0x14]
- add r1, r1, #0x700
+ add r1, r1, #0x100
+ str r2, [r1]
+ str r3, [r1, #4]
+ str r4, [r1, #8]
+ str r5, [r1, #0xC]
+ str r6, [r1, #0x10]
+ str r7, [r1, #0x14]
+ add r1, r1, #0x700
ldmia r0!, {r2 - r5}
- str r2, [r1], #4
- str r3, [r1], #4
- str r4, [r1], #4
- str r5, [r1], #4
+ str r2, [r1], #4
+ str r3, [r1], #4
+ str r4, [r1], #4
+ str r5, [r1], #4
ldmia r0!, {r2 - r5}
- str r2, [r1], #4
- str r3, [r1], #4
- str r4, [r1], #4
- str r5, [r1], #4
+ str r2, [r1], #4
+ str r3, [r1], #4
+ str r4, [r1], #4
+ str r5, [r1], #4
ldmia r0!, {r2 - r5}
- str r2, [r1], #4
- str r3, [r1], #4
- str r4, [r1], #4
- str r5, [r1], #4
+ str r2, [r1], #4
+ str r3, [r1], #4
+ str r4, [r1], #4
+ str r5, [r1], #4
ldmia r0!, {r2 - r5}
- str r2, [r1], #4
- str r3, [r1], #4
- str r4, [r1], #4
- str r5, [r1], #4
- add r1, r1, #0x10
+ str r2, [r1], #4
+ str r3, [r1], #4
+ str r4, [r1], #4
+ str r5, [r1], #4
+ add r1, r1, #0x10
ldmia r0!, {r2, r3}
- str r2, [r1]
- str r3, [r1, #4]
- add r1, r1, #0x2B0
+ str r2, [r1]
+ str r3, [r1, #4]
+ add r1, r1, #0x2B0
ldmia r0!, {r2, r3}
- str r2, [r1]
- str r3, [r1, #4]
- ldr r2, [r0], #4
- str r2, [r1, #0x340]
- sub r1, r1, #0xB00
+ str r2, [r1]
+ str r3, [r1, #4]
+ ldr r2, [r0], #4
+ str r2, [r1, #0x340]
+ sub r1, r1, #0xB00
ldmia r0!, {r2 - r5}
- str r3, [r1, #4]
- str r4, [r1, #8]
- str r5, [r1, #0xC]
- str r2, [r1]
+ str r3, [r1, #4]
+ str r4, [r1, #8]
+ str r5, [r1, #0xC]
+ str r2, [r1]
dsb
SYSCACHErestorecontinue:
NotCPU0restore:
- ldr r0, [r11, #0x20]
+ ldr r0, [r11, #0x20]
ldmia r0!, {r5 - r7}
- mcr p15, 0, r5, c2, c0, 0
- mcr p15, 0, r6, c2, c0, 1
- mcr p15, 0, r7, c2, c0, 2
+ mcr p15, 0, r5, c2, c0, 0
+ mcr p15, 0, r6, c2, c0, 1
+ mcr p15, 0, r7, c2, c0, 2
ldmia r0!, {r4 - r7}
- mcr p15, 0, r4, c3, c0, 0
- mcr p15, 0, r5, c7, c4, 0
- mcr p15, 0, r6, c10, c2, 0
- mcr p15, 0, r7, c10, c2, 1
- ldr r4, [r0], #4
- mcr p15, 0, r4, c12, c0, 0
+ mcr p15, 0, r4, c3, c0, 0
+ mcr p15, 0, r5, c7, c4, 0
+ mcr p15, 0, r6, c10, c2, 0
+ mcr p15, 0, r7, c10, c2, 1
+ ldr r4, [r0], #4
+ mcr p15, 0, r4, c12, c0, 0
ldmia r0!, {r4 - r7}
- mcr p15, 0, r4, c13, c0, 1
- mcr p15, 0, r5, c13, c0, 2
- mcr p15, 0, r6, c13, c0, 3
- mcr p15, 0, r7, c13, c0, 4
- ldr r11, [r11, #0x34]
- ldr lr, =VirtualRestore
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0
+ mcr p15, 0, r4, c13, c0, 1
+ mcr p15, 0, r5, c13, c0, 2
+ mcr p15, 0, r6, c13, c0, 3
+ mcr p15, 0, r7, c13, c0, 4
+ ldr r11, [r11, #0x34]
+ ldr lr, =VirtualRestore
+ mov r1, #0
+ mcr p15, 0, r1, c8, c7, 0
isb
- mov r2, #0
- orr r2, #(1<<11) //Z bit on
- orr r2, #(1<<12) //I cache on
- orr r2, #(1<<28) //Enable Tex Remap
- orr r2, #(1<<0) //MMU on
+ mov r2, #0
+ orr r2, #(1<<11) //Z bit on
+ orr r2, #(1<<12) //I cache on
+ orr r2, #(1<<28) //Enable Tex Remap
+ orr r2, #(1<<0) //MMU on
andeq r0, r0, r0
andeq r0, r0, r0
andeq r0, r0, r0
andeq r0, r0, r0
.align 5
- mcr p15, 0, r2, c1, c0, 0
- bx lr
- mov r0, r0
- mov r0, r0
- mov r0, r0
- mov r0, r0
+ mcr p15, 0, r2, c1, c0, 0
+ bx lr
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
VirtualRestore:
- mcr p15, 0, r1, c8, c7, 0
+ mcr p15, 0, r1, c8, c7, 0
dsb
isb
- add r0, r11, #0x44
- ldr r0, [r0]
- ldr r0, [r0]
- b ArmCortexA9PhysicalRestored
+ add r0, r11, #0x44
+ ldr r0, [r0]
+ ldr r0, [r0]
+ b ArmCortexA9PhysicalRestored
andeq r1, r0, r1, lsl #16
andeq r0, r0, r0
.ltorg
@@ -1647,237 +1636,237 @@ VirtualRestore:
ENDPROC(ArmCortexA9PhysicalRestore)
ENTRY(ArmCortexA9VirtualRestore)
- mrc p15, 0, r7, c0, c0, 5
- tst r7, r7
- bpl CA9GICrestorecontinue
+ mrc p15, 0, r7, c0, c0, 5
+ tst r7, r7
+ bpl CA9GICrestorecontinue
ubfx r7, r7, #0, #2
- ldr r0, [r11, #0x28]
- cmp r0, #0
- beq CA9GICrestorecontinue
- ldr r1, [r11, #0x38]
- mov r6, #1, 24
- add r1, r1, r6
+ ldr r0, [r11, #0x28]
+ cmp r0, #0
+ beq CA9GICrestorecontinue
+ ldr r1, [r11, #0x38]
+ mov r6, #1, 24
+ add r1, r1, r6
ldmia r0!, {r2 - r4}
- str r2, [r1, #4]
- str r3, [r1, #8]
- str r4, [r1]
- ldr r1, [r11, #0x38]
- mov r6, #0x1000
- add r1, r1, r6
- cmp r7, #0
+ str r2, [r1, #4]
+ str r3, [r1, #8]
+ str r4, [r1]
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000
+ add r1, r1, r6
+ cmp r7, #0
movne r2, #1
- bne next1CA9GICrestore
- ldr r2, [r1, #4]
+ bne next1CA9GICrestore
+ ldr r2, [r1, #4]
ubfx r2, r2, #0, #5
- add r2, r2, #1
+ add r2, r2, #1
next1CA9GICrestore:
- mov r3, r2
+ mov r3, r2
loop1CA9GICrestore:
- ldr r5, [r0], #4
- str r5, [r1, #0x100]
- add r1, r1, #4
+ ldr r5, [r0], #4
+ str r5, [r1, #0x100]
+ add r1, r1, #4
subs r3, r3, #1
- bne loop1CA9GICrestore
- ldr r1, [r11, #0x38]
- mov r6, #0x1000
- add r1, r1, r6
- mov r3, r2, lsl #3
+ bne loop1CA9GICrestore
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000
+ add r1, r1, r6
+ mov r3, r2, lsl #3
loop2CA9GICrestore:
ldmia r0!, {r4, r5}
- str r4, [r1, #0x400]
- str r5, [r1, #0x800]
- add r1, r1, #4
+ str r4, [r1, #0x400]
+ str r5, [r1, #0x800]
+ add r1, r1, #4
subs r3, r3, #1
- bne loop2CA9GICrestore
- ldr r1, [r11, #0x38]
- mov r6, #0x1000 //
- add r1, r1, r6
- mov r3, r2, lsl #1
+ bne loop2CA9GICrestore
+ ldr r1, [r11, #0x38]
+ mov r6, #0x1000
+ add r1, r1, r6
+ mov r3, r2, lsl #1
loop3CA9GICrestore:
- ldr r4, [r0], #4
- str r4, [r1, #0xC00]
- add r1, r1, #4
+ ldr r4, [r0], #4
+ str r4, [r1, #0xC00]
+ add r1, r1, #4
subs r3, r3, #1
- bne loop3CA9GICrestore
- cmp r7, #0
- bne CA9GICrestorecontinue
- ldr r1, [r11, #0x38]
- mov r6, #1, 20
- add r1, r1, r6
- ldr r2, [r0], #4
- str r2, [r1]
+ bne loop3CA9GICrestore
+ cmp r7, #0
+ bne CA9GICrestorecontinue
+ ldr r1, [r11, #0x38]
+ mov r6, #1, 20
+ add r1, r1, r6
+ ldr r2, [r0], #4
+ str r2, [r1]
CA9GICrestorecontinue:
- ldr r0, [r11, #0x1C]
- cmp r0, #0
- beq exit_fpu_restoreVFPrestore
- mrc p15, 0, r2, c1, c0, 2
- orr r2, r2, #0x00F00000
- mcr p15, 0, r2, c1, c0, 2
- ldr r2, =0x40000000
+ ldr r0, [r11, #0x1C]
+ cmp r0, #0
+ beq exit_fpu_restoreVFPrestore
+ mrc p15, 0, r2, c1, c0, 2
+ orr r2, r2, #0x00F00000
+ mcr p15, 0, r2, c1, c0, 2
+ ldr r2, =0x40000000
VFPFMXR FPEXC,r2
ldmia r0!, {r9, r10}
VFPFLDMIA r0, r4 @ r4 clobbered (immediately loaded in exit_fpu_re)
VFPFMRX r2, MVFR0
- and r2, r2, #0xF
- cmp r2, #2
- blt complete_fpu_restoreVFPrestore
+ and r2, r2, #0xF
+ cmp r2, #2
+ blt complete_fpu_restoreVFPrestore
//fldmiad r0!, {d16-d31} //??
complete_fpu_restoreVFPrestore:
VFPFMXR FPSCR,r10
VFPFMXR FPEXC,r9
exit_fpu_restoreVFPrestore:
- ldr r0, [r11, #0x10]
- cps #0x1f //sys
- ldr sp, [r0], #4
- ldr lr, [r0], #4
- cps #0x17 //abt
- ldr sp, [r0], #4
+ ldr r0, [r11, #0x10]
+ cps #0x1f //sys
+ ldr sp, [r0], #4
+ ldr lr, [r0], #4
+ cps #0x17 //abt
+ ldr sp, [r0], #4
ldmia r0!, {r4, lr}
- msr spsr_cxsf, r4
- cps #0x1b //und
- ldr sp, [r0], #4
+ msr spsr_cxsf, r4
+ cps #0x1b //und
+ ldr sp, [r0], #4
ldmia r0!, {r4, lr}
- msr spsr_cxsf, r4
- cps #0x12 //irq
- ldr sp, [r0], #4
+ msr spsr_cxsf, r4
+ cps #0x12 //irq
+ ldr sp, [r0], #4
ldmia r0!, {r4, lr}
- msr spsr_cxsf, r4
- cps 0x11 //fiq
- ldr sp, [r0], #4
+ msr spsr_cxsf, r4
+ cps 0x11 //fiq
+ ldr sp, [r0], #4
ldmia r0!, {r4, r8 - r12, lr}
- msr spsr_cxsf, r4
- cps 0x13 //svc
- ldr r3, [r0], #4
- mcr p15, 2, r3, c0, c0, 0
- ldr r4, [r11, #4]
- ldr r5, [r11, #8]
- mcr p15, 0, r4, c1, c0, 1
+ msr spsr_cxsf, r4
+ cps 0x13 //svc
+ ldr r3, [r0], #4
+ mcr p15, 2, r3, c0, c0, 0
+ ldr r4, [r11, #4]
+ ldr r5, [r11, #8]
+ mcr p15, 0, r4, c1, c0, 1
isb
- mcr p15, 0, r5, c1, c0, 0
+ mcr p15, 0, r5, c1, c0, 0
isb
- ldr r6, [r11, #0xC]
- mcr p15, 0, r6, c1, c0, 2
- isb
- ldr r0, [r11, #0x14]
- cmp r0, #0
- beq SkipDbgRestore
+ ldr r6, [r11, #0xC]
+ mcr p15, 0, r6, c1, c0, 2
+ isb
+ ldr r0, [r11, #0x14]
+ cmp r0, #0
+ beq SkipDbgRestore
DBGrestore:
- ldr r5, [r0], #4
+ ldr r5, [r0], #4
ldmia r0!, {r1 - r3}
- mcr p14, 0, r1, c0, c6, 0
- mcr p14, 0, r2, c0, c7, 0
- mcr p14, 0, r3, c7, c8, 6
+ mcr p14, 0, r1, c0, c6, 0
+ mcr p14, 0, r2, c0, c7, 0
+ mcr p14, 0, r3, c7, c8, 6
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c0, 4
- mcr p14, 0, r3, c0, c0, 5
+ mcr p14, 0, r2, c0, c0, 4
+ mcr p14, 0, r3, c0, c0, 5
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c1, 4
- mcr p14, 0, r3, c0, c1, 5
+ mcr p14, 0, r2, c0, c1, 4
+ mcr p14, 0, r3, c0, c1, 5
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c2, 4
- mcr p14, 0, r3, c0, c2, 5
+ mcr p14, 0, r2, c0, c2, 4
+ mcr p14, 0, r3, c0, c2, 5
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c3, 4
- mcr p14, 0, r3, c0, c3, 5
+ mcr p14, 0, r2, c0, c3, 4
+ mcr p14, 0, r3, c0, c3, 5
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c4, 4
- mcr p14, 0, r3, c0, c4, 5
+ mcr p14, 0, r2, c0, c4, 4
+ mcr p14, 0, r3, c0, c4, 5
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c5, 4
- mcr p14, 0, r3, c0, c5, 5
+ mcr p14, 0, r2, c0, c5, 4
+ mcr p14, 0, r3, c0, c5, 5
_lc025_008056_:
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c0, 6
- mcr p14, 0, r3, c0, c0, 7
+ mcr p14, 0, r2, c0, c0, 6
+ mcr p14, 0, r3, c0, c0, 7
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c1, 6
- mcr p14, 0, r3, c0, c1, 7
+ mcr p14, 0, r2, c0, c1, 6
+ mcr p14, 0, r3, c0, c1, 7
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c2, 6
- mcr p14, 0, r3, c0, c2, 7
+ mcr p14, 0, r2, c0, c2, 6
+ mcr p14, 0, r3, c0, c2, 7
ldmia r0!, {r2, r3}
- mcr p14, 0, r2, c0, c3, 6
- mcr p14, 0, r3, c0, c3, 7
+ mcr p14, 0, r2, c0, c3, 6
+ mcr p14, 0, r3, c0, c3, 7
_lc040_008312_:
isb
- mcr p14, 0, r5, c0, c2, 2
+ mcr p14, 0, r5, c0, c2, 2
_lc099_008377_:
isb
SkipDbgRestore:
- ldr r0, [r11, #0x18]
+ ldr r0, [r11, #0x18]
ldmia r0!, {r8 - r10}
- mov r1, #0
- mvn r2, #0
- mcr p15, 0, r2, c9, c14, 2
- mcr p15, 0, r2, c9, c12, 3
+ mov r1, #0
+ mvn r2, #0
+ mcr p15, 0, r2, c9, c14, 2
+ mcr p15, 0, r2, c9, c12, 3
isb
ubfx r12, r8, #11, #5
- tst r12, r12
- beq PMonrestorecontinue
- mov r3, r12
- mov r4, #1
- mov r4, r4, lsl r3
- sub r4, r4, #1
+ tst r12, r12
+ beq PMonrestorecontinue
+ mov r3, r12
+ mov r4, #1
+ mov r4, r4, lsl r3
+ sub r4, r4, #1
PMonrestoreloop1:
subs r3, r3, #1
- mcr p15, 0, r3, c9, c12, 5
+ mcr p15, 0, r3, c9, c12, 5
isb
- mrc p15, 0, r5, c9, c13, 1
- bfc r5, #0, #8
- mcr p15, 0, r5, c9, c13, 1
- mcr p15, 0, r2, c9, c13, 2
+ mrc p15, 0, r5, c9, c13, 1
+ bfc r5, #0, #8
+ mcr p15, 0, r5, c9, c13, 1
+ mcr p15, 0, r2, c9, c13, 2
isb
- bne PMonrestoreloop1
- mov r3, #1
- bic r5, r9, #1<<31
- mcr p15, 0, r5, c9, c12, 1
- mcr p15, 0, r3, c9, c12, 0
+ bne PMonrestoreloop1
+ mov r3, #1
+ bic r5, r9, #1<<31
+ mcr p15, 0, r5, c9, c12, 1
+ mcr p15, 0, r3, c9, c12, 0
isb
- mcr p15, 0, r9, c9, c12, 4
+ mcr p15, 0, r9, c9, c12, 4
isb
- mcr p15, 0, r4, c9, c12, 2
+ mcr p15, 0, r4, c9, c12, 2
PMonrestoreloop2:
subs r12, r12, #1
- mcr p15, 0, r12, c9, c12, 5
+ mcr p15, 0, r12, c9, c12, 5
isb
ldmia r0!, {r3, r4}
- mcr p15, 0, r3, c9, c13, 1
- mcr p15, 0, r4, c9, c13, 2
+ mcr p15, 0, r3, c9, c13, 1
+ mcr p15, 0, r4, c9, c13, 2
isb
- bne PMonrestoreloop2
+ bne PMonrestoreloop2
PMonrestorecontinue:
- tst r9, #0x80000000
- beq PMonrestorecontinue2
- mcr p15, 0, r2, c9, c13, 0
+ tst r9, #0x80000000
+ beq PMonrestorecontinue2
+ mcr p15, 0, r2, c9, c13, 0
isb
- mov r3, #0x80000000
- mcr p15, 0, r3, c9, c12, 1
+ mov r3, #0x80000000
+ mcr p15, 0, r3, c9, c12, 1
isb
PMonrestoreloop3:
- mrc p15, 0, r4, c9, c12, 3
+ mrc p15, 0, r4, c9, c12, 3
movs r4, r4
- bpl PMonrestoreloop3
- mcr p15, 0, r3, c9, c12, 2
+ bpl PMonrestoreloop3
+ mcr p15, 0, r3, c9, c12, 2
PMonrestorecontinue2:
- mcr p15, 0, r1, c9, c12, 0
+ mcr p15, 0, r1, c9, c12, 0
isb
ldmia r0!, {r1 - r4}
- mcr p15, 0, r1, c9, c13, 0
- mcr p15, 0, r2, c9, c14, 0
- mcr p15, 0, r3, c9, c14, 1
- mcr p15, 0, r4, c9, c12, 1
- mcr p15, 0, r10, c9, c12, 5
+ mcr p15, 0, r1, c9, c13, 0
+ mcr p15, 0, r2, c9, c14, 0
+ mcr p15, 0, r3, c9, c14, 1
+ mcr p15, 0, r4, c9, c12, 1
+ mcr p15, 0, r10, c9, c12, 5
isb
- mcr p15, 0, r8, c9, c12, 0
+ mcr p15, 0, r8, c9, c12, 0
isb
- add r0, r11, #0x7C
+ add r0, r11, #0x7C
ldmia r0, {r1, r2}
- msr cpsr_cxsf, r1
- msr spsr_cxsf, r2
- sub r0, r0, #0x38
- ldr sp, [r0], #4
+ msr cpsr_cxsf, r1
+ msr spsr_cxsf, r2
+ sub r0, r0, #0x38
+ ldr sp, [r0], #4
ldmia r0, {r1 - r12, lr}
- sub r0, r0, #0x48
- b ArmCortexA9VirtualRestored
+ sub r0, r0, #0x48
+ b ArmCortexA9VirtualRestored
.ltorg
ENDPROC(ArmCortexA9VirtualRestore)
diff --git a/arch/arm/mach-tegra/power-t2.c b/arch/arm/mach-tegra/power-t2.c
index 758523d91ca2..a608ea1724b1 100644
--- a/arch/arm/mach-tegra/power-t2.c
+++ b/arch/arm/mach-tegra/power-t2.c
@@ -51,7 +51,7 @@ extern volatile void *g_pKBC;
uintptr_t g_resume = 0, g_contextSavePA = 0, g_contextSaveVA = 0;
uintptr_t g_iramContextSaveVA = 0;
NvU32 g_modifiedPlls;
-NvU32 g_wakeupCcbp = 0, g_NumActiveCPUs, g_Sync = 0, g_ArmPerif = 0;
+NvU32 g_wakeupCcbp = 0, g_ArmPerif = 0;
NvU32 g_enterLP2PA = 0;
NvU32 g_localTimerLoadRegister, g_localTimerCntrlRegister;
NvU32 g_coreSightClock, g_currentCcbp, g_currentCcdiv;
@@ -194,8 +194,7 @@ void cpu_ap20_do_lp1(void)
//We're back
enable_irq(INT_SYS_STATS_MON);
- g_NumActiveCPUs = num_online_cpus();
- // Assembly LP1 code explicitly turn on PLLX,PLLM and PLLP so no need to enable it
+ // Assembly LP1 code explicitly turn on PLLX,PLLM and PLLP so no need to enable it
if (g_modifiedPlls & PowerPllC) {
enable_pll(PowerPllC, NV_TRUE);
NvOsWaitUS(300);
@@ -252,8 +251,7 @@ void cpu_ap20_do_lp2(void)
{
//We're back
enable_irq(INT_SYS_STATS_MON);
-
- g_NumActiveCPUs = num_online_cpus();
+
//Delay if needed
if (g_modifiedPlls & PowerPllC)