// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 */

#include <bootstage.h>
#include <command.h>
#include <time.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <linux/bitops.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Generic timer implementation of get_tbclk()
 */
unsigned long notrace get_tbclk(void)
{
	unsigned long cntfrq;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (cntfrq));
	return cntfrq;
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A008585
/*
 * FSL erratum A-008585 says that the ARM generic timer counter "has the
 * potential to contain an erroneous value for a small number of core
 * clock cycles every time the timer value changes".
 * This sometimes leads to a consecutive counter read returning a lower
 * value than the previous one, thus making time appear to go backwards.
 * The workaround is to read the counter twice and only return when the
 * value was the same in both reads.
 * Assumes that the CPU runs at a much higher frequency than the timer.
 */
unsigned long timer_read_counter(void)
{
	unsigned long cntpct;
	unsigned long temp;

	isb();
	asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct));
	asm volatile("mrs %0, cntpct_el0" : "=r" (temp));
	while (temp != cntpct) {
		asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct));
		asm volatile("mrs %0, cntpct_el0" : "=r" (temp));
	}

	return cntpct;
}
#elif CONFIG_SUNXI_A64_TIMER_ERRATUM
/*
 * This erratum sometimes flips the lower 11 bits of the counter value
 * to all 0's or all 1's, leading to jumps forwards or backwards.
 * Backwards jumps might be interpreted as roll-overs and be treated as
 * huge jumps forward.
 * The workaround is to check whether the lower 11 bits of the counter are
 * all 0 or all 1, then discard this value and read again.
 * This occasionally discards valid values, but it catches all erroneous
 * reads and fixes the problem reliably. Also, this mostly requires only a
 * single read, so it does not add any significant overhead.
 * The algorithm was conceived by Samuel Holland.
 */
unsigned long timer_read_counter(void)
{
	unsigned long cntpct;

	isb();
	do {
		asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct));
	} while (((cntpct + 1) & GENMASK(10, 0)) <= 1);

	return cntpct;
}
#else
/*
 * timer_read_counter() using the Arm Generic Timer (aka arch timer).
 */
unsigned long notrace timer_read_counter(void)
{
	unsigned long cntpct;

	isb();
	asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct));

	return cntpct;
}
#endif

uint64_t notrace get_ticks(void)
{
	unsigned long ticks = timer_read_counter();
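
	/* Keep a copy of the latest counter value in global data */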
	gd->arch.tbl = ticks;

	return ticks;
}
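
/*
 * Convert microseconds to timer ticks. For sub-millisecond delays,
 * multiply first and round to the nearest tick (the "+ 500" term) to
 * keep precision; for longer delays, divide both operands down first,
 * trading a little precision for headroom against overflow.
 */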
unsigned long usec2ticks(unsigned long usec)
{
	ulong ticks;

	if (usec < 1000)
		ticks = ((usec * (get_tbclk() / 1000)) + 500) / 1000;
	else
		ticks = ((usec / 10) * (get_tbclk() / 100000));

	return ticks;
}
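
/* Return the time elapsed since boot in microseconds */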
ulong timer_get_boot_us(void)
{
	u64 val = get_ticks() * 1000000;

	return val / get_tbclk();
}

#if CONFIG_IS_ENABLED(ARMV8_UDELAY_EVENT_STREAM)
void __udelay(unsigned long usec)
{
	u64 target = get_ticks() + usec_to_tick(usec);

	/*
	 * At EL2 or above, use the event stream to avoid polling
	 * CNTPCT_EL0 so often.
	 */
	if (current_el() >= 2) {
		u32 cnthctl_val;
		const u8 event_period = 0x7;
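
		/* Enable the event stream, preserving the other cnthctl_el2 bits */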
asm volatile("mrs %0, cnthctl_el2" : "=r" (cnthctl_val));
asm volatile("msr cnthctl_el2, %0" : : "r"
(cnthctl_val | CNTHCTL_EL2_EVNT_EN | CNTHCTL_EL2_EVNT_I(event_period)));
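
		/*
		 * Sleep in wfe() while the target is still at least one
		 * full event-stream period (2^event_period ticks) away.
		 */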
		while (get_ticks() + (1ULL << event_period) <= target)
			wfe();

		/* Restore the original event stream configuration */
		asm volatile("msr cnthctl_el2, %0" : : "r" (cnthctl_val));
	}

	/* Fall back to polling CNTPCT_EL0 */
	while (get_ticks() <= target)
		;
}
#endif