1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
|
/*
* Copyright 2011-2013 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/suspend.h>
#include <linux/genalloc.h>
#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/tlb.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include "common.h"
#include "hardware.h"
#define CCR 0x0
#define BM_CCR_WB_COUNT (0x7 << 16)
#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
#define BM_CCR_RBC_EN (0x1 << 27)
#define CLPCR 0x54
#define BP_CLPCR_LPM 0
#define BM_CLPCR_LPM (0x3 << 0)
#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
#define BM_CLPCR_SBYOS (0x1 << 6)
#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
#define BM_CLPCR_VSTBY (0x1 << 8)
#define BP_CLPCR_STBY_COUNT 9
#define BM_CLPCR_STBY_COUNT (0x3 << 9)
#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
#define CGPR 0x64
#define BM_CGPR_INT_MEM_CLK_LPM (0x1 << 17)
#define MX6_INT_IOMUXC 32
/* SRAM (OCRAM) pool the low-level suspend code is relocated into. */
static struct gen_pool *iram_pool;
/* Virtual mapping of the SRAM chunk holding the suspend routine. */
static void *suspend_iram_base;
/* Size and physical address of that SRAM chunk. */
static unsigned long iram_size, iram_paddr;
/* imx6_suspend after fncpy() into SRAM; invoked from imx6_suspend_finish(). */
static int (*suspend_in_iram_fn)(void *iram_vbase,
		unsigned long iram_pbase, unsigned int cpu_type);
/* MXC_CPU_IMX6Q/IMX6DL/IMX6SL, chosen in imx6_pm_init(). */
static unsigned int cpu_type;
/* CCM register base, supplied early via imx6_pm_set_ccm_base(). */
static void __iomem *ccm_base;
/*
 * Gate the internal-memory clock LPM bit (CGPR[INT_MEM_CLK_LPM]).
 * Only applied on i.MX6Q silicon newer than rev 1.1 and i.MX6DL newer
 * than rev 1.0; on earlier revisions the register is left untouched.
 */
void imx6_set_cache_lpm_in_wait(bool enable)
{
	bool rev_ok;
	u32 reg;

	rev_ok = (cpu_is_imx6q() &&
		  imx_get_soc_revision() > IMX_CHIP_REVISION_1_1) ||
		 (cpu_is_imx6dl() &&
		  imx_get_soc_revision() > IMX_CHIP_REVISION_1_0);
	if (!rev_ok)
		return;

	reg = readl_relaxed(ccm_base + CGPR);
	if (enable)
		reg |= BM_CGPR_INT_MEM_CLK_LPM;
	else
		reg &= ~BM_CGPR_INT_MEM_CLK_LPM;
	writel_relaxed(reg, ccm_base + CGPR);
}
/*
 * Enable/disable the CCM RBC (REG_BYPASS_COUNTER) feature: the enable
 * bit and the bypass count field in CCR are both set when enabling and
 * both cleared when disabling.
 *
 * NOTE(review): the GPC mask/restore bracket, the two separate
 * read-modify-write cycles, and the trailing delay are all stated
 * hardware requirements — do not reorder or merge these statements.
 */
static void imx6_enable_rbc(bool enable)
{
	u32 val;

	/*
	 * need to mask all interrupts in GPC before
	 * operating RBC configurations
	 */
	imx_gpc_mask_all();

	/* configure RBC enable bit */
	val = readl_relaxed(ccm_base + CCR);
	val &= ~BM_CCR_RBC_EN;
	val |= enable ? BM_CCR_RBC_EN : 0;
	writel_relaxed(val, ccm_base + CCR);

	/* configure RBC count */
	val = readl_relaxed(ccm_base + CCR);
	val &= ~BM_CCR_RBC_BYPASS_COUNT;
	val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
	/*
	 * NOTE(review): plain writel() (barriered) here while every other
	 * CCM access in this file is *_relaxed — presumably intentional so
	 * the write is ordered before the fixed delay below; confirm.
	 */
	writel(val, ccm_base + CCR);

	/*
	 * need to delay at least 2 cycles of CKIL(32K)
	 * due to hardware design requirement, which is
	 * ~61us, here we use 65us for safe
	 */
	udelay(65);

	/* restore GPC interrupt mask settings */
	imx_gpc_restore_all();
}
/*
 * Configure the well-bias feature used at LPM entry: the enable bit
 * lives in CLPCR (WB_PER_AT_LPM), the counter field in CCR (WB_COUNT).
 * Both are set when enabling and cleared when disabling.
 */
static void imx6_enable_wb(bool enable)
{
	u32 reg;

	/* well-bias enable bit (CLPCR) */
	reg = readl_relaxed(ccm_base + CLPCR);
	if (enable)
		reg |= BM_CLPCR_WB_PER_AT_LPM;
	else
		reg &= ~BM_CLPCR_WB_PER_AT_LPM;
	writel_relaxed(reg, ccm_base + CLPCR);

	/* well-bias counter field (CCR) */
	reg = readl_relaxed(ccm_base + CCR);
	if (enable)
		reg |= BM_CCR_WB_COUNT;
	else
		reg &= ~BM_CCR_WB_COUNT;
	writel_relaxed(reg, ccm_base + CCR);
}
/*
 * imx6_set_lpm - program the CCM low-power mode register (CLPCR) for @mode.
 *
 * Returns 0 on success, -EINVAL for an unrecognized mode (CLPCR is left
 * unchanged in that case).  The GPC unmask/mask bracket around the CLPCR
 * write implements the errata workaround described below and must stay
 * exactly where it is.
 */
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
{
	u32 val = readl_relaxed(ccm_base + CLPCR);
	struct irq_desc *desc = irq_to_desc(MX6_INT_IOMUXC);

	/*
	 * CCM state machine has restriction, before enabling
	 * LPM mode, need to make sure last LPM mode is waked up
	 * by dsm_wakeup_signal, which means the wakeup source
	 * must be seen by GPC, then CCM will clean its state machine
	 * and re-sample necessary signal to decide whether it can
	 * enter LPM mode. We force irq #32 to be always pending,
	 * unmask it before we enable LPM mode and mask it after LPM
	 * is enabled, this flow will make sure CCM state machine in
	 * reliable status before entering LPM mode. Otherwise, CCM
	 * may enter LPM mode by mistake which will cause system bus
	 * locked by CPU access not finished, as when CCM enter
	 * LPM mode, CPU will stop running.
	 */
	imx_gpc_irq_unmask(&desc->irq_data);

	val &= ~BM_CLPCR_LPM;
	switch (mode) {
	case WAIT_CLOCKED:
		/* LPM field stays 0: WAIT with clocks running. */
		break;
	case WAIT_UNCLOCKED:
		val |= 0x1 << BP_CLPCR_LPM;	/* LPM = WAIT */
		val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		/* 6SL has its DDR on MMDC channel 0, the others on channel 1. */
		if (cpu_is_imx6sl())
			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
		else
			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		break;
	case STOP_POWER_ON:
		val |= 0x2 << BP_CLPCR_LPM;	/* LPM = STOP */
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		if (cpu_is_imx6sl()) {
			val |= BM_CLPCR_BYPASS_PMIC_READY;
			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
		} else {
			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		}
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
		val |= 0x1 << BP_CLPCR_LPM;	/* LPM = WAIT */
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		break;
	case STOP_POWER_OFF:
		/* Deepest mode: STOP with standby voltage and osc power-down. */
		val |= 0x2 << BP_CLPCR_LPM;	/* LPM = STOP */
		val |= 0x3 << BP_CLPCR_STBY_COUNT;
		val |= BM_CLPCR_VSTBY;
		val |= BM_CLPCR_SBYOS;
		if (cpu_is_imx6sl()) {
			val |= BM_CLPCR_BYPASS_PMIC_READY;
			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
		} else {
			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		}
		break;
	default:
		/* Re-mask the forced-pending irq before bailing out. */
		imx_gpc_irq_mask(&desc->irq_data);
		return -EINVAL;
	}
	writel_relaxed(val, ccm_base + CLPCR);

	/* End of the CCM state-machine workaround bracket (see above). */
	imx_gpc_irq_mask(&desc->irq_data);

	return 0;
}
/* cpu_suspend() callback: jump to the suspend routine relocated to SRAM. */
static int imx6_suspend_finish(unsigned long arg)
{
	/*
	 * The low-level routine must run from on-chip RAM since it
	 * floats the DDR IO; flush the TLB before jumping there.
	 */
	local_flush_tlb_all();
	suspend_in_iram_fn(suspend_iram_base, iram_paddr, cpu_type);
	return 0;
}
/*
 * platform_suspend_ops.enter callback.
 *
 * STANDBY: STOP_POWER_ON low-power mode entered via cpu_do_idle(),
 * with GPC wakeup configuration around it.
 * MEM: full suspend — well bias on, STOP_POWER_OFF, resume address set,
 * then cpu_suspend() into the SRAM-relocated routine; everything is
 * unwound in reverse order on resume.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 * NOTE(review): the call sequence in each branch is order-critical
 * (LPM/GPC/anatop setup before idle, teardown after) — do not reorder.
 */
static int imx6_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		imx6_set_lpm(STOP_POWER_ON);
		imx6_set_cache_lpm_in_wait(true);
		imx_gpc_pre_suspend(false);	/* false: not going to deep sleep */
		if (cpu_is_imx6sl())
			imx6sl_set_wait_clk(true);
		/* Zzz ... */
		cpu_do_idle();
		/* Woken up: undo the standby configuration. */
		if (cpu_is_imx6sl())
			imx6sl_set_wait_clk(false);
		imx_gpc_post_resume();
		imx6_set_lpm(WAIT_CLOCKED);
		break;
	case PM_SUSPEND_MEM:
		imx6_enable_wb(true);
		imx6_set_cache_lpm_in_wait(false);
		imx6_set_lpm(STOP_POWER_OFF);
		imx_gpc_pre_suspend(true);	/* true: deep sleep (DSM) */
		imx_anatop_pre_suspend();
		/* Resume entry point for the ROM/low-level resume path. */
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Zzz ... */
		cpu_suspend(0, imx6_suspend_finish);
		/* Resumed: SCU must be re-initialized on SMP parts. */
		if (!cpu_is_imx6sl())
			imx_smp_prepare();
		imx_anatop_post_resume();
		imx_gpc_post_resume();
		imx6_enable_rbc(false);
		imx6_enable_wb(false);
		imx6_set_cache_lpm_in_wait(true);
		imx6_set_lpm(WAIT_CLOCKED);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Static device mappings for the blocks the suspend/resume path touches
 * (MMDC, SRC, IOMUXC, CCM, ANATOP, GPC, L2); registered at boot by
 * imx6_pm_map_io() below.
 */
static struct map_desc imx6_pm_io_desc[] __initdata = {
	imx_map_entry(MX6Q, MMDC_P0, MT_DEVICE),
	imx_map_entry(MX6Q, MMDC_P1, MT_DEVICE),
	imx_map_entry(MX6Q, SRC, MT_DEVICE),
	imx_map_entry(MX6Q, IOMUXC, MT_DEVICE),
	imx_map_entry(MX6Q, CCM, MT_DEVICE),
	imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
	imx_map_entry(MX6Q, GPC, MT_DEVICE),
	imx_map_entry(MX6Q, L2, MT_DEVICE),
};
/* Install the static I/O mappings declared in imx6_pm_io_desc[]. */
void __init imx6_pm_map_io(void)
{
	iotable_init(imx6_pm_io_desc, ARRAY_SIZE(imx6_pm_io_desc));
}
/* platform_suspend_ops.valid: only standby and mem are supported. */
static int imx6_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;
	default:
		return 0;
	}
}
/* Suspend operations registered with the PM core in imx6_pm_init(). */
static const struct platform_suspend_ops imx6_pm_ops = {
	.enter = imx6_pm_enter,
	.valid = imx6_pm_valid,
};
/*
 * Record the ioremapped CCM base used by all the LPM helpers in this
 * file.  A NULL base is stored as-is but flagged loudly, since every
 * register access here would then fault.
 */
void imx6_pm_set_ccm_base(void __iomem *base)
{
	if (base == NULL)
		pr_warn("ccm base is NULL!\n");

	ccm_base = base;
}
/*
 * Late PM setup: carve a chunk out of the on-chip SRAM (OCRAM) pool,
 * copy the low-level suspend routine (imx6_suspend) into it with
 * fncpy(), register the suspend ops, and latch the cpu_type used by
 * the DSM code.  All failures are logged and leave suspend unregistered.
 *
 * Fixes vs. previous revision:
 *  - drop the device_node reference from of_find_compatible_node()
 *    (was leaked on every path);
 *  - check the __arm_ioremap() result before fncpy() into it;
 *  - free the gen_pool allocation if that mapping fails.
 */
void __init imx6_pm_init(void)
{
	struct device_node *node;
	unsigned long iram_base;
	struct platform_device *pdev;

	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		pr_err("failed to find ocram node!\n");
		return;
	}

	pdev = of_find_device_by_node(node);
	/* Done with the node itself; drop the reference taken above. */
	of_node_put(node);
	if (!pdev) {
		pr_err("failed to find ocram device!\n");
		return;
	}

	iram_pool = dev_get_gen_pool(&pdev->dev);
	if (!iram_pool) {
		pr_err("iram pool unavailable!\n");
		return;
	}

	iram_size = MX6_SUSPEND_IRAM_SIZE;
	iram_base = gen_pool_alloc(iram_pool, iram_size);
	if (!iram_base) {
		pr_err("unable to alloc iram!\n");
		return;
	}

	iram_paddr = gen_pool_virt_to_phys(iram_pool, iram_base);
	/* Non-cached mapping: the routine runs with DDR IO floated. */
	suspend_iram_base = __arm_ioremap(iram_paddr, iram_size,
					  MT_MEMORY_NONCACHED);
	if (!suspend_iram_base) {
		pr_err("unable to map iram for suspend!\n");
		gen_pool_free(iram_pool, iram_base, iram_size);
		return;
	}

	suspend_in_iram_fn = (void *)fncpy(suspend_iram_base,
		&imx6_suspend, iram_size);

	suspend_set_ops(&imx6_pm_ops);

	/* Set cpu_type for DSM */
	if (cpu_is_imx6q())
		cpu_type = MXC_CPU_IMX6Q;
	else if (cpu_is_imx6dl())
		cpu_type = MXC_CPU_IMX6DL;
	else
		cpu_type = MXC_CPU_IMX6SL;
}
|