/*
 * Copyright 2004-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <asm/dpmc.h>

.section .l1.text
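/*
 * _sleep_mode: enter the Blackfin "sleep" operating mode.  The caller
 * passes the SIC_IWR wakeup masks in R0-R2.  CCLK is gated off by
 * setting STOPCK (PLL_CTL bit 3) and idling; after the wakeup event,
 * STOPCK/PDWN are cleared and the PLL is given time to relock before
 * returning.
 */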
ENTRY(_sleep_mode)
	[--SP] = (R7:4, P5:3);
	[--SP] = RETS;

	call _set_sic_iwr;

	P0.H = hi(PLL_CTL);
	P0.L = lo(PLL_CTL);
	R1 = W[P0](z);
	BITSET (R1, 3);
	W[P0] = R1.L;

	CLI R2;
	SSYNC;
	IDLE;
	STI R2;

	call _test_pll_locked;

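	/*
	 * Leave only the PLL wakeup event enabled in SIC_IWR while the
	 * clocks are brought back up below.
	 */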
	R0 = IWR_ENABLE(0);
	R1 = IWR_DISABLE_ALL;
	R2 = IWR_DISABLE_ALL;

	call _set_sic_iwr;

	P0.H = hi(PLL_CTL);
	P0.L = lo(PLL_CTL);
	R7 = w[p0](z);
	BITCLR (R7, 3);
	BITCLR (R7, 5);
	w[p0] = R7.L;
	IDLE;

	bfin_init_pm_bench_cycles;

	call _test_pll_locked;

	RETS = [SP++];
	(R7:4, P5:3) = [SP++];
	RTS;
ENDPROC(_sleep_mode)

/*
 * This func never returns as it puts the part into hibernate, and
 * is only called from do_hibernate, so we don't bother saving or
 * restoring any of the normal C runtime state. When we wake up,
 * the entry point will be in do_hibernate and not here.
 *
 * We accept just one argument -- the value to write to VR_CTL.
 */

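/*
 * On parts with the internal voltage regulator, the VR_CTL value passed
 * in R0 typically has the desired wakeup enable bits set and the FREQ
 * field cleared, so that writing it powers the regulator down once the
 * core idles.
 */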
ENTRY(_hibernate_mode)
	/* Save/setup the regs we need early for minor pipeline optimization */
	R4 = R0;

	P3.H = hi(VR_CTL);
	P3.L = lo(VR_CTL);
	/* Disable all wakeup sources */
	R0 = IWR_DISABLE_ALL;
	R1 = IWR_DISABLE_ALL;
	R2 = IWR_DISABLE_ALL;
	call _set_sic_iwr;
	call _set_dram_srfs;
	SSYNC;

	/* Finally, we climb into our cave to hibernate */
	W[P3] = R4.L;

	bfin_init_pm_bench_cycles;

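	/*
	 * The VR_CTL write above takes effect once the core idles; power
	 * to the core is then removed, so execution never resumes here.
	 * The loop below is only a safety net against a spurious wakeup.
	 */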
	CLI R2;
	IDLE;
.Lforever:
	jump .Lforever;
ENDPROC(_hibernate_mode)

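/*
 * _sleep_deeper: like _sleep_mode, but first drop to the minimum VCO
 * multiplier, the maximum SCLK divider and a reduced core voltage, and
 * put the external memory into self-refresh, so the part sleeps at its
 * lowest operating point.  The previous PLL_DIV, PLL_CTL and VR_CTL
 * settings are restored before returning.  R0-R2 hold the SIC_IWR
 * wakeup masks, as in _sleep_mode.
 */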
ENTRY(_sleep_deeper)
	[--SP] = (R7:4, P5:3);
	[--SP] = RETS;

	CLI R4;

	P3 = R0;
	P4 = R1;
	P5 = R2;

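	/*
	 * While the PLL and voltage are reprogrammed below, only the PLL
	 * wakeup event is enabled; the caller's wakeup masks (saved in
	 * P3-P5 above) are restored just before the final sleep.
	 */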
	R0 = IWR_ENABLE(0);
	R1 = IWR_DISABLE_ALL;
	R2 = IWR_DISABLE_ALL;

	call _set_sic_iwr;
	call _set_dram_srfs;	/* Set SDRAM Self Refresh */

	P0.H = hi(PLL_DIV);
	P0.L = lo(PLL_DIV);
	R6 = W[P0](z);
	R0.L = 0xF;
	W[P0] = R0.l;		/* Set Max VCO to SCLK divider */

	P0.H = hi(PLL_CTL);
	P0.L = lo(PLL_CTL);
	R5 = W[P0](z);
	R0.L = (CONFIG_MIN_VCO_HZ/CONFIG_CLKIN_HZ) << 9;
	W[P0] = R0.l;		/* Set Min CLKIN to VCO multiplier */

	SSYNC;
	IDLE;

	call _test_pll_locked;

	P0.H = hi(VR_CTL);
	P0.L = lo(VR_CTL);
	R7 = W[P0](z);
	R1 = 0x6;
	R1 <<= 16;
	R2 = 0x0404(Z);
	R1 = R1|R2;

	R2 = DEPOSIT(R7, R1);
	W[P0] = R2;		/* Set Min Core Voltage */

	SSYNC;
	IDLE;

	call _test_pll_locked;

	R0 = P3;
	R1 = P4;
	R3 = P5;
	call _set_sic_iwr;	/* Set Awake from IDLE */

	P0.H = hi(PLL_CTL);
	P0.L = lo(PLL_CTL);
	R0 = W[P0](z);
	BITSET (R0, 3);
	W[P0] = R0.L;		/* Turn CCLK OFF */
	SSYNC;
	IDLE;

	call _test_pll_locked;

	R0 = IWR_ENABLE(0);
	R1 = IWR_DISABLE_ALL;
	R2 = IWR_DISABLE_ALL;

	call _set_sic_iwr;	/* Set Awake from IDLE PLL */

	P0.H = hi(VR_CTL);
	P0.L = lo(VR_CTL);
	W[P0] = R7;

	SSYNC;
	IDLE;

	bfin_init_pm_bench_cycles;

	call _test_pll_locked;

	P0.H = hi(PLL_DIV);
	P0.L = lo(PLL_DIV);
	W[P0] = R6;		/* Restore CCLK and SCLK divider */

	P0.H = hi(PLL_CTL);
	P0.L = lo(PLL_CTL);
	w[p0] = R5;		/* Restore VCO multiplier */
	IDLE;
	call _test_pll_locked;

	call _unset_dram_srfs;	/* SDRAM Self Refresh Off */

	STI R4;

	RETS = [SP++];
	(R7:4, P5:3) = [SP++];
	RTS;
ENDPROC(_sleep_deeper)

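/*
 * _set_dram_srfs / _unset_dram_srfs: put external memory into and take
 * it out of self-refresh.  Parts with a DDR controller use the SRREQ
 * bit in EBIU_RSTCTL; SDRAM parts use the SRFS bit in EBIU_SDGCTL and
 * poll EBIU_SDSTAT for the self-refresh status.
 */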
ENTRY(_set_dram_srfs)
	/* set the dram to self refresh mode */
	SSYNC;
#if defined(EBIU_RSTCTL)	/* DDR */
	P0.H = hi(EBIU_RSTCTL);
	P0.L = lo(EBIU_RSTCTL);
	R2 = [P0];
	BITSET(R2, 3);		/* SRREQ enter self-refresh mode */
	[P0] = R2;
	SSYNC;
1:
	R2 = [P0];
	CC = BITTST(R2, 4);	/* wait for self-refresh acknowledge */
	if !CC JUMP 1b;
#else				/* SDRAM */
	P0.L = lo(EBIU_SDGCTL);
	P0.H = hi(EBIU_SDGCTL);
	P1.L = lo(EBIU_SDSTAT);
	P1.H = hi(EBIU_SDSTAT);

	R2 = [P0];
	BITSET(R2, 24);		/* SRFS enter self-refresh mode */
	[P0] = R2;
	SSYNC;

1:
	R2 = w[P1];
	SSYNC;
	cc = BITTST(R2, 1);	/* SDSRA poll self-refresh status */
	if !cc jump 1b;

	R2 = [P0];
	BITCLR(R2, 0);		/* SCTLE disable CLKOUT */
	[P0] = R2;
#endif
	RTS;
ENDPROC(_set_dram_srfs)

ENTRY(_unset_dram_srfs)
	/* take the dram out of self refresh mode */

#if defined(EBIU_RSTCTL)	/* DDR */
	P0.H = hi(EBIU_RSTCTL);
	P0.L = lo(EBIU_RSTCTL);
	R2 = [P0];
	BITCLR(R2, 3);		/* clear SRREQ bit */
	[P0] = R2;
#elif defined(EBIU_SDGCTL)	/* SDRAM */
	/* release CLKOUT from self-refresh */
	P0.L = lo(EBIU_SDGCTL);
	P0.H = hi(EBIU_SDGCTL);

	R2 = [P0];
	BITSET(R2, 0);		/* SCTLE enable CLKOUT */
	[P0] = R2;
	SSYNC;

	/* release SDRAM from self-refresh */
	R2 = [P0];
	BITCLR(R2, 24);		/* clear SRFS bit */
	[P0] = R2;
#endif

	SSYNC;
	RTS;
ENDPROC(_unset_dram_srfs)

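/*
 * _set_sic_iwr: program the system interrupt wakeup registers.  R0-R2
 * hold the masks for SIC_IWR0/1/2 on parts with multiple IWR registers;
 * parts with a single SIC_IWR use only R0.
 */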
ENTRY(_set_sic_iwr)
#ifdef SIC_IWR0
	P0.H = hi(SYSMMR_BASE);
	P0.L = lo(SYSMMR_BASE);
	[P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
	[P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
# ifdef SIC_IWR2
	[P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
# endif
#else
	P0.H = hi(SIC_IWR);
	P0.L = lo(SIC_IWR);
	[P0] = R0;
#endif

	SSYNC;
	RTS;
ENDPROC(_set_sic_iwr)

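/*
 * _test_pll_locked: busy-wait until the PLL_LOCKED bit (PLL_STAT bit 5)
 * is set.
 */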
ENTRY(_test_pll_locked)
	P0.H = hi(PLL_STAT);
	P0.L = lo(PLL_STAT);
1:
	R0 = W[P0] (Z);
	CC = BITTST(R0, 5);
	IF !CC JUMP 1b;
	RTS;
ENDPROC(_test_pll_locked)

.section .text
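/*
 * _do_hibernate: save the CPU registers, system MMRs and core MMRs,
 * leave a magic word, resume address and stack pointer at address 0,
 * then call _hibernate_mode (which never returns).  After a hibernate
 * wakeup the boot code is expected to find the magic word and jump back
 * to .Lpm_resume_here with the saved stack pointer, where the saved
 * state is restored.
 */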
ENTRY(_do_hibernate)
	bfin_cpu_reg_save;
	bfin_sys_mmr_save;
	bfin_core_mmr_save;

	/* Setup args to hibernate mode early for pipeline optimization */
	R0 = M3;
	P1.H = _hibernate_mode;
	P1.L = _hibernate_mode;

	/* Save Magic, return address and Stack Pointer */
	P0 = 0;
	R1.H = 0xDEAD;		/* Hibernate Magic */
	R1.L = 0xBEEF;
	R2.H = .Lpm_resume_here;
	R2.L = .Lpm_resume_here;
	[P0++] = R1;		/* Store Hibernate Magic */
	[P0++] = R2;		/* Save Return Address */
	[P0++] = SP;		/* Save Stack Pointer */

	/* Must use an indirect call as we need to jump to L1 */
	call (P1);		/* Goodbye */

.Lpm_resume_here:

	bfin_core_mmr_restore;
	bfin_sys_mmr_restore;
	bfin_cpu_reg_restore;

	[--sp] = RETI;		/* Clear Global Interrupt Disable */
	SP += 4;

	RTS;
ENDPROC(_do_hibernate)