/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 * Author: Suzuki Poulose <suzuki@in.ibm.com>
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
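/*
 * void call_do_softirq(void *sp);
 *
 * Switch to the stack passed in r3 (the softirq stack), call
 * __do_softirq(), then switch back.  The caller's ksp_limit is parked
 * at offset 8 of the new frame and restored before returning.
 */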
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	stw	r3, THREAD+KSP_LIMIT(r2)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	stw	r10,8(r1)
	bl	__do_softirq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

/*
 * void call_do_irq(struct pt_regs *regs, void *sp);
 */
_GLOBAL(call_do_irq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	stw	r4, THREAD+KSP_LIMIT(r2)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	stw	r10,8(r1)
	bl	__do_irq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
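/*
 * Per the 32-bit ABI the operands arrive as register pairs: A in r3
 * (high word)/r4 (low word), B in r5/r6, with the 64-bit result
 * returned in r3/r4.  The high half of the 128-bit product is built
 * from the 32x32->64 partial products; r7 accumulates the bits below
 * the result so that their carries can be propagated with
 * addc/adde/addze.
 */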
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mullw	r9,r3,r5
	mulhwu	r10,r3,r5
	beq	2f
	mullw	r0,r3,r6
	mulhwu	r8,r3,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r10,r10
2:	addc	r4,r4,r9
	addze	r3,r10
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
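/*
 * r3 = offset to add.  The bl/mflr pair below measures the difference
 * between the link-time and run-time address of label 1b, so the .got2
 * bounds can be found before relocation has been applied.
 */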
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
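/*
 * If the selected cpu_spec has no setup function the cmpwi/beqlr below
 * just returns.  The data offset in r3 is added to both pointers so the
 * call also works before the kernel runs at its linked address.
 */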
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU.  This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
	or	r4,r4,r5	/* Could I have used rlwimi here? */
	mtspr	SPRN_HID1,r4

#ifdef CONFIG_SMP
	/* Store new HID1 image */
	lwz	r6,TASK_CPU(r2)
	slwi	r6,r6,2
#else
	li	r6, 0
#endif
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
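/*
 * real_readb/real_writeb execute with MSR[DR] cleared, so the address
 * in r3 (read) or r4 (write) is treated as a physical address; the
 * sync/isync pairs order the access against the surrounding MSR writes.
 */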
_GLOBAL(real_readb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */


/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
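/*
 * The implementation below is selected per core family: iccci on 4xx,
 * the L1CSR0/L1CSR1 flash-invalidate bits on FSL BookE, and HID0[ICFI]
 * on classic 6xx parts (the 601 is detected via PVR and skipped, since
 * its unified cache needs no flush).
 */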
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_4xx */
	isync
	blr
EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31 - L1_CACHE_SHIFT
	subf	r4,r3,r4
	addi	r4,r4,L1_CACHE_BYTES - 1
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifdef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

#ifndef CONFIG_BOOKE
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr
#endif /* CONFIG_BOOKE */

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
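/*
 * r3 = destination page, r4 = source page.  Both pointers are
 * pre-decremented by 4 so COPY_16_BYTES can use update-form lwzu/stwu;
 * dcbt prefetches the source ahead of the copy while dcbz establishes
 * each destination line in the cache without reading it from memory.
 */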
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
EXPORT_SYMBOL(copy_page)

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
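/*
 * Each routine combines an in-word shift by the count with a
 * word-crossing shift by (count - 32).  slw/srw return zero for shift
 * amounts of 32..63, so whichever term does not apply drops out; the
 * arithmetic variant also masks its sign-extended term (the slw by r8)
 * because sraw keeps producing sign bits for large counts.
 */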
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__ashrdi3)

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
EXPORT_SYMBOL(__ashldi3)

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__lshrdi3)

/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
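/*
 * The high words are compared as signed (cmpw) and the low words as
 * unsigned (cmplw), which is what a signed 64-bit comparison requires.
 */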
_GLOBAL(__cmpdi2)
	cmpw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
EXPORT_SYMBOL(__cmpdi2)
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
EXPORT_SYMBOL(__ucmpdi2)

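/*
 * Byte-reverse the 64-bit value in r3 (high word)/r4 (low word): the
 * two words swap places and each one is byte-swapped with the usual
 * rotlwi/rlwimi sequence.
 */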
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23
	rlwimi	r10,r3,24,16,23
	mr	r3,r9
	mr	r4,r10
	blr
EXPORT_SYMBOL(__bswapdi2)

#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	rlwinm	r1, r1, 0, 0, 31 - THREAD_SHIFT
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC_CORE
/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space(TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
 * 4) Jump to the 1:1 mapping in original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 *
 */
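
/*
 * Register usage in the 44x path below: r29-r31 hold the saved
 * parameters, r23 is the TLB index we are executing from, r24 the
 * index used for the temporary mapping and r25 the saved XLAT
 * (translation) word of our current entry.
 */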

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f				/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in other address space
	 * Fixup  EPN = RPN , TS=other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1			/* Revert back to Original TS */

	li	r8, 0				/* PageNumber */
	li	r6, 3				/* TLB Index, start at 3  */

next_tlb:
	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
	mr	r4, r3				/* RPN = EPN  */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23			/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN   */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1			/* Increment PN */
	addi	r6, r6, 1			/* Increment TLB Index */
	cmpwi	r8, 8				/* Are we done ? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7  */

	bl	1f
1:	mflr	r8
	and	r8, r8, r11			/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10			/* Get our target PageNum */
	or	r8, r8, r5			/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	2f
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */


	/*
	 * Invalidates all the tlb entries by writing to 256 RPNs(r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000			/* specify the way */
	addi	r4, 0, 0			/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000			/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100			/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31		/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1			/* r7 = !TS */

	insrwi	r24, r7, 1, 21			/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000			/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bl	1f
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0				/* TLB Word 0 */
	li	r5, 0				/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0				/* PageIndex */

	xori	r7, r7, 1			/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
						/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5				/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21			/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0			/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8				/* Have we completed ? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000			/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bl	1f
1:	mflr	r4
	addi	r4, r4, (2f-1b)			/* virtual address of 2f */

	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
	not	r10, r11			/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* offset within the current page */

	or	r5, r5, r6			/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */

	clrrwi	r24, r24, 12			/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:


	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

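	/*
	 * Walk the kexec indirection page: each word is a physical page
	 * address whose low bits encode the entry type (destination,
	 * indirection, done or source).  r3 walks the indirection list,
	 * r8 is the current destination, r9 the current source and r6
	 * accumulates an XOR checksum of everything copied.
	 */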
	/* Do the copies */
	li	r6, 0			/* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)		/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR		/* current core we are running on */
	mr	r4, r5			/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif