/*
 * This file contains miscellaneous low-level functions.
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

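/*
 * call_do_softirq(sp): switch to the softirq stack passed in r3, run
 * __do_softirq() there, then switch back to the caller's stack.  The
 * C-side callers live in arch/powerpc/kernel/irq.c.
 */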
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

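/*
 * call_do_irq(regs, sp): as above, but switches to the hard IRQ stack
 * passed in r4 and calls __do_irq(regs); r3 is left untouched so it
 * still carries the pt_regs argument.
 */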
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	bl	__do_irq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start through stop-1 inclusive
 */

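/*
 * Illustrative C-side use (a hypothetical snippet, not from this file):
 * after storing a new instruction, make it visible to instruction
 * fetch before executing it:
 *
 *	unsigned long addr = (unsigned long)p;
 *	*p = new_insn;			/* p is a u32 * */
 *	flush_icache_range(addr, addr + sizeof(u32));
 */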
_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get cache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of cache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1BLOCKSIZE(r10)	/* Get Icache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of Icache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)

/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start to stop-1 inclusive
 */
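/*
 * Illustrative C-side use (a hypothetical snippet): push a buffer out
 * to memory before a device that does not snoop the d-cache reads it:
 *
 *	unsigned long start = (unsigned long)buf;
 *	flush_dcache_range(start, start + len);
 */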
_GLOBAL_TOC(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr
EXPORT_SYMBOL(flush_dcache_range)
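
/*
 * Like flush_dcache_range(), but uses dcbf, which also invalidates the
 * lines after writing them back, so later loads refetch from memory.
 *
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 */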
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
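/*
 * Illustrative C-side use (a hypothetical snippet): after writing code
 * into a page, make it coherent for instruction fetch:
 *
 *	__flush_dcache_icache(page_address(page));
 */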
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1BLOCKSPERPAGE(r7)	/* Get # dcache blocks per page */
	lwz	r5,DCACHEL1BLOCKSIZE(r7)	/* Get dcache block size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1BLOCKSPERPAGE(r7)	/* Get # icache blocks per page */
	lwz	r5,ICACHEL1BLOCKSIZE(r7)	/* Get icache block size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr

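/*
 * 64-bit byte swap, using the libgcc helper name __bswapdi2 so the
 * compiler can call it.  The doubleword is split into two 32-bit
 * halves, each half is byte-reversed with rlwinm/rlwimi, and the
 * halves are recombined in swapped order.
 */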
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
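/*
 * rmci_on/rmci_off: set/clear what appears to be the real-mode
 * cache-inhibit bit (0x100 in the upper word of HID4) on 970-class
 * CPUs, with an SLB flush around the change.
 */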
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do a byte I/O read in real mode (MSR_DR turned off around the access)
 *
 *	u8 real_readb(volatile u8 __iomem *addr)
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do a byte I/O write in real mode (MSR_DR turned off around the access)
 *
 *	void real_writeb(u8 data, volatile u8 __iomem *addr)
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

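/*
 * As real_readb/real_writeb above, but using the Power ISA 2.05
 * cache-inhibited byte load/store instructions (lbzcix/stbcix),
 * hence the "205" in the names.
 */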
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */

#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
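/*
 * Illustrative C-side use ('reg' and 'bit' are hypothetical, shown
 * only for the shape of the API):
 *
 *	unsigned long v = scom970_read(reg);
 *	scom970_write(reg, v | bit);
 */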
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr

_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */
	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4	/* write data */
	isync
	mtspr	SPRN_SCOMC,r3	/* write command */
	isync
	mfspr	3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away and
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to set up the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
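/*
 * The C-side declaration is roughly the following (a sketch; see
 * asm/kexec.h for the authoritative prototype):
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void),
 *			    bool copy_with_mmu_off);
 */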

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

BEGIN_FTR_SECTION
	/*
	 * This is the best time to turn AMR/IAMR off.
	 * key 0 is used in radix for supervisor<->user
	 * protection, but on hash key 0 is reserved;
	 * ideally we want to enter with a clean state.
	 * NOTE, we rely on r0 being 0 from above.
	 */
	mtspr	SPRN_IAMR,r0
BEGIN_FTR_SECTION_NESTED(42)
	mtspr	SPRN_AMOR,r0
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30	/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *		slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *		start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */