]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - arch/powerpc/kernel/misc_64.S
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-jammy-kernel.git] / arch / powerpc / kernel / misc_64.S
CommitLineData
2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
9994a338 2/*
9994a338
PM
3 * This file contains miscellaneous low-level functions.
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
7 * and Paul Mackerras.
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
127efeb2 9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9994a338
PM
10 */
11
9994a338
PM
12#include <linux/sys.h>
13#include <asm/unistd.h>
14#include <asm/errno.h>
15#include <asm/processor.h>
16#include <asm/page.h>
17#include <asm/cache.h>
18#include <asm/ppc_asm.h>
19#include <asm/asm-offsets.h>
20#include <asm/cputable.h>
6cb7bfeb 21#include <asm/thread_info.h>
1fc711f7 22#include <asm/kexec.h>
46f52210 23#include <asm/ptrace.h>
cf904e30 24#include <asm/mmu.h>
9445aa1a 25#include <asm/export.h>
2c86cd18 26#include <asm/feature-fixups.h>
9994a338
PM
27
28 .text
29
9994a338
PM
30_GLOBAL(call_do_softirq)
31 mflr r0
32 std r0,16(r1)
4ae2dcb6 33 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
9994a338 34 mr r1,r3
b1576fec 35 bl __do_softirq
9994a338
PM
36 ld r1,0(r1)
37 ld r0,16(r1)
38 mtlr r0
39 blr
40
/*
 * void call_do_irq(struct pt_regs *regs, void *sp)
 *
 * Run __do_irq() on an alternate stack, then switch back to the
 * caller's stack.  r3 is left untouched and is therefore forwarded
 * to __do_irq() as its first argument.
 *
 * In:      r3 = first argument for __do_irq (passed through)
 *          r4 = base of the alternate (irq) stack
 * Clobber: r0, r4; LR is saved in and restored from the caller's frame
 */
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)	/* save LR in the caller's frame */
	/* build a frame near the top of the new stack, back-chained to
	 * the old r1 (stdu also advances r4 to the new frame) */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4		/* switch stacks */
	bl	__do_irq
	ld	r1,0(r1)	/* follow the back-chain: restore old stack */
	ld	r0,16(r1)
	mtlr	r0
	blr
9994a338 51
9994a338
PM
	.section ".toc","aw"
/* TOC entry holding the address of the kernel's ppc64_caches structure,
 * used below to read the runtime-discovered cache geometry. */
PPC64_CACHES:
	.tc ppc64_caches[TC],ppc64_caches
	.section ".text"
56
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start through stop-1 inclusive
 *
 * In:      r3 = start, r4 = stop
 * Clobber: r5-r10, ctr, cr0
 */

_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr				/* icache is coherent: nothing to flush */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get cache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of cache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6			/* write dcache block to memory */
	add	r6,r6,r7
	bdnz	1b
	sync				/* order dcbst before icbi below */

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1BLOCKSIZE(r10)	/* Get Icache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of Icache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6			/* invalidate icache block */
	add	r6,r6,r7
	bdnz	2b
	isync				/* discard any prefetched instructions */
	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)
6f698df1 111
9994a338
PM
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start to stop-1 inclusive
 *
 * In:      r3 = start, r4 = stop
 * Clobber: r5-r10, ctr, cr0
 */
_GLOBAL_TOC(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6			/* write dcache block to memory */
	add	r6,r6,r7
	bdnz	0b
	sync				/* wait for all stores to complete */
	blr
EXPORT_SYMBOL(flush_dcache_range)
9994a338 142
9994a338
PM
/*
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 *
 * Like flush_dcache_range(), but uses dcbf, which both writes modified
 * blocks back to memory AND invalidates them in the data cache.
 *
 * In:      r3 = start, r4 = stop (flushes start through stop-1)
 * Clobber: r5-r10, ctr, cr0
 */
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync				/* order prior accesses vs the flush */
	isync
	mtctr	r8
0:	dcbf	0,r6			/* flush + invalidate dcache block */
	add	r6,r6,r7
	bdnz	0b
	sync				/* wait for flushes to complete */
	isync
	blr
162
163
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 *
 * In:      r3 = any address within the page (page-aligned below)
 * Clobber: r3-r7, ctr
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr				/* icache is coherent: nothing to do */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT	/* Page align */
	lwz	r4,DCACHEL1BLOCKSPERPAGE(r7)	/* Get # dcache blocks per page */
	lwz	r5,DCACHEL1BLOCKSIZE(r7)	/* Get dcache block size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6			/* write dcache block to memory */
	add	r6,r6,r5
	bdnz	0b
	sync				/* order dcbst before icbi below */

/* Now invalidate the icache */

	lwz	r4,ICACHEL1BLOCKSPERPAGE(r7)	/* Get # icache blocks per page */
	lwz	r5,ICACHEL1BLOCKSIZE(r7)	/* Get icache block size */
	mtctr	r4
1:	icbi	0,r3			/* invalidate icache block */
	add	r3,r3,r5
	bdnz	1b
	isync				/* discard prefetched instructions */
	blr
9994a338 205
/*
 * unsigned long long __bswapdi2(unsigned long long x)
 *
 * Compiler runtime helper: byte-reverse the 64-bit value in r3.
 * Each 32-bit half is byte-swapped using the classic rlwinm/rlwimi
 * sequence, then the two swapped halves are recombined in the
 * opposite order.  Result returned in r3; clobbers r7-r9.
 */
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high word of input */
	rlwinm	r7,r3,8,0xffffffff	/* rotate low word left 8: 2 bytes placed */
	rlwimi	r7,r3,24,0,7		/* fix up byte 0 of swapped low word */
	rlwinm	r9,r8,8,0xffffffff	/* same rotate for the high word */
	rlwimi	r7,r3,24,16,23		/* fix up byte 2 of swapped low word */
	rlwimi	r9,r8,24,0,7		/* fix up byte 0 of swapped high word */
	rlwimi	r9,r8,24,16,23		/* fix up byte 2 of swapped high word */
	sldi	r7,r7,32		/* swapped low word becomes new high word */
	or	r3,r7,r9		/* combine halves: result in r3 */
	blr
3f639ee8 218
7191b615 219
2d6f0c3a 220#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
7191b615
BH
/*
 * rmci_on: set bit 0x100 (shifted up 32) in HID4 and flush the SLB.
 * NOTE(review): on 970-class CPUs this HID4 bit appears to enable
 * cache-inhibited real-mode accesses (the same bit real_readb/real_writeb
 * toggle below) -- confirm against the 970 user manual.
 * Clobbers r3, r5.
 */
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* r3 = 0x100 << 32: HID4 bit mask */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3		/* set the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia				/* invalidate SLB after HID4 change */
	isync
	sync
	blr
235
/*
 * rmci_off: clear bit 0x100 (shifted up 32) in HID4 and flush the SLB.
 * Inverse of rmci_on above.  Clobbers r3, r5.
 */
_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* r3 = 0x100 << 32: HID4 bit mask */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3		/* clear the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia				/* invalidate SLB after HID4 change */
	isync
	sync
	blr
2d6f0c3a
ME
250#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
251
252#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
7191b615 253
9994a338
PM
/*
 * Do an IO access in real mode
 *
 * u8 real_readb(volatile u8 __iomem *addr)
 *
 * In:      r3 = address.  Out: r3 = byte read.
 * Turns off MSR_DR (data translation), sets the HID4 0x100<<32 bit
 * (cache-inhibited real mode -- see rmci_on), performs the load, then
 * restores HID4 and the MSR.  Clobbers r0, r5-r7.
 */
_GLOBAL(real_readb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set bit 0x100<<32 via double rotate */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia				/* SLB must be flushed around HID4 change */
	isync
	lbz	r3,0(r3)		/* the actual real-mode load */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR (translation back on) */
	sync
	isync
	blr
284
 /*
  * Do an IO access in real mode
  *
  * void real_writeb(u8 data, volatile u8 __iomem *addr)
  *
  * In:      r3 = byte to store, r4 = address.
  * Same HID4/MSR_DR dance as real_readb above, but performs a store.
  * Clobbers r0, r5-r7.
  */
_GLOBAL(real_writeb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4		/* save HID4 */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set bit 0x100<<32 via double rotate */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia				/* SLB must be flushed around HID4 change */
	isync
	stb	r3,0(r4)		/* the actual real-mode store */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore MSR (translation back on) */
	sync
	isync
	blr
315#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
316
39c870d5
OJ
317#ifdef CONFIG_PPC_PASEMI
318
39c870d5
OJ
/*
 * u8 real_205_readb(volatile u8 __iomem *addr)
 *
 * Real-mode IO byte read for PA Semi (PWRficient) parts: turn off data
 * translation and use the cache-inhibited lbzcix instruction instead of
 * the HID4 trick used by real_readb above.
 * In: r3 = address.  Out: r3 = byte.  Clobbers r0, r7.
 */
_GLOBAL(real_205_readb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	LBZCIX(R3,R0,R3)		/* cache-inhibited load: r3 = *(u8 *)r3 */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr
333
/*
 * void real_205_writeb(u8 data, volatile u8 __iomem *addr)
 *
 * Real-mode IO byte write counterpart of real_205_readb, using the
 * cache-inhibited stbcix instruction.
 * In: r3 = byte to store, r4 = address.  Clobbers r0, r7.
 */
_GLOBAL(real_205_writeb)
	mfmsr	r7			/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0			/* data translation off */
	sync
	isync
	STBCIX(R3,R0,R4)		/* cache-inhibited store: *(u8 *)r4 = r3 */
	isync
	mtmsrd	r7			/* restore MSR */
	sync
	isync
	blr
348
349#endif /* CONFIG_PPC_PASEMI */
350
351
e48f7eb2 352#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
4350147a
BH
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4			/* save MSR */
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE		/* r0 = MSR with EE cleared */
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000		/* RW = read */

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3		/* issue the read command */
	isync
	mfspr	r3,SPRN_SCOMD		/* fetch the data */
	isync
	mfspr	r0,SPRN_SCOMC		/* read status (not checked, see above) */
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr
395
396
/*
 * void scom970_write(unsigned int address, unsigned long value)
 *
 * In: r3 = 24-bit SCOM register address, r4 = value to write.
 * Runs with external interrupts disabled; the trailing read of SCOMC
 * waits for completion but, as documented above scom970_read, status
 * bits are not checked.  Clobbers r0, r3, r5.
 */
_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5			/* save MSR */
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE		/* r0 = MSR with EE cleared */
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */
	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4		/* write data */
	isync
	mtspr	SPRN_SCOMC,r3		/* write command */
	isync
	mfspr	r3,SPRN_SCOMC		/* read back status; was bare "3",
					 * spelled r3 per file convention */
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
e48f7eb2 421#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
4350147a 422
9994a338
PM
/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5			/* r5 = runtime address of label 1 */
	addi	r5,r5,kexec_flag-1b	/* ...so r5 = &kexec_flag, PIC-safe */

99:	HMT_LOW				/* lower thread priority while spinning */
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b			/* spin until kexec_sequence sets the flag */
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid			/* enter the new kernel's slave entry at 0x60 */
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60		/* absolute branch to the slave entry at 0x60 */
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real anyways
 */
kexec_flag:
	.long	0
462
463
da665885 464#ifdef CONFIG_KEXEC_CORE
cf904e30
TC
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 *
 * Clobbers r9, r10.  Called via real_mode below; returns with blr.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9		/* valid, protected, 1GB page */

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9		/* effective address 0 */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9		/* physical address 0, RWX */
	li	r9,0
	mtspr	SPRN_MAS7,r9		/* upper physical address bits = 0 */

	tlbwe
	isync
	blr
#endif
9994a338
PM
510
/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)	/* r3 = phys cpu id (arg for kexec_wait) */
	bl	real_mode		/* MMU off; r3 preserved by real_mode */

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)	/* advertise we no longer need the paca */
	SYNC				/* make the state update visible */

	b	kexec_wait		/* never returns */
9994a338
PM
530
/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 *
 * Clobbers r9-r12 (Book3S path).  "Returns" to LR with translation off.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* r9 = MSR with RI (recoverable intr) off */
	andc	r10,r12,r10	/* r10 = MSR with DR/IR (translation) off */

	mtmsrd	r9,1		/* clear RI before changing SRR0/1 */
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid			/* "return" to caller with the MMU off */
#endif
9994a338
PM
555
/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *	          copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 *
 * In:  r3 = newstack, r4 = start (real address of new kernel entry),
 *      r5 = image, r6 = control (unused), r7 = clear_all function
 *      (descriptor under ELFv1), r8 = nonzero to turn the MMU off
 *      before copying.  Never returns.
 */

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)		/* terminate the back-chain */

BEGIN_FTR_SECTION
	/*
	 * This is the best time to turn AMR/IAMR off.
	 * key 0 is used in radix for supervisor<->user
	 * protection, but on hash key 0 is reserved
	 * ideally we want to enter with a clean state.
	 * NOTE, we rely on r0 being 0 from above.
	 */
	mtspr	SPRN_IAMR,r0
BEGIN_FTR_SECTION_NESTED(42)
	mtspr	SPRN_AMOR,r0
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15		/* clear MSR_EE */
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)	/* set kexec_flag; base reg spelled r5
					 * (was a bare "5") per file convention */

	cmpdi	r27,0
	beq	1f			/* no clear_all() callback provided */

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *		slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *		start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4	/* was bare "4": use the rN form for clarity */
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */