]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame_incremental - arch/powerpc/kernel/head_64.S
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux...
[mirror_ubuntu-focal-kernel.git] / arch / powerpc / kernel / head_64.S
... / ...
CommitLineData
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 *
12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
14 *
15 * This file contains the entry point for the 64-bit kernel along
16 * with some early initialization code common to all 64-bit powerpc
17 * variants.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24
25#include <linux/threads.h>
26#include <linux/init.h>
27#include <asm/reg.h>
28#include <asm/page.h>
29#include <asm/mmu.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/bug.h>
33#include <asm/cputable.h>
34#include <asm/setup.h>
35#include <asm/hvcall.h>
36#include <asm/thread_info.h>
37#include <asm/firmware.h>
38#include <asm/page_64.h>
39#include <asm/irqflags.h>
40#include <asm/kvm_book3s_asm.h>
41#include <asm/ptrace.h>
42#include <asm/hw_irq.h>
43#include <asm/cputhreads.h>
44
45/* The physical memory is laid out such that the secondary processor
46 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
47 * using the layout described in exceptions-64s.S
48 */
49
50/*
51 * Entering into this code we make the following assumptions:
52 *
53 * For pSeries or server processors:
54 * 1. The MMU is off & open firmware is running in real mode.
55 * 2. The kernel is entered at __start
56 * -or- For OPAL entry:
57 * 1. The MMU is off, processor in HV mode, primary CPU enters at 0
58 * with device-tree in gpr3. We also get OPAL base in r8 and
59 * entry in r9 for debugging purposes
60 * 2. Secondary processors enter at 0x60 with PIR in gpr3
61 *
62 * For Book3E processors:
63 * 1. The MMU is on running in AS0 in a state defined in ePAPR
64 * 2. The kernel is entered at __start
65 */
66
67 .text
68 .globl _stext
69_stext:
70_GLOBAL(__start)
71 /* NOP this out unconditionally */
72BEGIN_FTR_SECTION
73 FIXUP_ENDIAN
74 b __start_initialization_multiplatform
75END_FTR_SECTION(0, 1)
76
77 /* Catch branch to 0 in real mode */
78 trap
79
80 /* Secondary processors spin on this value until it becomes non-zero.
81 * When non-zero, it contains the real address of the function the cpu
82 * should jump to.
83 */
84 .balign 8
85 .globl __secondary_hold_spinloop
86__secondary_hold_spinloop:
87 .llong 0x0
88
89 /* Secondary processors write this value with their cpu # */
90 /* after they enter the spin loop immediately below. */
91 .globl __secondary_hold_acknowledge
92__secondary_hold_acknowledge:
93 .llong 0x0
94
95#ifdef CONFIG_RELOCATABLE
96 /* This flag is set to 1 by a loader if the kernel should run
97 * at the loaded address instead of the linked address. This
98 * is used by kexec-tools to keep the kdump kernel in the
99 * crash_kernel region. The loader is responsible for
100 * observing the alignment requirement.
101 */
102 /* Do not move this variable as kexec-tools knows about it. */
103 . = 0x5c
104 .globl __run_at_load
105__run_at_load:
106 .long 0x72756e30 /* "run0" -- relocate to 0 by default */
107#endif
108
109 . = 0x60
110/*
111 * The following code is used to hold secondary processors
112 * in a spin loop after they have entered the kernel, but
113 * before the bulk of the kernel has been relocated. This code
114 * is relocated to physical address 0x60 before prom_init is run.
115 * All of it must fit below the first exception vector at 0x100.
116 * Use .globl here not _GLOBAL because we want __secondary_hold
117 * to be the actual text address, not a descriptor.
118 */
119 .globl __secondary_hold
120__secondary_hold:
121 FIXUP_ENDIAN
122#ifndef CONFIG_PPC_BOOK3E
123 mfmsr r24
124 ori r24,r24,MSR_RI
125 mtmsrd r24 /* RI on */
126#endif
127 /* Grab our physical cpu number */
128 mr r24,r3
129 /* stash r4 for book3e */
130 mr r25,r4
131
132 /* Tell the master cpu we're here */
133 /* Relocation is off & we are located at an address less */
134 /* than 0x100, so only need to grab low order offset. */
135 std r24,__secondary_hold_acknowledge-_stext(0)
136 sync
137
138 li r26,0
139#ifdef CONFIG_PPC_BOOK3E
140 tovirt(r26,r26)
141#endif
142 /* All secondary cpus wait here until told to start. */
143100: ld r12,__secondary_hold_spinloop-_stext(r26)
144 cmpdi 0,r12,0
145 beq 100b
146
147#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
148#ifdef CONFIG_PPC_BOOK3E
149 tovirt(r12,r12)
150#endif
151 mtctr r12
152 mr r3,r24
153 /*
154 * it may be the case that other platforms have r4 right to
155 * begin with, this gives us some safety in case it is not
156 */
157#ifdef CONFIG_PPC_BOOK3E
158 mr r4,r25
159#else
160 li r4,0
161#endif
162 /* Make sure that patched code is visible */
163 isync
164 bctr /* jump to the address the master cpu published */
165#else
166 BUG_OPCODE
167#endif
168
169/* This value is used to mark exception frames on the stack. */
170 .section ".toc","aw"
171exception_marker:
172 .tc ID_72656773_68657265[TC],0x7265677368657265 /* ASCII "regshere" */
173 .text
174
175/*
176 * On server, we include the exception vectors code here as it
177 * relies on absolute addressing which is only possible within
178 * this compilation unit
179 */
180#ifdef CONFIG_PPC_BOOK3S
181#include "exceptions-64s.S"
182#endif
183
184#ifdef CONFIG_PPC_BOOK3E
185/*
186 * The booting_thread_hwid holds the thread id we want to boot in cpu
187 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
188 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
189 * bit field.
190 */
191 .globl booting_thread_hwid
192booting_thread_hwid:
193 .long INVALID_THREAD_HWID
194 .align 3
195/*
196 * start a thread in the same core
197 * input parameters:
198 * r3 = the thread physical id (only ids 0 and 1 are handled)
199 * r4 = the entry point where thread starts
200 */
201_GLOBAL(book3e_start_thread)
202 LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
203 cmpi 0, r3, 0
204 beq 10f
205 cmpi 0, r3, 1
206 beq 11f
207 /* If the thread id is invalid, just exit. */
208 b 13f
20910:
210 mttmr TMRN_IMSR0, r5 /* thread 0: initial MSR = MSR_KERNEL */
211 mttmr TMRN_INIA0, r4 /* thread 0: initial instruction address */
212 b 12f
21311:
214 mttmr TMRN_IMSR1, r5 /* thread 1: initial MSR */
215 mttmr TMRN_INIA1, r4 /* thread 1: initial instruction address */
21612:
217 isync
218 li r6, 1
219 sld r6, r6, r3 /* r6 = 1 << thread id */
220 mtspr SPRN_TENS, r6 /* Thread Enable Set: release the thread */
22113:
222 blr
223
224/*
225 * stop a thread in the same core
226 * input parameter:
227 * r3 = the thread physical id (only ids 0 and 1 are handled)
228 */
229_GLOBAL(book3e_stop_thread)
230 cmpi 0, r3, 0
231 beq 10f
232 cmpi 0, r3, 1
233 beq 10f
234 /* If the thread id is invalid, just exit. */
235 b 13f
23610:
237 li r4, 1
238 sld r4, r4, r3 /* r4 = 1 << thread id */
239 mtspr SPRN_TENC, r4 /* Thread Enable Clear: halt the thread */
24013:
241 blr
242
243_GLOBAL(fsl_secondary_thread_init)
244 mfspr r4,SPRN_BUCSR /* old BUCSR tells us if this thread ran before */
245
246 /* Enable branch prediction */
247 lis r3,BUCSR_INIT@h
248 ori r3,r3,BUCSR_INIT@l
249 mtspr SPRN_BUCSR,r3
250 isync
251
252 /*
253 * Fix PIR to match the linear numbering in the device tree.
254 *
255 * On e6500, the reset value of PIR uses the low three bits for
256 * the thread within a core, and the upper bits for the core
257 * number. There are two threads per core, so shift everything
258 * but the low bit right by two bits so that the cpu numbering is
259 * continuous.
260 *
261 * If the old value of BUCSR is non-zero, this thread has run
262 * before. Thus, we assume we are coming from kexec or a similar
263 * scenario, and PIR is already set to the correct value. This
264 * is a bit of a hack, but there are limited opportunities for
265 * getting information into the thread and the alternatives
266 * seemed like they'd be overkill. We can't tell just by looking
267 * at the old PIR value which state it's in, since the same value
268 * could be valid for one thread out of reset and for a different
269 * thread in Linux.
270 */
271
272 mfspr r3, SPRN_PIR
273 cmpwi r4,0 /* BUCSR was non-zero -> PIR already fixed up */
274 bne 1f
275 rlwimi r3, r3, 30, 2, 30 /* r3 = (r3 >> 2) with low (thread) bit kept */
276 mtspr SPRN_PIR, r3
2771:
278#endif
279
/*
 * Entry for a secondary hardware thread: switch to 64-bit mode, get a
 * usable TOC, do per-thread Book3E init if applicable, then fall into
 * the common secondary startup path.
 * At entry, r3 = this thread's physical cpu id (kept in r24 across calls).
 */
280_GLOBAL(generic_secondary_thread_init)
281 mr r24,r3
282
283 /* turn on 64-bit mode */
284 bl enable_64b_mode
285
286 /* get a valid TOC pointer, wherever we're mapped at */
287 bl relative_toc
288 tovirt(r2,r2) /* TOC must be a virtual address (see relative_toc note) */
289
290#ifdef CONFIG_PPC_BOOK3E
291 /* Book3E initialization */
292 mr r3,r24
293 bl book3e_secondary_thread_init
294#endif
295 b generic_secondary_common_init
296
297/*
298 * On pSeries and most other platforms, secondary processors spin
299 * in the following code.
300 * At entry, r3 = this processor's number (physical cpu id)
301 *
302 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
303 * this core already exists (setup via some other mechanism such
304 * as SCOM before entry).
305 */
306_GLOBAL(generic_secondary_smp_init)
307 FIXUP_ENDIAN
308 mr r24,r3 /* r24 = physical cpu id, expected by common init */
309 mr r25,r4
310
311 /* turn on 64-bit mode */
312 bl enable_64b_mode
313
314 /* get a valid TOC pointer, wherever we're mapped at */
315 bl relative_toc
316 tovirt(r2,r2)
317
318#ifdef CONFIG_PPC_BOOK3E
319 /* Book3E initialization */
320 mr r3,r24
321 mr r4,r25
322 bl book3e_secondary_core_init
323
324/*
325 * After common core init has finished, check if the current thread is the
326 * one we wanted to boot. If not, start the specified thread and stop the
327 * current thread.
328 */
329 LOAD_REG_ADDR(r4, booting_thread_hwid)
330 lwz r3, 0(r4)
331 li r5, INVALID_THREAD_HWID
332 cmpw r3, r5
333 beq 20f /* no specific thread was requested */
334
335 /*
336 * The value of booting_thread_hwid has been stored in r3,
337 * so make it invalid.
338 */
339 stw r5, 0(r4)
340
341 /*
342 * Get the current thread id and check if it is the one we wanted.
343 * If not, start the one specified in booting_thread_hwid and stop
344 * the current thread.
345 */
346 mfspr r8, SPRN_TIR
347 cmpw r3, r8
348 beq 20f
349
350 /* start the specified thread */
351 LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
352 ld r4, 0(r5) /* entry point via descriptor deref, same pattern as the CPU_SPEC_RESTORE call */
353 bl book3e_start_thread
354
355 /* stop the current thread */
356 mr r3, r8
357 bl book3e_stop_thread
35810:
359 b 10b /* not reached: this thread has been disabled */
36020:
361#endif
362
363generic_secondary_common_init:
364 /* Set up a paca value for this processor. Since we have the
365 * physical cpu id in r24, we need to search the pacas to find
366 * which logical id maps to our physical one.
367 */
368 LOAD_REG_ADDR(r13, paca) /* Load paca pointer */
369 ld r13,0(r13) /* Get base vaddr of paca array */
370#ifndef CONFIG_SMP
371 addi r13,r13,PACA_SIZE /* know r13 if used accidentally */
372 b kexec_wait /* wait for next kernel if !SMP */
373#else
374 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
375 lwz r7,0(r7) /* also the max paca allocated */
376 li r5,0 /* logical cpu id */
3771: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
378 cmpw r6,r24 /* Compare to our id */
379 beq 2f
380 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
381 addi r5,r5,1
382 cmpw r5,r7 /* Check if more pacas exist */
383 blt 1b
384
385 mr r3,r24 /* not found, copy phys to r3 */
386 b kexec_wait /* next kernel might do better */
387
3882: SET_PACA(r13)
389#ifdef CONFIG_PPC_BOOK3E
390 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
391 mtspr SPRN_SPRG_TLB_EXFRAME,r12
392#endif
393
394 /* From now on, r24 is expected to be logical cpuid */
395 mr r24,r5
396
397 /* See if we need to call a cpu state restore handler */
398 LOAD_REG_ADDR(r23, cur_cpu_spec)
399 ld r23,0(r23)
400 ld r12,CPU_SPEC_RESTORE(r23)
401 cmpdi 0,r12,0 /* no restore handler registered? */
402 beq 3f
403#if !defined(_CALL_ELF) || _CALL_ELF != 2
404 ld r12,0(r12) /* ELFv1: dereference the function descriptor */
405#endif
406 mtctr r12
407 bctrl
408
4093: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
410 lwarx r4,0,r3
411 subi r4,r4,1
412 stwcx. r4,0,r3
413 bne 3b /* lost the reservation: retry the atomic decrement */
414 isync
415
4164: HMT_LOW
417 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
418 /* start. */
419 cmpwi 0,r23,0
420 beq 4b /* Loop until told to go */
421
422 sync /* order paca.run and cur_cpu_spec */
423 isync /* In case code patching happened */
424
425 /* Create a temp kernel stack for use before relocation is on. */
426 ld r1,PACAEMERGSP(r13)
427 subi r1,r1,STACK_FRAME_OVERHEAD
428
429 b __secondary_start
430#endif /* SMP */
431
432/*
433 * Turn the MMU off.
434 * Assumes we're mapped EA == RA if the MMU is on.
 * Returns to the caller via rfid with MSR[IR]/MSR[DR] cleared;
 * returns immediately (blr) if translation is already off.
 * Clobbers r0, r3, r4.
435 */
436#ifdef CONFIG_PPC_BOOK3S
437__mmu_off:
438 mfmsr r3
439 andi. r0,r3,MSR_IR|MSR_DR /* r0 = translation bits currently set */
440 beqlr /* MMU already off: plain return */
441 mflr r4
442 andc r3,r3,r0 /* clear IR and DR in the MSR image */
443 mtspr SPRN_SRR0,r4 /* resume at our return address... */
444 mtspr SPRN_SRR1,r3 /* ...with the new MSR */
445 sync
446 rfid
447 b . /* prevent speculative execution */
448#endif
449
450
451/*
452 * Here is our main kernel entry point. We support currently 2 kind of entries
453 * depending on the value of r5.
454 *
455 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
456 * in r3...r7
457 *
458 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
459 * DT block, r4 is a physical pointer to the kernel itself
460 *
461 */
462__start_initialization_multiplatform:
463 /* Make sure we are running in 64 bits mode */
464 bl enable_64b_mode
465
466 /* Get TOC pointer (current runtime address) */
467 bl relative_toc
468
469 /* find out where we are now */
470 bcl 20,31,$+4 /* get PC: branch-and-link to the next instruction */
4710: mflr r26 /* r26 = runtime addr here */
472 addis r26,r26,(_stext - 0b)@ha
473 addi r26,r26,(_stext - 0b)@l /* current runtime base addr */
474
475 /*
476 * Are we booted from a PROM Of-type client-interface ?
477 */
478 cmpldi cr0,r5,0
479 beq 1f
480 b __boot_from_prom /* yes -> prom */
4811:
482 /* Save parameters */
483 mr r31,r3 /* device-tree pointer */
484 mr r30,r4 /* kernel physical load address */
485#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
486 /* Save OPAL entry */
487 mr r28,r8
488 mr r29,r9
489#endif
490
491#ifdef CONFIG_PPC_BOOK3E
492 bl start_initialization_book3e
493 b __after_prom_start
494#else
495 /* Setup some critical 970 SPRs before switching MMU off */
496 mfspr r0,SPRN_PVR
497 srwi r0,r0,16 /* keep only the PVR version field */
498 cmpwi r0,0x39 /* 970 */
499 beq 1f
500 cmpwi r0,0x3c /* 970FX */
501 beq 1f
502 cmpwi r0,0x44 /* 970MP */
503 beq 1f
504 cmpwi r0,0x45 /* 970GX */
505 bne 2f
5061: bl __cpu_preinit_ppc970
5072:
508
509 /* Switch off MMU if not already off */
510 bl __mmu_off
511 b __after_prom_start
512#endif /* CONFIG_PPC_BOOK3E */
513
514__boot_from_prom:
515#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
516 /* Save parameters (r3-r7 from the OF client interface) */
517 mr r31,r3
518 mr r30,r4
519 mr r29,r5
520 mr r28,r6
521 mr r27,r7
522
523 /*
524 * Align the stack to 16-byte boundary
525 * Depending on the size and layout of the ELF sections in the initial
526 * boot binary, the stack pointer may be unaligned on PowerMac
527 */
528 rldicr r1,r1,0,59 /* clear the low 4 bits of r1 */
529
530#ifdef CONFIG_RELOCATABLE
531 /* Relocate code for where we are now */
532 mr r3,r26
533 bl relocate
534#endif
535
536 /* Restore parameters */
537 mr r3,r31
538 mr r4,r30
539 mr r5,r29
540 mr r6,r28
541 mr r7,r27
542
543 /* Do all of the interaction with OF client interface */
544 mr r8,r26 /* pass our runtime base address */
545 bl prom_init
546#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
547
548 /* We never return. We also hit that trap if trying to boot
549 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
550 trap
551
552__after_prom_start:
553#ifdef CONFIG_RELOCATABLE
554 /* process relocations for the final address of the kernel */
555 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
556 sldi r25,r25,32
557#if defined(CONFIG_PPC_BOOK3E)
558 tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
559#endif
560 lwz r7,__run_at_load-_stext(r26)
561#if defined(CONFIG_PPC_BOOK3E)
562 tophys(r26,r26)
563#endif
564 cmplwi cr0,r7,1 /* flagged to stay where we are ? */
565 bne 1f
566 add r25,r25,r26 /* staying put: final vaddr = virt base + load addr */
5671: mr r3,r25
568 bl relocate
569#if defined(CONFIG_PPC_BOOK3E)
570 /* IVPR needs to be set after relocation. */
571 bl init_core_book3e
572#endif
573#endif
574
575/*
576 * We need to run with _stext at physical address PHYSICAL_START.
577 * This will leave some code in the first 256B of
578 * real memory, which are reserved for software use.
579 *
580 * Note: This process overwrites the OF exception vectors.
581 */
582 li r3,0 /* target addr */
583#ifdef CONFIG_PPC_BOOK3E
584 tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
585#endif
586 mr. r4,r26 /* In some cases the loader may */
587#if defined(CONFIG_PPC_BOOK3E)
588 tovirt(r4,r4)
589#endif
590 beq 9f /* have already put us at zero */
591 li r6,0x100 /* Start offset, the first 0x100 */
592 /* bytes were copied earlier. */
593
594#ifdef CONFIG_RELOCATABLE
595/*
596 * Check if the kernel has to be running as relocatable kernel based on the
597 * variable __run_at_load, if it is set the kernel is treated as relocatable
598 * kernel, otherwise it will be moved to PHYSICAL_START
599 */
600#if defined(CONFIG_PPC_BOOK3E)
601 tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
602#endif
603 lwz r7,__run_at_load-_stext(r26)
604 cmplwi cr0,r7,1
605 bne 3f
606
607#ifdef CONFIG_PPC_BOOK3E
608 LOAD_REG_ADDR(r5, __end_interrupts)
609 LOAD_REG_ADDR(r11, _stext)
610 sub r5,r5,r11 /* copy size = size of the interrupt vectors */
611#else
612 /* just copy interrupts */
613 LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
614#endif
615 b 5f
6163:
617#endif
618 lis r5,(copy_to_here - _stext)@ha
619 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
620
621 bl copy_and_flush /* copy the first n bytes */
622 /* this includes the code being */
623 /* executed here. */
624 addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */
625 addi r12,r8,(4f - _stext)@l /* that we just made */
626 mtctr r12
627 bctr
628
629.balign 8
630p_end: .llong _end - _stext
631
6324: /* Now copy the rest of the kernel up to _end */
633 addis r5,r26,(p_end - _stext)@ha
634 ld r5,(p_end - _stext)@l(r5) /* get _end */
6355: bl copy_and_flush /* copy the rest */
636
6379: b start_here_multiplatform
638
639/*
640 * Copy routine used to copy the kernel to start at physical address 0
641 * and flush and invalidate the caches as needed.
642 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
643 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
644 *
645 * Note: this routine *only* clobbers r0, r6 and lr
646 */
647_GLOBAL(copy_and_flush)
648 addi r5,r5,-8 /* pre-bias limit and offset by 8 because */
649 addi r6,r6,-8 /* the inner loop pre-increments r6 */
6504: li r0,8 /* Use the smallest common */
651 /* denominator cache line */
652 /* size. This results in */
653 /* extra cache line flushes */
654 /* but operation is correct. */
655 /* Can't get cache line size */
656 /* from NACA as it is being */
657 /* moved too. */
658
659 mtctr r0 /* put # words/line in ctr */
6603: addi r6,r6,8 /* copy a cache line */
661 ldx r0,r6,r4
662 stdx r0,r6,r3
663 bdnz 3b
664 dcbst r6,r3 /* write it to memory */
665 sync
666 icbi r6,r3 /* flush the icache line */
667 cmpld 0,r6,r5
668 blt 4b
669 sync
670 addi r5,r5,8 /* undo the bias so r5 is unchanged on exit */
671 addi r6,r6,8
672 isync
673 blr
674
675.align 8
676copy_to_here:
677
678#ifdef CONFIG_SMP
679#ifdef CONFIG_PPC_PMAC
680/*
681 * On PowerMac, secondary processors starts from the reset vector, which
682 * is temporarily turned into a call to one of the functions below.
683 */
684 .section ".text";
685 .align 2 ;
686
687 .globl __secondary_start_pmac_0
688__secondary_start_pmac_0:
689 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
690 li r24,0
691 b 1f
692 li r24,1
693 b 1f
694 li r24,2
695 b 1f
696 li r24,3
6971:
698
/* At entry r24 = this cpu's number, set by the stub above. */
699_GLOBAL(pmac_secondary_start)
700 /* turn on 64-bit mode */
701 bl enable_64b_mode
702
703 li r0,0
704 mfspr r3,SPRN_HID4
705 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
706 sync
707 mtspr SPRN_HID4,r3
708 isync
709 sync
710 slbia
711
712 /* get TOC pointer (real address) */
713 bl relative_toc
714 tovirt(r2,r2)
715
716 /* Copy some CPU settings from CPU 0 */
717 bl __restore_cpu_ppc970
718
719 /* pSeries do that early though I don't think we really need it */
720 mfmsr r3
721 ori r3,r3,MSR_RI
722 mtmsrd r3 /* RI on */
723
724 /* Set up a paca value for this processor. */
725 LOAD_REG_ADDR(r4,paca) /* Load paca pointer */
726 ld r4,0(r4) /* Get base vaddr of paca array */
727 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
728 add r13,r13,r4 /* for this processor. */
729 SET_PACA(r13) /* Save vaddr of paca in an SPRG*/
730
731 /* Mark interrupts soft and hard disabled (they might be enabled
732 * in the PACA when doing hotplug)
733 */
734 li r0,0
735 stb r0,PACASOFTIRQEN(r13)
736 li r0,PACA_IRQ_HARD_DIS
737 stb r0,PACAIRQHAPPENED(r13)
738
739 /* Create a temp kernel stack for use before relocation is on. */
740 ld r1,PACAEMERGSP(r13)
741 subi r1,r1,STACK_FRAME_OVERHEAD
742
743 b __secondary_start
744
745#endif /* CONFIG_PPC_PMAC */
746
747/*
748 * This function is called after the master CPU has released the
749 * secondary processors. The execution environment is relocation off.
750 * The paca for this processor has the following fields initialized at
751 * this point:
752 * 1. Processor number
753 * 2. Segment table pointer (virtual address)
754 * On entry the following are set:
755 * r1 = stack pointer (real addr of temp stack)
756 * r24 = cpu# (in Linux terms)
757 * r13 = paca virtual address
758 * SPRG_PACA = paca virtual address
759 */
760 .section ".text";
761 .align 2 ;
762
763 .globl __secondary_start
764__secondary_start:
765 /* Set thread priority to MEDIUM */
766 HMT_MEDIUM
767
768 /* Initialize the kernel stack */
769 LOAD_REG_ADDR(r3, current_set)
770 sldi r28,r24,3 /* get current_set[cpu#]: cpu# * 8 bytes/pointer */
771 ldx r14,r3,r28
772 addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
773 std r14,PACAKSAVE(r13)
774
775 /* Do early setup for that CPU (SLB and hash table pointer) */
776 bl early_setup_secondary
777
778 /*
779 * setup the new stack pointer, but *don't* use this until
780 * translation is on.
781 */
782 mr r1, r14
783
784 /* Clear backchain so we get nice backtraces */
785 li r7,0
786 mtlr r7
787
788 /* Mark interrupts soft and hard disabled (they might be enabled
789 * in the PACA when doing hotplug)
790 */
791 stb r7,PACASOFTIRQEN(r13)
792 li r0,PACA_IRQ_HARD_DIS
793 stb r0,PACAIRQHAPPENED(r13)
794
795 /* enable MMU and jump to start_secondary */
796 LOAD_REG_ADDR(r3, start_secondary_prolog)
797 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
798
799 mtspr SPRN_SRR0,r3
800 mtspr SPRN_SRR1,r4
801 RFI /* turns relocation on as it jumps */
802 b . /* prevent speculative execution */
803
804/*
805 * Running with relocation on at this point. All we want to do is
806 * zero the stack back-chain pointer and get the TOC virtual address
807 * before going into C code.
808 */
809start_secondary_prolog:
810 ld r2,PACATOC(r13)
811 li r3,0
812 std r3,0(r1) /* Zero the stack frame pointer */
813 bl start_secondary
814 b .
815/*
816 * Reset stack pointer and call start_secondary
817 * to continue with online operation when woken up
818 * from cede in cpu offline.
819 */
820_GLOBAL(start_secondary_resume)
821 ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
822 li r3,0
823 std r3,0(r1) /* Zero the stack frame pointer */
824 bl start_secondary
825 b .
826#endif
827
828/*
 * Put the CPU into 64-bit mode: set MSR[CM] on Book3E, or the
 * MSR_64BIT/MSR_ISF bits otherwise.
829 * This subroutine clobbers r11 and r12
830 */
831enable_64b_mode:
832 mfmsr r11 /* grab the current MSR */
833#ifdef CONFIG_PPC_BOOK3E
834 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
835 mtmsr r11
836#else /* CONFIG_PPC_BOOK3E */
837 li r12,(MSR_64BIT | MSR_ISF)@highest
838 sldi r12,r12,48 /* move the bits into the top 16 bits of the MSR */
839 or r11,r11,r12
840 mtmsrd r11
841 isync
842#endif
843 blr
844
845/*
846 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
847 * by the toolchain). It computes the correct value for wherever we
848 * are running at the moment, using position-independent code.
849 *
850 * Note: The compiler constructs pointers using offsets from the
851 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
852 * the MMU is on we need our TOC to be a virtual address otherwise
853 * these pointers will be real addresses which may get stored and
854 * accessed later with the MMU on. We use tovirt() at the call
855 * sites to handle this.
856 */
857_GLOBAL(relative_toc)
858 mflr r0 /* preserve the caller's LR across the bcl */
859 bcl 20,31,$+4 /* get the current PC into LR */
8600: mflr r11
861 ld r2,(p_toc - 0b)(r11) /* link-time offset of the TOC from 0b... */
862 add r2,r2,r11 /* ...rebased onto our runtime address */
863 mtlr r0
864 blr
865
866.balign 8
867p_toc: .llong __toc_start + 0x8000 - 0b
868
869/*
870 * This is where the main kernel code starts.
871 */
872start_here_multiplatform:
873 /* set up the TOC */
874 bl relative_toc
875 tovirt(r2,r2)
876
877 /* Clear out the BSS. It may have been done in prom_init,
878 * already but that's irrelevant since prom_init will soon
879 * be detached from the kernel completely. Besides, we need
880 * to clear it now for kexec-style entry.
881 */
882 LOAD_REG_ADDR(r11,__bss_stop)
883 LOAD_REG_ADDR(r8,__bss_start)
884 sub r11,r11,r8 /* bss size */
885 addi r11,r11,7 /* round up to an even double word */
886 srdi. r11,r11,3 /* shift right by 3 */
887 beq 4f /* empty bss: nothing to clear */
888 addi r8,r8,-8 /* pre-bias for the stdu below */
889 li r0,0
890 mtctr r11 /* zero this many doublewords */
8913: stdu r0,8(r8)
892 bdnz 3b
8934:
894
895#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
896 /* Setup OPAL entry (base/entry saved in r28/r29 at kernel entry) */
897 LOAD_REG_ADDR(r11, opal)
898 std r28,0(r11);
899 std r29,8(r11);
900#endif
901
902#ifndef CONFIG_PPC_BOOK3E
903 mfmsr r6
904 ori r6,r6,MSR_RI
905 mtmsrd r6 /* RI on */
906#endif
907
908#ifdef CONFIG_RELOCATABLE
909 /* Save the physical address we're running at in kernstart_addr */
910 LOAD_REG_ADDR(r4, kernstart_addr)
911 clrldi r0,r25,2 /* clear top 2 bits: virtual -> physical address */
912 std r0,0(r4)
913#endif
914
915 /* The following gets the stack set up with the regs */
916 /* pointing to the real addr of the kernel stack. This is */
917 /* all done to support the C function call below which sets */
918 /* up the htab. This is done because we have relocated the */
919 /* kernel but are still running in real mode. */
920
921 LOAD_REG_ADDR(r3,init_thread_union)
922
923 /* set up a stack pointer */
924 addi r1,r3,THREAD_SIZE
925 li r0,0
926 stdu r0,-STACK_FRAME_OVERHEAD(r1) /* terminate the backchain */
927
928 /*
929 * Do very early kernel initializations, including initial hash table
930 * and SLB setup before we turn on relocation.
931 */
932
933 /* Restore parameters passed from prom_init/kexec */
934 mr r3,r31
935 bl early_setup /* also sets r13 and SPRG_PACA */
936
937 LOAD_REG_ADDR(r3, start_here_common)
938 ld r4,PACAKMSR(r13)
939 mtspr SPRN_SRR0,r3
940 mtspr SPRN_SRR1,r4
941 RFI /* turn relocation on, continue at start_here_common */
942 b . /* prevent speculative execution */
943
944 /* This is where all platforms converge execution */
945
946start_here_common:
947 /* relocation is on at this point */
948 std r1,PACAKSAVE(r13) /* save the kernel stack pointer in the PACA */
949
950 /* Load the TOC (virtual address) */
951 ld r2,PACATOC(r13)
952
953 /* Do more system initializations in virtual mode */
954 bl setup_system
955
956 /* Mark interrupts soft and hard disabled (they might be enabled
957 * in the PACA when doing hotplug)
958 */
959 li r0,0
960 stb r0,PACASOFTIRQEN(r13)
961 li r0,PACA_IRQ_HARD_DIS
962 stb r0,PACAIRQHAPPENED(r13)
963
964 /* Generic kernel entry */
965 bl start_kernel
966
967 /* Not reached */
968 BUG_OPCODE
969
970/*
971 * We put a few things here that have to be page-aligned.
972 * This stuff goes at the beginning of the bss, which is page-aligned.
973 */
974 .section ".bss"
975
976 .align PAGE_SHIFT
977
978 .globl empty_zero_page
979empty_zero_page:
980 .space PAGE_SIZE /* one zero-filled page, exported to the rest of the kernel */
981
982 .globl swapper_pg_dir
983swapper_pg_dir:
984 .space PGD_TABLE_SIZE /* initial kernel top-level page table (PGD) */