/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>

	.text

_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	bl	__do_irq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr
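
/*
 * C-side shape of the two helpers above (a sketch; the hypothetical
 * signatures below approximate the declarations used by the irq code):
 *
 *	void call_do_softirq(void *sp);
 *	void call_do_irq(struct pt_regs *regs, void *sp);
 *
 * sp is the base of an alternate irq/softirq stack; the stdu above
 * builds the first frame near its top (THREAD_SIZE minus the frame
 * overhead in) and saves the old r1 there, so the "ld r1,0(r1)" after
 * the call undoes the stack switch.
 */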

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
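/*
 * Worked example of the rounding below, with an assumed 128-byte line:
 * start = 0x1005, stop = 0x1085 gives
 *	r6 = 0x1005 & ~0x7f                = 0x1000  (aligned start)
 *	r8 = (0x1085 - 0x1000 + 0x7f) >> 7 = 2       (line count)
 * i.e. the two lines at 0x1000 and 0x1080, every line the range touches.
 */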
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
.previous .text
EXPORT_SYMBOL(flush_icache_range)
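
/*
 * Typical use (an illustrative sketch, not a specific call site):
 * after patching an instruction,
 *
 *	*(u32 *)addr = new_insn;
 *	flush_icache_range(addr, addr + 4);
 *
 * the dcbst/sync above push the store out to memory and the
 * icbi/isync discard any stale copy in the instruction cache.
 */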
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr
EXPORT_SYMBOL(flush_dcache_range)
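
/*
 * Illustrative caller (a sketch): cleaning a buffer so a non-snooping
 * device sees the CPU's writes,
 *
 *	flush_dcache_range((unsigned long)buf,
 *			   (unsigned long)buf + len);
 *
 * dcbst writes each line back without invalidating it, so the CPU's
 * cached copy stays usable afterwards.
 */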

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups!  It also assumes real mode
 * is cacheable.  Used for flushing out the DART before using
 * it as uncacheable memory
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR		/* ori/xori pair clears just MSR_DR */
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr
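
/*
 * Like flush_dcache_range, but uses dcbf: each line is written back
 * *and* invalidated, so the next CPU access refetches from memory
 * (e.g. after a device has written the buffer).
 *
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 */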
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr
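
/*
 * Sketch of the usual call path (approximate; see the mm code for the
 * real callers): the page-based helpers pass in a kernel mapping, e.g.
 *
 *	void flush_dcache_icache_page(struct page *page)
 *	{
 *		__flush_dcache_icache(page_address(page));
 *	}
 *
 * which is why this routine counts lines per page instead of taking a
 * start/stop pair.
 */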

_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr
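
/*
 * This is the libgcc-style 64-bit byte swap: the rlwinm/rlwimi pairs
 * reverse the bytes within each 32-bit half while r7/r9 swap the
 * halves, so
 *
 *	__bswapdi2(0x0102030405060708) == 0x0807060504030201
 */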


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* rotate left 32: mask = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3		/* set HID4 real-mode cache-inhibit bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* rotate left 32: mask = 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3		/* clear HID4 real-mode cache-inhibit bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear just MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0		/* rotate so the 0x100 << 32 bit sits at 0x100 */
	ori	r5,r5,0x100		/* set real-mode cache-inhibit */
	rldicl	r5,r5,32,0		/* rotate back */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear just MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
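
/*
 * C-side shape of the two accessors above (a sketch, assuming the
 * usual declarations in asm/io.h):
 *
 *	u8   real_readb(volatile u8 __iomem *addr);
 *	void real_writeb(u8 data, volatile u8 __iomem *addr);
 *
 * real_writeb takes the value in r3 and the address in r4, matching
 * the "stb r3,0(r4)" above.
 */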
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear just MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)		/* cache-inhibited load byte */
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* clear just MSR_DR */
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)		/* cache-inhibited store byte */
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address.  This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE		/* clear just MSR_EE */
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity).  On current CPUs they must be
	 * 0'd, then OR in the RW bit.
	 */
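	/*
	 * e.g. (illustrative value) address 0x123456:
	 *	rlwinm	-> 0x12340000	(low byte 0x56 dropped, the rest
	 *				 left-justified in the word)
	 *	ori	-> 0x12348000	(RW bit set)
	 */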
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch! we lost a bit, bah
	 * that's the best we can do).  Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE		/* clear just MSR_EE */
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity).  On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4		/* write data */
	isync
	mtspr	SPRN_SCOMC,r3		/* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
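	/*
	 * bl-to-the-next-instruction idiom: the bl loads LR with the
	 * address of label 1, so r5 = 1b + (kexec_flag - 1b), i.e. the
	 * runtime address of kexec_flag, independent of the TOC.
	 */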
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0.  This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r10) */

	/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* current MSR with RI cleared */
	andc	r10,r12,r10	/* MSR with the MMU off, for SRR1 */

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
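
/*
 * Approximate C prototype (a sketch; the real caller lives in
 * machine_kexec_64.c):
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void));
 */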

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15		/* clear MSR_EE */
	mtmsrd	r3,1
#endif

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

#ifndef CONFIG_PPC_BOOK3E
	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */
#endif /* !CONFIG_PPC_BOOK3E */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *		slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *		start(phys_cpu_id, start, 0);
 *
 *
 *    a wrapper is needed to call existing kernels, here is an approximate
 *    description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
	mr	r3,r25			/* my phys cpu */
	mr	r4,r30			/* start, aka phys mem offset */
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */
707 #endif /* CONFIG_KEXEC */