/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
16
9994a338
PM
17#include <linux/sys.h>
18#include <asm/unistd.h>
19#include <asm/errno.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22#include <asm/cache.h>
23#include <asm/ppc_asm.h>
24#include <asm/asm-offsets.h>
25#include <asm/cputable.h>
6cb7bfeb 26#include <asm/thread_info.h>
1fc711f7 27#include <asm/kexec.h>
46f52210 28#include <asm/ptrace.h>
9994a338
PM
29
30 .text
31
9994a338
PM
32_GLOBAL(call_do_softirq)
33 mflr r0
34 std r0,16(r1)
4ae2dcb6 35 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
9994a338
PM
36 mr r1,r3
37 bl .__do_softirq
38 ld r1,0(r1)
39 ld r0,16(r1)
40 mtlr r0
41 blr
42
b9e5b4e6 43_GLOBAL(call_handle_irq)
7d12e780 44 ld r8,0(r6)
9994a338
PM
45 mflr r0
46 std r0,16(r1)
b9e5b4e6 47 mtctr r8
4ae2dcb6 48 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
7d12e780 49 mr r1,r5
b9e5b4e6 50 bctrl
9994a338
PM
51 ld r1,0(r1)
52 ld r0,16(r1)
53 mtlr r0
54 blr
9994a338 55
9994a338
PM
56 .section ".toc","aw"
57PPC64_CACHES:
58 .tc ppc64_caches[TC],ppc64_caches
59 .section ".text"
60
61/*
62 * Write any modified data cache blocks out to memory
63 * and invalidate the corresponding instruction cache blocks.
64 *
65 * flush_icache_range(unsigned long start, unsigned long stop)
66 *
67 * flush all bytes from start through stop-1 inclusive
68 */
69
3b04c300 70_KPROBE(flush_icache_range)
abb29c3b
KH
71BEGIN_FTR_SECTION
72 blr
73END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
9994a338
PM
74/*
75 * Flush the data cache to memory
76 *
77 * Different systems have different cache line sizes
78 * and in some cases i-cache and d-cache line sizes differ from
79 * each other.
80 */
81 ld r10,PPC64_CACHES@toc(r2)
82 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
83 addi r5,r7,-1
84 andc r6,r3,r5 /* round low to line bdy */
85 subf r8,r6,r4 /* compute length */
86 add r8,r8,r5 /* ensure we get enough */
87 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
88 srw. r8,r8,r9 /* compute line count */
89 beqlr /* nothing to do? */
90 mtctr r8
911: dcbst 0,r6
92 add r6,r6,r7
93 bdnz 1b
94 sync
95
96/* Now invalidate the instruction cache */
97
98 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
99 addi r5,r7,-1
100 andc r6,r3,r5 /* round low to line bdy */
101 subf r8,r6,r4 /* compute length */
102 add r8,r8,r5
103 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
104 srw. r8,r8,r9 /* compute line count */
105 beqlr /* nothing to do? */
106 mtctr r8
1072: icbi 0,r6
108 add r6,r6,r7
109 bdnz 2b
110 isync
111 blr
112 .previous .text
113/*
114 * Like above, but only do the D-cache.
115 *
116 * flush_dcache_range(unsigned long start, unsigned long stop)
117 *
118 * flush all bytes from start to stop-1 inclusive
119 */
120_GLOBAL(flush_dcache_range)
121
122/*
123 * Flush the data cache to memory
124 *
125 * Different systems have different cache line sizes
126 */
127 ld r10,PPC64_CACHES@toc(r2)
128 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
129 addi r5,r7,-1
130 andc r6,r3,r5 /* round low to line bdy */
131 subf r8,r6,r4 /* compute length */
132 add r8,r8,r5 /* ensure we get enough */
133 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
134 srw. r8,r8,r9 /* compute line count */
135 beqlr /* nothing to do? */
136 mtctr r8
1370: dcbst 0,r6
138 add r6,r6,r7
139 bdnz 0b
140 sync
141 blr
142
143/*
144 * Like above, but works on non-mapped physical addresses.
145 * Use only for non-LPAR setups ! It also assumes real mode
146 * is cacheable. Used for flushing out the DART before using
147 * it as uncacheable memory
148 *
149 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
150 *
151 * flush all bytes from start to stop-1 inclusive
152 */
153_GLOBAL(flush_dcache_phys_range)
154 ld r10,PPC64_CACHES@toc(r2)
155 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
156 addi r5,r7,-1
157 andc r6,r3,r5 /* round low to line bdy */
158 subf r8,r6,r4 /* compute length */
159 add r8,r8,r5 /* ensure we get enough */
160 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
161 srw. r8,r8,r9 /* compute line count */
162 beqlr /* nothing to do? */
163 mfmsr r5 /* Disable MMU Data Relocation */
164 ori r0,r5,MSR_DR
165 xori r0,r0,MSR_DR
166 sync
167 mtmsr r0
168 sync
169 isync
170 mtctr r8
1710: dcbst 0,r6
172 add r6,r6,r7
173 bdnz 0b
174 sync
175 isync
176 mtmsr r5 /* Re-enable MMU Data Relocation */
177 sync
178 isync
179 blr
180
181_GLOBAL(flush_inval_dcache_range)
182 ld r10,PPC64_CACHES@toc(r2)
183 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
184 addi r5,r7,-1
185 andc r6,r3,r5 /* round low to line bdy */
186 subf r8,r6,r4 /* compute length */
187 add r8,r8,r5 /* ensure we get enough */
188 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
189 srw. r8,r8,r9 /* compute line count */
190 beqlr /* nothing to do? */
191 sync
192 isync
193 mtctr r8
1940: dcbf 0,r6
195 add r6,r6,r7
196 bdnz 0b
197 sync
198 isync
199 blr
200
201
202/*
203 * Flush a particular page from the data cache to RAM.
204 * Note: this is necessary because the instruction cache does *not*
205 * snoop from the data cache.
206 *
207 * void __flush_dcache_icache(void *page)
208 */
209_GLOBAL(__flush_dcache_icache)
210/*
211 * Flush the data cache to memory
212 *
213 * Different systems have different cache line sizes
214 */
215
216/* Flush the dcache */
217 ld r7,PPC64_CACHES@toc(r2)
218 clrrdi r3,r3,PAGE_SHIFT /* Page align */
219 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
220 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
221 mr r6,r3
222 mtctr r4
2230: dcbst 0,r6
224 add r6,r6,r5
225 bdnz 0b
226 sync
227
228/* Now invalidate the icache */
229
230 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
231 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
232 mtctr r4
2331: icbi 0,r3
234 add r3,r3,r5
235 bdnz 1b
236 isync
237 blr
9994a338 238
ca9d7aea
DW
239_GLOBAL(__bswapdi2)
240 srdi r8,r3,32
241 rlwinm r7,r3,8,0xffffffff
242 rlwimi r7,r3,24,0,7
243 rlwinm r9,r8,8,0xffffffff
244 rlwimi r7,r3,24,16,23
245 rlwimi r9,r8,24,0,7
246 rlwimi r9,r8,24,16,23
247 sldi r7,r7,32
248 or r3,r7,r9
249 blr
3f639ee8 250
9994a338
PM
251#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
252/*
253 * Do an IO access in real mode
254 */
255_GLOBAL(real_readb)
256 mfmsr r7
257 ori r0,r7,MSR_DR
258 xori r0,r0,MSR_DR
259 sync
260 mtmsrd r0
261 sync
262 isync
263 mfspr r6,SPRN_HID4
264 rldicl r5,r6,32,0
265 ori r5,r5,0x100
266 rldicl r5,r5,32,0
267 sync
268 mtspr SPRN_HID4,r5
269 isync
270 slbia
271 isync
272 lbz r3,0(r3)
273 sync
274 mtspr SPRN_HID4,r6
275 isync
276 slbia
277 isync
278 mtmsrd r7
279 sync
280 isync
281 blr
282
283 /*
284 * Do an IO access in real mode
285 */
286_GLOBAL(real_writeb)
287 mfmsr r7
288 ori r0,r7,MSR_DR
289 xori r0,r0,MSR_DR
290 sync
291 mtmsrd r0
292 sync
293 isync
294 mfspr r6,SPRN_HID4
295 rldicl r5,r6,32,0
296 ori r5,r5,0x100
297 rldicl r5,r5,32,0
298 sync
299 mtspr SPRN_HID4,r5
300 isync
301 slbia
302 isync
303 stb r3,0(r4)
304 sync
305 mtspr SPRN_HID4,r6
306 isync
307 slbia
308 isync
309 mtmsrd r7
310 sync
311 isync
312 blr
313#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
314
39c870d5
OJ
315#ifdef CONFIG_PPC_PASEMI
316
39c870d5
OJ
317_GLOBAL(real_205_readb)
318 mfmsr r7
319 ori r0,r7,MSR_DR
320 xori r0,r0,MSR_DR
321 sync
322 mtmsrd r0
323 sync
324 isync
e55174e9 325 LBZCIX(R3,R0,R3)
39c870d5
OJ
326 isync
327 mtmsrd r7
328 sync
329 isync
330 blr
331
332_GLOBAL(real_205_writeb)
333 mfmsr r7
334 ori r0,r7,MSR_DR
335 xori r0,r0,MSR_DR
336 sync
337 mtmsrd r0
338 sync
339 isync
e55174e9 340 STBCIX(R3,R0,R4)
39c870d5
OJ
341 isync
342 mtmsrd r7
343 sync
344 isync
345 blr
346
347#endif /* CONFIG_PPC_PASEMI */
348
349
#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
_GLOBAL(scom970_read)
        /* interrupts off */
        mfmsr   r4                              /* save MSR */
        ori     r0,r4,MSR_EE                    /* build MSR with EE clear ... */
        xori    r0,r0,MSR_EE                    /* ... via set-then-flip */
        mtmsrd  r0,1

        /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
         * (including parity). On current CPUs they must be 0'd,
         * and finally or in RW bit
         */
        rlwinm  r3,r3,8,0,15
        ori     r3,r3,0x8000                    /* RW bit: read */

        /* do the actual scom read */
        sync
        mtspr   SPRN_SCOMC,r3                   /* issue the command */
        isync
        mfspr   r3,SPRN_SCOMD                   /* fetch the data */
        isync
        mfspr   r0,SPRN_SCOMC                   /* read back status (not checked) */
        isync

        /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
         * that's the best we can do). Not implemented yet as we don't use
         * the scom on any of the bogus CPUs yet, but may have to be done
         * ultimately
         */

        /* restore interrupts */
        mtmsrd  r4,1
        blr


_GLOBAL(scom970_write)
        /* interrupts off */
        mfmsr   r5                              /* save MSR */
        ori     r0,r5,MSR_EE
        xori    r0,r0,MSR_EE
        mtmsrd  r0,1

        /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
         * (including parity). On current CPUs they must be 0'd.
         */
        rlwinm  r3,r3,8,0,15

        sync
        mtspr   SPRN_SCOMD,r4                   /* write data */
        isync
        mtspr   SPRN_SCOMC,r3                   /* write command */
        isync
        mfspr   r3,SPRN_SCOMC                   /* read back status; was bare "3" -- name the register */
        isync

        /* restore interrupts */
        mtmsrd  r5,1
        blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
4350147a
BH
420
421
9994a338
PM
422/*
423 * disable_kernel_fp()
424 * Disable the FPU.
425 */
426_GLOBAL(disable_kernel_fp)
427 mfmsr r3
428 rldicl r0,r3,(63-MSR_FP_LG),1
429 rldicl r3,r0,(MSR_FP_LG+1),0
430 mtmsrd r3 /* disable use of fpu now */
431 isync
432 blr
433
9994a338
PM
434/* kexec_wait(phys_cpu)
435 *
436 * wait for the flag to change, indicating this kernel is going away but
437 * the slave code for the next one is at addresses 0 to 100.
438 *
3d2cea73
MM
439 * This is used by all slaves, even those that did not find a matching
440 * paca in the secondary startup code.
9994a338
PM
441 *
442 * Physical (hardware) cpu id should be in r3.
443 */
444_GLOBAL(kexec_wait)
445 bl 1f
4461: mflr r5
447 addi r5,r5,kexec_flag-1b
448
44999: HMT_LOW
450#ifdef CONFIG_KEXEC /* use no memory without kexec */
451 lwz r4,0(r5)
452 cmpwi 0,r4,0
453 bnea 0x60
454#endif
455 b 99b
456
457/* this can be in text because we won't change it until we are
458 * running in real anyways
459 */
460kexec_flag:
461 .long 0
462
463
464#ifdef CONFIG_KEXEC
465
466/* kexec_smp_wait(void)
467 *
468 * call with interrupts off
469 * note: this is a terminal routine, it does not save lr
470 *
471 * get phys id from paca
9994a338 472 * switch to real mode
3d2cea73 473 * mark the paca as no longer used
9994a338
PM
474 * join other cpus in kexec_wait(phys_id)
475 */
476_GLOBAL(kexec_smp_wait)
477 lhz r3,PACAHWCPUID(r13)
9994a338 478 bl real_mode
3d2cea73
MM
479
480 li r4,KEXEC_STATE_REAL_MODE
481 stb r4,PACAKEXECSTATE(r13)
482 SYNC
483
9994a338
PM
484 b .kexec_wait
485
486/*
487 * switch to real mode (turn mmu off)
488 * we use the early kernel trick that the hardware ignores bits
489 * 0 and 1 (big endian) of the effective address in real mode
490 *
491 * don't overwrite r3 here, it is live for kexec_wait above.
492 */
493real_mode: /* assume normal blr return */
4941: li r9,MSR_RI
495 li r10,MSR_DR|MSR_IR
496 mflr r11 /* return address to SRR0 */
497 mfmsr r12
498 andc r9,r12,r9
499 andc r10,r12,r10
500
501 mtmsrd r9,1
502 mtspr SPRN_SRR1,r10
503 mtspr SPRN_SRR0,r11
504 rfid
505
506
507/*
1767c8f3 508 * kexec_sequence(newstack, start, image, control, clear_all())
9994a338
PM
509 *
510 * does the grungy work with stack switching and real mode switches
511 * also does simple calls to other code
512 */
513
/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
_GLOBAL(kexec_sequence)
        mflr    r0
        std     r0,16(r1)

        /* switch stacks to newstack -- &kexec_stack.stack */
        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3

        li      r0,0
        std     r0,16(r1)                       /* terminate the back-chain */

        /* save regs for local vars on new stack.
         * yes, we won't go back, but ...
         */
        std     r31,-8(r1)
        std     r30,-16(r1)
        std     r29,-24(r1)
        std     r28,-32(r1)
        std     r27,-40(r1)
        std     r26,-48(r1)
        std     r25,-56(r1)

        stdu    r1,-STACK_FRAME_OVERHEAD-64(r1)

        /* save args into preserved regs */
        mr      r31,r3                          /* newstack (both) */
        mr      r30,r4                          /* start (real) */
        mr      r29,r5                          /* image (virt) */
        mr      r28,r6                          /* control, unused */
        mr      r27,r7                          /* clear_all() fn desc */
        mr      r26,r8                          /* spare */
        lhz     r25,PACAHWCPUID(r13)            /* get our phys cpu from paca */

        /* disable interrupts, we are overwriting kernel data next */
        mfmsr   r3
        rlwinm  r3,r3,0,17,15                   /* clear MSR_EE */
        mtmsrd  r3,1

        /* copy dest pages, flush whole dest image */
        mr      r3,r29
        bl      .kexec_copy_flush               /* (image) */

        /* turn off mmu */
        bl      real_mode

        /* copy 0x100 bytes starting at start to 0 */
        li      r3,0
        mr      r4,r30                          /* start, aka phys mem offset */
        li      r5,0x100
        li      r6,0
        bl      .copy_and_flush                 /* (dest, src, copy limit, start offset) */
1:      /* assume normal blr return */

        /* release other cpus to the new kernel secondary start at 0x60 */
        mflr    r5
        li      r6,1
        stw     r6,kexec_flag-1b(r5)            /* was bare "(5)": name the register */

        /* clear out hardware hash page table and tlb */
        ld      r5,0(r27)                       /* deref function descriptor */
        mtctr   r5
        bctrl                                   /* ppc_md.hpte_clear_all(void); */

/*
 * kexec image calling is:
 * the first 0x100 bytes of the entry point are copied to 0
 *
 * all slaves branch to slave = 0x60 (absolute)
 *      slave(phys_cpu_id);
 *
 * master goes to start = entry point
 *      start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
        mr      r3,r25                          /* my phys cpu */
        mr      r4,r30                          /* start, aka phys mem offset */
        mtlr    r4                              /* was bare "mtlr 4": name the register */
        li      r5,0
        blr                                     /* image->start(physid, image->start, 0); */
9994a338 607#endif /* CONFIG_KEXEC */