/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>

	.text

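/*
 * Helpers for running softirqs/irq handlers on a separate stack: each
 * saves LR in the caller's frame, builds a new frame at the top of the
 * stack passed in (with a back-chain to the old r1), calls the handler,
 * then unwinds through the back-chain.  The C-side declarations are
 * roughly (see arch/powerpc/kernel/irq.c):
 *
 *	void call_do_softirq(struct thread_info *tp);
 *	void call_handle_irq(int irq, void *p1,
 *			     struct thread_info *tp, void *func);
 */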
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	.__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	ld	r8,0(r6)
	mflr	r0
	std	r0,16(r1)
	mtctr	r8
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

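/*
 * A rough C equivalent, for illustration only (dline/iline stand in
 * for the DCACHEL1LINESIZE/ICACHEL1LINESIZE values, which are powers
 * of two):
 *
 *	for (p = start & ~(dline - 1); p < stop; p += dline)
 *		dcbst(p);		// push dirty data out to memory
 *	sync();				// order dcbst before icbi
 *	for (p = start & ~(iline - 1); p < stop; p += iline)
 *		icbi(p);		// invalidate stale instructions
 *	isync();			// resync this cpu's fetch stream
 */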
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
	.previous .text
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups!  It also assumes real mode
 * is cacheable.  Used for flushing out the DART before using
 * it as uncacheable memory.
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
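/*
 * Note the ori/xori pair below: OR-ing MSR_DR in and then XOR-ing it
 * back out clears the bit whatever its previous state, so the MSR
 * saved in r5 can later be restored verbatim.  In C the idiom is:
 *
 *	new_msr = (old_msr | MSR_DR) ^ MSR_DR;	// old_msr & ~MSR_DR
 */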
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr

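/*
 * Flush and invalidate all data cache blocks in a range: dcbf (flush)
 * writes a modified line back to memory and then invalidates it,
 * unlike the dcbst (store) used above, which leaves the line valid.
 *
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 */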
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
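/*
 * Unlike the range-based flushes above, this walks exactly one
 * page-aligned page using the precomputed lines-per-page counts from
 * ppc64_caches, so no length rounding is needed.
 */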
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr

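/*
 * 64-bit byte reversal: the libgcc __bswapdi2 helper.  Each 32-bit
 * half is byte-swapped with the classic three-instruction
 * rlwinm/rlwimi sequence, then the swapped low half becomes the new
 * high half.  Roughly, in C:
 *
 *	u64 __bswapdi2(u64 x)
 *	{
 *		return __builtin_bswap64(x);	// 0x0102030405060708
 *						// -> 0x0807060504030201
 *	}
 */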
_GLOBAL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

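/*
 * rmci_on/rmci_off: set/clear what the names suggest is the real-mode
 * cache-inhibit control in HID4 on 970-class CPUs, with the
 * sync/isync/slbia dance required around HID4 updates.  The li/rldicl
 * pair just materialises the 64-bit constant 0x100 << 32.
 */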
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

/*
 * Do an IO access in real mode
 */
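/*
 * The sequence: turn off data relocation (the same ori/xori idiom as
 * in flush_dcache_phys_range), set the cache-inhibit bit in HID4 so
 * the real-mode access is not cached, do the single byte access, then
 * restore HID4 and the MSR, with slbia after each HID4 update.
 */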
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

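/*
 * Same real-mode trick, but these CPUs have the cache-inhibited
 * load/store instructions (lbzcix/stbcix, via the LBZCIX/STBCIX
 * macros; the _205 presumably refers to Power ISA 2.05, which added
 * them), so there is no need to touch HID4 -- only MSR_DR is cleared.
 */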
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */

#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address.  This code
 * is 970-specific and will not check the status bits, so you should
 * know what you are doing.
 */
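/*
 * A hypothetical call from C, with a made-up SCOM address purely for
 * illustration:
 *
 *	unsigned int addr = some_scom_register;	// 24-bit address
 *	unsigned long val = scom970_read(addr);
 *	scom970_write(addr, val | 1);
 */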
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally OR in the RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4	/* write data */
	isync
	mtspr	SPRN_SCOMC,r3	/* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
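/*
 * The bl 1f / mflr pair below is the standard position-independent way
 * for code to find its own address: the branch deposits the address of
 * label 1 in LR, and adding kexec_flag-1b to it yields the runtime
 * address of kexec_flag wherever this code happens to be running.
 */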
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	bnea	0x60
#endif
	b	99b

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	.kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
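/*
 * The switch itself is done with rfid: SRR0 is loaded with the saved
 * LR and SRR1 with MSR & ~(MSR_IR|MSR_DR), so "returning" through the
 * rfid lands back at the caller with translation off.  MSR_RI is
 * cleared first, marking the window as non-recoverable.
 */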
real_mode:	/* assume normal blr return */
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid


/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

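/*
 * The matching C-side declaration is roughly:
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void));
 */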
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	.kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30			/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	.copy_and_flush		/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	/* clear out hardware hash page table and tlb */
	ld	r5,0(r27)		/* deref function descriptor */
	mtctr	r5
	bctrl				/* ppc_md.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *		slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *		start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
	mr	r3,r25			# my phys cpu
	mr	r4,r30			# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */