/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align	32
/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction misses; data misses
 * to the OBP range do not use vpte).  If so, go back directly to the faulting
 * address.  This is because we want to read the tpc, otherwise we have no way
 * of knowing the 8k aligned faulting address if we are using >8k kernel
 * pagesize.  This also ensures no vpte range addresses are dropped into the
 * tlb while obp is executing (see inherit_locked_prom_mappings() rant).
 */
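
/* The classification below, roughly, as a C sketch (constants are from
 * asm/pgtable.h; the 1UL << 32 bound matches HI_OBP_ADDRESS):
 *
 *	if (vaddr < LOW_OBP_ADDRESS)
 *		goto kern_vpte;
 *	if (vaddr < (1UL << 32))	// HI_OBP_ADDRESS
 *		goto vpte_insn_obp;
 *	// otherwise fall through into kern_vpte below
 */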
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop

	/* These two instructions are patched by paging_init(). */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend. */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
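	/* TLB_SFSR is 0x18 and TAG_ACCESS is 0x30 in the sun4u MMU register
	 * space, so [%g1 + %g1] below addresses TAG_ACCESS while leaving
	 * %g1 = TLB_SFSR, which the top level handler expects on return.
	 */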
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done

vpte_insn_obp:
	/* Behave as if we are at TL0. */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* %g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS. */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

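	/* The search below walks prom_trans[], a null-terminated table of
	 * {base, size, pte} 64-bit triples (hence the 3 * 8 byte stride).
	 * As a rough C sketch (field names are illustrative only):
	 *
	 *	for (p = prom_trans; p->base; p++)
	 *		if (p->base <= vaddr && vaddr < p->base + p->size)
	 *			return p->pte + (vaddr - p->base);
	 *	goto longpath;
	 */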
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap. */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

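/* Data-side analog of the search in vpte_insn_obp above: the same
 * prom_trans[] walk, but loading the translation into the D-TLB
 * instead of the I-TLB.
 */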
kvmap_do_obp:
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
	srlx		%g4, 13, %g4
	sllx		%g4, 13, %g4

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap. */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by the prom, as well as by the kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
	.align		32
kvmap:
	brgez,pn	%g4, kvmap_nonlinear
	 nop

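	/* Negative (top-bit-set) addresses are linear kernel mappings, and
	 * a single xor against a precomputed value in %g2 is assumed here
	 * to yield their PTE directly.  With DEBUG_PAGEALLOC, paging_init()
	 * patches kvmap_linear_patch to branch into the full page table
	 * walk below instead, so that unmapped linear pages really fault.
	 */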
#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

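/* DEBUG_PAGEALLOC path: a full three-level software walk of
 * swapper_pg_dir.  The pgd and pmd entries are 32-bit values holding a
 * physical address shifted right by 11 (hence the lduw/lduwa loads and
 * the sllx-by-11 to rebuild the pointer); the final pte is a 64-bit
 * load.  A rough C sketch (index helpers are illustrative only):
 *
 *	pgd = swapper_pg_dir[pgd_index(vaddr)];       (virtual load)
 *	pmd = ((u32 *)(pgd << 11))[pmd_index(vaddr)]; (ASI_PHYS_USE_EC)
 *	pte = ((u64 *)(pmd << 11))[pte_index(vaddr)]; (ASI_PHYS_USE_EC)
 *	if any level is zero, goto longpath;
 */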
#ifdef CONFIG_DEBUG_PAGEALLOC
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif

kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop
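	/* The mov/sllx pair above rebuilds VMALLOC_END (2^24 aligned in
	 * asm/pgtable.h) from a small simm13 immediate; the net effect of
	 * this block is:
	 *
	 *	if (vaddr < MODULES_VADDR || vaddr >= VMALLOC_END)
	 *		goto longpath;
	 */
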
kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop
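	/* _PAGE_VALID is bit 63, so the brgez above treats any PTE with
	 * the sign bit clear as invalid and falls back to full fault
	 * handling.
	 */
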
kvmap_load:
	/* PTE is valid, load into TLB and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry