]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
79add627 | 6 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
1da177e4 LT |
7 | * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org |
8 | * Carsten Langgaard, carstenl@mips.com | |
9 | * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. | |
10 | */ | |
eaa38d63 | 11 | #include <linux/cpu_pm.h> |
1da177e4 LT |
12 | #include <linux/init.h> |
13 | #include <linux/sched.h> | |
631330f5 | 14 | #include <linux/smp.h> |
1da177e4 | 15 | #include <linux/mm.h> |
fd062c84 | 16 | #include <linux/hugetlb.h> |
f2e3656d | 17 | #include <linux/module.h> |
1da177e4 LT |
18 | |
19 | #include <asm/cpu.h> | |
69f24d17 | 20 | #include <asm/cpu-type.h> |
1da177e4 LT |
21 | #include <asm/bootinfo.h> |
22 | #include <asm/mmu_context.h> | |
23 | #include <asm/pgtable.h> | |
c01905ee | 24 | #include <asm/tlb.h> |
3d18c983 | 25 | #include <asm/tlbmisc.h> |
1da177e4 LT |
26 | |
27 | extern void build_tlb_refill_handler(void); | |
28 | ||
2a21c730 | 29 | /* |
c579d310 HC |
30 | * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, |
31 | * unfortunately, itlb is not totally transparent to software. | |
2a21c730 | 32 | */ |
14bd8c08 RB |
33 | static inline void flush_itlb(void) |
34 | { | |
35 | switch (current_cpu_type()) { | |
36 | case CPU_LOONGSON2: | |
c579d310 | 37 | case CPU_LOONGSON3: |
14bd8c08 RB |
38 | write_c0_diag(4); |
39 | break; | |
40 | default: | |
41 | break; | |
42 | } | |
43 | } | |
2a21c730 | 44 | |
14bd8c08 RB |
45 | static inline void flush_itlb_vm(struct vm_area_struct *vma) |
46 | { | |
47 | if (vma->vm_flags & VM_EXEC) | |
48 | flush_itlb(); | |
49 | } | |
2a21c730 | 50 | |
1da177e4 LT |
51 | void local_flush_tlb_all(void) |
52 | { | |
53 | unsigned long flags; | |
54 | unsigned long old_ctx; | |
75b5b5e0 | 55 | int entry, ftlbhighset; |
1da177e4 | 56 | |
b633648c | 57 | local_irq_save(flags); |
1da177e4 LT |
58 | /* Save old context and create impossible VPN2 value */ |
59 | old_ctx = read_c0_entryhi(); | |
f1014d1b | 60 | htw_stop(); |
1da177e4 LT |
61 | write_c0_entrylo0(0); |
62 | write_c0_entrylo1(0); | |
63 | ||
64 | entry = read_c0_wired(); | |
65 | ||
66 | /* Blast 'em all away. */ | |
75b5b5e0 LY |
67 | if (cpu_has_tlbinv) { |
68 | if (current_cpu_data.tlbsizevtlb) { | |
69 | write_c0_index(0); | |
70 | mtc0_tlbw_hazard(); | |
71 | tlbinvf(); /* invalidate VTLB */ | |
72 | } | |
73 | ftlbhighset = current_cpu_data.tlbsizevtlb + | |
74 | current_cpu_data.tlbsizeftlbsets; | |
75 | for (entry = current_cpu_data.tlbsizevtlb; | |
76 | entry < ftlbhighset; | |
77 | entry++) { | |
78 | write_c0_index(entry); | |
79 | mtc0_tlbw_hazard(); | |
80 | tlbinvf(); /* invalidate one FTLB set */ | |
81 | } | |
601cfa7b LY |
82 | } else { |
83 | while (entry < current_cpu_data.tlbsize) { | |
84 | /* Make sure all entries differ. */ | |
85 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); | |
86 | write_c0_index(entry); | |
87 | mtc0_tlbw_hazard(); | |
88 | tlb_write_indexed(); | |
89 | entry++; | |
90 | } | |
1da177e4 LT |
91 | } |
92 | tlbw_use_hazard(); | |
93 | write_c0_entryhi(old_ctx); | |
f1014d1b | 94 | htw_start(); |
14bd8c08 | 95 | flush_itlb(); |
b633648c | 96 | local_irq_restore(flags); |
1da177e4 | 97 | } |
f2e3656d | 98 | EXPORT_SYMBOL(local_flush_tlb_all); |
1da177e4 | 99 | |
/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	/* A zero context means this CPU never used the mm's ASID. */
	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
116 | ||
117 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |
118 | unsigned long end) | |
119 | { | |
120 | struct mm_struct *mm = vma->vm_mm; | |
121 | int cpu = smp_processor_id(); | |
122 | ||
123 | if (cpu_context(cpu, mm) != 0) { | |
a5e696e5 | 124 | unsigned long size, flags; |
1da177e4 | 125 | |
b633648c | 126 | local_irq_save(flags); |
ac53c4fc DD |
127 | start = round_down(start, PAGE_SIZE << 1); |
128 | end = round_up(end, PAGE_SIZE << 1); | |
129 | size = (end - start) >> (PAGE_SHIFT + 1); | |
75b5b5e0 LY |
130 | if (size <= (current_cpu_data.tlbsizeftlbsets ? |
131 | current_cpu_data.tlbsize / 8 : | |
132 | current_cpu_data.tlbsize / 2)) { | |
1da177e4 LT |
133 | int oldpid = read_c0_entryhi(); |
134 | int newpid = cpu_asid(cpu, mm); | |
135 | ||
f1014d1b | 136 | htw_stop(); |
1da177e4 LT |
137 | while (start < end) { |
138 | int idx; | |
139 | ||
140 | write_c0_entryhi(start | newpid); | |
ac53c4fc | 141 | start += (PAGE_SIZE << 1); |
1da177e4 LT |
142 | mtc0_tlbw_hazard(); |
143 | tlb_probe(); | |
432bef2a | 144 | tlb_probe_hazard(); |
1da177e4 LT |
145 | idx = read_c0_index(); |
146 | write_c0_entrylo0(0); | |
147 | write_c0_entrylo1(0); | |
148 | if (idx < 0) | |
149 | continue; | |
150 | /* Make sure all entries differ. */ | |
172546bf | 151 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
1da177e4 LT |
152 | mtc0_tlbw_hazard(); |
153 | tlb_write_indexed(); | |
154 | } | |
155 | tlbw_use_hazard(); | |
156 | write_c0_entryhi(oldpid); | |
f1014d1b | 157 | htw_start(); |
1da177e4 LT |
158 | } else { |
159 | drop_mmu_context(mm, cpu); | |
160 | } | |
14bd8c08 | 161 | flush_itlb(); |
b633648c | 162 | local_irq_restore(flags); |
1da177e4 LT |
163 | } |
164 | } | |
165 | ||
166 | void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |
167 | { | |
a5e696e5 | 168 | unsigned long size, flags; |
1da177e4 | 169 | |
b633648c | 170 | local_irq_save(flags); |
1da177e4 LT |
171 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
172 | size = (size + 1) >> 1; | |
75b5b5e0 LY |
173 | if (size <= (current_cpu_data.tlbsizeftlbsets ? |
174 | current_cpu_data.tlbsize / 8 : | |
175 | current_cpu_data.tlbsize / 2)) { | |
1da177e4 LT |
176 | int pid = read_c0_entryhi(); |
177 | ||
178 | start &= (PAGE_MASK << 1); | |
179 | end += ((PAGE_SIZE << 1) - 1); | |
180 | end &= (PAGE_MASK << 1); | |
f1014d1b | 181 | htw_stop(); |
1da177e4 LT |
182 | |
183 | while (start < end) { | |
184 | int idx; | |
185 | ||
186 | write_c0_entryhi(start); | |
187 | start += (PAGE_SIZE << 1); | |
188 | mtc0_tlbw_hazard(); | |
189 | tlb_probe(); | |
432bef2a | 190 | tlb_probe_hazard(); |
1da177e4 LT |
191 | idx = read_c0_index(); |
192 | write_c0_entrylo0(0); | |
193 | write_c0_entrylo1(0); | |
194 | if (idx < 0) | |
195 | continue; | |
196 | /* Make sure all entries differ. */ | |
172546bf | 197 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
1da177e4 LT |
198 | mtc0_tlbw_hazard(); |
199 | tlb_write_indexed(); | |
200 | } | |
201 | tlbw_use_hazard(); | |
202 | write_c0_entryhi(pid); | |
f1014d1b | 203 | htw_start(); |
1da177e4 LT |
204 | } else { |
205 | local_flush_tlb_all(); | |
206 | } | |
14bd8c08 | 207 | flush_itlb(); |
b633648c | 208 | local_irq_restore(flags); |
1da177e4 LT |
209 | } |
210 | ||
211 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |
212 | { | |
213 | int cpu = smp_processor_id(); | |
214 | ||
215 | if (cpu_context(cpu, vma->vm_mm) != 0) { | |
216 | unsigned long flags; | |
217 | int oldpid, newpid, idx; | |
218 | ||
219 | newpid = cpu_asid(cpu, vma->vm_mm); | |
220 | page &= (PAGE_MASK << 1); | |
b633648c | 221 | local_irq_save(flags); |
1da177e4 | 222 | oldpid = read_c0_entryhi(); |
f1014d1b | 223 | htw_stop(); |
1da177e4 LT |
224 | write_c0_entryhi(page | newpid); |
225 | mtc0_tlbw_hazard(); | |
226 | tlb_probe(); | |
432bef2a | 227 | tlb_probe_hazard(); |
1da177e4 LT |
228 | idx = read_c0_index(); |
229 | write_c0_entrylo0(0); | |
230 | write_c0_entrylo1(0); | |
231 | if (idx < 0) | |
232 | goto finish; | |
233 | /* Make sure all entries differ. */ | |
172546bf | 234 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
1da177e4 LT |
235 | mtc0_tlbw_hazard(); |
236 | tlb_write_indexed(); | |
237 | tlbw_use_hazard(); | |
238 | ||
239 | finish: | |
240 | write_c0_entryhi(oldpid); | |
f1014d1b | 241 | htw_start(); |
14bd8c08 | 242 | flush_itlb_vm(vma); |
b633648c | 243 | local_irq_restore(flags); |
1da177e4 LT |
244 | } |
245 | } | |
246 | ||
247 | /* | |
248 | * This one is only used for pages with the global bit set so we don't care | |
249 | * much about the ASID. | |
250 | */ | |
251 | void local_flush_tlb_one(unsigned long page) | |
252 | { | |
253 | unsigned long flags; | |
254 | int oldpid, idx; | |
255 | ||
b633648c | 256 | local_irq_save(flags); |
1da177e4 | 257 | oldpid = read_c0_entryhi(); |
f1014d1b | 258 | htw_stop(); |
172546bf | 259 | page &= (PAGE_MASK << 1); |
1da177e4 LT |
260 | write_c0_entryhi(page); |
261 | mtc0_tlbw_hazard(); | |
262 | tlb_probe(); | |
432bef2a | 263 | tlb_probe_hazard(); |
1da177e4 LT |
264 | idx = read_c0_index(); |
265 | write_c0_entrylo0(0); | |
266 | write_c0_entrylo1(0); | |
267 | if (idx >= 0) { | |
268 | /* Make sure all entries differ. */ | |
172546bf | 269 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
1da177e4 LT |
270 | mtc0_tlbw_hazard(); |
271 | tlb_write_indexed(); | |
272 | tlbw_use_hazard(); | |
273 | } | |
274 | write_c0_entryhi(oldpid); | |
f1014d1b | 275 | htw_start(); |
14bd8c08 | 276 | flush_itlb(); |
b633648c | 277 | local_irq_restore(flags); |
1da177e4 LT |
278 | } |
279 | ||
280 | /* | |
281 | * We will need multiple versions of update_mmu_cache(), one that just | |
282 | * updates the TLB with the new pte(s), and another which also checks | |
283 | * for the R4k "end of page" hardware bug and does the needy. | |
284 | */ | |
285 | void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |
286 | { | |
287 | unsigned long flags; | |
288 | pgd_t *pgdp; | |
c6e8b587 | 289 | pud_t *pudp; |
1da177e4 LT |
290 | pmd_t *pmdp; |
291 | pte_t *ptep; | |
292 | int idx, pid; | |
293 | ||
294 | /* | |
295 | * Handle debugger faulting in for debugee. | |
296 | */ | |
297 | if (current->active_mm != vma->vm_mm) | |
298 | return; | |
299 | ||
b633648c | 300 | local_irq_save(flags); |
172546bf | 301 | |
6a8dff6a | 302 | htw_stop(); |
48c4ac97 | 303 | pid = read_c0_entryhi() & ASID_MASK; |
1da177e4 LT |
304 | address &= (PAGE_MASK << 1); |
305 | write_c0_entryhi(address | pid); | |
306 | pgdp = pgd_offset(vma->vm_mm, address); | |
307 | mtc0_tlbw_hazard(); | |
308 | tlb_probe(); | |
432bef2a | 309 | tlb_probe_hazard(); |
c6e8b587 RB |
310 | pudp = pud_offset(pgdp, address); |
311 | pmdp = pmd_offset(pudp, address); | |
1da177e4 | 312 | idx = read_c0_index(); |
aa1762f4 | 313 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
314 | /* this could be a huge page */ |
315 | if (pmd_huge(*pmdp)) { | |
316 | unsigned long lo; | |
317 | write_c0_pagemask(PM_HUGE_MASK); | |
318 | ptep = (pte_t *)pmdp; | |
6dd9344c | 319 | lo = pte_to_entrylo(pte_val(*ptep)); |
fd062c84 DD |
320 | write_c0_entrylo0(lo); |
321 | write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); | |
322 | ||
323 | mtc0_tlbw_hazard(); | |
324 | if (idx < 0) | |
325 | tlb_write_random(); | |
326 | else | |
327 | tlb_write_indexed(); | |
fb944c9b | 328 | tlbw_use_hazard(); |
fd062c84 DD |
329 | write_c0_pagemask(PM_DEFAULT_MASK); |
330 | } else | |
331 | #endif | |
332 | { | |
333 | ptep = pte_offset_map(pmdp, address); | |
1da177e4 | 334 | |
34adb28d | 335 | #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) |
fd062c84 DD |
336 | write_c0_entrylo0(ptep->pte_high); |
337 | ptep++; | |
338 | write_c0_entrylo1(ptep->pte_high); | |
1da177e4 | 339 | #else |
6dd9344c DD |
340 | write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); |
341 | write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); | |
1da177e4 | 342 | #endif |
fd062c84 DD |
343 | mtc0_tlbw_hazard(); |
344 | if (idx < 0) | |
345 | tlb_write_random(); | |
346 | else | |
347 | tlb_write_indexed(); | |
348 | } | |
1da177e4 | 349 | tlbw_use_hazard(); |
6a8dff6a | 350 | htw_start(); |
14bd8c08 | 351 | flush_itlb_vm(vma); |
b633648c | 352 | local_irq_restore(flags); |
1da177e4 LT |
353 | } |
354 | ||
/*
 * Install a permanent wired TLB entry with the given EntryLo pair,
 * EntryHi and page mask.  The wired count is bumped so the entry
 * survives local_flush_tlb_all().
 */
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
387 | ||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Detect huge-page TLB support by writing PM_HUGE_MASK to c0_pagemask
 * and checking whether the hardware retains it.
 */
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
407 | ||
d377732c RM |
408 | /* |
409 | * Used for loading TLB entries before trap_init() has started, when we | |
410 | * don't actually want to add a wired entry which remains throughout the | |
411 | * lifetime of the system | |
412 | */ | |
413 | ||
6ee1d934 | 414 | int temp_tlb_entry __cpuinitdata; |
d377732c RM |
415 | |
416 | __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |
417 | unsigned long entryhi, unsigned long pagemask) | |
418 | { | |
419 | int ret = 0; | |
420 | unsigned long flags; | |
421 | unsigned long wired; | |
422 | unsigned long old_pagemask; | |
423 | unsigned long old_ctx; | |
424 | ||
425 | local_irq_save(flags); | |
426 | /* Save old context and create impossible VPN2 value */ | |
6a8dff6a | 427 | htw_stop(); |
d377732c RM |
428 | old_ctx = read_c0_entryhi(); |
429 | old_pagemask = read_c0_pagemask(); | |
430 | wired = read_c0_wired(); | |
431 | if (--temp_tlb_entry < wired) { | |
432 | printk(KERN_WARNING | |
433 | "No TLB space left for add_temporary_entry\n"); | |
434 | ret = -ENOSPC; | |
435 | goto out; | |
436 | } | |
437 | ||
438 | write_c0_index(temp_tlb_entry); | |
439 | write_c0_pagemask(pagemask); | |
440 | write_c0_entryhi(entryhi); | |
441 | write_c0_entrylo0(entrylo0); | |
442 | write_c0_entrylo1(entrylo1); | |
443 | mtc0_tlbw_hazard(); | |
444 | tlb_write_indexed(); | |
445 | tlbw_use_hazard(); | |
446 | ||
447 | write_c0_entryhi(old_ctx); | |
448 | write_c0_pagemask(old_pagemask); | |
6a8dff6a | 449 | htw_start(); |
d377732c RM |
450 | out: |
451 | local_irq_restore(flags); | |
452 | return ret; | |
453 | } | |
454 | ||
078a55fc | 455 | static int ntlb; |
41c594ab RB |
456 | static int __init set_ntlb(char *str) |
457 | { | |
458 | get_option(&str, &ntlb); | |
459 | return 1; | |
460 | } | |
461 | ||
462 | __setup("ntlb=", set_ntlb); | |
463 | ||
eaa38d63 JH |
464 | /* |
465 | * Configure TLB (for init or after a CPU has been powered off). | |
466 | */ | |
467 | static void r4k_tlb_configure(void) | |
1da177e4 | 468 | { |
1da177e4 LT |
469 | /* |
470 | * You should never change this register: | |
471 | * - On R4600 1.7 the tlbp never hits for pages smaller than | |
472 | * the value in the c0_pagemask register. | |
473 | * - The entire mm handling assumes the c0_pagemask register to | |
a7c2996e | 474 | * be set to fixed-size pages. |
1da177e4 | 475 | */ |
1da177e4 LT |
476 | write_c0_pagemask(PM_DEFAULT_MASK); |
477 | write_c0_wired(0); | |
cde15b59 RB |
478 | if (current_cpu_type() == CPU_R10000 || |
479 | current_cpu_type() == CPU_R12000 || | |
480 | current_cpu_type() == CPU_R14000) | |
481 | write_c0_framemask(0); | |
6dd9344c | 482 | |
05857c64 | 483 | if (cpu_has_rixi) { |
6dd9344c DD |
484 | /* |
485 | * Enable the no read, no exec bits, and enable large virtual | |
486 | * address. | |
487 | */ | |
488 | u32 pg = PG_RIE | PG_XIE; | |
489 | #ifdef CONFIG_64BIT | |
490 | pg |= PG_ELPA; | |
491 | #endif | |
492 | write_c0_pagegrain(pg); | |
493 | } | |
494 | ||
d377732c RM |
495 | temp_tlb_entry = current_cpu_data.tlbsize - 1; |
496 | ||
70342287 | 497 | /* From this point on the ARC firmware is dead. */ |
1da177e4 LT |
498 | local_flush_tlb_all(); |
499 | ||
c6281edb | 500 | /* Did I tell you that ARC SUCKS? */ |
eaa38d63 JH |
501 | } |
502 | ||
503 | void tlb_init(void) | |
504 | { | |
505 | r4k_tlb_configure(); | |
c6281edb | 506 | |
41c594ab RB |
507 | if (ntlb) { |
508 | if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { | |
509 | int wired = current_cpu_data.tlbsize - ntlb; | |
510 | write_c0_wired(wired); | |
511 | write_c0_index(wired-1); | |
49a89efb | 512 | printk("Restricting TLB to %d entries\n", ntlb); |
41c594ab RB |
513 | } else |
514 | printk("Ignoring invalid argument ntlb=%d\n", ntlb); | |
515 | } | |
516 | ||
1da177e4 LT |
517 | build_tlb_refill_handler(); |
518 | } | |
eaa38d63 JH |
519 | |
520 | static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, | |
521 | void *v) | |
522 | { | |
523 | switch (cmd) { | |
524 | case CPU_PM_ENTER_FAILED: | |
525 | case CPU_PM_EXIT: | |
526 | r4k_tlb_configure(); | |
527 | break; | |
528 | } | |
529 | ||
530 | return NOTIFY_OK; | |
531 | } | |
532 | ||
533 | static struct notifier_block r4k_tlb_pm_notifier_block = { | |
534 | .notifier_call = r4k_tlb_pm_notifier, | |
535 | }; | |
536 | ||
537 | static int __init r4k_tlb_init_pm(void) | |
538 | { | |
539 | return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); | |
540 | } | |
541 | arch_initcall(r4k_tlb_init_pm); |