arch/powerpc/mm/tlb_64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

struct pte_freelist_batch
{
        struct rcu_head rcu;
        unsigned int    index;
        pgtable_free_t  tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
          / sizeof(pgtable_free_t))
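/* Illustration (editorial, not from the original file): with a 4K
 * PAGE_SIZE, a two-pointer rcu_head (16 bytes) plus the padded index
 * make the batch header 24 bytes, and pgtable_free_t is one unsigned
 * long, so PTE_FREELIST_SIZE works out to roughly
 * (4096 - 24) / 8 = 509 deferred page-table frees per batch page.
 * Exact values depend on the kernel configuration.
 */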
static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 0, 1);

        pgtable_free(pgf);
}
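/* Note (editorial): this is the older four-argument
 * smp_call_function(func, info, retry, wait); wait == 1 makes the
 * call synchronous, so every other CPU has taken the IPI (and thus
 * left any lockless page-table walk) before pgtable_free() runs.
 */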
static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                pgtable_free(batch->tables[i]);

        free_page((unsigned long)batch);
}
static void pte_free_submit(struct pte_freelist_batch *batch)
{
        INIT_RCU_HEAD(&batch->rcu);
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
                pgtable_free(pgf);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        pgtable_free_now(pgf);
                        return;
                }
                (*batchp)->index = 0;
        }
        (*batchp)->tables[(*batchp)->index++] = pgf;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}
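/* Background (editorial): freed page tables cannot be handed back to
 * the allocator immediately, since other CPUs may still be traversing
 * them without locks (e.g. the hash miss path). Batching the frees and
 * releasing them from an RCU callback guarantees every CPU has passed
 * a quiescent state first; pgtable_free_now() above is the synchronous
 * fallback when no batch page can be allocated.
 */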
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        real_pte_t rpte;
        int i;

        i = batch->index;
        /* We mask the address for the base page size. Huge pages will
         * have applied their own masking already
         */
        addr &= PAGE_MASK;

        /* Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in 4K environment like
         * for SPEs, we obtain the page size from the slice, which thus
         * must still exist (and thus the VMA not reused) at the time
         * of this call
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = mmu_huge_psize;
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
        } else
                psize = pte_pagesize_index(mm, addr, pte);
        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                vsid = get_vsid(mm->context.id, addr);
                WARN_ON(vsid == 0);
        } else
                vsid = get_kernel_vsid(addr);
        vaddr = (vsid << 28) | (addr & 0x0fffffff);
        rpte = __real_pte(__pte(pte), ptep);
        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return. For now, we do global invalidates
         * in that case; it might be worth testing the mm cpu mask though
         * and deciding to use local invalidates instead...
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, 0);
                return;
        }
        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
        }
        batch->pte[i] = rpte;
        batch->vaddr[i] = vaddr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
}
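/* Note (editorial): batch->active is expected to be set while a lazy
 * MMU section is open (arch_enter_lazy_mmu_mode() in this era of the
 * powerpc code); outside such a section the function degrades to the
 * immediate flush_hash_page() seen above.
 */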
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        cpumask_t tmp;
        int i, local = 0;

        i = batch->index;
        tmp = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;
        if (i == 1)
                flush_hash_page(batch->vaddr[0], batch->pte[0],
                                batch->psize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}
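/* Note (editorial): "local" here means the mm has only ever run on the
 * current CPU (its cpu_vm_mask equals this CPU's mask), so the flush
 * can use a CPU-local invalidate (tlbiel) rather than a broadcast
 * tlbie; that is the decision the cpus_equal() test makes.
 */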
void pte_free_finish(void)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm    : mm_struct of the target address space (generally init_mm)
 * @start : starting address
 * @end   : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
        unsigned long flags;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);

        BUG_ON(!mm->pgd);
        /* Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_linux_pte(mm->pgd, start);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & _PAGE_HASHPTE))
                        continue;
                hpte_need_flush(mm, start, ptep, pte, 0);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}
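/* Illustration (editorial, not from the original file): a PCI hotplug
 * teardown path could clear the hash entries for a hypothetical 64K IO
 * window mapped in init_mm with something like:
 *
 *      __flush_hash_table_range(&init_mm, io_base, io_base + 0x10000);
 *
 * where io_base is a hypothetical, page-aligned ioremap'd address.
 */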
#endif /* CONFIG_HOTPLUG */