/* arch/s390/include/asm/tlb.h */

#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct is therefore
 * a two-step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
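
/*
 * Illustrative sketch of the two-step update described above; the
 * helper names are assumptions for illustration only and are not
 * defined in this header:
 *
 *	invalidate_pte(ptep);	invalidate and purge the TLB entry
 *				first, e.g. via IPTE / IDTE / CSP
 *	set_pte(ptep, new);	only then store the new value
 *
 * rather than a single direct store to an attached pte.
 */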

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
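
/*
 * A mmu_table_batch occupies a single page: MAX_TABLE_BATCH is simply
 * the number of table pointers that fit into that page after the
 * rcu/nr header. tlb_remove_table() queues page table fragments in
 * such a batch so they are only freed once concurrent (lockless) page
 * table walkers can no longer see them; tlb_table_flush() drains a
 * pending batch.
 */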
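/*
 * tlb_gather_mmu() starts an unmap cycle for the range [start, end].
 * Passing start == 0 and end == -1 marks a full address space
 * teardown: fullmm below becomes non-zero, since !(0 | (-1 + 1)) == 1.
 */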
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
	tlb_table_flush(tlb);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);
}
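
/*
 * Typical caller-side cycle, sketched for illustration (the unmap loop
 * in the middle is assumed, not defined here):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... clear ptes, then hand pages and page tables to
 *	    tlb_remove_page() / pte_free_tlb() ...
 *	tlb_finish_mmu(&tlb, start, end);
 */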

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	tlb_remove_table(tlb, pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	tlb_remove_table(tlb, pud);
#endif
}
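
/*
 * Summary of the checks above (illustrative; the limits correspond to
 * the address space sizes reachable with a given number of table
 * levels):
 *
 *	asce_limit <= 1UL << 31 (2GB)  ->  two levels,   pmd == pgd
 *	asce_limit <= 1UL << 42 (4TB)  ->  three levels, pud == pgd
 */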

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */