/*
 *  linux/arch/cris/arch-v10/mm/tlb.c
 *
 *  Low level TLB handling
 *
 *
 *  Copyright (C) 2000-2007  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <linux/mm_types.h>

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
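
/*
 * Illustrative sketch only (not used by the code below): the three flush
 * routines in this file all write the same "invalid entry" pattern into
 * the selected TLB slot.  A hypothetical helper wrapping that pattern
 * could look roughly like this; the real routines keep the writes
 * open-coded.
 */
static inline void sketch_write_invalid_tlb_entry(int idx)
{
	/* select the slot, then make its tag and data unmatchable */
	*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, idx);
	*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
		     IO_FIELD(R_TLB_HI, vpn, idx & 0xf));
	*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
		     IO_STATE(R_TLB_LO, valid, no) |
		     IO_STATE(R_TLB_LO, kernel, no) |
		     IO_STATE(R_TLB_LO, we, no) |
		     IO_FIELD(R_TLB_LO, pfn, 0));
}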

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* The vpn of i & 0xf is there so we don't write identical TLB
	 * entries into the same 4-way entry group; the low four vpn bits
	 * select which group an entry belongs to.
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

		*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
			     IO_STATE(R_TLB_LO, valid, no) |
			     IO_STATE(R_TLB_LO, kernel, no) |
			     IO_STATE(R_TLB_LO, we, no) |
			     IO_FIELD(R_TLB_LO, pfn, 0));
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* Mark the TLB entries that match the page_id as invalid.
	 * Here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages.  Is it worth the extra I/O?  (A sketch of that
	 * variant follows this function.)
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}
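
/*
 * Illustrative sketch only (not used by this file): a variant of the
 * flush_tlb_mm() loop that skips entries marked global, as the comment
 * in that function speculates about.  It assumes R_TLB_LO can be read
 * back with IO_EXTRACT just like R_TLB_HI is above; that extra read per
 * entry is the "extra I/O" the comment refers to.  Interrupt disabling
 * is omitted here for brevity; the real routines wrap the loop in
 * local_irq_save()/local_irq_restore().
 */
static inline void sketch_flush_tlb_mm_nonglobal(int page_id)
{
	int i;

	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) != page_id)
			continue;
		if (IO_EXTRACT(R_TLB_LO, global, *R_TLB_LO))
			continue;	/* hypothetical: leave global entries alone */
		*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			     IO_FIELD(R_TLB_HI, vpn, i & 0xf));
		*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
			     IO_STATE(R_TLB_LO, valid, no) |
			     IO_STATE(R_TLB_LO, kernel, no) |
			     IO_STATE(R_TLB_LO, we, no) |
			     IO_FIELD(R_TLB_LO, pfn, 0));
	}
}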

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    addr; /* same addr as before works. */

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */
		get_mmu_context(next);

		/* remember the pgd for the fault handlers
		 * this is similar to the pgd register in some other CPUs.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to
		 * dereference the pgd.  (An illustrative sketch of such a
		 * lookup follows this function.)
		 */

		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			 next->context.page_id, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}
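
/*
 * Illustrative sketch only (not part of this file): the fault handlers
 * that the comment in switch_mm() refers to can walk the page tables
 * starting from the per-cpu current_pgd copy instead of going through
 * current/active_mm.  On this two-level architecture the pmd is folded
 * into the pgd, so the hypothetical lookup below simply casts.  It
 * assumes the usual page-table accessors are visible through the headers
 * included above; the real walk is done in the CRIS fault handling code.
 */
static inline pte_t *sketch_lookup_pte(unsigned long address)
{
	pgd_t *pgd = per_cpu(current_pgd, smp_processor_id()) +
		     pgd_index(address);
	pmd_t *pmd = (pmd_t *)pgd;	/* pmd is folded into the pgd here */

	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}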