/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2007  Axis Communications AB
 *
 * Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */
#include <linux/mm_types.h>

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

/* debug printouts - expands to nothing here; make it x to enable */
#define D(x)
/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
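
/*
 * For reference, the assignment side of this scheme lives in the shared
 * arch/cris/mm/tlb.c (alloc_context() and friends). A rough sketch of
 * that round-robin allocator (map_replace_ptr is its cursor):
 *
 *	old_mm = page_id_map[map_replace_ptr];
 *	if (old_mm) {
 *		flush_tlb_mm(old_mm);
 *		old_mm->context.page_id = NO_CONTEXT;
 *	}
 *	mm->context.page_id = map_replace_ptr;
 *	page_id_map[map_replace_ptr] = mm;
 *	if (++map_replace_ptr == INVALID_PAGEID)
 *		map_replace_ptr = 0;
 */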

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write similar TLB entries
	 * in the same 4-way entry group. details...
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

		*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
			     IO_STATE(R_TLB_LO, valid, no) |
			     IO_STATE(R_TLB_LO, kernel, no) |
			     IO_STATE(R_TLB_LO, we, no) |
			     IO_FIELD(R_TLB_LO, pfn, 0));
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O?
	 */
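
	/*
	 * That check would look roughly like the page_id test below;
	 * a sketch, assuming R_TLB_LO reads back the selected entry
	 * just as R_TLB_HI does:
	 *
	 *	if (IO_EXTRACT(R_TLB_LO, global, *R_TLB_LO))
	 *		continue;
	 *
	 * at the price of one extra register read per TLB entry.
	 */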

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;	/* read back the tag of this entry */

		/* the vpn field occupies the page-number bits of R_TLB_HI,
		 * so the tag can be compared against addr directly
		 */
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				addr; /* same addr as before works. */

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
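	/* no page_id yet; one is handed out lazily, the first time this
	 * mm is switched to, by get_mmu_context() in switch_mm() below
	 */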
	mm->context.page_id = NO_CONTEXT;
	return 0;
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */
		get_mmu_context(next);

		/* remember the pgd for the fault handlers
		 * this is similar to the pgd register in some other CPU's.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to
		 * dereference the pgd.
		 */
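		/* (the consumer is the kernel-space fault path, which walks
		 * this per-cpu copy when resolving faults on vmalloced
		 * addresses; see the fault handling in arch/cris/mm/)
		 */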
		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			 next->context, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}
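
/*
 * The lifecycle counterpart, destroy_context(), which flushes the mm's
 * entries and returns its slot in page_id_map, lives with the allocator
 * in the shared arch/cris/mm/tlb.c.
 */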