/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
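/*
 * host_vm_change queues up the mmap/munmap/mprotect changes destined for
 * the host address space, so that adjacent operations can be merged and
 * issued in batches rather than one host call per page. The flow: the
 * update_*_range() walkers below append operations with add_mmap(),
 * add_munmap() and add_mprotect(), and do_ops() pushes the queue out.
 */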
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct { unsigned long addr, len;
				 unsigned int prot;
				 int fd;
				 __u64 offset; } mmap;
			struct { unsigned long addr, len; } munmap;
			struct { unsigned long addr, len;
				 unsigned int prot; } mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};
#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
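/*
 * Replay the queued operations against the host address space. 'end' is
 * the number of valid entries in hvc->ops; 'finished' is passed through to
 * the skas map()/unmap()/protect() helpers to mark the final batch of a
 * flush. The loop stops at the first failing operation.
 */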
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	return ret;
}
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}
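/*
 * Round 'n' up to the next multiple of 'inc', which must be a power of
 * two. An already-aligned 'n' still advances to the following boundary,
 * e.g. ADD_ROUND(0x1234, 0x1000) == 0x2000 and
 * ADD_ROUND(0x1000, 0x1000) == 0x2000; the kernel-range walker below
 * relies on this to step past a missing upper-level entry.
 */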
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
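/*
 * Walk the PTEs of one pmd and queue the host-side updates they imply.
 * The host protection is derived from the r/w/x bits, with read and write
 * masked off while the page is not young and write masked off while it is
 * clean, so that accessed/dirty information is collected via faults.
 * Pages inside the stub area are left strictly alone.
 */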
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
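/*
 * Top-level walker: scan [start_addr, end_addr) of 'mm' and bring the
 * host mappings into line with the guest page tables, batching the
 * changes in a host_vm_change. With 'force' set, mappings are rebuilt
 * even where no new-page flag is pending. Failure is fatal for the
 * process.
 */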
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		force_sig(SIGKILL, current);
	}
}
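/*
 * Kernel mappings live in the UML kernel's own host process, so they are
 * fixed up directly with os_unmap_memory()/map_memory() against init_mm
 * rather than queued through a host_vm_change. Returns nonzero if
 * anything was updated. A failed munmap leaves the address space
 * inconsistent, hence the panic.
 */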
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
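/*
 * Single-page fast path: apply the pending change for one page to the
 * host immediately instead of walking a whole range. Any failure,
 * including a page-table level that is unexpectedly absent, kills the
 * process, since its host address space can no longer be trusted.
 */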
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
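/*
 * Trivial out-of-line wrappers around the page-table accessors, for code
 * that needs callable versions of these (the inline/macro forms are not
 * usable everywhere), plus addr_pte() for callers that only have a
 * task_struct.
 */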
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}
pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}
pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}
pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}
void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}
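/*
 * The flush_tlb_* entry points funnel into fix_range() for process
 * address spaces; a vma without an mm is a kernel mapping and goes
 * through flush_tlb_kernel_range_common() instead.
 */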
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}
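/*
 * Rebuild the host mappings for every VMA of the current address space,
 * with force set so that even pages with no pending new-page flag are
 * remapped.
 */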
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}