/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
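
/*
 * A host_vm_change collects the mmap/munmap/mprotect operations needed
 * to bring the host address space in line with the guest page tables.
 * Adjacent compatible operations are merged by the add_* helpers below,
 * and the queue is flushed through do_ops() whenever it fills up or a
 * walk completes.
 */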
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops = { { .type = NONE } }, \
	   .id = &mm->context.id, \
	   .data = NULL, \
	   .index = 0, \
	   .force = force })

static void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}
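
/*
 * Flush queued operations out to the host, dispatching each entry to
 * map(), unmap() or protect() on this mm's mm_id.  Stops at the first
 * failure; -ENOMEM is additionally reported via report_enomem().
 */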
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	if (ret == -ENOMEM)
		report_enomem();

	return ret;
}
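
/*
 * Queue an mmap of the physical pages backing [virt, virt + len).  If
 * the request simply extends the previous MMAP entry (same protection
 * and fd, contiguous addresses and offsets), the two are merged instead
 * of consuming a new slot.
 */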
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt,
						     .len = len,
						     .prot = prot,
						     .fd = fd,
						     .offset = offset }
					 } });
	return ret;
}
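
/*
 * Queue an munmap of [addr, addr + len), merging with a contiguous
 * preceding MUNMAP entry where possible.  The stub pages must stay
 * mapped, so requests inside [STUB_START, STUB_END) are rejected.
 */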
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if ((addr >= STUB_START) && (addr < STUB_END))
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
	return ret;
}
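
/*
 * Queue an mprotect of [addr, addr + len), again merging contiguous
 * requests that ask for the same protection.
 */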
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
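
/*
 * Walk the PTEs covered by one PMD and queue host operations for any
 * entry that is marked new.  Read and write permission are withheld
 * from pages that are not young or not dirty, so the resulting faults
 * can be used to emulate the accessed and dirty bits.  The stub range
 * is skipped entirely.
 */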
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
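
/*
 * Walk the page tables between start_addr and end_addr and bring the
 * host mappings in line with them, unconditionally if force is set.
 * If updating the host fails, the current process is killed, since its
 * host address space is no longer consistent with its page tables.
 */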
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal(&current->thread.regs);
	}
}
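
/*
 * Flush a range of kernel mappings in init_mm directly against the
 * host with os_unmap_memory()/map_memory()/os_protect_memory(),
 * skipping over table entries that are not present.  Returns nonzero
 * if anything was changed.
 */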
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
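
/*
 * Flush a single page of a userspace address space, applying the same
 * newpage/newprot logic as the range walkers but issuing the host
 * operation immediately.  A missing page table entry or a host failure
 * is fatal to the process.
 */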
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err) {
		if (err == -ENOMEM)
			report_enomem();

		goto kill;
	}

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
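
/* Trivial page table accessors for use by code outside this file. */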
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
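
/*
 * fix_range() is the userspace counterpart of the kernel range flushes
 * above; flush_tlb_range() chooses between the two depending on
 * whether the VMA belongs to an mm.
 */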
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);
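
/*
 * The full-mm flushes walk every VMA in turn.  force_flush_all() passes
 * force = 1 so the host mappings are rebuilt even where the page table
 * entries look clean.
 */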
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}