/* arch/metag/include/asm/mmu_context.h */

#ifndef __METAG_MMU_CONTEXT_H
#define __METAG_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include <linux/io.h>
#include <linux/mm_types.h>

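/*
 * enter_lazy_tlb() is a no-op on Meta: a kernel thread that temporarily
 * borrows a user mm needs no architecture-specific bookkeeping here.
 */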
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

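/*
 * Called when a new address space is created (fork/exec) to initialise
 * the architecture-specific part of the mm_struct.
 */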
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifndef CONFIG_METAG_META21_MMU
	/*
	 * We use context to store a pointer to the page holding the
	 * pgd of a process while it is running. While a process is not
	 * running the pgd and context fields should be equal.
	 */
	mm->context.pgd_base = (unsigned long) mm->pgd;
#endif
#ifdef CONFIG_METAG_USER_TCM
	INIT_LIST_HEAD(&mm->context.tcm);
#endif
	return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#include <linux/slab.h>
#include <asm/tcm.h>

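/*
 * Called when the address space is torn down: release any tightly
 * coupled memory (TCM) regions still allocated to this mm.
 */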
static inline void destroy_context(struct mm_struct *mm)
{
	struct tcm_allocation *pos, *n;

	list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
		tcm_free(pos->tag, pos->addr, pos->size);
		list_del(&pos->list);
		kfree(pos);
	}
}
#else
#define destroy_context(mm) do { } while (0)
#endif /* CONFIG_METAG_USER_TCM */

#ifdef CONFIG_METAG_META21_MMU
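/*
 * Point the given hardware thread's MMU at a new page directory. Meta 2.1
 * has per-thread MMU table registers, so no software copying of page
 * table entries is needed on a context switch.
 */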
static inline void load_pgd(pgd_t *pgd, int thread)
{
	unsigned long phys0 = mmu_phys0_addr(thread);
	unsigned long phys1 = mmu_phys1_addr(thread);

	/*
	 * 0x900: 2GB address space.
	 * The permission bits apply to the MMU table region, which gives a
	 * 2MB window into physical memory. We especially don't want userland
	 * to be able to access this.
	 */
	metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
		    _PAGE_PRESENT, phys0);
	/* Set new MMU base address */
	metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif /* CONFIG_METAG_META21_MMU */

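/*
 * Switch the MMU from one address space to another. On Meta 2.1 this is
 * just a hardware page-directory switch; on earlier cores the kernel must
 * copy the user PGD entries in and out of the single hardware page table.
 */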
static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_METAG_META21_MMU
	load_pgd(next->pgd, hard_processor_id());
#else
	unsigned int i;

	/*
	 * prev->context.pgd_base == prev->pgd only in the case where we
	 * are initially switching from the init task to the first process;
	 * there are no user PGD entries to save back in that case.
	 */
	if (prev->context.pgd_base != (unsigned long) prev->pgd) {
		/* Save prev's PGD entries out of the hardware table. */
		for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
			((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
	} else {
		prev->pgd = (pgd_t *)mmu_get_base();
	}

	/* next now owns the hardware table; prev keeps its own copy. */
	next->pgd = prev->pgd;
	prev->pgd = (pgd_t *) prev->context.pgd_base;

	/* Load next's PGD entries into the hardware table. */
	for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
		next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];

	flush_cache_all();
#endif
	flush_tlb_all();
}

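/*
 * Called on every task switch; switching between threads of the same
 * process (same mm) needs no MMU work.
 */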
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next)
		switch_mmu(prev, next);
}

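/*
 * Called when a new user address space is installed (e.g. at exec time);
 * unlike switch_mm(), the switch happens even if prev_mm == next_mm.
 */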
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mmu(prev_mm, next_mm);
}

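/* No action is needed when an address space is deactivated. */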
#define deactivate_mm(tsk, mm) do { } while (0)

#endif /* __METAG_MMU_CONTEXT_H */