/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __METAG_MMU_CONTEXT_H
#define __METAG_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include <linux/io.h>
#include <linux/mm_types.h>

/*
 * enter_lazy_tlb - hook called when the kernel enters lazy-TLB mode for @mm
 * (e.g. when switching to a kernel thread that borrows the previous mm).
 * Meta needs no lazy-TLB bookkeeping, so this is deliberately a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

20 | static inline int init_new_context(struct task_struct *tsk, | |
21 | struct mm_struct *mm) | |
22 | { | |
23 | #ifndef CONFIG_METAG_META21_MMU | |
24 | /* We use context to store a pointer to the page holding the | |
25 | * pgd of a process while it is running. While a process is not | |
26 | * running the pgd and context fields should be equal. | |
27 | */ | |
28 | mm->context.pgd_base = (unsigned long) mm->pgd; | |
29 | #endif | |
30 | #ifdef CONFIG_METAG_USER_TCM | |
31 | INIT_LIST_HEAD(&mm->context.tcm); | |
32 | #endif | |
33 | return 0; | |
34 | } | |
35 | ||
#ifdef CONFIG_METAG_USER_TCM

#include <linux/slab.h>
#include <asm/tcm.h>

/*
 * destroy_context - tear down the MMU context of a dying @mm.
 *
 * Walks the mm's list of TCM allocations, returning each region to the
 * TCM allocator and freeing its bookkeeping node.  The _safe iterator is
 * required because each node is deleted as we go.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	struct tcm_allocation *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &mm->context.tcm, list) {
		tcm_free(cur->tag, cur->addr, cur->size);
		list_del(&cur->list);
		kfree(cur);
	}
}
#else
/* Without user TCM support there is nothing to release at mm teardown. */
#define destroy_context(mm)		do { } while (0)
#endif

#ifdef CONFIG_METAG_META21_MMU
/*
 * load_pgd - point hardware thread @thread's MMU at the page directory @pgd.
 */
static inline void load_pgd(pgd_t *pgd, int thread)
{
	unsigned long phys0 = mmu_phys0_addr(thread);
	unsigned long phys1 = mmu_phys1_addr(thread);
	unsigned long win_flags;

	/*
	 * 0x900 2Gb address space
	 * The permission bits apply to MMU table region which gives a 2MB
	 * window into physical memory.  We especially don't want userland to
	 * be able to access this.
	 */
	win_flags = 0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
		    _PAGE_PRESENT;
	metag_out32(win_flags, phys0);
	/* Set new MMU base address */
	metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif

74 | static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next) | |
75 | { | |
76 | #ifdef CONFIG_METAG_META21_MMU | |
77 | load_pgd(next->pgd, hard_processor_id()); | |
78 | #else | |
79 | unsigned int i; | |
80 | ||
81 | /* prev->context == prev->pgd in the case where we are initially | |
82 | switching from the init task to the first process. */ | |
83 | if (prev->context.pgd_base != (unsigned long) prev->pgd) { | |
84 | for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++) | |
85 | ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; | |
86 | } else | |
87 | prev->pgd = (pgd_t *)mmu_get_base(); | |
88 | ||
89 | next->pgd = prev->pgd; | |
90 | prev->pgd = (pgd_t *) prev->context.pgd_base; | |
91 | ||
92 | for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++) | |
93 | next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i]; | |
94 | ||
95 | flush_cache_all(); | |
96 | #endif | |
97 | flush_tlb_all(); | |
98 | } | |
99 | ||
/*
 * switch_mm - context-switch hook; reprogram the MMU only when the
 * incoming task's mm actually differs from the outgoing one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev == next)
		return;

	switch_mmu(prev, next);
}

/*
 * activate_mm - unconditionally activate @next_mm (e.g. from exec), even if
 * it matches the previous mm, so the MMU state is always reloaded.
 */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mmu(prev_mm, next_mm);
}

#define deactivate_mm(tsk, mm)	do { } while (0)

#endif