]>
Commit | Line | Data |
---|---|---|
14cf11af PM |
1 | /* |
2 | * MMU context allocation for 64-bit kernels. | |
3 | * | |
4 | * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | */ | |
12 | ||
14cf11af PM |
13 | #include <linux/sched.h> |
14 | #include <linux/kernel.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/string.h> | |
17 | #include <linux/types.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/spinlock.h> | |
20 | #include <linux/idr.h> | |
4b16f8e2 | 21 | #include <linux/export.h> |
5a0e3ad6 | 22 | #include <linux/gfp.h> |
851d2e2f | 23 | #include <linux/slab.h> |
14cf11af PM |
24 | |
25 | #include <asm/mmu_context.h> | |
5c1f6ee9 | 26 | #include <asm/pgalloc.h> |
14cf11af | 27 | |
9d670280 | 28 | #include "icswx.h" |
851d2e2f | 29 | |
/* Serializes all allocation/removal on mmu_context_ida below. */
static DEFINE_SPINLOCK(mmu_context_lock);
/* Allocator for hardware MMU context ids handed out to user mms. */
static DEFINE_IDA(mmu_context_ida);
14cf11af | 32 | |
e85a4710 | 33 | int __init_new_context(void) |
14cf11af PM |
34 | { |
35 | int index; | |
36 | int err; | |
37 | ||
38 | again: | |
7317ac87 | 39 | if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) |
14cf11af PM |
40 | return -ENOMEM; |
41 | ||
42 | spin_lock(&mmu_context_lock); | |
7317ac87 | 43 | err = ida_get_new_above(&mmu_context_ida, 1, &index); |
14cf11af PM |
44 | spin_unlock(&mmu_context_lock); |
45 | ||
46 | if (err == -EAGAIN) | |
47 | goto again; | |
48 | else if (err) | |
49 | return err; | |
50 | ||
c60ac569 | 51 | if (index > MAX_USER_CONTEXT) { |
f86c9747 | 52 | spin_lock(&mmu_context_lock); |
7317ac87 | 53 | ida_remove(&mmu_context_ida, index); |
f86c9747 | 54 | spin_unlock(&mmu_context_lock); |
14cf11af PM |
55 | return -ENOMEM; |
56 | } | |
57 | ||
e85a4710 AG |
58 | return index; |
59 | } | |
60 | EXPORT_SYMBOL_GPL(__init_new_context); | |
7e381c0f AK |
61 | static int radix__init_new_context(struct mm_struct *mm, int index) |
62 | { | |
63 | unsigned long rts_field; | |
64 | ||
65 | /* | |
66 | * set the process table entry, | |
67 | */ | |
b23d9c5b | 68 | rts_field = radix__get_tree_size(); |
7e381c0f AK |
69 | process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); |
70 | return 0; | |
71 | } | |
/*
 * Allocate and initialize the MMU context for a new address space.
 *
 * Picks a context id via __init_new_context(), then performs the
 * MMU-flavour specific setup: radix installs a process-table entry,
 * hash initializes slice/subpage-protection state.  Returns 0 on
 * success or a negative errno (id allocation or ICSWX lock allocation
 * failure).
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	if (radix_enabled()) {
		/* radix__init_new_context() always returns 0 here. */
		radix__init_new_context(mm, index);
	} else {

		/* The old code would re-promote on fork, we don't do that
		 * when using slices as it could cause problem promoting slices
		 * that have been forced down to 4K
		 *
		 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
		 * explicitly against context.id == 0. This ensures that we
		 * properly initialize context slice details for newly allocated
		 * mm's (which will have id == 0) and don't alter context slice
		 * inherited via fork (which will have id != 0).
		 *
		 * We should not be calling init_new_context() on init_mm. Hence a
		 * check against 0 is ok.
		 */
		if (mm->context.id == 0)
			slice_set_user_psize(mm, mmu_virtual_psize);
		subpage_prot_init_new_context(mm);
	}
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		/* Unwind everything set up above before failing. */
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	/* No PTE fragment page has been carved up for this mm yet. */
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&mm->context);
#endif
	return 0;
}
122 | ||
/*
 * Return a context id to the allocator.  Only releases the id itself;
 * the rest of the mm's context state is torn down in destroy_context().
 */
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
14cf11af | 130 | |
#ifdef CONFIG_PPC_64K_PAGES
/*
 * Release the mm's cached PTE-fragment page.
 *
 * A page is split into PTE_FRAG_NR fragments; context.pte_frag points
 * just past the last fragment handed out, so the offset within the page
 * encodes how many fragments were consumed.  Drop the references for
 * the fragments that were never handed out and free the page if that
 * takes the refcount to zero.
 */
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
/* Without 64K pages there is no fragment cache to tear down. */
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
158 | ||
159 | ||
/*
 * Tear down an mm's MMU context: IOMMU and coprocessor state first,
 * then the process-table / subpage-protection state, the cached PTE
 * fragment page, and finally the context id itself.
 */
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * NOTE(review): radix__init_new_context() populates prtb0, but the
	 * teardown here zeroes prtb1 — asymmetric.  Confirm against the
	 * process-table layout in the ISA whether prtb0 should be cleared
	 * (or both doublewords) to fully invalidate the entry.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb1 = 0;
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}
#ifdef CONFIG_PPC_RADIX_MMU
/*
 * Switch the radix MMU to the next mm by loading its context id into
 * the PID SPR; the isync orders the SPR write against subsequent
 * instructions so they run under the new context.
 */
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	asm volatile("isync": : :"memory");
}
#endif