/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"
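
/*
 * Context ids / PIDs are handed out from mmu_context_ida. The classic
 * ida_pre_get()/ida_get_new_above() interface used here does no
 * locking of its own, so every access is serialized by
 * mmu_context_lock.
 */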
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
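
/*
 * Allocate a context id in [min_id, max_id]. We retry while the IDA
 * needs more memory (-EAGAIN), and free the id again if it came back
 * above max_id, since this IDA interface takes no upper bound itself.
 */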
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
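
/*
 * Reserve a specific context id. The caller expects to get exactly
 * that id back; the WARN fires if it was already taken or the
 * allocation failed.
 */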
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}
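
/*
 * On hash, the number of usable context ids depends on the virtual
 * address space size: CPUs with 68-bit VA can use the full range,
 * otherwise we must stay within the 65-bit VA limit.
 */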
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
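
/*
 * Hash-MMU half of init_new_context(): allocate a context id and set
 * up the slice and subpage-protection state that the SLB miss
 * handlers rely on.
 */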
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * We do switch_slb() early in fork, even before we setup the
	 * mm->context.addr_limit. Default to max task size so that we copy the
	 * default values to paca which will help us to handle slb miss early.
	 */
	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problem promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}
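
/*
 * Radix has one process table entry per PID: allocating a context
 * means picking a free PID and pointing that process table slot at
 * the new mm's page table root.
 */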
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * set the process table entry,
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}
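
/*
 * Arch hook called from fork: take the radix or hash initialization
 * path depending on which MMU mode the machine booted in.
 */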
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}
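
/*
 * Give a context id back to the allocator. Exported alongside
 * hash__alloc_context_id for users outside the core MM (presumably
 * drivers such as cxl) that manage contexts of their own.
 */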
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
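
/*
 * With 64K pages a PTE page is handed out as PTE_FRAG_NR fragments,
 * with the page refcount tracking how many are live. The low bits of
 * the pte_frag cursor encode how many fragments have already been
 * carved off, so the references for the unused remainder must be
 * dropped here.
 */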
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}
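
/*
 * Switch the hardware PID register on radix. POWER9 DD1 needs the
 * mtspr bracketed by isync and followed by an ERAT flush to work
 * around early-silicon behaviour; later implementations only need
 * the isync after the mtspr.
 */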
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif