/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

14cf11af PM |
13 | #include <linux/sched.h> |
14 | #include <linux/kernel.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/string.h> | |
17 | #include <linux/types.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/spinlock.h> | |
20 | #include <linux/idr.h> | |
4b16f8e2 | 21 | #include <linux/export.h> |
5a0e3ad6 | 22 | #include <linux/gfp.h> |
851d2e2f | 23 | #include <linux/slab.h> |
14cf11af PM |
24 | |
25 | #include <asm/mmu_context.h> | |
5c1f6ee9 | 26 | #include <asm/pgalloc.h> |
14cf11af | 27 | |
9d670280 | 28 | #include "icswx.h" |
851d2e2f | 29 | |
14cf11af | 30 | static DEFINE_SPINLOCK(mmu_context_lock); |
7317ac87 | 31 | static DEFINE_IDA(mmu_context_ida); |
14cf11af | 32 | |
e85a4710 | 33 | int __init_new_context(void) |
14cf11af PM |
34 | { |
35 | int index; | |
36 | int err; | |
37 | ||
38 | again: | |
7317ac87 | 39 | if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) |
14cf11af PM |
40 | return -ENOMEM; |
41 | ||
42 | spin_lock(&mmu_context_lock); | |
7317ac87 | 43 | err = ida_get_new_above(&mmu_context_ida, 1, &index); |
14cf11af PM |
44 | spin_unlock(&mmu_context_lock); |
45 | ||
46 | if (err == -EAGAIN) | |
47 | goto again; | |
48 | else if (err) | |
49 | return err; | |
50 | ||
c60ac569 | 51 | if (index > MAX_USER_CONTEXT) { |
f86c9747 | 52 | spin_lock(&mmu_context_lock); |
7317ac87 | 53 | ida_remove(&mmu_context_ida, index); |
f86c9747 | 54 | spin_unlock(&mmu_context_lock); |
14cf11af PM |
55 | return -ENOMEM; |
56 | } | |
57 | ||
e85a4710 AG |
58 | return index; |
59 | } | |
60 | EXPORT_SYMBOL_GPL(__init_new_context); | |
61 | ||
62 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
63 | { | |
64 | int index; | |
65 | ||
66 | index = __init_new_context(); | |
67 | if (index < 0) | |
68 | return index; | |
69 | ||
d0f13e3c BH |
70 | /* The old code would re-promote on fork, we don't do that |
71 | * when using slices as it could cause problem promoting slices | |
72 | * that have been forced down to 4K | |
73 | */ | |
e8ff0646 | 74 | if (slice_mm_new_context(mm)) |
d0f13e3c | 75 | slice_set_user_psize(mm, mmu_virtual_psize); |
d28513bc | 76 | subpage_prot_init_new_context(mm); |
9dfe5c53 | 77 | mm->context.id = index; |
851d2e2f THFL |
78 | #ifdef CONFIG_PPC_ICSWX |
79 | mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); | |
80 | if (!mm->context.cop_lockp) { | |
81 | __destroy_context(index); | |
82 | subpage_prot_free(mm); | |
79af2187 | 83 | mm->context.id = MMU_NO_CONTEXT; |
851d2e2f THFL |
84 | return -ENOMEM; |
85 | } | |
86 | spin_lock_init(mm->context.cop_lockp); | |
87 | #endif /* CONFIG_PPC_ICSWX */ | |
14cf11af | 88 | |
5c1f6ee9 AK |
89 | #ifdef CONFIG_PPC_64K_PAGES |
90 | mm->context.pte_frag = NULL; | |
15b244a8 AK |
91 | #endif |
92 | #ifdef CONFIG_SPAPR_TCE_IOMMU | |
93 | mm_iommu_init(&mm->context); | |
5c1f6ee9 | 94 | #endif |
14cf11af PM |
95 | return 0; |
96 | } | |
97 | ||
e85a4710 | 98 | void __destroy_context(int context_id) |
14cf11af PM |
99 | { |
100 | spin_lock(&mmu_context_lock); | |
7317ac87 | 101 | ida_remove(&mmu_context_ida, context_id); |
14cf11af | 102 | spin_unlock(&mmu_context_lock); |
e85a4710 AG |
103 | } |
104 | EXPORT_SYMBOL_GPL(__destroy_context); | |
14cf11af | 105 | |
#ifdef CONFIG_PPC_64K_PAGES
/*
 * destroy_pagetable_page - drop this mm's references on its current
 * PTE fragment page, freeing the page when the last reference goes.
 *
 * context.pte_frag encodes both the fragment address and, in the low
 * bits (below PAGE_MASK, shifted by PTE_FRAG_SIZE_SHIFT), how many
 * fragments of the page have already been handed out.
 */
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
	if (!count) {
		/* Last user gone: tear down and free the page. */
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
/* Without 64K pages there is no PTE fragment state to tear down. */
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
134 | ||
135 | ||
e85a4710 AG |
136 | void destroy_context(struct mm_struct *mm) |
137 | { | |
15b244a8 AK |
138 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
139 | mm_iommu_cleanup(&mm->context); | |
140 | #endif | |
5c1f6ee9 | 141 | |
851d2e2f THFL |
142 | #ifdef CONFIG_PPC_ICSWX |
143 | drop_cop(mm->context.acop, mm); | |
144 | kfree(mm->context.cop_lockp); | |
145 | mm->context.cop_lockp = NULL; | |
146 | #endif /* CONFIG_PPC_ICSWX */ | |
5c1f6ee9 AK |
147 | |
148 | destroy_pagetable_page(mm); | |
e85a4710 | 149 | __destroy_context(mm->context.id); |
d28513bc | 150 | subpage_prot_free(mm); |
5e8e7b40 | 151 | mm->context.id = MMU_NO_CONTEXT; |
14cf11af | 152 | } |