arch/x86/kernel/ldt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

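/*
 * Illustrative only (not part of this file): userspace reaches write_ldt()
 * below through modify_ldt(2).  The field values in this sketch are
 * assumptions chosen for the example, not requirements of this code.
 *
 *	struct user_desc desc = {
 *		.entry_number	 = 0,
 *		.base_addr	 = (unsigned long)some_buffer,
 *		.limit		 = 0xfff,
 *		.seg_32bit	 = 1,
 *		.contents	 = MODIFY_LDT_CONTENTS_DATA,
 *		.read_exec_only	 = 0,
 *		.limit_in_pages	 = 0,
 *		.seg_not_present = 0,
 *		.useable	 = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 */
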
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

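/*
 * A segment register keeps a hidden copy of its descriptor from the time
 * the selector was loaded; rewriting the LDT does not update that cache.
 * Reloading any selector that points into the LDT (TI bit set) forces the
 * CPU to re-read the new descriptor.
 */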
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
        unsigned short sel;

        /*
         * Make sure that the cached DS and ES descriptors match the updated
         * LDT.
         */
        savesegment(ds, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(ds, sel);

        savesegment(es, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(es, sel);
#endif
}

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;
        mm_context_t *pc;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        pc = &mm->context;
        set_ldt(pc->ldt->entries, pc->ldt->nr_entries);

        refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}

/*
 * After calling this, the LDT is immutable: a paravirt hypervisor such as
 * Xen may map the entries read-only from paravirt_alloc_ldt() onwards.
 */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
{
        /* Synchronizes with lockless_dereference in load_mm_ldt. */
        smp_store_release(&current_mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current_mm. */
        on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
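        /* Undo alloc_ldt_struct(): vmalloc'ed if multi-page, else one page. */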
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}

/*
 * We do not have to muck with descriptors here; that is
 * done in switch_mm() as needed.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        struct mm_struct *old_mm;
        int retval = 0;

        mutex_init(&mm->context.lock);
        old_mm = current->mm;
        if (!old_mm) {
                mm->context.ldt = NULL;
                return 0;
        }

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt) {
                mm->context.ldt = NULL;
                goto out_unlock;
        }

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register; we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}

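/*
 * Copy the current LDT out to userspace.  Reads past the end of the table
 * are zero-filled; the return value is the number of bytes "read", i.e.
 * bytecount clamped to the maximum possible LDT size.
 */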
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        mutex_lock(&mm->context.lock);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        mutex_unlock(&mm->context.lock);
        return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}

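/*
 * Install a single LDT entry, replacing the whole LDT with a grown copy
 * when the table needs to expand.  oldmode selects the legacy func==1
 * behaviour: the useable (AVL) bit is forced to zero and "contents == 3"
 * descriptors are rejected.
 */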
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        mutex_lock(&mm->context.lock);

        old_ldt = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        mutex_unlock(&mm->context.lock);
out:
        return error;
}

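/*
 * modify_ldt(2) entry point.  func selects the operation:
 *   0    - read the current LDT              (read_ldt)
 *   1    - write an entry, legacy semantics  (write_ldt, oldmode=1)
 *   2    - read the default LDT              (read_default_ldt)
 *   0x11 - write an entry, new semantics     (write_ldt, oldmode=0)
 */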
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
                              unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        return ret;
}