// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}
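
/*
 * Background for the checks above: bit 2 of a segment selector is the TI
 * bit, which selects the LDT rather than the GDT. For example, selector
 * 0x0f (index 1, TI = 1, RPL = 3) refers to the LDT and would be
 * reloaded, while 0x2b (TI = 0) refers to the GDT and is left alone.
 */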

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}
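
/*
 * For scale: LDT_ENTRY_SIZE is 8 bytes, so a full LDT of LDT_ENTRIES
 * (8192) descriptors is 64 KiB, while anything up to 512 entries fits in
 * the single zeroed page above (assuming 4 KiB pages).
 */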

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	bool is_vmalloc, had_top_level_entry;
	unsigned long va;
	spinlock_t *ptl;
	pgd_t *pgd;
	int i;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/*
	 * Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);
	had_top_level_entry = (pgd->pgd != 0);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI)) {
			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
		}
	}

	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);

	ldt->slot = slot;
#endif
	return 0;
}

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = start + (1UL << PGDIR_SHIFT);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}
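
/*
 * The freed range is one full top-level entry (1UL << PGDIR_SHIFT).
 * That matches map_ldt_struct() above, which syncs exactly one pgd to
 * the usermode tables, so the whole PTI LDT area is assumed to live
 * within a single top-level entry.
 */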

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with the READ_ONCE() in load_mm_ldt(). */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
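	/*
	 * vfree_atomic() defers the actual vfree() to a workqueue, so the
	 * multi-page case is safe even from contexts that cannot sleep,
	 * e.g. the final mmdrop() on the task-exit path.
	 */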
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
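	/*
	 * For example, with ret = -EINVAL (-22): (unsigned int)ret is
	 * 0xffffffea, which zero-extends to 0x00000000ffffffea in %rax;
	 * without the cast, sign extension would produce
	 * 0xffffffffffffffea.
	 */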
	return (unsigned int)ret;
}
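
/*
 * Minimal userspace sketch (illustrative only, not part of this file):
 * func 0 reads the current LDT, func 1 writes an entry with the legacy
 * semantics (avl forced to 0), func 2 reads the zero-filled default LDT,
 * and func 0x11 writes with the modern semantics.
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int install_flat_data_segment(void)
 *	{
 *		struct user_desc desc = {
 *			.entry_number	= 0,
 *			.base_addr	= 0,
 *			.limit		= 0xfffff,
 *			.seg_32bit	= 1,
 *			.limit_in_pages	= 1,
 *			.useable	= 1,
 *		};
 *
 *		// Returns 0 on success, -1 with errno set on failure.
 *		return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *	}
 */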