/*
 * arch/x86/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
        unsigned short sel;

        /*
         * Make sure that the cached DS and ES descriptors match the updated
         * LDT.
         */
        savesegment(ds, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(ds, sel);

        savesegment(es, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(es, sel);
#endif
}
/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        load_mm_ldt(mm);

        refresh_ldt_segments();
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        /* The new LDT isn't aliased for PTI yet. */
        new_ldt->slot = -1;

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}
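
/*
 * Illustrative note (not part of the original file): with LDT_ENTRIES == 8192
 * and LDT_ENTRY_SIZE == 8, a maximal LDT is 64 KiB and takes the vzalloc()
 * path above, while anything up to 512 entries (one 4 KiB page on x86) is
 * served by get_zeroed_page().
 */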
/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        bool is_vmalloc, had_top_level_entry;
        unsigned long va;
        spinlock_t *ptl;
        pgd_t *pgd;
        int i;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return 0;

        /*
         * Any given ldt_struct should have map_ldt_struct() called at most
         * once.
         */
        WARN_ON(ldt->slot != -1);

        /*
         * Did we already have the top level entry allocated?  We can't
         * use pgd_none() for this because it doesn't do anything on
         * 4-level page table kernels.
         */
        pgd = pgd_offset(mm, LDT_BASE_ADDR);
        had_top_level_entry = (pgd->pgd != 0);

        is_vmalloc = is_vmalloc_addr(ldt->entries);

        for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
                pte_t pte, *ptep;

                va = (unsigned long)ldt_slot_va(slot) + offset;
                pfn = is_vmalloc ? vmalloc_to_pfn(src) :
                        page_to_pfn(virt_to_page(src));
                /*
                 * Treat the PTI LDT range as a *userspace* range.
                 * get_locked_pte() will allocate all needed pagetables
                 * and account for them in this mm.
                 */
                ptep = get_locked_pte(mm, va, &ptl);
                if (!ptep)
                        return -ENOMEM;
                /*
                 * Map it RO so the easy to find address is not a primary
                 * target via some kernel interface which misses a
                 * permission check.
                 */
                pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
                set_pte_at(mm, va, ptep, pte);
                pte_unmap_unlock(ptep, ptl);
        }

        if (mm->context.ldt) {
                /*
                 * We already had an LDT.  The top-level entry should already
                 * have been allocated and synchronized with the usermode
                 * tables.
                 */
                WARN_ON(!had_top_level_entry);
                if (static_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
        } else {
                /*
                 * This is the first time we're mapping an LDT for this
                 * process.  Sync the pgd to the usermode tables.
                 */
                WARN_ON(had_top_level_entry);
                if (static_cpu_has(X86_FEATURE_PTI)) {
                        WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
                        set_pgd(kernel_to_user_pgdp(pgd), *pgd);
                }
        }

        va = (unsigned long)ldt_slot_va(slot);
        flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);

        ldt->slot = slot;
#endif
        return 0;
}
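
/*
 * Illustration (not part of the original file): map_ldt_struct() above
 * assumes that ldt_slot_va() returns a fixed per-slot virtual address inside
 * the PTI LDT region, so slots 0 and 1 are two disjoint, stride-separated
 * aliases. A minimal sketch of that assumption, using the LDT_BASE_ADDR and
 * LDT_SLOT_STRIDE constants referenced elsewhere in this file:
 */
static inline void *example_ldt_slot_va(int slot)
{
        /* Hypothetical helper; the real one lives in asm/mmu_context.h. */
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}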
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        struct mmu_gather tlb;
        unsigned long start = LDT_BASE_ADDR;
        unsigned long end = start + (1UL << PGDIR_SHIFT);

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        tlb_gather_mmu(&tlb, mm, start, end);
        free_pgd_range(&tlb, start, end, start, end);
        tlb_finish_mmu(&tlb, start, end);
#endif
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
        mutex_lock(&mm->context.lock);

        /* Synchronizes with READ_ONCE in load_mm_ldt. */
        smp_store_release(&mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current's mm. */
        on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

        mutex_unlock(&mm->context.lock);
}
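
/*
 * Illustration (not part of the original file): a simplified sketch of the
 * reader side that the smp_store_release() above pairs with. The real reader
 * is load_mm_ldt() in asm/mmu_context.h; the body below is an assumption for
 * illustration only.
 */
static inline void example_load_mm_ldt(struct mm_struct *mm)
{
        /* Pairs with the smp_store_release() in install_ldt(). */
        struct ldt_struct *ldt = READ_ONCE(mm->context.ldt);

        if (unlikely(ldt))
                set_ldt(ldt->entries, ldt->nr_entries);
        else
                clear_LDT();
}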
static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}
/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        int retval = 0;

        if (!old_mm)
                return 0;

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt)
                goto out_unlock;

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        retval = map_ldt_struct(mm, new_ldt, 0);
        if (retval) {
                free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }
        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}
/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}
void ldt_arch_exit_mmap(struct mm_struct *mm)
{
        free_ldt_pgtables(mm);
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        down_read(&mm->context.ldt_usr_sem);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        up_read(&mm->context.ldt_usr_sem);
        return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        if (down_write_killable(&mm->context.ldt_usr_sem))
                return -EINTR;

        old_ldt        = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        /*
         * If we are using PTI, map the new LDT into the userspace pagetables.
         * If there is already an LDT, use the other slot so that other CPUs
         * will continue to use the old LDT until install_ldt() switches
         * them over to the new LDT.
         */
        error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
        if (error) {
                /*
                 * This only can fail for the first LDT setup. If an LDT is
                 * already installed then the PTE page is already
                 * populated. Mop up a half populated page table.
                 */
                if (!WARN_ON_ONCE(old_ldt))
                        free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }

        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        up_write(&mm->context.ldt_usr_sem);
out:
        return error;
}
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
                unsigned long , bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        /*
         * The SYSCALL_DEFINE() macros give us an 'unsigned long'
         * return type, but the ABI for sys_modify_ldt() expects
         * 'int'. This cast gives us an int-sized value in %rax
         * for the return code. The 'unsigned' is necessary so
         * the compiler does not try to sign-extend the negative
         * return codes into the high half of the register when
         * taking the value from int->long.
         */
        return (unsigned int)ret;
}
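
/*
 * Illustration (userspace, not part of this file): a minimal sketch of how a
 * process might exercise the syscall above. func 0x11 reaches write_ldt()
 * with oldmode == 0 and func 0 reaches read_ldt(); the descriptor field
 * values below are examples only. Kept under "#if 0" because it is not
 * kernel code.
 */
#if 0
#include <asm/ldt.h>            /* struct user_desc */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int install_example_ldt_entry(unsigned long base)
{
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = 0;
        desc.base_addr      = base;
        desc.limit          = 0xfffff;
        desc.seg_32bit      = 1;
        desc.limit_in_pages = 1;
        desc.useable        = 1;

        /* Returns 0 on success, -1 with errno set on failure. */
        return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
}
#endif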