/*
 * arch/x86/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
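
/*
 * Per-mm Local Descriptor Table handling. A published LDT is treated as
 * immutable: writers build a complete replacement table under
 * context.lock and publish it with smp_store_release(), while readers
 * (load_mm_ldt()) pair that with lockless_dereference().
 */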

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;
        mm_context_t *pc;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        pc = &mm->context;
        set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}
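
/*
 * On paravirt, paravirt_alloc_ldt() hands the entry pages to the
 * hypervisor backend (Xen maps them read-only), which is why the table
 * is described as immutable after this call.
 */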

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
{
        /* Synchronizes with lockless_dereference in load_mm_ldt. */
        smp_store_release(&current_mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current_mm. */
        on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
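
/*
 * Free both the descriptor array and the ldt_struct itself. NULL is
 * tolerated, which write_ldt() relies on when there was no previous LDT.
 */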
static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        struct mm_struct *old_mm;
        int retval = 0;

        mutex_init(&mm->context.lock);
        old_mm = current->mm;
        if (!old_mm) {
                mm->context.ldt = NULL;
                return 0;
        }

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt) {
                mm->context.ldt = NULL;
                goto out_unlock;
        }

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}
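
/*
 * Copy up to bytecount bytes of the current mm's LDT to userspace and
 * zero-fill the remainder. Returns the byte count "read" (clamped to
 * the maximum LDT size), 0 if the mm has no LDT, or -EFAULT.
 */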
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        mutex_lock(&mm->context.lock);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        mutex_unlock(&mm->context.lock);
        return retval;
}
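
/* The default LDT is all zeroes, so just clear the user's buffer. */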
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}
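
/*
 * Install one descriptor into the current mm's LDT. The table is never
 * modified in place: a new ldt_struct large enough for the target slot
 * is allocated, the old entries are copied in, the new descriptor is
 * stored, and install_ldt() publishes the result before the old table
 * is freed. oldmode selects the legacy (func == 1) semantics.
 */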
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        mutex_lock(&mm->context.lock);

        old_ldt        = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        install_ldt(mm, new_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        mutex_unlock(&mm->context.lock);
out:
        return error;
}
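
/*
 * modify_ldt(2) entry point: func 0 reads the LDT, func 1 writes an
 * entry with the legacy semantics, func 2 reads the default (zeroed)
 * LDT, and func 0x11 writes an entry with the new-mode checks. Any
 * other func returns -ENOSYS.
 */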
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
                              unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        return ret;
}
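
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * userspace caller exercising the syscall above. There is no glibc
 * wrapper for modify_ldt(), so it goes through syscall(2); the field
 * values below are arbitrary example choices.
 */
#if 0
#include <asm/ldt.h>            /* struct user_desc */
#include <sys/syscall.h>        /* SYS_modify_ldt */
#include <string.h>
#include <unistd.h>

static long install_example_ldt_entry(void)
{
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = 0;        /* first LDT slot */
        desc.base_addr      = 0;        /* flat data segment */
        desc.limit          = 0xfffff;
        desc.seg_32bit      = 1;        /* required unless CONFIG_X86_16BIT */
        desc.limit_in_pages = 1;

        /* func 0x11: the new-mode write_ldt() path above. */
        return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
}
#endif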