/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

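/*
 * The ->name callback makes these VMAs show up as "[mpx]" in
 * /proc/<pid>/maps.  The ops pointer itself also serves as the
 * unique identity check in is_mpx_vma() below.
 */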
static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only bounds tables can be allocated here */
	if (len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Obtain the address to map to. We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}

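/*
 * The three ways an x86 memory operand can name a register: the
 * ModRM.rm field itself, or the index/base fields of a SIB byte.
 * get_reg_offset() maps each one to an offset into struct pt_regs.
 */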
enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

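	/*
	 * A REX prefix (REX.B or REX.X) supplies the fourth bit of
	 * the register number, extending the eight legacy registers
	 * to sixteen in 64-bit mode.
	 */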
	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type\n");
		BUG();
		break;
	}

	/* regno indexes regoff[], so >= is the correct bounds check */
	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

/*
 * Return the memory address being referenced by the instruction.
 * For ModRM.rm == 3, that is simply the contents of the rm register;
 * otherwise, compute the address from the SIB byte and displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

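	/*
	 * ModRM.mod == 3 means the operand is the register itself, so
	 * there is no memory address to compute.  Anything else is a
	 * memory operand: base + index * scale + displacement.
	 */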
	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}

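/*
 * Fetch and decode the instruction that regs->ip points at.  The
 * bytes come from userspace and may change underneath us or be
 * garbage, so everything below treats them as untrusted input.
 */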
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail. If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction. If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read. This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction. Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
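	/*
	 * All MPX instructions live in the 0x0f 0x1a/0x1b opcode
	 * space; legacy prefixes distinguish BNDCL/BNDCU/BNDCN/BNDMOV
	 * from BNDLDX/BNDSTX, so checking these two bytes covers
	 * every MPX instruction.
	 */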
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}

/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation
 * address and places that address in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions. We cannot
 * trust anything about it. They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
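	/* MPX provides exactly four bounds registers, bnd0-bnd3 */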
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode. Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

static __user void *mpx_get_bounds_dir(void)
{
	const struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

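/*
 * Reached via the PR_MPX_ENABLE_MANAGEMENT prctl(): userspace asks
 * the kernel to take over allocating and freeing bounds tables on
 * its behalf.
 */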
int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

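/*
 * The bounds directory lives in user memory, and threads can race to
 * instantiate or clear the same entry, so every update goes through
 * an atomic cmpxchg on the user address.
 */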
static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
				unsigned long *curval,
				unsigned long __user *addr,
				unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg. We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}

/*
 * With 32-bit mode, the bounds directory is 4MB and each bounds
 * table is 16KB. With 64-bit mode, the bounds directory is 2GB
 * and each bounds table is 4MB.
 */
static int allocate_bt(long __user *bd_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, MPX_BT_SIZE_BYTES);
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory. If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct bndcsr *bndcsr;

	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault()) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}

/*
 * A thin wrapper around get_user_pages(). Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns the number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
					     unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bt_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit. The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}

/*
 * Get the base of the bounds table pointed to by a specific
 * bounds directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
		       long __user *bd_entry_ptr,
		       unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

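	/*
	 * Callers may hold mmap_sem, so the get_user() must not take
	 * a page fault itself.  Run it with faults disabled; if it
	 * would have faulted, resolve the fault by hand with
	 * mpx_resolve_fault() and retry.
	 */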
	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry? That is OK. It
	 * just means there was no bounds table for this memory. Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here. If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error. This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		/* do not dereference a NULL vma when the list ends */
		if (vma)
			addr = vma->vm_start;
	}

	return 0;
}

static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success. Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry. We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated. Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}

	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() checks the VM_MPX flag to see
	 * whether it is operating on a bounds table itself.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}

/*
 * If the bounds table pointed to by bounds directory 'bd_entry' is
 * not shared, unmap this whole bounds table. Otherwise, only free
 * those backing physical pages of bounds table entries covered
 * in this virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long start,
		unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}

/*
 * A virtual address region being munmap()ed might share a bounds
 * table with adjacent VMAs. We only need to free the backing
 * physical memory of the shared bounds table entries covered in
 * this virtual address region.
 */
static int unmap_edge_bts(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);

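	/*
	 * Each bounds directory entry covers a fixed-size slice of
	 * the virtual address space, so an adjacent VMA that ends in
	 * the same slice as 'start' (or begins in the same slice as
	 * 'end') shares a bounds table with the region being unmapped.
	 */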
	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table needs to be freed. Otherwise the whole bounds table
	 * needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}

	/*
	 * If more than one bounds table is covered by this virtual
	 * address region being munmap()ed, we need to separately check
	 * whether bde_start and bde_end are shared with adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;

	trace_mpx_unmap_search(start, end);
	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas. If they
	 * turn out to be completely unshared, they will be freed. If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
	/* non-present tables are OK */
	case 0:
	case -ENOENT:
		/* Success, or no tables to unmap */
		break;
	case -EINVAL:
	case -EFAULT:
	default:
		return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered
	 *   2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
		case 0:
			break;
		case -ENOENT:
			/* No table here, try the next one */
			continue;
		case -EINVAL:
		case -EFAULT:
		default:
			/*
			 * Note: we are being strict here.
			 * Any time we run in to an issue
			 * unmapping tables, we stop and
			 * SIGSEGV.
			 */
			return ret;
		}

		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start -> end), we bail out and do no further work. Such
	 * recursion would mean having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}