// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 *
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0
struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contests for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};
/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
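
/*
 * Illustrative only (not part of the original file): a worked example of
 * the two translations above, assuming PAGE_SHIFT == 12 (4K pages).
 * If a vma maps file pgoff 0x10 (file offset 0x10000) at vm_start
 * 0x7f0000001000, then for a probe at file offset 0x10123:
 *
 *	vaddr = 0x7f0000001000 + 0x10123 - (0x10 << 12) = 0x7f0000001123
 *
 * and vaddr_to_offset() inverts this exactly:
 *
 *	offset = (0x10 << 12) + (0x7f0000001123 - 0x7f0000001000) = 0x10123
 */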
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = compound_head(old_page),
		.vma = vma,
		.address = addr,
	};
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		get_page(new_page);
		page_add_new_anon_rmap(new_page, vma, addr, false);
		lru_cache_add_inactive_or_unevictable(new_page, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}
static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du  = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *tmp;

	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
			FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}
static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr);

	return ret;
}
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If application munmap(exec_vma) before uprobe_unregister()
		 * gets called, we don't get a chance to remove uprobe from
		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}
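
/*
 * Illustrative only, a minimal sketch of a consumer-supplied filter; the
 * function name and the traced_mm variable are hypothetical.  Returning
 * true from ->filter() means "install/keep the breakpoint for this mm":
 *
 *	static bool my_filter(struct uprobe_consumer *self,
 *			      enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 *	{
 *		return mm == traced_mm;		// probe a single process only
 *	}
 *
 * A consumer with ->filter == NULL is unfiltered: consumer_filter() above
 * treats it as matching every mm.
 */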
static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}
static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO : can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
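
/*
 * Illustrative only: the slot arithmetic above on x86, assuming
 * UPROBE_XOL_SLOT_BYTES == 128 and PAGE_SIZE == 4096, so
 * UINSNS_PER_PAGE == 32.  Slot 0 is reserved for the uretprobe
 * trampoline, so the first breakpoint in a process typically gets
 * slot 1:
 *
 *	slot_addr = area->vaddr + (1 * 128) = area->vaddr + 0x80
 */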
/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}
/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}

/*
 * Current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}
static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep trampoline address in stack, rather keep the
	 * original return address of first caller thru all the consequent
	 * instances. This also makes breakpoint unwrapping easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc, The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task.  'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
			     "bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}
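
/*
 * Illustrative only: the per-hit sequence tying the pieces above together,
 * assuming a probe is installed and the arch cannot emulate the insn:
 *
 *	tracee executes the SWBP insn
 *	  -> trap -> uprobe_pre_sstep_notifier() sets TIF_UPROBE
 *	  -> uprobe_notify_resume() -> handle_swbp()
 *	       -> handler_chain() runs the consumer callbacks
 *	       -> pre_ssout() copies the insn to an XOL slot, single-step starts
 *	tracee single-steps the copy
 *	  -> trap -> uprobe_post_sstep_notifier() sets TIF_UPROBE
 *	  -> uprobe_notify_resume() -> handle_singlestep()
 *	       -> arch_uprobe_post_xol() fixes up regs, the slot is freed
 */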
/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}