/* kernel/kcov.c */
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    allowed); an illustrative userspace sequence is sketched below
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
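
/*
 * Illustrative userspace lifecycle (a sketch, not part of this file;
 * error handling omitted, COVER_SIZE is an arbitrary word count chosen
 * by the caller, debugfs assumed mounted at /sys/kernel/debug):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;			(reset the PC count)
 *	... exercise the syscall under test ...
 *	n = cover[0];			(number of PCs recorded)
 *	ioctl(fd, KCOV_DISABLE, 0);
 */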

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
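
/*
 * Buffer layout produced above: word 0 holds the number of PCs recorded
 * so far, words 1..size-1 hold the PCs themselves. A userspace consumer
 * with the buffer mmapped (cover, as in the sketch near the top of this
 * file) can therefore drain it roughly like this (illustrative only):
 *
 *	unsigned long i, n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */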

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

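/*
 * Called from the task exit path: detaches the exiting task from its
 * kcov descriptor, if any, and drops the reference taken at
 * KCOV_ENABLE time.
 */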
void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

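/*
 * The coverage buffer is allocated on the first mmap() of the file:
 * the same vmalloc'ed pages are inserted into the caller's VMA, so the
 * kernel writes coverage directly into memory user space can read with
 * no copying. Repeated mmap() calls are tolerated, but only the first
 * one actually populates a mapping.
 */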
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

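/*
 * Handles the three commands under kcov->lock (taken by the
 * kcov_ioctl() wrapper below): KCOV_INIT_TRACE selects the mode and
 * buffer size, KCOV_ENABLE attaches the descriptor to the current
 * task and KCOV_DISABLE detaches it again.
 */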
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * which must not overflow.
		 */
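		/*
		 * For example, size = 1 << 16 words is a 512 KB buffer on a
		 * 64-bit kernel, far below the INT_MAX / sizeof(unsigned long)
		 * bound enforced below.
		 */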
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed, so there is no
	 * need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);