/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * It may also be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into the ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since a bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send a kprobe event into the ring buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

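/*
 * Illustrative sketch (not part of this file): this is roughly how a
 * kprobe handler consults trace_call_bpf() before logging, mirroring
 * kprobe_perf_func() in kernel/trace/trace_kprobe.c:
 *
 *	if (prog && !trace_call_bpf(prog, regs))
 *		return;
 *
 * A zero return means the BPF program filtered the event out; otherwise
 * the handler falls through and stores the event in the ring buffer.
 */
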
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

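/*
 * Illustrative sketch (assumes the bpf_probe_read() wrapper declared in
 * samples/bpf/bpf_helpers.h): a BPF program would call the helper as
 *
 *	struct task_struct *task = (void *) bpf_get_current_task();
 *	u64 start_time = 0;
 *
 *	bpf_probe_read(&start_time, sizeof(start_time), &task->start_time);
 *
 * On failure the destination buffer is zeroed, as done above, so the
 * program never reads uninitialized stack memory.
 */
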
static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *unsafe_ptr = (void *) (long) r1;
	void *src = (void *) (long) r2;
	int size = (int) r3;

	/*
	 * Ensure we're in user context, which is safe for the helper to
	 * run in. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc.) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

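/*
 * Illustrative sketch (assumes a bpf_probe_write_user() wrapper as in
 * samples/bpf/bpf_helpers.h): a BPF program overwriting a value in the
 * traced process would do
 *
 *	long val = 42;
 *
 *	bpf_probe_write_user(user_ptr, &val, sizeof(val));
 *
 * where user_ptr is a user-space address taken from the probe context.
 * Loading any program that requests this helper triggers the ratelimited
 * warning above, since a buggy program can corrupt user memory.
 */
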
/*
 * Limited trace_printk():
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to the bpf program stack, that
	 * fmt_size bytes of it were initialized and that fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

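/*
 * Illustrative sketch (assumes the bpf_trace_printk() wrapper from
 * samples/bpf/bpf_helpers.h): the format string must live on the BPF
 * stack and may use at most three specifiers, at most one of them %s:
 *
 *	char fmt[] = "comm %s pid %d\n";
 *	char comm[16];
 *
 *	bpf_get_current_comm(&comm, sizeof(comm));
 *	bpf_trace_printk(fmt, sizeof(fmt), comm,
 *			 bpf_get_current_pid_tgid() >> 32);
 *
 * The output lands in /sys/kernel/debug/tracing/trace_pipe.
 */
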
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers.
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * The return value alone cannot tell whether the read ran
	 * successfully; that has to be judged elsewhere, e.g. in the
	 * eBPF program itself.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

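/*
 * Illustrative sketch (assumes a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "counters", populated from user space with perf event FDs): a BPF
 * program reads the hardware counter on the executing cpu with
 *
 *	u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * An explicit index may be passed instead of BPF_F_CURRENT_CPU, but the
 * event in that slot must still be local to the executing cpu, per the
 * checks above.
 */
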
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *)(long) r1;
	struct bpf_map *map = (struct bpf_map *)(long) r2;
	void *data = (void *)(long) r4;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

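/*
 * Illustrative sketch (assumes a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "events" and the bpf_perf_event_output() wrapper from
 * samples/bpf/bpf_helpers.h): a kprobe program streams a record to the
 * perf ring buffer of the current cpu with
 *
 *	struct { u32 pid; u64 ts; } data = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */
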
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

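/*
 * Note: bpf_event_output() is the variant used by helpers outside of
 * tracing context, e.g. bpf_skb_event_output() in net/core/filter.c,
 * which passes the skb as @ctx together with a @ctx_copy callback so the
 * payload is pulled in as a fragment instead of being staged on the BPF
 * stack first.
 */
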
static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *)(long)r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	u32 idx = (u32)r2;

	if (unlikely(in_interrupt()))
		return -EINVAL;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

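/*
 * Illustrative sketch (assumes a BPF_MAP_TYPE_CGROUP_ARRAY map named
 * "cgroups" whose slot 0 was filled with a cgroup FD from user space):
 *
 *	if (bpf_current_task_under_cgroup(&cgroups, 0) != 1)
 *		return 0;
 *
 * The helper returns 1 if current is in the cgroup hierarchy, 0 if it is
 * not, and a negative error such as -EAGAIN when the slot is empty.
 */
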
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

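/*
 * Illustrative sketch: under the check above, a kprobe program on x86_64
 * may read any naturally aligned field of its context, e.g.
 *
 *	struct pt_regs *regs = ctx;
 *	long arg1 = regs->di;
 *
 * (an aligned 8-byte read of the first function argument), while any
 * write to the context, or a misaligned or out-of-bounds read, is
 * rejected by the verifier at load time.
 */
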
static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to the perf tracepoint buffer, whose first 8 bytes are
	 * hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper.
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
		if (size != sizeof(u64))
			return false;
	} else {
		if (size != sizeof(long))
			return false;
	}
	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), dst_reg, src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
				      offsetof(struct perf_sample_data, period));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), dst_reg, src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
		break;
	}

	return insn - insn_buf;
}

static const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

static struct bpf_prog_type_list perf_event_tl = {
	.ops	= &perf_event_prog_ops,
	.type	= BPF_PROG_TYPE_PERF_EVENT,
};

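/*
 * Illustrative sketch of the rewrite above: a program load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period))
 *
 * is converted at verification time into two loads through the
 * kernel-private context:
 *
 *	r0 = *(r1 + offsetof(struct bpf_perf_event_data_kern, data))
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period))
 *
 * All other offsets are remapped through the 'regs' pointer instead.
 */
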
static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	bpf_register_prog_type(&perf_event_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);