kernel/bpf/helpers.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods, therefore eBPF programs must run under an RCU read lock
 * whenever they are allowed to access maps; hence the
 * rcu_read_lock_held() check in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
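
/* Illustrative sketch (not part of the original file): roughly how a
 * subsystem's verifier_ops->get_func_proto() callback, as described in the
 * comment above, might expose this proto (and the update/delete protos
 * defined below) to its programs. The function name example_func_proto is
 * hypothetical; the callback signature shown is the one used by
 * bpf_verifier_ops in kernels of this vintage.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */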

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
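
/* Illustrative sketch (not part of the original file): from the eBPF program
 * side the three map helpers above are normally reached through the
 * bpf_helpers.h wrappers shipped with the kernel samples/selftests. A
 * minimal, hypothetical usage, assuming a map named "counters" with u32 keys
 * and u64 values has already been defined; the looked-up value pointer stays
 * valid because the program runs under the RCU read lock noted above:
 *
 *	u32 key = 0;
 *	u64 init = 1, *val;
 *
 *	val = bpf_map_lookup_elem(&counters, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *	else
 *		bpf_map_update_elem(&counters, &key, &init, BPF_ANY);
 */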

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
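
/* Illustrative note (not part of the original file): the returned value packs
 * the thread group id in the upper 32 bits and the task (thread) pid in the
 * lower 32 bits, so a caller typically splits it like this:
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;	// userspace "process" id
 *	u32 pid = (u32)pid_tgid;	// userspace "thread" id
 */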

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
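
/* Illustrative note (not part of the original file): mirroring the pid/tgid
 * helper, the gid lands in the upper 32 bits and the uid in the lower 32,
 * both translated into the initial user namespace:
 *
 *	u64 uid_gid = bpf_get_current_uid_gid();
 *	u32 gid = uid_gid >> 32;
 *	u32 uid = (u32)uid_gid;
 */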

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
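
/* Illustrative sketch (not part of the original file): a BPF program usually
 * hands this helper a TASK_COMM_LEN (16) byte stack buffer. Because the
 * argument pair is ARG_PTR_TO_RAW_STACK/ARG_CONST_STACK_SIZE, the buffer does
 * not have to be pre-initialized by the program; the helper either fills it
 * with a NUL-terminated comm string or zeroes it on failure:
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(&comm, sizeof(comm));
 */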