/* kernel/bpf/offload.c */

/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>

/* protected by RTNL */
static LIST_HEAD(bpf_prog_offload_devs);

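/* Set up offload state for a program destined for the netdev identified by
 * attr->prog_ifindex.  Only SCHED_CLS and XDP programs can be offloaded.
 */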
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dev_offload *offload;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	init_waitqueue_head(&offload->verifier_done);

	rtnl_lock();
	offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
	if (!offload->netdev) {
		rtnl_unlock();
		kfree(offload);
		return -EINVAL;
	}

	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	rtnl_unlock();

	return 0;
}

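/* Issue command @cmd to the offload netdev via its ndo_bpf callback.
 * Caller must hold RTNL.
 */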
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct net_device *netdev = prog->aux->offload->netdev;

	ASSERT_RTNL();

	if (!netdev)
		return -ENODEV;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

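/* Ask the device to prepare for verification and fetch its verifier ops,
 * then mark the offload as having device state and a running verifier.
 */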
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->dev_ops = data.verifier.ops;

	env->prog->aux->offload->dev_state = true;
	env->prog->aux->offload->verifier_running = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

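/* Release device state for an offloaded program: wait for the verifier to
 * stop, tell the device to destroy the program and unlink the offload.
 * Caller must hold RTNL.
 */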
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	/* Caution - if netdev is destroyed before the program, this function
	 * will be called twice.
	 */

	data.offload.prog = prog;

	if (offload->verifier_running)
		wait_event(offload->verifier_done, !offload->verifier_running);

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	offload->dev_state = false;
	list_del_init(&offload->offloads);
	offload->netdev = NULL;
}

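/* Tear down the offload when the host copy of the program is destroyed:
 * stop any verifier wait, drop the device state under RTNL and free the
 * offload structure.
 */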
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	__bpf_prog_offload_destroy(prog);
	rtnl_unlock();

	kfree(offload);
}

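/* Mark verification as finished and ask the device to translate the
 * now-verified program.
 */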
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

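/* Placeholder for prog->bpf_func: device-bound programs must never be
 * executed on the host.
 */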
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

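/* Offload counterpart of JIT compilation: install the warning stub as
 * bpf_func and let the device translate the program.
 */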
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

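/* Clean up programs offloaded to a netdev when that netdev is unregistered
 * (namespace moves are ignored).
 */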
static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dev_offload *offload, *tmp;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
					 offloads) {
			if (offload->netdev == netdev)
				__bpf_prog_offload_destroy(offload->prog);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

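/* Register the netdevice notifier at boot so offloaded programs are torn
 * down when their device goes away.
 */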
static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);
	return 0;
}

subsys_initcall(bpf_offload_init);