/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "nfp_bpf.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"

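/* Periodic timer callback: polls the APP1 frame/byte counters the
 * firmware maintains for the offloaded BPF program, records when they
 * last changed, and re-arms itself while the BPF offload stays enabled.
 */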
void nfp_net_filter_stats_timer(unsigned long data)
{
	struct nfp_net *nn = (void *)data;
	struct nfp_stat_pair latest;

	spin_lock_bh(&nn->rx_filter_lock);

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		mod_timer(&nn->rx_filter_stats_timer,
			  jiffies + NFP_NET_STAT_POLL_IVL);

	spin_unlock_bh(&nn->rx_filter_lock);

	latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

	if (latest.pkts != nn->rx_filter.pkts)
		nn->rx_filter_change = jiffies;

	nn->rx_filter = latest;
}

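/* Re-baseline the counters so a freshly loaded program reports stats
 * starting from zero.
 */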
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
	nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
	nn->rx_filter_prev = nn->rx_filter;
	nn->rx_filter_change = jiffies;
}

static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct tc_action *a;
	LIST_HEAD(actions);
	u64 bytes, pkts;

	pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
	bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
	/* HW byte counters include the Ethernet header, TC stats do not */
	bytes -= pkts * ETH_HLEN;

	nn->rx_filter_prev = nn->rx_filter;

	preempt_disable();

	tcf_exts_to_list(cls_bpf->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);

	preempt_enable();

	return 0;
}

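/* Validate the classifier's actions and map them onto one of the action
 * types the firmware understands (NN_ACT_*); anything else is rejected
 * with -EOPNOTSUPP.
 */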
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!cls_bpf->exts)
		return NN_ACT_XDP;

	/* TC direct action */
	if (cls_bpf->exts_integrated) {
		if (tc_no_actions(cls_bpf->exts))
			return NN_ACT_DIRECT;

		return -EOPNOTSUPP;
	}

	/* TC legacy mode */
	if (!tc_single_action(cls_bpf->exts))
		return -EOPNOTSUPP;

	tcf_exts_to_list(cls_bpf->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a))
			return NN_ACT_TC_DROP;

		if (is_tcf_mirred_egress_redirect(a) &&
		    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
			return NN_ACT_TC_REDIR;
	}

	return -EOPNOTSUPP;
}

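/* JIT-translate the verified BPF program into NFP instructions, placed
 * in a DMA-coherent buffer ready to be handed to the firmware.  On
 * failure the buffer is freed before returning.
 */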
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
			    struct tc_cls_bpf_offload *cls_bpf,
			    struct nfp_bpf_result *res,
			    void **code, dma_addr_t *dma_addr, u16 max_instr)
{
	unsigned int code_sz = max_instr * sizeof(u64);
	enum nfp_bpf_action_type act;
	u16 start_off, done_off;
	unsigned int max_mtu;
	int ret;

	if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
		return -EOPNOTSUPP;

	ret = nfp_net_bpf_get_act(nn, cls_bpf);
	if (ret < 0)
		return ret;
	act = ret;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
	if (!*code)
		return -ENOMEM;

	ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
			  max_instr, res);
	if (ret)
		goto out;

	return 0;

out:
	dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
	return ret;
}

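/* Point the firmware at the JITed code, enable the BPF control bit, and
 * start stats collection.  The DMA buffer is freed once the firmware has
 * consumed the program.
 */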
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
			   void *code, dma_addr_t dma_addr,
			   unsigned int code_sz, unsigned int n_instr,
			   bool dense_mode)
{
	u64 bpf_addr = dma_addr;
	int err;

	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

	if (dense_mode)
		bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);

	dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

	nfp_net_bpf_stats_reset(nn);
	mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
}

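/* Clear the BPF control bit, stop the stats timer, and tell the firmware
 * to drop the program.  Safe to call when no program is loaded.
 */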
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	spin_lock_bh(&nn->rx_filter_lock);
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	spin_unlock_bh(&nn->rx_filter_lock);
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	del_timer_sync(&nn->rx_filter_stats_timer);
	nn->dp.bpf_offload_skip_sw = 0;

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

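/* Entry point for TC classifier offload requests.  Dispatches on the
 * cls_bpf command: replace or add a program, destroy it, or report stats.
 */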
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct nfp_bpf_result res;
	dma_addr_t dma_addr;
	u16 max_instr;
	void *code;
	int err;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

	switch (cls_bpf->command) {
	case TC_CLSBPF_REPLACE:
		/* There is nothing stopping us from implementing seamless
		 * replace, but the simple loading method adopted in the
		 * firmware does not handle atomic replace (i.e. we have to
		 * stop the BPF offload and re-enable it).  Letting a few
		 * frames through without BPF applied in hardware should be
		 * fine if software fallback is available, though.
		 */
		if (nn->dp.bpf_offload_skip_sw)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_stop(nn);
		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_ADD:
		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_DESTROY:
		return nfp_net_bpf_stop(nn);

	case TC_CLSBPF_STATS:
		return nfp_net_bpf_stats_update(nn, cls_bpf);

	default:
		return -EOPNOTSUPP;
	}
}
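
/* Example usage sketch (hypothetical interface and object file names):
 * a cls_bpf program attached with tc reaches this code via
 * TC_CLSBPF_ADD/REPLACE.  "da" requests direct-action mode and "skip_sw"
 * sets TCA_CLS_FLAGS_SKIP_SW, making the hardware offload mandatory:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress bpf obj prog.o da skip_sw
 */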