drivers/net/ethernet/netronome/nfp/flower/offload.c
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

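/* TCP header flags that can be matched on; a flow matching on any other
 * TCP flag is rejected.
 */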
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

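/* Flow dissector control flags that can be matched on; currently only
 * the fragmentation flags.
 */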
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

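/* Complete set of flow dissector keys that can be offloaded; a flow
 * using any key outside this set is rejected.
 */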
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

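/* Tunnel (encapsulation) dissector keys that may appear in a flow. */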
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

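/* Required (_R) subset: once any tunnel key is used, all of these
 * tunnel keys must be present.
 */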
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

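/**
 * nfp_flower_xmit_flow() - Sends a flow to firmware as a control message.
 * @netdev: netdev structure.
 * @nfp_flow: Flow payload holding metadata, match data and actions.
 * @mtype: Control message type, e.g. NFP_FLOWER_CMSG_TYPE_FLOW_ADD.
 *
 * Copies the rule metadata, unmasked key, mask and action data into a
 * single control message and queues it on the control channel. The
 * metadata length fields are temporarily converted to long words for
 * the firmware and restored to bytes before returning.
 *
 * Return: 0 on success, or -ENOMEM if the message could not be allocated.
 */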
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

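/* Return true if the flow matches on any field above the MAC layer. */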
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

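/**
 * nfp_flower_calculate_key_layers() - Determine the match key layout.
 * @app: Pointer to the APP handle
 * @ret_key_ls: Key layer structure filled in on success.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 * @tun_type: Set to the tunnel type when a tunnel match is requested.
 *
 * Inspects the dissector keys used by the flow, works out which NFP key
 * layers and total key size are needed, and rejects any match the
 * firmware cannot support.
 *
 * Return: negative value on error, 0 if key layers calculated successfully.
 */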
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

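/**
 * nfp_flower_allocate_new() - Allocate a flow payload for a key layout.
 * @key_layer: Key layer structure describing the required key size.
 *
 * Allocates buffers for the unmasked key, the mask and a maximum sized
 * action list, and initialises the payload metadata and lock.
 *
 * Return: pointer to the new flow payload, or NULL on allocation failure.
 */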
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* The key layer scratch space is no longer needed; the flow payload
	 * itself is deallocated when the flower rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -EINVAL;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

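/* Dispatch a flower classifier command (replace, destroy or stats) for
 * a repr netdev; non-802.3 protocols are not offloaded.
 */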
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, flower);
	}

	return -EOPNOTSUPP;
}

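/* TC block callback used when rules are offloaded via an egress device;
 * only tunnel matches are accepted on this path (see
 * nfp_flower_calculate_key_layers()).
 */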
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

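/* TC block callback for rules added on the ingress of a repr netdev. */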
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

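/* Bind or unbind the flower block callback on a repr's ingress block. */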
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

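/* TC setup handler for the flower app; only block binds are handled
 * here.
 */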
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}