1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017 6WIND S.A.
3 * Copyright 2017 Mellanox Technologies, Ltd
4 */
5
6 #include <errno.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/queue.h>
10 #include <sys/resource.h>
11
12 #include <rte_byteorder.h>
13 #include <rte_jhash.h>
14 #include <rte_malloc.h>
15 #include <rte_eth_tap.h>
16 #include <tap_flow.h>
17 #include <tap_autoconf.h>
18 #include <tap_tcmsgs.h>
19 #include <tap_rss.h>
20
21 #ifndef HAVE_TC_FLOWER
22 /*
23 * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
24 * avoid sending TC messages the kernel cannot understand.
25 */
26 enum {
27 TCA_FLOWER_UNSPEC,
28 TCA_FLOWER_CLASSID,
29 TCA_FLOWER_INDEV,
30 TCA_FLOWER_ACT,
31 TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
32 TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
33 TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
34 TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
35 TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
36 TCA_FLOWER_KEY_IP_PROTO, /* u8 */
37 TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
38 TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
39 TCA_FLOWER_KEY_IPV4_DST, /* be32 */
40 TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
41 TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
42 TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
43 TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
44 TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
45 TCA_FLOWER_KEY_TCP_SRC, /* be16 */
46 TCA_FLOWER_KEY_TCP_DST, /* be16 */
47 TCA_FLOWER_KEY_UDP_SRC, /* be16 */
48 TCA_FLOWER_KEY_UDP_DST, /* be16 */
49 };
50 #endif
51 #ifndef HAVE_TC_VLAN_ID
52 enum {
53 /* TCA_FLOWER_FLAGS, */
54 TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
55 TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
56 TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
57 };
58 #endif
59 /*
60 * For kernels < 4.2 BPF related enums may not be defined.
61 * Runtime checks will be carried out to gracefully report on TC messages that
62 * are rejected by the kernel. Rejection reasons may be due to:
63 * 1. enum is not defined
64 * 2. enum is defined but kernel is not configured to support BPF system calls,
65 * BPF classifications or BPF actions.
66 */
67 #ifndef HAVE_TC_BPF
68 enum {
69 TCA_BPF_UNSPEC,
70 TCA_BPF_ACT,
71 TCA_BPF_POLICE,
72 TCA_BPF_CLASSID,
73 TCA_BPF_OPS_LEN,
74 TCA_BPF_OPS,
75 };
76 #endif
77 #ifndef HAVE_TC_BPF_FD
78 enum {
79 TCA_BPF_FD = TCA_BPF_OPS + 1,
80 TCA_BPF_NAME,
81 };
82 #endif
83 #ifndef HAVE_TC_ACT_BPF
84 #define tc_gen \
85 __u32 index; \
86 __u32 capab; \
87 int action; \
88 int refcnt; \
89 int bindcnt
90
91 struct tc_act_bpf {
92 tc_gen;
93 };
94
95 enum {
96 TCA_ACT_BPF_UNSPEC,
97 TCA_ACT_BPF_TM,
98 TCA_ACT_BPF_PARMS,
99 TCA_ACT_BPF_OPS_LEN,
100 TCA_ACT_BPF_OPS,
101 };
102
103 #endif
104 #ifndef HAVE_TC_ACT_BPF_FD
105 enum {
106 TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
107 TCA_ACT_BPF_NAME,
108 };
109 #endif
110
111 /* RSS key management */
112 enum bpf_rss_key_e {
113 KEY_CMD_GET = 1,
114 KEY_CMD_RELEASE,
115 KEY_CMD_INIT,
116 KEY_CMD_DEINIT,
117 };
118
119 enum key_status_e {
120 KEY_STAT_UNSPEC,
121 KEY_STAT_USED,
122 KEY_STAT_AVAILABLE,
123 };
124
125 #define ISOLATE_HANDLE 1
126 #define REMOTE_PROMISCUOUS_HANDLE 2
127
128 struct rte_flow {
129 LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
130 struct rte_flow *remote_flow; /* associated remote flow */
131 int bpf_fd[SEC_MAX]; /* list of BPF fds, one per ELF section */
132 uint32_t key_idx; /* RSS rule key index into BPF map */
133 struct nlmsg msg;
134 };
135
136 struct convert_data {
137 uint16_t eth_type;
138 uint16_t ip_proto;
139 uint8_t vlan;
140 struct rte_flow *flow;
141 };
142
143 struct remote_rule {
144 struct rte_flow_attr attr;
145 struct rte_flow_item items[2];
146 struct rte_flow_action actions[2];
147 int mirred;
148 };
149
150 struct action_data {
151 char id[16];
152
153 union {
154 struct tc_gact gact;
155 struct tc_mirred mirred;
156 struct skbedit {
157 struct tc_skbedit skbedit;
158 uint16_t queue;
159 } skbedit;
160 struct bpf {
161 struct tc_act_bpf bpf;
162 int bpf_fd;
163 const char *annotation;
164 } bpf;
165 };
166 };
167
168 static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
169 static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
170 static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
171 static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
172 static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
173 static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
174 static int
175 tap_flow_validate(struct rte_eth_dev *dev,
176 const struct rte_flow_attr *attr,
177 const struct rte_flow_item items[],
178 const struct rte_flow_action actions[],
179 struct rte_flow_error *error);
180
181 static struct rte_flow *
182 tap_flow_create(struct rte_eth_dev *dev,
183 const struct rte_flow_attr *attr,
184 const struct rte_flow_item items[],
185 const struct rte_flow_action actions[],
186 struct rte_flow_error *error);
187
188 static void
189 tap_flow_free(struct pmd_internals *pmd,
190 struct rte_flow *flow);
191
192 static int
193 tap_flow_destroy(struct rte_eth_dev *dev,
194 struct rte_flow *flow,
195 struct rte_flow_error *error);
196
197 static int
198 tap_flow_isolate(struct rte_eth_dev *dev,
199 int set,
200 struct rte_flow_error *error);
201
202 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
203 static int rss_enable(struct pmd_internals *pmd,
204 const struct rte_flow_attr *attr,
205 struct rte_flow_error *error);
206 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
207 const struct rte_flow_action_rss *rss,
208 struct rte_flow_error *error);
209
210 static const struct rte_flow_ops tap_flow_ops = {
211 .validate = tap_flow_validate,
212 .create = tap_flow_create,
213 .destroy = tap_flow_destroy,
214 .flush = tap_flow_flush,
215 .isolate = tap_flow_isolate,
216 };
217
218 /* Static initializer for items. */
219 #define ITEMS(...) \
220 (const enum rte_flow_item_type []){ \
221 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
222 }
223
224 /* Structure to generate a simple graph of layers supported by the NIC. */
225 struct tap_flow_items {
226 /* Bit-mask corresponding to what is supported for this item. */
227 const void *mask;
228 const unsigned int mask_sz; /* Bit-mask size in bytes. */
229 /*
230 * Bit-mask corresponding to the default mask, if none is provided
231 * along with the item.
232 */
233 const void *default_mask;
234 /**
235 * Conversion function from rte_flow to netlink attributes.
236 *
237 * @param item
238 * rte_flow item to convert.
239 * @param data
240 * Internal structure to store the conversion.
241 *
242 * @return
243 * 0 on success, negative value otherwise.
244 */
245 int (*convert)(const struct rte_flow_item *item, void *data);
246 /** List of possible following items. */
247 const enum rte_flow_item_type *const items;
248 };
249
250 /* Graph of supported items and associated actions. */
251 static const struct tap_flow_items tap_flow_items[] = {
252 [RTE_FLOW_ITEM_TYPE_END] = {
253 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
254 },
255 [RTE_FLOW_ITEM_TYPE_ETH] = {
256 .items = ITEMS(
257 RTE_FLOW_ITEM_TYPE_VLAN,
258 RTE_FLOW_ITEM_TYPE_IPV4,
259 RTE_FLOW_ITEM_TYPE_IPV6),
260 .mask = &(const struct rte_flow_item_eth){
261 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
262 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
263 .type = -1,
264 },
265 .mask_sz = sizeof(struct rte_flow_item_eth),
266 .default_mask = &rte_flow_item_eth_mask,
267 .convert = tap_flow_create_eth,
268 },
269 [RTE_FLOW_ITEM_TYPE_VLAN] = {
270 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
271 RTE_FLOW_ITEM_TYPE_IPV6),
272 .mask = &(const struct rte_flow_item_vlan){
273 /* DEI matching is not supported */
274 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
275 .tci = 0xffef,
276 #else
277 .tci = 0xefff,
278 #endif
279 .inner_type = -1,
280 },
281 .mask_sz = sizeof(struct rte_flow_item_vlan),
282 .default_mask = &rte_flow_item_vlan_mask,
283 .convert = tap_flow_create_vlan,
284 },
285 [RTE_FLOW_ITEM_TYPE_IPV4] = {
286 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
287 RTE_FLOW_ITEM_TYPE_TCP),
288 .mask = &(const struct rte_flow_item_ipv4){
289 .hdr = {
290 .src_addr = -1,
291 .dst_addr = -1,
292 .next_proto_id = -1,
293 },
294 },
295 .mask_sz = sizeof(struct rte_flow_item_ipv4),
296 .default_mask = &rte_flow_item_ipv4_mask,
297 .convert = tap_flow_create_ipv4,
298 },
299 [RTE_FLOW_ITEM_TYPE_IPV6] = {
300 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
301 RTE_FLOW_ITEM_TYPE_TCP),
302 .mask = &(const struct rte_flow_item_ipv6){
303 .hdr = {
304 .src_addr = {
305 "\xff\xff\xff\xff\xff\xff\xff\xff"
306 "\xff\xff\xff\xff\xff\xff\xff\xff",
307 },
308 .dst_addr = {
309 "\xff\xff\xff\xff\xff\xff\xff\xff"
310 "\xff\xff\xff\xff\xff\xff\xff\xff",
311 },
312 .proto = -1,
313 },
314 },
315 .mask_sz = sizeof(struct rte_flow_item_ipv6),
316 .default_mask = &rte_flow_item_ipv6_mask,
317 .convert = tap_flow_create_ipv6,
318 },
319 [RTE_FLOW_ITEM_TYPE_UDP] = {
320 .mask = &(const struct rte_flow_item_udp){
321 .hdr = {
322 .src_port = -1,
323 .dst_port = -1,
324 },
325 },
326 .mask_sz = sizeof(struct rte_flow_item_udp),
327 .default_mask = &rte_flow_item_udp_mask,
328 .convert = tap_flow_create_udp,
329 },
330 [RTE_FLOW_ITEM_TYPE_TCP] = {
331 .mask = &(const struct rte_flow_item_tcp){
332 .hdr = {
333 .src_port = -1,
334 .dst_port = -1,
335 },
336 },
337 .mask_sz = sizeof(struct rte_flow_item_tcp),
338 .default_mask = &rte_flow_item_tcp_mask,
339 .convert = tap_flow_create_tcp,
340 },
341 };
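/*
 * For example, a pattern ETH -> IPV4 -> UDP -> END is accepted by
 * priv_flow_process() because each item appears in the .items list of its
 * predecessor, whereas ETH -> UDP -> END is rejected since UDP is not a
 * possible follower of ETH in the graph above.
 */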
342
343 /*
344 * TC rules, by growing priority
345 *
346 * Remote netdevice Tap netdevice
347 * +-------------+-------------+ +-------------+-------------+
348 * | Ingress | Egress | | Ingress | Egress |
349 * |-------------|-------------| |-------------|-------------|
350 * | | \ / | | | REMOTE TX | prio 1
351 * | | \ / | | | \ / | prio 2
352 * | EXPLICIT | \ / | | EXPLICIT | \ / | .
353 * | | \ / | | | \ / | .
354 * | RULES | X | | RULES | X | .
355 * | . | / \ | | . | / \ | .
356 * | . | / \ | | . | / \ | .
357 * | . | / \ | | . | / \ | .
358 * | . | / \ | | . | / \ | .
359 *
360 * .... .... .... ....
361 *
362 * | . | \ / | | . | \ / | .
363 * | . | \ / | | . | \ / | .
364 * | | \ / | | | \ / |
365 * | LOCAL_MAC | \ / | | \ / | \ / | last prio - 5
366 * | PROMISC | X | | \ / | X | last prio - 4
367 * | ALLMULTI | / \ | | X | / \ | last prio - 3
368 * | BROADCAST | / \ | | / \ | / \ | last prio - 2
369 * | BROADCASTV6 | / \ | | / \ | / \ | last prio - 1
370 * | xx | / \ | | ISOLATE | / \ | last prio
371 * +-------------+-------------+ +-------------+-------------+
372 *
373  * The implicit flow rules are stored in a list, with the last two mandatorily
374  * being the ISOLATE and REMOTE_TX rules, e.g.:
375 *
376 * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
377 *
378  * That enables tap_flow_isolate() to remove implicit rules by popping the list
379  * head and removing it as long as it applies to the remote netdevice. The
380  * implicit rule for TX redirection is not removed, as isolation concerns only
381  * incoming traffic.
382 */
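/*
 * Minimal sketch of the removal described above; tap_flow_isolate() below is
 * the actual implementation, with error handling:
 *
 *	while ((flow = LIST_FIRST(&pmd->implicit_flows)) != NULL) {
 *		if (flow->msg.t.tcm_ifindex == pmd->if_index)
 *			break;	<- first local rule (REMOTE_TX or ISOLATE)
 *		tap_flow_destroy_pmd(pmd, flow, NULL);
 *	}
 */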
383
384 static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
385 [TAP_REMOTE_LOCAL_MAC] = {
386 .attr = {
387 .group = MAX_GROUP,
388 .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
389 .ingress = 1,
390 },
391 .items[0] = {
392 .type = RTE_FLOW_ITEM_TYPE_ETH,
393 .mask = &(const struct rte_flow_item_eth){
394 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
395 },
396 },
397 .items[1] = {
398 .type = RTE_FLOW_ITEM_TYPE_END,
399 },
400 .mirred = TCA_EGRESS_REDIR,
401 },
402 [TAP_REMOTE_BROADCAST] = {
403 .attr = {
404 .group = MAX_GROUP,
405 .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
406 .ingress = 1,
407 },
408 .items[0] = {
409 .type = RTE_FLOW_ITEM_TYPE_ETH,
410 .mask = &(const struct rte_flow_item_eth){
411 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
412 },
413 .spec = &(const struct rte_flow_item_eth){
414 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
415 },
416 },
417 .items[1] = {
418 .type = RTE_FLOW_ITEM_TYPE_END,
419 },
420 .mirred = TCA_EGRESS_MIRROR,
421 },
422 [TAP_REMOTE_BROADCASTV6] = {
423 .attr = {
424 .group = MAX_GROUP,
425 .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
426 .ingress = 1,
427 },
428 .items[0] = {
429 .type = RTE_FLOW_ITEM_TYPE_ETH,
430 .mask = &(const struct rte_flow_item_eth){
431 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
432 },
433 .spec = &(const struct rte_flow_item_eth){
434 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
435 },
436 },
437 .items[1] = {
438 .type = RTE_FLOW_ITEM_TYPE_END,
439 },
440 .mirred = TCA_EGRESS_MIRROR,
441 },
442 [TAP_REMOTE_PROMISC] = {
443 .attr = {
444 .group = MAX_GROUP,
445 .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
446 .ingress = 1,
447 },
448 .items[0] = {
449 .type = RTE_FLOW_ITEM_TYPE_VOID,
450 },
451 .items[1] = {
452 .type = RTE_FLOW_ITEM_TYPE_END,
453 },
454 .mirred = TCA_EGRESS_MIRROR,
455 },
456 [TAP_REMOTE_ALLMULTI] = {
457 .attr = {
458 .group = MAX_GROUP,
459 .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
460 .ingress = 1,
461 },
462 .items[0] = {
463 .type = RTE_FLOW_ITEM_TYPE_ETH,
464 .mask = &(const struct rte_flow_item_eth){
465 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
466 },
467 .spec = &(const struct rte_flow_item_eth){
468 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
469 },
470 },
471 .items[1] = {
472 .type = RTE_FLOW_ITEM_TYPE_END,
473 },
474 .mirred = TCA_EGRESS_MIRROR,
475 },
476 [TAP_REMOTE_TX] = {
477 .attr = {
478 .group = 0,
479 .priority = TAP_REMOTE_TX,
480 .egress = 1,
481 },
482 .items[0] = {
483 .type = RTE_FLOW_ITEM_TYPE_VOID,
484 },
485 .items[1] = {
486 .type = RTE_FLOW_ITEM_TYPE_END,
487 },
488 .mirred = TCA_EGRESS_MIRROR,
489 },
490 [TAP_ISOLATE] = {
491 .attr = {
492 .group = MAX_GROUP,
493 .priority = PRIORITY_MASK - TAP_ISOLATE,
494 .ingress = 1,
495 },
496 .items[0] = {
497 .type = RTE_FLOW_ITEM_TYPE_VOID,
498 },
499 .items[1] = {
500 .type = RTE_FLOW_ITEM_TYPE_END,
501 },
502 },
503 };
504
505 /**
506  * Make as many checks as possible on an Ethernet item, and if a flow is
507 * provided, fill it appropriately with Ethernet info.
508 *
509 * @param[in] item
510 * Item specification.
511 * @param[in, out] data
512 * Additional data structure to tell next layers we've been here.
513 *
514 * @return
515 * 0 if checks are alright, -1 otherwise.
516 */
517 static int
518 tap_flow_create_eth(const struct rte_flow_item *item, void *data)
519 {
520 struct convert_data *info = (struct convert_data *)data;
521 const struct rte_flow_item_eth *spec = item->spec;
522 const struct rte_flow_item_eth *mask = item->mask;
523 struct rte_flow *flow = info->flow;
524 struct nlmsg *msg;
525
526 /* use default mask if none provided */
527 if (!mask)
528 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
529 /* TC does not support eth_type masking. Only accept if exact match. */
530 if (mask->type && mask->type != 0xffff)
531 return -1;
532 if (!spec)
533 return 0;
534 /* store eth_type for consistency if ipv4/6 pattern item comes next */
535 if (spec->type & mask->type)
536 info->eth_type = spec->type;
537 if (!flow)
538 return 0;
539 msg = &flow->msg;
540 if (!is_zero_ether_addr(&mask->dst)) {
541 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
542 &spec->dst.addr_bytes);
543 tap_nlattr_add(&msg->nh,
544 TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
545 &mask->dst.addr_bytes);
546 }
547 if (!is_zero_ether_addr(&mask->src)) {
548 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
549 &spec->src.addr_bytes);
550 tap_nlattr_add(&msg->nh,
551 TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
552 &mask->src.addr_bytes);
553 }
554 return 0;
555 }
556
557 /**
558  * Make as many checks as possible on a VLAN item, and if a flow is provided,
559 * fill it appropriately with VLAN info.
560 *
561 * @param[in] item
562 * Item specification.
563 * @param[in, out] data
564 * Additional data structure to tell next layers we've been here.
565 *
566 * @return
567 * 0 if checks are alright, -1 otherwise.
568 */
569 static int
570 tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
571 {
572 struct convert_data *info = (struct convert_data *)data;
573 const struct rte_flow_item_vlan *spec = item->spec;
574 const struct rte_flow_item_vlan *mask = item->mask;
575 struct rte_flow *flow = info->flow;
576 struct nlmsg *msg;
577
578 /* use default mask if none provided */
579 if (!mask)
580 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
581 /* Outer TPID cannot be matched. */
582 if (info->eth_type)
583 return -1;
584 /* Double-tagging not supported. */
585 if (info->vlan)
586 return -1;
587 info->vlan = 1;
588 if (mask->inner_type) {
589 /* TC does not support partial eth_type masking */
590 if (mask->inner_type != RTE_BE16(0xffff))
591 return -1;
592 info->eth_type = spec->inner_type;
593 }
594 if (!flow)
595 return 0;
596 msg = &flow->msg;
597 msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
598 #define VLAN_PRIO(tci) ((tci) >> 13)
599 #define VLAN_ID(tci) ((tci) & 0xfff)
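/*
 * Worked example: a host-order TCI of 0x6005 yields prio = 3 (0x6005 >> 13)
 * and vid = 5 (0x6005 & 0xfff); the DEI bit (0x1000) is never matched, as
 * the supported VLAN mask above excludes it.
 */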
600 if (!spec)
601 return 0;
602 if (spec->tci) {
603 uint16_t tci = ntohs(spec->tci) & mask->tci;
604 uint16_t prio = VLAN_PRIO(tci);
605 uint16_t vid = VLAN_ID(tci); /* VLAN ID is 12 bits; uint8_t would truncate */
606
607 if (prio)
608 tap_nlattr_add8(&msg->nh,
609 TCA_FLOWER_KEY_VLAN_PRIO, prio);
610 if (vid)
611 tap_nlattr_add16(&msg->nh,
612 TCA_FLOWER_KEY_VLAN_ID, vid);
613 }
614 return 0;
615 }
616
617 /**
618  * Make as many checks as possible on an IPv4 item, and if a flow is provided,
619 * fill it appropriately with IPv4 info.
620 *
621 * @param[in] item
622 * Item specification.
623 * @param[in, out] data
624 * Additional data structure to tell next layers we've been here.
625 *
626 * @return
627 * 0 if checks are alright, -1 otherwise.
628 */
629 static int
630 tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
631 {
632 struct convert_data *info = (struct convert_data *)data;
633 const struct rte_flow_item_ipv4 *spec = item->spec;
634 const struct rte_flow_item_ipv4 *mask = item->mask;
635 struct rte_flow *flow = info->flow;
636 struct nlmsg *msg;
637
638 /* use default mask if none provided */
639 if (!mask)
640 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
641 /* check that previous eth type is compatible with ipv4 */
642 if (info->eth_type && info->eth_type != htons(ETH_P_IP))
643 return -1;
644 /* store ip_proto for consistency if udp/tcp pattern item comes next */
645 if (spec)
646 info->ip_proto = spec->hdr.next_proto_id;
647 if (!flow)
648 return 0;
649 msg = &flow->msg;
650 if (!info->eth_type)
651 info->eth_type = htons(ETH_P_IP);
652 if (!spec)
653 return 0;
654 if (mask->hdr.dst_addr) {
655 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
656 spec->hdr.dst_addr);
657 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
658 mask->hdr.dst_addr);
659 }
660 if (mask->hdr.src_addr) {
661 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
662 spec->hdr.src_addr);
663 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
664 mask->hdr.src_addr);
665 }
666 if (spec->hdr.next_proto_id)
667 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
668 spec->hdr.next_proto_id);
669 return 0;
670 }
671
672 /**
673  * Make as many checks as possible on an IPv6 item, and if a flow is provided,
674 * fill it appropriately with IPv6 info.
675 *
676 * @param[in] item
677 * Item specification.
678 * @param[in, out] data
679 * Additional data structure to tell next layers we've been here.
680 *
681 * @return
682 * 0 if checks are alright, -1 otherwise.
683 */
684 static int
685 tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
686 {
687 struct convert_data *info = (struct convert_data *)data;
688 const struct rte_flow_item_ipv6 *spec = item->spec;
689 const struct rte_flow_item_ipv6 *mask = item->mask;
690 struct rte_flow *flow = info->flow;
691 uint8_t empty_addr[16] = { 0 };
692 struct nlmsg *msg;
693
694 /* use default mask if none provided */
695 if (!mask)
696 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
697 /* check that previous eth type is compatible with ipv6 */
698 if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
699 return -1;
700 /* store ip_proto for consistency if udp/tcp pattern item comes next */
701 if (spec)
702 info->ip_proto = spec->hdr.proto;
703 if (!flow)
704 return 0;
705 msg = &flow->msg;
706 if (!info->eth_type)
707 info->eth_type = htons(ETH_P_IPV6);
708 if (!spec)
709 return 0;
710 if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
711 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
712 sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
713 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
714 sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
715 }
716 if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
717 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
718 sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
719 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
720 sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
721 }
722 if (spec->hdr.proto)
723 tap_nlattr_add8(&msg->nh,
724 TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
725 return 0;
726 }
727
728 /**
729  * Make as many checks as possible on a UDP item, and if a flow is provided,
730 * fill it appropriately with UDP info.
731 *
732 * @param[in] item
733 * Item specification.
734 * @param[in, out] data
735 * Additional data structure to tell next layers we've been here.
736 *
737 * @return
738 * 0 if checks are alright, -1 otherwise.
739 */
740 static int
741 tap_flow_create_udp(const struct rte_flow_item *item, void *data)
742 {
743 struct convert_data *info = (struct convert_data *)data;
744 const struct rte_flow_item_udp *spec = item->spec;
745 const struct rte_flow_item_udp *mask = item->mask;
746 struct rte_flow *flow = info->flow;
747 struct nlmsg *msg;
748
749 /* use default mask if none provided */
750 if (!mask)
751 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
752 /* check that previous ip_proto is compatible with udp */
753 if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
754 return -1;
755 /* TC does not support UDP port masking. Only accept if exact match. */
756 if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
757 (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
758 return -1;
759 if (!flow)
760 return 0;
761 msg = &flow->msg;
762 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
763 if (!spec)
764 return 0;
765 if (mask->hdr.dst_port)
766 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
767 spec->hdr.dst_port);
768 if (mask->hdr.src_port)
769 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
770 spec->hdr.src_port);
771 return 0;
772 }
773
774 /**
775  * Make as many checks as possible on a TCP item, and if a flow is provided,
776 * fill it appropriately with TCP info.
777 *
778 * @param[in] item
779 * Item specification.
780 * @param[in, out] data
781 * Additional data structure to tell next layers we've been here.
782 *
783 * @return
784 * 0 if checks are alright, -1 otherwise.
785 */
786 static int
787 tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
788 {
789 struct convert_data *info = (struct convert_data *)data;
790 const struct rte_flow_item_tcp *spec = item->spec;
791 const struct rte_flow_item_tcp *mask = item->mask;
792 struct rte_flow *flow = info->flow;
793 struct nlmsg *msg;
794
795 /* use default mask if none provided */
796 if (!mask)
797 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
798 /* check that previous ip_proto is compatible with tcp */
799 if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
800 return -1;
801 /* TC does not support TCP port masking. Only accept if exact match. */
802 if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
803 (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
804 return -1;
805 if (!flow)
806 return 0;
807 msg = &flow->msg;
808 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
809 if (!spec)
810 return 0;
811 if (mask->hdr.dst_port)
812 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
813 spec->hdr.dst_port);
814 if (mask->hdr.src_port)
815 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
816 spec->hdr.src_port);
817 return 0;
818 }
819
820 /**
821 * Check support for a given item.
822 *
823 * @param[in] item
824 * Item specification.
825 * @param size
826 * Bit-Mask size in bytes.
827 * @param[in] supported_mask
828 * Bit-mask covering supported fields to compare with spec, last and mask in
829 * \item.
830 * @param[in] default_mask
831  *   Default bit-mask to use if none is provided in \item.
832 *
833 * @return
834 * 0 on success.
835 */
836 static int
837 tap_flow_item_validate(const struct rte_flow_item *item,
838 unsigned int size,
839 const uint8_t *supported_mask,
840 const uint8_t *default_mask)
841 {
842 int ret = 0;
843
844 /* An empty layer is allowed, as long as all fields are NULL */
845 if (!item->spec && (item->mask || item->last))
846 return -1;
847 /* Is the item spec compatible with what the NIC supports? */
848 if (item->spec && !item->mask) {
849 unsigned int i;
850 const uint8_t *spec = item->spec;
851
852 for (i = 0; i < size; ++i)
853 if ((spec[i] | supported_mask[i]) != supported_mask[i])
854 return -1;
855 /* Is the default mask compatible with what the NIC supports? */
856 for (i = 0; i < size; i++)
857 if ((default_mask[i] | supported_mask[i]) !=
858 supported_mask[i])
859 return -1;
860 }
861 /* Is the item last compatible with what the NIC supports? */
862 if (item->last && !item->mask) {
863 unsigned int i;
864 const uint8_t *spec = item->last;
865
866 for (i = 0; i < size; ++i)
867 if ((spec[i] | supported_mask[i]) != supported_mask[i])
868 return -1;
869 }
870 /* Is the item mask compatible with what the NIC supports? */
871 if (item->mask) {
872 unsigned int i;
873 const uint8_t *spec = item->mask;
874
875 for (i = 0; i < size; ++i)
876 if ((spec[i] | supported_mask[i]) != supported_mask[i])
877 return -1;
878 }
879 /**
880  * Once masked, are item spec and item last equal?
881  * TC does not support ranges, so anything else is invalid.
882 */
883 if (item->spec && item->last) {
884 uint8_t spec[size];
885 uint8_t last[size];
886 const uint8_t *apply = default_mask;
887 unsigned int i;
888
889 if (item->mask)
890 apply = item->mask;
891 for (i = 0; i < size; ++i) {
892 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
893 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
894 }
895 ret = memcmp(spec, last, size);
896 }
897 return ret;
898 }
899
900 /**
901 * Configure the kernel with a TC action and its configured parameters
902 * Handled actions: "gact", "mirred", "skbedit", "bpf"
903 *
904 * @param[in] flow
905 * Pointer to rte flow containing the netlink message
906 *
907 * @param[in, out] act_index
908 * Pointer to action sequence number in the TC command
909 *
910 * @param[in] adata
911 * Pointer to struct holding the action parameters
912 *
913 * @return
914 * -1 on failure, 0 on success
915 */
916 static int
917 add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
918 {
919 struct nlmsg *msg = &flow->msg;
920
921 if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
922 return -1;
923
924 tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
925 strlen(adata->id) + 1, adata->id);
926 if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
927 return -1;
928 if (strcmp("gact", adata->id) == 0) {
929 tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
930 &adata->gact);
931 } else if (strcmp("mirred", adata->id) == 0) {
932 if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
933 adata->mirred.action = TC_ACT_PIPE;
934 else /* REDIRECT */
935 adata->mirred.action = TC_ACT_STOLEN;
936 tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
937 sizeof(adata->mirred),
938 &adata->mirred);
939 } else if (strcmp("skbedit", adata->id) == 0) {
940 tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
941 sizeof(adata->skbedit.skbedit),
942 &adata->skbedit.skbedit);
943 tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
944 adata->skbedit.queue);
945 } else if (strcmp("bpf", adata->id) == 0) {
946 tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
947 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
948 strlen(adata->bpf.annotation) + 1,
949 adata->bpf.annotation);
950 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
951 sizeof(adata->bpf.bpf),
952 &adata->bpf.bpf);
953 } else {
954 return -1;
955 }
956 tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
957 tap_nlattr_nested_finish(msg); /* nested act_index */
958 return 0;
959 }
960
961 /**
962  * Helper function to send a series of TC actions to the kernel
963 *
964 * @param[in] flow
965 * Pointer to rte flow containing the netlink message
966 *
967 * @param[in] nb_actions
968 * Number of actions in an array of action structs
969 *
970 * @param[in] data
971 * Pointer to an array of action structs
972 *
973  * @param[in] classifier_action
974  *   The classifier on behalf of which the actions are configured
975 *
976 * @return
977 * -1 on failure, 0 on success
978 */
979 static int
980 add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
981 int classifier_action)
982 {
983 struct nlmsg *msg = &flow->msg;
984 size_t act_index = 1;
985 int i;
986
987 if (tap_nlattr_nested_start(msg, classifier_action) < 0)
988 return -1;
989 for (i = 0; i < nb_actions; i++)
990 if (add_action(flow, &act_index, data + i) < 0)
991 return -1;
992 tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
993 return 0;
994 }
995
996 /**
997 * Validate a flow supported by TC.
998 * If flow param is not NULL, then also fill the netlink message inside.
999 *
1000 * @param pmd
1001 * Pointer to private structure.
1002 * @param[in] attr
1003 * Flow rule attributes.
1004 * @param[in] pattern
1005 * Pattern specification (list terminated by the END pattern item).
1006 * @param[in] actions
1007 * Associated actions (list terminated by the END action).
1008 * @param[out] error
1009 * Perform verbose error reporting if not NULL.
1010 * @param[in, out] flow
1011 * Flow structure to update.
1012 * @param[in] mirred
1013 * If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
1014 * redirection to the tap netdevice, and the TC rule will be configured
1015 * on the remote netdevice in pmd.
1016 * If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
1017 * mirroring to the tap netdevice, and the TC rule will be configured
1018 * on the remote netdevice in pmd. Matching packets will thus be duplicated.
1019 * If set to 0, the standard behavior is to be used: set correct actions for
1020 * the TC rule, and apply it on the tap netdevice.
1021 *
1022 * @return
1023 * 0 on success, a negative errno value otherwise and rte_errno is set.
1024 */
1025 static int
1026 priv_flow_process(struct pmd_internals *pmd,
1027 const struct rte_flow_attr *attr,
1028 const struct rte_flow_item items[],
1029 const struct rte_flow_action actions[],
1030 struct rte_flow_error *error,
1031 struct rte_flow *flow,
1032 int mirred)
1033 {
1034 const struct tap_flow_items *cur_item = tap_flow_items;
1035 struct convert_data data = {
1036 .eth_type = 0,
1037 .ip_proto = 0,
1038 .flow = flow,
1039 };
1040 int action = 0; /* Only one action authorized for now */
1041
1042 if (attr->transfer) {
1043 rte_flow_error_set(
1044 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1045 NULL, "transfer is not supported");
1046 return -rte_errno;
1047 }
1048 if (attr->group > MAX_GROUP) {
1049 rte_flow_error_set(
1050 error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1051 NULL, "group value too big: cannot exceed 15");
1052 return -rte_errno;
1053 }
1054 if (attr->priority > MAX_PRIORITY) {
1055 rte_flow_error_set(
1056 error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057 NULL, "priority value too big");
1058 return -rte_errno;
1059 } else if (flow) {
1060 uint16_t group = attr->group << GROUP_SHIFT;
1061 uint16_t prio = group | (attr->priority +
1062 RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
1063 flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
1064 flow->msg.t.tcm_info);
1065 }
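/*
 * Worked example of the encoding above, assuming GROUP_SHIFT puts the 4-bit
 * group in the upper bits of the 16-bit TC priority (consistent with the
 * "cannot exceed 15" check): with group 1 and rte_flow priority 0, prio
 * becomes (1 << GROUP_SHIFT) | (RSS_PRIORITY_OFFSET + PRIORITY_OFFSET), and
 * TC_H_MAKE() stores it in the upper 16 bits of tcm_info while the lower 16
 * bits keep the protocol (htons(ETH_P_ALL)) set by the caller.
 */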
1066 if (flow) {
1067 if (mirred) {
1068 /*
1069 * If attr->ingress, the rule applies on remote ingress
1070 * to match incoming packets
1071 * If attr->egress, the rule applies on tap ingress (as
1072 * seen from the kernel) to deal with packets going out
1073 * from the DPDK app.
1074 */
1075 flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
1076 } else {
1077 /* Standard rule on tap egress (kernel standpoint). */
1078 flow->msg.t.tcm_parent =
1079 TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1080 }
1081 /* use flower filter type */
1082 tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
1083 if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
1084 goto exit_item_not_supported;
1085 }
1086 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
1087 const struct tap_flow_items *token = NULL;
1088 unsigned int i;
1089 int err = 0;
1090
1091 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
1092 continue;
1093 for (i = 0;
1094 cur_item->items &&
1095 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
1096 ++i) {
1097 if (cur_item->items[i] == items->type) {
1098 token = &tap_flow_items[items->type];
1099 break;
1100 }
1101 }
1102 if (!token)
1103 goto exit_item_not_supported;
1104 cur_item = token;
1105 err = tap_flow_item_validate(
1106 items, cur_item->mask_sz,
1107 (const uint8_t *)cur_item->mask,
1108 (const uint8_t *)cur_item->default_mask);
1109 if (err)
1110 goto exit_item_not_supported;
1111 if (flow && cur_item->convert) {
1112 err = cur_item->convert(items, &data);
1113 if (err)
1114 goto exit_item_not_supported;
1115 }
1116 }
1117 if (flow) {
1118 if (data.vlan) {
1119 tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1120 htons(ETH_P_8021Q));
1121 tap_nlattr_add16(&flow->msg.nh,
1122 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1123 data.eth_type ?
1124 data.eth_type : htons(ETH_P_ALL));
1125 } else if (data.eth_type) {
1126 tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1127 data.eth_type);
1128 }
1129 }
1130 if (mirred && flow) {
1131 struct action_data adata = {
1132 .id = "mirred",
1133 .mirred = {
1134 .eaction = mirred,
1135 },
1136 };
1137
1138 /*
1139 * If attr->egress && mirred, then this is a special
1140 * case where the rule must be applied on the tap, to
1141 * redirect packets coming from the DPDK App, out
1142 * through the remote netdevice.
1143 */
1144 adata.mirred.ifindex = attr->ingress ? pmd->if_index :
1145 pmd->remote_if_index;
1146 if (mirred == TCA_EGRESS_MIRROR)
1147 adata.mirred.action = TC_ACT_PIPE;
1148 else
1149 adata.mirred.action = TC_ACT_STOLEN;
1150 if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
1151 goto exit_action_not_supported;
1152 else
1153 goto end;
1154 }
1155 actions:
1156 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
1157 int err = 0;
1158
1159 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
1160 continue;
1161 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
1162 if (action)
1163 goto exit_action_not_supported;
1164 action = 1;
1165 if (flow) {
1166 struct action_data adata = {
1167 .id = "gact",
1168 .gact = {
1169 .action = TC_ACT_SHOT,
1170 },
1171 };
1172
1173 err = add_actions(flow, 1, &adata,
1174 TCA_FLOWER_ACT);
1175 }
1176 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
1177 if (action)
1178 goto exit_action_not_supported;
1179 action = 1;
1180 if (flow) {
1181 struct action_data adata = {
1182 .id = "gact",
1183 .gact = {
1184 /* continue */
1185 .action = TC_ACT_UNSPEC,
1186 },
1187 };
1188
1189 err = add_actions(flow, 1, &adata,
1190 TCA_FLOWER_ACT);
1191 }
1192 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1193 const struct rte_flow_action_queue *queue =
1194 (const struct rte_flow_action_queue *)
1195 actions->conf;
1196
1197 if (action)
1198 goto exit_action_not_supported;
1199 action = 1;
1200 if (!queue ||
1201 (queue->index > pmd->dev->data->nb_rx_queues - 1))
1202 goto exit_action_not_supported;
1203 if (flow) {
1204 struct action_data adata = {
1205 .id = "skbedit",
1206 .skbedit = {
1207 .skbedit = {
1208 .action = TC_ACT_PIPE,
1209 },
1210 .queue = queue->index,
1211 },
1212 };
1213
1214 err = add_actions(flow, 1, &adata,
1215 TCA_FLOWER_ACT);
1216 }
1217 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
1218 const struct rte_flow_action_rss *rss =
1219 (const struct rte_flow_action_rss *)
1220 actions->conf;
1221
1222 if (action++)
1223 goto exit_action_not_supported;
1224
1225 if (!pmd->rss_enabled) {
1226 err = rss_enable(pmd, attr, error);
1227 if (err)
1228 goto exit_action_not_supported;
1229 }
1230 if (flow)
1231 err = rss_add_actions(flow, pmd, rss, error);
1232 } else {
1233 goto exit_action_not_supported;
1234 }
1235 if (err)
1236 goto exit_action_not_supported;
1237 }
1238 /* When fate is unknown, drop traffic. */
1239 if (!action) {
1240 static const struct rte_flow_action drop[] = {
1241 { .type = RTE_FLOW_ACTION_TYPE_DROP, },
1242 { .type = RTE_FLOW_ACTION_TYPE_END, },
1243 };
1244
1245 actions = drop;
1246 goto actions;
1247 }
1248 end:
1249 if (flow)
1250 tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
1251 return 0;
1252 exit_item_not_supported:
1253 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1254 items, "item not supported");
1255 return -rte_errno;
1256 exit_action_not_supported:
1257 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1258 actions, "action not supported");
1259 return -rte_errno;
1260 }
1261
1262
1263
1264 /**
1265 * Validate a flow.
1266 *
1267 * @see rte_flow_validate()
1268 * @see rte_flow_ops
1269 */
1270 static int
1271 tap_flow_validate(struct rte_eth_dev *dev,
1272 const struct rte_flow_attr *attr,
1273 const struct rte_flow_item items[],
1274 const struct rte_flow_action actions[],
1275 struct rte_flow_error *error)
1276 {
1277 struct pmd_internals *pmd = dev->data->dev_private;
1278
1279 return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
1280 }
1281
1282 /**
1283 * Set a unique handle in a flow.
1284 *
1285 * The kernel supports TC rules with equal priority, as long as they use the
1286 * same matching fields (e.g.: dst mac and ipv4) with different values (and
1287 * full mask to ensure no collision is possible).
1288 * In those rules, the handle (uint32_t) is the part that would identify
1289 * specifically each rule.
1290 *
1291 * On 32-bit architectures, the handle can simply be the flow's pointer address.
1292 * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
1293 * unique handle.
1294 *
1295 * @param[in, out] flow
1296 * The flow that needs its handle set.
1297 */
1298 static void
1299 tap_flow_set_handle(struct rte_flow *flow)
1300 {
1301 uint32_t handle = 0;
1302
1303 if (sizeof(flow) > 4)
1304 handle = rte_jhash(&flow, sizeof(flow), 1);
1305 else
1306 handle = (uintptr_t)flow;
1307 /* must be at least 1 to avoid letting the kernel choose one for us */
1308 if (!handle)
1309 handle = 1;
1310 flow->msg.t.tcm_handle = handle;
1311 }
1312
1313 /**
1314  * Free the flow's open file descriptors and allocated memory
1315 *
1316 * @param[in] flow
1317 * Pointer to the flow to free
1318 *
1319 */
1320 static void
1321 tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
1322 {
1323 int i;
1324
1325 if (!flow)
1326 return;
1327
1328 if (pmd->rss_enabled) {
1329 /* Close flow BPF file descriptors */
1330 for (i = 0; i < SEC_MAX; i++)
1331 if (flow->bpf_fd[i] != 0) {
1332 close(flow->bpf_fd[i]);
1333 flow->bpf_fd[i] = 0;
1334 }
1335
1336 /* Release the map key for this RSS rule */
1337 bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
1338 flow->key_idx = 0;
1339 }
1340
1341 /* Free flow allocated memory */
1342 rte_free(flow);
1343 }
1344
1345 /**
1346 * Create a flow.
1347 *
1348 * @see rte_flow_create()
1349 * @see rte_flow_ops
1350 */
1351 static struct rte_flow *
1352 tap_flow_create(struct rte_eth_dev *dev,
1353 const struct rte_flow_attr *attr,
1354 const struct rte_flow_item items[],
1355 const struct rte_flow_action actions[],
1356 struct rte_flow_error *error)
1357 {
1358 struct pmd_internals *pmd = dev->data->dev_private;
1359 struct rte_flow *remote_flow = NULL;
1360 struct rte_flow *flow = NULL;
1361 struct nlmsg *msg = NULL;
1362 int err;
1363
1364 if (!pmd->if_index) {
1365 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1366 NULL,
1367 "can't create rule, ifindex not found");
1368 goto fail;
1369 }
1370 /*
1371 * No rules configured through standard rte_flow should be set on the
1372 * priorities used by implicit rules.
1373 */
1374 if ((attr->group == MAX_GROUP) &&
1375 attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1376 rte_flow_error_set(
1377 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1378 NULL, "priority value too big");
1379 goto fail;
1380 }
1381 flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1382 if (!flow) {
1383 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1384 NULL, "cannot allocate memory for rte_flow");
1385 goto fail;
1386 }
1387 msg = &flow->msg;
1388 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1389 NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1390 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1391 tap_flow_set_handle(flow);
1392 if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1393 goto fail;
1394 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1395 if (err < 0) {
1396 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1397 NULL, "couldn't send request to kernel");
1398 goto fail;
1399 }
1400 err = tap_nl_recv_ack(pmd->nlsk_fd);
1401 if (err < 0) {
1402 TAP_LOG(ERR,
1403 "Kernel refused TC filter rule creation (%d): %s",
1404 errno, strerror(errno));
1405 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1406 NULL,
1407 "overlapping rules or Kernel too old for flower support");
1408 goto fail;
1409 }
1410 LIST_INSERT_HEAD(&pmd->flows, flow, next);
1411 /**
1412 * If a remote device is configured, a TC rule with identical items for
1413 * matching must be set on that device, with a single action: redirect
1414 * to the local pmd->if_index.
1415 */
1416 if (pmd->remote_if_index) {
1417 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1418 if (!remote_flow) {
1419 rte_flow_error_set(
1420 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1421 "cannot allocate memory for rte_flow");
1422 goto fail;
1423 }
1424 msg = &remote_flow->msg;
1425 /* set the rule if_index for the remote netdevice */
1426 tc_init_msg(
1427 msg, pmd->remote_if_index, RTM_NEWTFILTER,
1428 NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1429 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1430 tap_flow_set_handle(remote_flow);
1431 if (priv_flow_process(pmd, attr, items, NULL,
1432 error, remote_flow, TCA_EGRESS_REDIR)) {
1433 rte_flow_error_set(
1434 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1435 NULL, "rte flow rule validation failed");
1436 goto fail;
1437 }
1438 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1439 if (err < 0) {
1440 rte_flow_error_set(
1441 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1442 NULL, "Failure sending nl request");
1443 goto fail;
1444 }
1445 err = tap_nl_recv_ack(pmd->nlsk_fd);
1446 if (err < 0) {
1447 TAP_LOG(ERR,
1448 "Kernel refused TC filter rule creation (%d): %s",
1449 errno, strerror(errno));
1450 rte_flow_error_set(
1451 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1452 NULL,
1453 "overlapping rules or Kernel too old for flower support");
1454 goto fail;
1455 }
1456 flow->remote_flow = remote_flow;
1457 }
1458 return flow;
1459 fail:
1460 if (remote_flow)
1461 rte_free(remote_flow);
1462 if (flow)
1463 tap_flow_free(pmd, flow);
1464 return NULL;
1465 }
1466
1467 /**
1468 * Destroy a flow using pointer to pmd_internal.
1469 *
1470 * @param[in, out] pmd
1471 * Pointer to private structure.
1472 * @param[in] flow
1473 * Pointer to the flow to destroy.
1474 * @param[in, out] error
1475 * Pointer to the flow error handler
1476 *
1477 * @return 0 if the flow could be destroyed, -1 otherwise.
1478 */
1479 static int
1480 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1481 struct rte_flow *flow,
1482 struct rte_flow_error *error)
1483 {
1484 struct rte_flow *remote_flow = flow->remote_flow;
1485 int ret = 0;
1486
1487 LIST_REMOVE(flow, next);
1488 flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1489 flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1490
1491 ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1492 if (ret < 0) {
1493 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1494 NULL, "couldn't send request to kernel");
1495 goto end;
1496 }
1497 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1498 /* If errno is ENOENT, the rule is already no longer in the kernel. */
1499 if (ret < 0 && errno == ENOENT)
1500 ret = 0;
1501 if (ret < 0) {
1502 TAP_LOG(ERR,
1503 "Kernel refused TC filter rule deletion (%d): %s",
1504 errno, strerror(errno));
1505 rte_flow_error_set(
1506 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1507 "couldn't receive kernel ack to our request");
1508 goto end;
1509 }
1510
1511 if (remote_flow) {
1512 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1513 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1514
1515 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1516 if (ret < 0) {
1517 rte_flow_error_set(
1518 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1519 NULL, "Failure sending nl request");
1520 goto end;
1521 }
1522 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1523 if (ret < 0 && errno == ENOENT)
1524 ret = 0;
1525 if (ret < 0) {
1526 TAP_LOG(ERR,
1527 "Kernel refused TC filter rule deletion (%d): %s",
1528 errno, strerror(errno));
1529 rte_flow_error_set(
1530 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1531 NULL, "Failure trying to receive nl ack");
1532 goto end;
1533 }
1534 }
1535 end:
1536 if (remote_flow)
1537 rte_free(remote_flow);
1538 tap_flow_free(pmd, flow);
1539 return ret;
1540 }
1541
1542 /**
1543 * Destroy a flow.
1544 *
1545 * @see rte_flow_destroy()
1546 * @see rte_flow_ops
1547 */
1548 static int
1549 tap_flow_destroy(struct rte_eth_dev *dev,
1550 struct rte_flow *flow,
1551 struct rte_flow_error *error)
1552 {
1553 struct pmd_internals *pmd = dev->data->dev_private;
1554
1555 return tap_flow_destroy_pmd(pmd, flow, error);
1556 }
1557
1558 /**
1559 * Enable/disable flow isolation.
1560 *
1561 * @see rte_flow_isolate()
1562 * @see rte_flow_ops
1563 */
1564 static int
1565 tap_flow_isolate(struct rte_eth_dev *dev,
1566 int set,
1567 struct rte_flow_error *error __rte_unused)
1568 {
1569 struct pmd_internals *pmd = dev->data->dev_private;
1570 struct pmd_process_private *process_private = dev->process_private;
1571
1572 /* normalize 'set' variable to contain 0 or 1 values */
1573 if (set)
1574 set = 1;
1575 /* if already in the right isolation mode - nothing to do */
1576 if ((set ^ pmd->flow_isolate) == 0)
1577 return 0;
1578 /* mark the isolation mode for tap_flow_implicit_create() */
1579 pmd->flow_isolate = set;
1580 /*
1581 * If netdevice is there, setup appropriate flow rules immediately.
1582 * Otherwise it will be set when bringing up the netdevice (tun_alloc).
1583 */
1584 if (!process_private->rxq_fds[0])
1585 return 0;
1586 if (set) {
1587 struct rte_flow *remote_flow;
1588
1589 while (1) {
1590 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1591 if (!remote_flow)
1592 break;
1593 /*
1594 * Remove all implicit rules on the remote.
1595 * Keep the local rule to redirect packets on TX.
1596 * Keep also the last implicit local rule: ISOLATE.
1597 */
1598 if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
1599 break;
1600 if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
1601 goto error;
1602 }
1603 /* Switch the TC rule according to pmd->flow_isolate */
1604 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1605 goto error;
1606 } else {
1607 /* Switch the TC rule according to pmd->flow_isolate */
1608 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1609 goto error;
1610 if (!pmd->remote_if_index)
1611 return 0;
1612 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1613 goto error;
1614 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1615 goto error;
1616 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1617 goto error;
1618 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1619 goto error;
1620 if (dev->data->promiscuous &&
1621 tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1622 goto error;
1623 if (dev->data->all_multicast &&
1624 tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1625 goto error;
1626 }
1627 return 0;
1628 error:
1629 pmd->flow_isolate = 0;
1630 return rte_flow_error_set(
1631 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1632 "TC rule creation failed");
1633 }
1634
1635 /**
1636 * Destroy all flows.
1637 *
1638 * @see rte_flow_flush()
1639 * @see rte_flow_ops
1640 */
1641 int
1642 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1643 {
1644 struct pmd_internals *pmd = dev->data->dev_private;
1645 struct rte_flow *flow;
1646
1647 while (!LIST_EMPTY(&pmd->flows)) {
1648 flow = LIST_FIRST(&pmd->flows);
1649 if (tap_flow_destroy(dev, flow, error) < 0)
1650 return -1;
1651 }
1652 return 0;
1653 }
1654
1655 /**
1656 * Add an implicit flow rule on the remote device to make sure traffic gets to
1657 * the tap netdevice from there.
1658 *
1659 * @param pmd
1660 * Pointer to private structure.
1661 * @param[in] idx
1662 * The idx in the implicit_rte_flows array specifying which rule to apply.
1663 *
1664 * @return -1 if the rule couldn't be applied, 0 otherwise.
1665 */
1666 int tap_flow_implicit_create(struct pmd_internals *pmd,
1667 enum implicit_rule_index idx)
1668 {
1669 uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1670 struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1671 struct rte_flow_action isolate_actions[2] = {
1672 [1] = {
1673 .type = RTE_FLOW_ACTION_TYPE_END,
1674 },
1675 };
1676 struct rte_flow_item *items = implicit_rte_flows[idx].items;
1677 struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1678 struct rte_flow_item_eth eth_local = { .type = 0 };
1679 uint16_t if_index = pmd->remote_if_index;
1680 struct rte_flow *remote_flow = NULL;
1681 struct nlmsg *msg = NULL;
1682 int err = 0;
1683 struct rte_flow_item items_local[2] = {
1684 [0] = {
1685 .type = items[0].type,
1686 .spec = &eth_local,
1687 .mask = items[0].mask,
1688 },
1689 [1] = {
1690 .type = items[1].type,
1691 }
1692 };
1693
1694 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1695 if (!remote_flow) {
1696 TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
1697 goto fail;
1698 }
1699 msg = &remote_flow->msg;
1700 if (idx == TAP_REMOTE_TX) {
1701 if_index = pmd->if_index;
1702 } else if (idx == TAP_ISOLATE) {
1703 if_index = pmd->if_index;
1704 /* Don't be exclusive for this rule, it can be changed later. */
1705 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1706 isolate_actions[0].type = pmd->flow_isolate ?
1707 RTE_FLOW_ACTION_TYPE_DROP :
1708 RTE_FLOW_ACTION_TYPE_PASSTHRU;
1709 actions = isolate_actions;
1710 } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1711 /*
1712 * eth addr couldn't be set in implicit_rte_flows[] as it is not
1713 * known at compile time.
1714 */
1715 memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1716 items = items_local;
1717 }
1718 tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1719 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1720 /*
1721 * The ISOLATE rule is always present and must have a static handle, as
1722 * the action is changed whether the feature is enabled (DROP) or
1723 * disabled (PASSTHRU).
1724 * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
1725 * have a static handle such that adding it twice will fail with EEXIST
1726 * with any kernel version. Remark: old kernels may falsely accept the
1727 * same REMOTE_PROMISCUOUS rules if they had different handles.
1728 */
1729 if (idx == TAP_ISOLATE)
1730 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1731 else if (idx == TAP_REMOTE_PROMISC)
1732 remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
1733 else
1734 tap_flow_set_handle(remote_flow);
1735 if (priv_flow_process(pmd, attr, items, actions, NULL,
1736 remote_flow, implicit_rte_flows[idx].mirred)) {
1737 TAP_LOG(ERR, "rte flow rule validation failed");
1738 goto fail;
1739 }
1740 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1741 if (err < 0) {
1742 TAP_LOG(ERR, "Failure sending nl request");
1743 goto fail;
1744 }
1745 err = tap_nl_recv_ack(pmd->nlsk_fd);
1746 if (err < 0) {
1747 /* Silently ignore re-entering existing rule */
1748 if (errno == EEXIST)
1749 goto success;
1750 TAP_LOG(ERR,
1751 "Kernel refused TC filter rule creation (%d): %s",
1752 errno, strerror(errno));
1753 goto fail;
1754 }
1755 LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1756 success:
1757 return 0;
1758 fail:
1759 if (remote_flow)
1760 rte_free(remote_flow);
1761 return -1;
1762 }
1763
1764 /**
1765 * Remove specific implicit flow rule on the remote device.
1766 *
1767 * @param[in, out] pmd
1768 * Pointer to private structure.
1769 * @param[in] idx
1770 * The idx in the implicit_rte_flows array specifying which rule to remove.
1771 *
1772  * @return -1 if the implicit rule couldn't be removed, 0 otherwise.
1773 */
1774 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1775 enum implicit_rule_index idx)
1776 {
1777 struct rte_flow *remote_flow;
1778 int cur_prio = -1;
1779 int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1780
1781 for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1782 remote_flow;
1783 remote_flow = LIST_NEXT(remote_flow, next)) {
1784 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1785 if (cur_prio != idx_prio)
1786 continue;
1787 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1788 }
1789 return 0;
1790 }
1791
1792 /**
1793 * Destroy all implicit flows.
1794 *
1795 * @see rte_flow_flush()
1796 */
1797 int
1798 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1799 {
1800 struct rte_flow *remote_flow;
1801
1802 while (!LIST_EMPTY(&pmd->implicit_flows)) {
1803 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1804 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1805 return -1;
1806 }
1807 return 0;
1808 }
1809
1810 #define MAX_RSS_KEYS 256
1811 #define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
1812 #define SEC_NAME_CLS_Q "cls_q"
1813
1814 static const char *sec_name[SEC_MAX] = {
1815 [SEC_L3_L4] = "l3_l4",
1816 };
1817
1818 /**
1819 * Enable RSS on tap: create TC rules for queuing.
1820 *
1821 * @param[in, out] pmd
1822 * Pointer to private structure.
1823 *
1824 * @param[in] attr
1825 * Pointer to rte_flow to get flow group
1826 *
1827 * @param[out] error
1828 * Pointer to error reporting if not NULL.
1829 *
1830 * @return 0 on success, negative value on failure.
1831 */
1832 static int rss_enable(struct pmd_internals *pmd,
1833 const struct rte_flow_attr *attr,
1834 struct rte_flow_error *error)
1835 {
1836 struct rte_flow *rss_flow = NULL;
1837 struct nlmsg *msg = NULL;
1838 /* 4096 is the maximum number of instructions for a BPF program */
1839 char annotation[64];
1840 int i;
1841 int err = 0;
1842
1843 /* unlimit locked memory */
1844 struct rlimit memlock_limit = {
1845 .rlim_cur = RLIM_INFINITY,
1846 .rlim_max = RLIM_INFINITY,
1847 };
1848 setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1849
1850 /* Get a new map key for a new RSS rule */
1851 err = bpf_rss_key(KEY_CMD_INIT, NULL);
1852 if (err < 0) {
1853 rte_flow_error_set(
1854 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1855 "Failed to initialize BPF RSS keys");
1856
1857 return -1;
1858 }
1859
1860 /*
1861 * Create BPF RSS MAP
1862 */
1863 pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1864 sizeof(struct rss_key),
1865 MAX_RSS_KEYS);
1866 if (pmd->map_fd < 0) {
1867 TAP_LOG(ERR,
1868 "Failed to create BPF map (%d): %s",
1869 errno, strerror(errno));
1870 rte_flow_error_set(
1871 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1872 "Kernel too old or not configured "
1873 "to support BPF maps");
1874
1875 return -ENOTSUP;
1876 }
1877
1878 /*
1879 * Add a rule per queue to match reclassified packets and direct them to
1880 * the correct queue.
1881 */
1882 for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1883 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1884 if (pmd->bpf_fd[i] < 0) {
1885 TAP_LOG(ERR,
1886 "Failed to load BPF section %s for queue %d",
1887 SEC_NAME_CLS_Q, i);
1888 rte_flow_error_set(
1889 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1890 NULL,
1891 "Kernel too old or not configured "
1892 "to support BPF programs loading");
1893
1894 return -ENOTSUP;
1895 }
1896
1897 rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1898 if (!rss_flow) {
1899 TAP_LOG(ERR,
1900 "Cannot allocate memory for rte_flow");
1901 return -1;
1902 }
1903 msg = &rss_flow->msg;
1904 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1905 NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1906 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1907 tap_flow_set_handle(rss_flow);
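/*
 * The TC filter priority is encoded in the upper 16 bits of
 * tcm_info, while the lower 16 bits carry the protocol (ETH_P_ALL
 * here); this is the same layout tap_flow_implicit_destroy() parses
 * with (tcm_info >> 16) & PRIORITY_MASK. A distinct priority is
 * derived below for each queue from the flow group and queue index.
 */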
1908 uint16_t group = attr->group << GROUP_SHIFT;
1909 uint16_t prio = group | (i + PRIORITY_OFFSET);
1910 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
1911 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1912
1913 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1914 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1915 return -1;
1916 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1917 snprintf(annotation, sizeof(annotation), "[%s%d]",
1918 SEC_NAME_CLS_Q, i);
1919 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1920 annotation);
1921 /* Actions */
1922 {
1923 struct action_data adata = {
1924 .id = "skbedit",
1925 .skbedit = {
1926 .skbedit = {
1927 .action = TC_ACT_PIPE,
1928 },
1929 .queue = i,
1930 },
1931 };
1932 if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1933 return -1;
1934 }
1935 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1936
1937 /* Netlink message is now ready to be sent */
1938 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1939 return -1;
1940 err = tap_nl_recv_ack(pmd->nlsk_fd);
1941 if (err < 0) {
1942 TAP_LOG(ERR,
1943 "Kernel refused TC filter rule creation (%d): %s",
1944 errno, strerror(errno));
1945 return err;
1946 }
1947 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1948 }
1949
1950 pmd->rss_enabled = 1;
1951 return err;
1952 }
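/*
 * For reference, each per-queue filter created by rss_enable() is
 * roughly what the following tc(8) command would express (sketch
 * only; the driver builds the netlink messages itself and passes the
 * BPF classifier by file descriptor rather than loading an object):
 *
 *   tc filter add dev <tap iface> parent <multiq handle> \
 *       protocol all prio <per-queue priority> \
 *       bpf ... action skbedit queue_mapping <queue index> pipe
 */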
1953
1954 /**
1955 * Manage the BPF RSS key repository: init, get, release, deinit
1956 *
1957 * @param[in] cmd
1958 * Command on the RSS keys: init, get, release, deinit
1959 *
1960 * @param[in, out] key_idx
1961 * Pointer to the RSS key index (out for the get command, in for the release command)
1962 *
1963 * @return -1 if a key could not be obtained (repository uninitialized or exhausted), 0 otherwise.
1964 */
1965 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1966 {
1967 __u32 i;
1968 int err = 0;
1969 static __u32 num_used_keys;
1970 static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1971 static __u32 rss_keys_initialized;
1972 __u32 key;
1973
1974 switch (cmd) {
1975 case KEY_CMD_GET:
1976 if (!rss_keys_initialized) {
1977 err = -1;
1978 break;
1979 }
1980
1981 if (num_used_keys == RTE_DIM(rss_keys)) {
1982 err = -1;
1983 break;
1984 }
1985
1986 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1987 while (rss_keys[*key_idx] == KEY_STAT_USED)
1988 *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1989
1990 rss_keys[*key_idx] = KEY_STAT_USED;
1991
1992 /*
1993 * Add an offset to key_idx to handle the case where RSS and
1994 * non-RSS flows are mixed.
1995 * When a non-RSS flow is destroyed, it carries an eBPF map
1996 * index of 0 (set at flow creation) and could therefore
1997 * unintentionally remove RSS entry 0 from the eBPF map.
1998 * To avoid this, an offset is added to the real index
1999 * during a KEY_CMD_GET operation and subtracted again
2000 * during a KEY_CMD_RELEASE operation in order to restore
2001 * the real index.
2002 */
2003 *key_idx += KEY_IDX_OFFSET;
2004 num_used_keys++;
2005 break;
2006
2007 case KEY_CMD_RELEASE:
2008 if (!rss_keys_initialized)
2009 break;
2010
2011 /*
2012 * Subtract the offset to restore the real key index.
2013 * If a non-RSS flow mistakenly tries to release map
2014 * entry 0, the offset subtraction yields an out-of-range
2015 * map index and the release operation is silently
2016 * ignored.
2017 */
2018 key = *key_idx - KEY_IDX_OFFSET;
2019 if (key >= RTE_DIM(rss_keys))
2020 break;
2021
2022 if (rss_keys[key] == KEY_STAT_USED) {
2023 rss_keys[key] = KEY_STAT_AVAILABLE;
2024 num_used_keys--;
2025 }
2026 break;
2027
2028 case KEY_CMD_INIT:
2029 for (i = 0; i < RTE_DIM(rss_keys); i++)
2030 rss_keys[i] = KEY_STAT_AVAILABLE;
2031
2032 rss_keys_initialized = 1;
2033 num_used_keys = 0;
2034 break;
2035
2036 case KEY_CMD_DEINIT:
2037 for (i = 0; i < RTE_DIM(rss_keys); i++)
2038 rss_keys[i] = KEY_STAT_UNSPEC;
2039
2040 rss_keys_initialized = 0;
2041 num_used_keys = 0;
2042 break;
2043
2044 default:
2045 break;
2046 }
2047
2048 return err;
2049 }
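/*
 * Typical key life cycle (sketch): KEY_CMD_INIT is issued once when
 * RSS is enabled (see rss_enable()), KEY_CMD_GET reserves one index
 * per RSS flow (see rss_add_actions()), KEY_CMD_RELEASE returns the
 * index when the owning flow is destroyed, and KEY_CMD_DEINIT resets
 * the whole repository.
 */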
2050
2051 /**
2052 * Add RSS hash calculations and queue selection
2053 *
2054 * @param[in, out] pmd
2055 * Pointer to internal structure. Used to set/get RSS map fd
2056 *
2057 * @param[in] rss
2058 * Pointer to RSS flow actions
2059 *
2060 * @param[out] error
2061 * Pointer to error reporting if not NULL.
2062 *
2063 * @return 0 on success, negative value on failure
2064 */
2065 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
2066 const struct rte_flow_action_rss *rss,
2067 struct rte_flow_error *error)
2068 {
2069 /* 4096 is the maximum number of instructions for a BPF program */
2070 unsigned int i;
2071 int err;
2072 struct rss_key rss_entry = { .hash_fields = 0,
2073 .key_size = 0 };
2074
2075 /* Check supported RSS features */
2076 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2077 return rte_flow_error_set
2078 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2079 "non-default RSS hash functions are not supported");
2080 if (rss->level)
2081 return rte_flow_error_set
2082 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2083 "a nonzero RSS encapsulation level is not supported");
2084
2085 /* Get a new map key for a new RSS rule */
2086 err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
2087 if (err < 0) {
2088 rte_flow_error_set(
2089 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2090 "Failed to get BPF RSS key");
2091
2092 return -1;
2093 }
2094
2095 /* Update RSS map entry with queues */
2096 rss_entry.nb_queues = rss->queue_num;
2097 for (i = 0; i < rss->queue_num; i++)
2098 rss_entry.queues[i] = rss->queue[i];
2099 rss_entry.hash_fields =
2100 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
2101
2102 /* Add this RSS entry to map */
2103 err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2104 &flow->key_idx, &rss_entry);
2105
2106 if (err) {
2107 TAP_LOG(ERR,
2108 "Failed to update BPF map entry #%u (%d): %s",
2109 flow->key_idx, errno, strerror(errno));
2110 rte_flow_error_set(
2111 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2112 "Kernel too old or not configured "
2113 "to support BPF maps updates");
2114
2115 return -ENOTSUP;
2116 }
2117
2118
2119 /*
2120 * Load bpf rules to calculate hash for this key_idx
2121 */
2122
2123 flow->bpf_fd[SEC_L3_L4] =
2124 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2125 if (flow->bpf_fd[SEC_L3_L4] < 0) {
2126 TAP_LOG(ERR,
2127 "Failed to load BPF section %s (%d): %s",
2128 sec_name[SEC_L3_L4], errno, strerror(errno));
2129 rte_flow_error_set(
2130 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2131 "Kernel too old or not configured "
2132 "to support BPF program loading");
2133
2134 return -ENOTSUP;
2135 }
2136
2137 /* Actions */
2138 {
2139 struct action_data adata[] = {
2140 {
2141 .id = "bpf",
2142 .bpf = {
2143 .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2144 .annotation = sec_name[SEC_L3_L4],
2145 .bpf = {
2146 .action = TC_ACT_PIPE,
2147 },
2148 },
2149 },
2150 };
2151
2152 if (add_actions(flow, RTE_DIM(adata), adata,
2153 TCA_FLOWER_ACT) < 0)
2154 return -1;
2155 }
2156
2157 return 0;
2158 }
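/*
 * From the application side, an RSS action reaching rss_add_actions()
 * might be built as follows (illustrative sketch; only the default
 * hash function and encapsulation level 0 are accepted above):
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss_conf = {
 *       .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *       .level = 0,
 *       .queue_num = RTE_DIM(queues),
 *       .queue = queues,
 *   };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */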
2159
2160 /**
2161 * Manage filter operations.
2162 *
2163 * @param dev
2164 * Pointer to Ethernet device structure.
2165 * @param filter_type
2166 * Filter type.
2167 * @param filter_op
2168 * Operation to perform.
2169 * @param arg
2170 * Pointer to operation-specific structure.
2171 *
2172 * @return
2173 * 0 on success, negative errno value on failure.
2174 */
2175 int
2176 tap_dev_filter_ctrl(struct rte_eth_dev *dev,
2177 enum rte_filter_type filter_type,
2178 enum rte_filter_op filter_op,
2179 void *arg)
2180 {
2181 switch (filter_type) {
2182 case RTE_ETH_FILTER_GENERIC:
2183 if (filter_op != RTE_ETH_FILTER_GET)
2184 return -EINVAL;
2185 *(const void **)arg = &tap_flow_ops;
2186 return 0;
2187 default:
2188 TAP_LOG(ERR, "%p: filter type (%d) not supported",
2189 dev, filter_type);
2190 }
2191 return -EINVAL;
2192 }
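/*
 * This callback is how the generic rte_flow layer obtains the
 * driver's flow operations: it invokes filter_ctrl with
 * RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET and receives a pointer
 * to tap_flow_ops in return.
 */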