netdev-offload-dpdk: Support offload of set TCP/UDP ports actions.
1 /*
2 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
3 * Copyright (c) 2019 Mellanox Technologies, Ltd.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17 #include <config.h>
18
19 #include <rte_flow.h>
20
21 #include "cmap.h"
22 #include "dpif-netdev.h"
23 #include "netdev-offload-provider.h"
24 #include "netdev-provider.h"
25 #include "openvswitch/match.h"
26 #include "openvswitch/vlog.h"
27 #include "packets.h"
28 #include "uuid.h"
29
30 VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
31 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
32
33 /* Thread-safety
34 * =============
35 *
36  * The API below is NOT thread safe in the following terms:
37 *
38  * - The caller must be sure that none of these functions will be called
39  *   simultaneously, even for different 'netdev's.
40 *
41 * - The caller must be sure that 'netdev' will not be destructed/deallocated.
42 *
43 * - The caller must be sure that 'netdev' configuration will not be changed.
44 * For example, simultaneous call of 'netdev_reconfigure()' for the same
45 * 'netdev' is forbidden.
46 *
47  * For the current implementation, all of the above restrictions can be
48  * fulfilled by taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
49
50 /*
51 * A mapping from ufid to dpdk rte_flow.
52 */
53 static struct cmap ufid_to_rte_flow = CMAP_INITIALIZER;
54
55 struct ufid_to_rte_flow_data {
56 struct cmap_node node;
57 ovs_u128 ufid;
58 struct rte_flow *rte_flow;
59 bool actions_offloaded;
60 struct dpif_flow_stats stats;
61 };
62
63 /* Find rte_flow with @ufid. */
64 static struct ufid_to_rte_flow_data *
65 ufid_to_rte_flow_data_find(const ovs_u128 *ufid)
66 {
67 size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
68 struct ufid_to_rte_flow_data *data;
69
70 CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
71 if (ovs_u128_equals(*ufid, data->ufid)) {
72 return data;
73 }
74 }
75
76 return NULL;
77 }
78
79 static inline void
80 ufid_to_rte_flow_associate(const ovs_u128 *ufid,
81 struct rte_flow *rte_flow, bool actions_offloaded)
82 {
83 size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
84 struct ufid_to_rte_flow_data *data = xzalloc(sizeof *data);
85 struct ufid_to_rte_flow_data *data_prev;
86
87 /*
88 * We should not simply overwrite an existing rte flow.
89 * We should have deleted it first before re-adding it.
90      * Thus, if the following assert triggers, something is wrong:
91      * the rte_flow has not been destroyed.
92 */
93 data_prev = ufid_to_rte_flow_data_find(ufid);
94 if (data_prev) {
95 ovs_assert(data_prev->rte_flow == NULL);
96 }
97
98 data->ufid = *ufid;
99 data->rte_flow = rte_flow;
100 data->actions_offloaded = actions_offloaded;
101
102 cmap_insert(&ufid_to_rte_flow,
103 CONST_CAST(struct cmap_node *, &data->node), hash);
104 }
105
106 static inline void
107 ufid_to_rte_flow_disassociate(const ovs_u128 *ufid)
108 {
109 size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
110 struct ufid_to_rte_flow_data *data;
111
112 CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
113 if (ovs_u128_equals(*ufid, data->ufid)) {
114 cmap_remove(&ufid_to_rte_flow,
115 CONST_CAST(struct cmap_node *, &data->node), hash);
116 ovsrcu_postpone(free, data);
117 return;
118 }
119 }
120
121 VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow\n",
122 UUID_ARGS((struct uuid *) ufid));
123 }
124
125 /*
126  * To avoid individual xrealloc calls for each new element, a 'current_max'
127  * is used to keep track of the current number of allocated elements.  It
128  * starts at 8 and doubles on each xrealloc call.
129 */
130 struct flow_patterns {
131 struct rte_flow_item *items;
132 int cnt;
133 int current_max;
134 };
135
136 struct flow_actions {
137 struct rte_flow_action *actions;
138 int cnt;
139 int current_max;
140 };
141
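/* Appends a human-readable dump of the rte_flow attributes to 's'. */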
142 static void
143 dump_flow_attr(struct ds *s, const struct rte_flow_attr *attr)
144 {
145 ds_put_format(s,
146 " Attributes: "
147 "ingress=%d, egress=%d, prio=%d, group=%d, transfer=%d\n",
148 attr->ingress, attr->egress, attr->priority, attr->group,
149 attr->transfer);
150 }
151
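/* Appends a human-readable dump of a single rte_flow pattern item to 's'.
 * Eth, VLAN, IPv4, UDP, SCTP, ICMP and TCP items are decoded; other item
 * types are reported as unknown. */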
152 static void
153 dump_flow_pattern(struct ds *s, const struct rte_flow_item *item)
154 {
155 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
156 const struct rte_flow_item_eth *eth_spec = item->spec;
157 const struct rte_flow_item_eth *eth_mask = item->mask;
158
159 ds_put_cstr(s, "rte flow eth pattern:\n");
160 if (eth_spec) {
161 ds_put_format(s,
162 " Spec: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
163 "type=0x%04" PRIx16"\n",
164 ETH_ADDR_BYTES_ARGS(eth_spec->src.addr_bytes),
165 ETH_ADDR_BYTES_ARGS(eth_spec->dst.addr_bytes),
166 ntohs(eth_spec->type));
167 } else {
168 ds_put_cstr(s, " Spec = null\n");
169 }
170 if (eth_mask) {
171 ds_put_format(s,
172 " Mask: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
173 "type=0x%04"PRIx16"\n",
174 ETH_ADDR_BYTES_ARGS(eth_mask->src.addr_bytes),
175 ETH_ADDR_BYTES_ARGS(eth_mask->dst.addr_bytes),
176 ntohs(eth_mask->type));
177 } else {
178 ds_put_cstr(s, " Mask = null\n");
179 }
180 } else if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
181 const struct rte_flow_item_vlan *vlan_spec = item->spec;
182 const struct rte_flow_item_vlan *vlan_mask = item->mask;
183
184 ds_put_cstr(s, "rte flow vlan pattern:\n");
185 if (vlan_spec) {
186 ds_put_format(s,
187 " Spec: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
188 ntohs(vlan_spec->inner_type), ntohs(vlan_spec->tci));
189 } else {
190 ds_put_cstr(s, " Spec = null\n");
191 }
192
193 if (vlan_mask) {
194 ds_put_format(s,
195 " Mask: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
196 ntohs(vlan_mask->inner_type), ntohs(vlan_mask->tci));
197 } else {
198 ds_put_cstr(s, " Mask = null\n");
199 }
200 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
201 const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
202 const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
203
204 ds_put_cstr(s, "rte flow ipv4 pattern:\n");
205 if (ipv4_spec) {
206 ds_put_format(s,
207 " Spec: tos=0x%"PRIx8", ttl=%"PRIx8
208 ", proto=0x%"PRIx8
209 ", src="IP_FMT", dst="IP_FMT"\n",
210 ipv4_spec->hdr.type_of_service,
211 ipv4_spec->hdr.time_to_live,
212 ipv4_spec->hdr.next_proto_id,
213 IP_ARGS(ipv4_spec->hdr.src_addr),
214 IP_ARGS(ipv4_spec->hdr.dst_addr));
215 } else {
216 ds_put_cstr(s, " Spec = null\n");
217 }
218 if (ipv4_mask) {
219 ds_put_format(s,
220 " Mask: tos=0x%"PRIx8", ttl=%"PRIx8
221 ", proto=0x%"PRIx8
222 ", src="IP_FMT", dst="IP_FMT"\n",
223 ipv4_mask->hdr.type_of_service,
224 ipv4_mask->hdr.time_to_live,
225 ipv4_mask->hdr.next_proto_id,
226 IP_ARGS(ipv4_mask->hdr.src_addr),
227 IP_ARGS(ipv4_mask->hdr.dst_addr));
228 } else {
229 ds_put_cstr(s, " Mask = null\n");
230 }
231 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
232 const struct rte_flow_item_udp *udp_spec = item->spec;
233 const struct rte_flow_item_udp *udp_mask = item->mask;
234
235 ds_put_cstr(s, "rte flow udp pattern:\n");
236 if (udp_spec) {
237 ds_put_format(s,
238 " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
239 ntohs(udp_spec->hdr.src_port),
240 ntohs(udp_spec->hdr.dst_port));
241 } else {
242 ds_put_cstr(s, " Spec = null\n");
243 }
244 if (udp_mask) {
245 ds_put_format(s,
246 " Mask: src_port=0x%"PRIx16
247 ", dst_port=0x%"PRIx16"\n",
248 ntohs(udp_mask->hdr.src_port),
249 ntohs(udp_mask->hdr.dst_port));
250 } else {
251 ds_put_cstr(s, " Mask = null\n");
252 }
253 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
254 const struct rte_flow_item_sctp *sctp_spec = item->spec;
255 const struct rte_flow_item_sctp *sctp_mask = item->mask;
256
257 ds_put_cstr(s, "rte flow sctp pattern:\n");
258 if (sctp_spec) {
259 ds_put_format(s,
260 " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
261 ntohs(sctp_spec->hdr.src_port),
262 ntohs(sctp_spec->hdr.dst_port));
263 } else {
264 ds_put_cstr(s, " Spec = null\n");
265 }
266 if (sctp_mask) {
267 ds_put_format(s,
268 " Mask: src_port=0x%"PRIx16
269 ", dst_port=0x%"PRIx16"\n",
270 ntohs(sctp_mask->hdr.src_port),
271 ntohs(sctp_mask->hdr.dst_port));
272 } else {
273 ds_put_cstr(s, " Mask = null\n");
274 }
275 } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
276 const struct rte_flow_item_icmp *icmp_spec = item->spec;
277 const struct rte_flow_item_icmp *icmp_mask = item->mask;
278
279 ds_put_cstr(s, "rte flow icmp pattern:\n");
280 if (icmp_spec) {
281 ds_put_format(s,
282 " Spec: icmp_type=%"PRIu8", icmp_code=%"PRIu8"\n",
283 icmp_spec->hdr.icmp_type,
284 icmp_spec->hdr.icmp_code);
285 } else {
286 ds_put_cstr(s, " Spec = null\n");
287 }
288 if (icmp_mask) {
289 ds_put_format(s,
290 " Mask: icmp_type=0x%"PRIx8
291 ", icmp_code=0x%"PRIx8"\n",
292                           icmp_mask->hdr.icmp_type,
293                           icmp_mask->hdr.icmp_code);
294 } else {
295 ds_put_cstr(s, " Mask = null\n");
296 }
297 } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
298 const struct rte_flow_item_tcp *tcp_spec = item->spec;
299 const struct rte_flow_item_tcp *tcp_mask = item->mask;
300
301 ds_put_cstr(s, "rte flow tcp pattern:\n");
302 if (tcp_spec) {
303 ds_put_format(s,
304 " Spec: src_port=%"PRIu16", dst_port=%"PRIu16
305 ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
306 ntohs(tcp_spec->hdr.src_port),
307 ntohs(tcp_spec->hdr.dst_port),
308 tcp_spec->hdr.data_off,
309 tcp_spec->hdr.tcp_flags);
310 } else {
311 ds_put_cstr(s, " Spec = null\n");
312 }
313 if (tcp_mask) {
314 ds_put_format(s,
315 " Mask: src_port=%"PRIx16", dst_port=%"PRIx16
316 ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
317 ntohs(tcp_mask->hdr.src_port),
318 ntohs(tcp_mask->hdr.dst_port),
319 tcp_mask->hdr.data_off,
320 tcp_mask->hdr.tcp_flags);
321 } else {
322 ds_put_cstr(s, " Mask = null\n");
323 }
324 } else {
325 ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
326 }
327 }
328
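/* Appends a human-readable dump of a single rte_flow action to 's'.
 * Unrecognized action types are reported as unknown. */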
329 static void
330 dump_flow_action(struct ds *s, const struct rte_flow_action *actions)
331 {
332 if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
333 const struct rte_flow_action_mark *mark = actions->conf;
334
335 ds_put_cstr(s, "rte flow mark action:\n");
336 if (mark) {
337 ds_put_format(s, " Mark: id=%d\n", mark->id);
338 } else {
339 ds_put_cstr(s, " Mark = null\n");
340 }
341 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
342 const struct rte_flow_action_rss *rss = actions->conf;
343
344 ds_put_cstr(s, "rte flow RSS action:\n");
345 if (rss) {
346 ds_put_format(s, " RSS: queue_num=%d\n", rss->queue_num);
347 } else {
348 ds_put_cstr(s, " RSS = null\n");
349 }
350 } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) {
351 const struct rte_flow_action_count *count = actions->conf;
352
353 ds_put_cstr(s, "rte flow count action:\n");
354 if (count) {
355 ds_put_format(s, " Count: shared=%d, id=%d\n", count->shared,
356 count->id);
357 } else {
358 ds_put_cstr(s, " Count = null\n");
359 }
360 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
361 const struct rte_flow_action_port_id *port_id = actions->conf;
362
363 ds_put_cstr(s, "rte flow port-id action:\n");
364 if (port_id) {
365 ds_put_format(s, " Port-id: original=%d, id=%d\n",
366 port_id->original, port_id->id);
367 } else {
368 ds_put_cstr(s, " Port-id = null\n");
369 }
370 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
371 ds_put_cstr(s, "rte flow drop action\n");
372 } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ||
373 actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_DST) {
374 const struct rte_flow_action_set_mac *set_mac = actions->conf;
375
376 char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_DST
377 ? "dst" : "src";
378
379 ds_put_format(s, "rte flow set-mac-%s action:\n", dirstr);
380 if (set_mac) {
381 ds_put_format(s,
382 " Set-mac-%s: "ETH_ADDR_FMT"\n", dirstr,
383 ETH_ADDR_BYTES_ARGS(set_mac->mac_addr));
384 } else {
385 ds_put_format(s, " Set-mac-%s = null\n", dirstr);
386 }
387 } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ||
388 actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_DST) {
389 const struct rte_flow_action_set_ipv4 *set_ipv4 = actions->conf;
390 char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
391 ? "dst" : "src";
392
393 ds_put_format(s, "rte flow set-ipv4-%s action:\n", dirstr);
394 if (set_ipv4) {
395 ds_put_format(s,
396 " Set-ipv4-%s: "IP_FMT"\n", dirstr,
397 IP_ARGS(set_ipv4->ipv4_addr));
398 } else {
399 ds_put_format(s, " Set-ipv4-%s = null\n", dirstr);
400 }
401 } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_TTL) {
402 const struct rte_flow_action_set_ttl *set_ttl = actions->conf;
403
404 ds_put_cstr(s, "rte flow set-ttl action:\n");
405 if (set_ttl) {
406 ds_put_format(s, " Set-ttl: %d\n", set_ttl->ttl_value);
407 } else {
408 ds_put_cstr(s, " Set-ttl = null\n");
409 }
410 } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ||
411 actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_DST) {
412 const struct rte_flow_action_set_tp *set_tp = actions->conf;
413 char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_DST
414 ? "dst" : "src";
415
416 ds_put_format(s, "rte flow set-tcp/udp-port-%s action:\n", dirstr);
417 if (set_tp) {
418 ds_put_format(s, " Set-%s-tcp/udp-port: %"PRIu16"\n", dirstr,
419 ntohs(set_tp->port));
420 } else {
421 ds_put_format(s, " Set-%s-tcp/udp-port = null\n", dirstr);
422 }
423 } else {
424 ds_put_format(s, "unknown rte flow action (%d)\n", actions->type);
425 }
426 }
427
428 static struct ds *
429 dump_flow(struct ds *s,
430 const struct rte_flow_attr *attr,
431 const struct rte_flow_item *items,
432 const struct rte_flow_action *actions)
433 {
434 if (attr) {
435 dump_flow_attr(s, attr);
436 }
437 while (items && items->type != RTE_FLOW_ITEM_TYPE_END) {
438 dump_flow_pattern(s, items++);
439 }
440 while (actions && actions->type != RTE_FLOW_ACTION_TYPE_END) {
441 dump_flow_action(s, actions++);
442 }
443 return s;
444 }
445
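/* Creates an rte_flow on 'netdev' and dumps it (rate-limited): at DBG level
 * on success, at WARN level on failure, or at DBG level if the failure is
 * caused by an unsupported action. */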
446 static struct rte_flow *
447 netdev_offload_dpdk_flow_create(struct netdev *netdev,
448 const struct rte_flow_attr *attr,
449 const struct rte_flow_item *items,
450 const struct rte_flow_action *actions,
451 struct rte_flow_error *error)
452 {
453 struct rte_flow *flow;
454 struct ds s;
455
456 flow = netdev_dpdk_rte_flow_create(netdev, attr, items, actions, error);
457 if (flow) {
458 if (!VLOG_DROP_DBG(&rl)) {
459 ds_init(&s);
460 dump_flow(&s, attr, items, actions);
461 VLOG_DBG_RL(&rl, "%s: rte_flow 0x%"PRIxPTR" created:\n%s",
462 netdev_get_name(netdev), (intptr_t) flow, ds_cstr(&s));
463 ds_destroy(&s);
464 }
465 } else {
466 enum vlog_level level = VLL_WARN;
467
468 if (error->type == RTE_FLOW_ERROR_TYPE_ACTION) {
469 level = VLL_DBG;
470 }
471 VLOG_RL(&rl, level, "%s: rte_flow creation failed: %d (%s).",
472 netdev_get_name(netdev), error->type, error->message);
473 if (!vlog_should_drop(&this_module, level, &rl)) {
474 ds_init(&s);
475 dump_flow(&s, attr, items, actions);
476 VLOG_RL(&rl, level, "Failed flow:\n%s", ds_cstr(&s));
477 ds_destroy(&s);
478 }
479 }
480 return flow;
481 }
482
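/* Appends a pattern item of 'type' with the given 'spec' and 'mask' to
 * 'patterns', growing the items array as described above. */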
483 static void
484 add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
485 const void *spec, const void *mask)
486 {
487 int cnt = patterns->cnt;
488
489 if (cnt == 0) {
490 patterns->current_max = 8;
491 patterns->items = xcalloc(patterns->current_max,
492 sizeof *patterns->items);
493 } else if (cnt == patterns->current_max) {
494 patterns->current_max *= 2;
495 patterns->items = xrealloc(patterns->items, patterns->current_max *
496 sizeof *patterns->items);
497 }
498
499 patterns->items[cnt].type = type;
500 patterns->items[cnt].spec = spec;
501 patterns->items[cnt].mask = mask;
502 patterns->items[cnt].last = NULL;
503 patterns->cnt++;
504 }
505
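/* Appends an action of 'type' with configuration 'conf' to 'actions',
 * growing the actions array as needed. */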
506 static void
507 add_flow_action(struct flow_actions *actions, enum rte_flow_action_type type,
508 const void *conf)
509 {
510 int cnt = actions->cnt;
511
512 if (cnt == 0) {
513 actions->current_max = 8;
514 actions->actions = xcalloc(actions->current_max,
515 sizeof *actions->actions);
516 } else if (cnt == actions->current_max) {
517 actions->current_max *= 2;
518 actions->actions = xrealloc(actions->actions, actions->current_max *
519 sizeof *actions->actions);
520 }
521
522 actions->actions[cnt].type = type;
523 actions->actions[cnt].conf = conf;
524 actions->cnt++;
525 }
526
527 static void
528 free_flow_patterns(struct flow_patterns *patterns)
529 {
530 int i;
531
532 for (i = 0; i < patterns->cnt; i++) {
533 if (patterns->items[i].spec) {
534 free(CONST_CAST(void *, patterns->items[i].spec));
535 }
536 if (patterns->items[i].mask) {
537 free(CONST_CAST(void *, patterns->items[i].mask));
538 }
539 }
540 free(patterns->items);
541 patterns->items = NULL;
542 patterns->cnt = 0;
543 }
544
545 static void
546 free_flow_actions(struct flow_actions *actions)
547 {
548 int i;
549
550 for (i = 0; i < actions->cnt; i++) {
551 if (actions->actions[i].conf) {
552 free(CONST_CAST(void *, actions->actions[i].conf));
553 }
554 }
555 free(actions->actions);
556 actions->actions = NULL;
557 actions->cnt = 0;
558 }
559
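/* Translates the OVS 'match' into a list of rte_flow pattern items
 * (Ethernet, VLAN, IPv4, TCP, UDP, SCTP and ICMP), terminated by an END
 * item.  Returns 0 on success, -1 if the match cannot be expressed with
 * rte_flow patterns. */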
560 static int
561 parse_flow_match(struct flow_patterns *patterns,
562 const struct match *match)
563 {
564 uint8_t *next_proto_mask = NULL;
565 uint8_t proto = 0;
566
567 /* Eth */
568 if (!eth_addr_is_zero(match->wc.masks.dl_src) ||
569 !eth_addr_is_zero(match->wc.masks.dl_dst)) {
570 struct rte_flow_item_eth *spec, *mask;
571
572 spec = xzalloc(sizeof *spec);
573 mask = xzalloc(sizeof *mask);
574
575 memcpy(&spec->dst, &match->flow.dl_dst, sizeof spec->dst);
576 memcpy(&spec->src, &match->flow.dl_src, sizeof spec->src);
577 spec->type = match->flow.dl_type;
578
579 memcpy(&mask->dst, &match->wc.masks.dl_dst, sizeof mask->dst);
580 memcpy(&mask->src, &match->wc.masks.dl_src, sizeof mask->src);
581 mask->type = match->wc.masks.dl_type;
582
583 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, spec, mask);
584 } else {
585 /*
586          * If the user specifies a flow (like a UDP flow) without L2
587          * patterns, OVS will at least set the dl_type.  Normally, it's
588          * enough to create an eth pattern just with it.  Unfortunately,
589          * some Intel NICs (such as the XL710) don't support that.  Below
590          * is a workaround, which simply matches any L2 packets.
591 */
592 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL);
593 }
594
595 /* VLAN */
596 if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {
597 struct rte_flow_item_vlan *spec, *mask;
598
599 spec = xzalloc(sizeof *spec);
600 mask = xzalloc(sizeof *mask);
601
602 spec->tci = match->flow.vlans[0].tci & ~htons(VLAN_CFI);
603 mask->tci = match->wc.masks.vlans[0].tci & ~htons(VLAN_CFI);
604
605         /* Match any protocol. */
606 mask->inner_type = 0;
607
608 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VLAN, spec, mask);
609 }
610
611 /* IP v4 */
612 if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
613 struct rte_flow_item_ipv4 *spec, *mask;
614
615 spec = xzalloc(sizeof *spec);
616 mask = xzalloc(sizeof *mask);
617
618 spec->hdr.type_of_service = match->flow.nw_tos;
619 spec->hdr.time_to_live = match->flow.nw_ttl;
620 spec->hdr.next_proto_id = match->flow.nw_proto;
621 spec->hdr.src_addr = match->flow.nw_src;
622 spec->hdr.dst_addr = match->flow.nw_dst;
623
624 mask->hdr.type_of_service = match->wc.masks.nw_tos;
625 mask->hdr.time_to_live = match->wc.masks.nw_ttl;
626 mask->hdr.next_proto_id = match->wc.masks.nw_proto;
627 mask->hdr.src_addr = match->wc.masks.nw_src;
628 mask->hdr.dst_addr = match->wc.masks.nw_dst;
629
630 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);
631
632 /* Save proto for L4 protocol setup. */
633 proto = spec->hdr.next_proto_id &
634 mask->hdr.next_proto_id;
635 next_proto_mask = &mask->hdr.next_proto_id;
636 }
637
638 if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP &&
639 proto != IPPROTO_SCTP && proto != IPPROTO_TCP &&
640 (match->wc.masks.tp_src ||
641 match->wc.masks.tp_dst ||
642 match->wc.masks.tcp_flags)) {
643 VLOG_DBG("L4 Protocol (%u) not supported", proto);
644 return -1;
645 }
646
647 if ((match->wc.masks.tp_src && match->wc.masks.tp_src != OVS_BE16_MAX) ||
648 (match->wc.masks.tp_dst && match->wc.masks.tp_dst != OVS_BE16_MAX)) {
649 return -1;
650 }
651
652 if (proto == IPPROTO_TCP) {
653 struct rte_flow_item_tcp *spec, *mask;
654
655 spec = xzalloc(sizeof *spec);
656 mask = xzalloc(sizeof *mask);
657
658 spec->hdr.src_port = match->flow.tp_src;
659 spec->hdr.dst_port = match->flow.tp_dst;
660 spec->hdr.data_off = ntohs(match->flow.tcp_flags) >> 8;
661 spec->hdr.tcp_flags = ntohs(match->flow.tcp_flags) & 0xff;
662
663 mask->hdr.src_port = match->wc.masks.tp_src;
664 mask->hdr.dst_port = match->wc.masks.tp_dst;
665 mask->hdr.data_off = ntohs(match->wc.masks.tcp_flags) >> 8;
666 mask->hdr.tcp_flags = ntohs(match->wc.masks.tcp_flags) & 0xff;
667
668 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_TCP, spec, mask);
669
670 /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
671 if (next_proto_mask) {
672 *next_proto_mask = 0;
673 }
674 } else if (proto == IPPROTO_UDP) {
675 struct rte_flow_item_udp *spec, *mask;
676
677 spec = xzalloc(sizeof *spec);
678 mask = xzalloc(sizeof *mask);
679
680 spec->hdr.src_port = match->flow.tp_src;
681 spec->hdr.dst_port = match->flow.tp_dst;
682
683 mask->hdr.src_port = match->wc.masks.tp_src;
684 mask->hdr.dst_port = match->wc.masks.tp_dst;
685
686 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
687
688 /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
689 if (next_proto_mask) {
690 *next_proto_mask = 0;
691 }
692 } else if (proto == IPPROTO_SCTP) {
693 struct rte_flow_item_sctp *spec, *mask;
694
695 spec = xzalloc(sizeof *spec);
696 mask = xzalloc(sizeof *mask);
697
698 spec->hdr.src_port = match->flow.tp_src;
699 spec->hdr.dst_port = match->flow.tp_dst;
700
701 mask->hdr.src_port = match->wc.masks.tp_src;
702 mask->hdr.dst_port = match->wc.masks.tp_dst;
703
704 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_SCTP, spec, mask);
705
706 /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
707 if (next_proto_mask) {
708 *next_proto_mask = 0;
709 }
710 } else if (proto == IPPROTO_ICMP) {
711 struct rte_flow_item_icmp *spec, *mask;
712
713 spec = xzalloc(sizeof *spec);
714 mask = xzalloc(sizeof *mask);
715
716 spec->hdr.icmp_type = (uint8_t) ntohs(match->flow.tp_src);
717 spec->hdr.icmp_code = (uint8_t) ntohs(match->flow.tp_dst);
718
719 mask->hdr.icmp_type = (uint8_t) ntohs(match->wc.masks.tp_src);
720 mask->hdr.icmp_code = (uint8_t) ntohs(match->wc.masks.tp_dst);
721
722 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ICMP, spec, mask);
723
724 /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
725 if (next_proto_mask) {
726 *next_proto_mask = 0;
727 }
728 }
729
730 add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);
731
732 return 0;
733 }
734
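/* Builds the partial-offload action list: a MARK action carrying
 * 'flow_mark' so the datapath can identify the flow on receive, followed
 * by an RSS action spreading packets over all of 'netdev's Rx queues. */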
735 static void
736 add_flow_mark_rss_actions(struct flow_actions *actions,
737 uint32_t flow_mark,
738 const struct netdev *netdev)
739 {
740 struct rte_flow_action_mark *mark;
741 struct action_rss_data {
742 struct rte_flow_action_rss conf;
743 uint16_t queue[0];
744 } *rss_data;
745 BUILD_ASSERT_DECL(offsetof(struct action_rss_data, conf) == 0);
746 int i;
747
748 mark = xzalloc(sizeof *mark);
749
750 mark->id = flow_mark;
751 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_MARK, mark);
752
753 rss_data = xmalloc(sizeof *rss_data +
754 netdev_n_rxq(netdev) * sizeof rss_data->queue[0]);
755 *rss_data = (struct action_rss_data) {
756 .conf = (struct rte_flow_action_rss) {
757 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
758 .level = 0,
759 .types = 0,
760 .queue_num = netdev_n_rxq(netdev),
761 .queue = rss_data->queue,
762 .key_len = 0,
763 .key = NULL
764 },
765 };
766
767 /* Override queue array with default. */
768 for (i = 0; i < netdev_n_rxq(netdev); i++) {
769 rss_data->queue[i] = i;
770 }
771
772 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_RSS, &rss_data->conf);
773 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
774 }
775
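/* Creates an rte_flow that matches 'patterns' and applies MARK + RSS
 * actions, i.e. a partial offload where classification is done in hardware
 * while the actions are still executed by the datapath. */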
776 static struct rte_flow *
777 netdev_offload_dpdk_mark_rss(struct flow_patterns *patterns,
778 struct netdev *netdev,
779 uint32_t flow_mark)
780 {
781 struct flow_actions actions = { .actions = NULL, .cnt = 0 };
782 const struct rte_flow_attr flow_attr = {
783 .group = 0,
784 .priority = 0,
785 .ingress = 1,
786 .egress = 0
787 };
788 struct rte_flow_error error;
789 struct rte_flow *flow;
790
791 add_flow_mark_rss_actions(&actions, flow_mark, netdev);
792
793 flow = netdev_offload_dpdk_flow_create(netdev, &flow_attr, patterns->items,
794 actions.actions, &error);
795
796 free_flow_actions(&actions);
797 return flow;
798 }
799
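/* Appends a COUNT action so that hit and byte statistics can later be
 * queried from the hardware. */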
800 static void
801 add_count_action(struct flow_actions *actions)
802 {
803 struct rte_flow_action_count *count = xzalloc(sizeof *count);
804
805 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_COUNT, count);
806 }
807
808 static int
809 add_port_id_action(struct flow_actions *actions,
810 struct netdev *outdev)
811 {
812 struct rte_flow_action_port_id *port_id;
813 int outdev_id;
814
815 outdev_id = netdev_dpdk_get_port_id(outdev);
816 if (outdev_id < 0) {
817 return -1;
818 }
819 port_id = xzalloc(sizeof *port_id);
820 port_id->id = outdev_id;
821 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_PORT_ID, port_id);
822 return 0;
823 }
824
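/* Translates an OVS output action into a PORT_ID action towards the
 * destination netdev.  Fails if the destination netdev cannot be found,
 * uses a different flow API, or has no DPDK port id. */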
825 static int
826 add_output_action(struct netdev *netdev,
827 struct flow_actions *actions,
828 const struct nlattr *nla,
829 struct offload_info *info)
830 {
831 struct netdev *outdev;
832 odp_port_t port;
833 int ret = 0;
834
835 port = nl_attr_get_odp_port(nla);
836 outdev = netdev_ports_get(port, info->dpif_class);
837 if (outdev == NULL) {
838 VLOG_DBG_RL(&rl, "Cannot find netdev for odp port %"PRIu32, port);
839 return -1;
840 }
841 if (!netdev_flow_api_equals(netdev, outdev) ||
842 add_port_id_action(actions, outdev)) {
843 VLOG_DBG_RL(&rl, "%s: Output to port \'%s\' cannot be offloaded.",
844 netdev_get_name(netdev), netdev_get_name(outdev));
845 ret = -1;
846 }
847 netdev_close(outdev);
848 return ret;
849 }
850
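/* Appends a single set action of type 'attr' carrying 'value'.  An
 * all-zero 'mask' skips the action, a partial 'mask' fails the offload,
 * and the 'mask' is cleared afterwards so the caller can detect
 * unsupported leftovers. */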
851 static int
852 add_set_flow_action__(struct flow_actions *actions,
853 const void *value, void *mask,
854 const size_t size, const int attr)
855 {
856 void *spec;
857
858 if (mask) {
859 /* DPDK does not support partially masked set actions. In such
860 * case, fail the offload.
861 */
862 if (is_all_zeros(mask, size)) {
863 return 0;
864 }
865 if (!is_all_ones(mask, size)) {
866 VLOG_DBG_RL(&rl, "Partial mask is not supported");
867 return -1;
868 }
869 }
870
871 spec = xzalloc(size);
872 memcpy(spec, value, size);
873 add_flow_action(actions, attr, spec);
874
875 /* Clear used mask for later checking. */
876 if (mask) {
877 memset(mask, 0, size);
878 }
879 return 0;
880 }
881
882 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_mac) ==
883 MEMBER_SIZEOF(struct ovs_key_ethernet, eth_src));
884 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_mac) ==
885 MEMBER_SIZEOF(struct ovs_key_ethernet, eth_dst));
886 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv4) ==
887 MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_src));
888 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv4) ==
889 MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_dst));
890 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ttl) ==
891 MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_ttl));
892 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
893 MEMBER_SIZEOF(struct ovs_key_tcp, tcp_src));
894 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
895 MEMBER_SIZEOF(struct ovs_key_tcp, tcp_dst));
896 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
897 MEMBER_SIZEOF(struct ovs_key_udp, udp_src));
898 BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
899 MEMBER_SIZEOF(struct ovs_key_udp, udp_dst));
900
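/* Translates the nested attributes of an OVS set / set_masked action
 * (Ethernet, IPv4, TCP and UDP keys) into the corresponding rte_flow
 * SET_* actions.  Returns -1 for partially masked or otherwise
 * unsupported set actions. */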
901 static int
902 parse_set_actions(struct flow_actions *actions,
903 const struct nlattr *set_actions,
904 const size_t set_actions_len,
905 bool masked)
906 {
907 const struct nlattr *sa;
908 unsigned int sleft;
909
910 #define add_set_flow_action(field, type) \
911 if (add_set_flow_action__(actions, &key->field, \
912 mask ? CONST_CAST(void *, &mask->field) : NULL, \
913 sizeof key->field, type)) { \
914 return -1; \
915 }
916
917 NL_ATTR_FOR_EACH_UNSAFE (sa, sleft, set_actions, set_actions_len) {
918 if (nl_attr_type(sa) == OVS_KEY_ATTR_ETHERNET) {
919 const struct ovs_key_ethernet *key = nl_attr_get(sa);
920 const struct ovs_key_ethernet *mask = masked ? key + 1 : NULL;
921
922 add_set_flow_action(eth_src, RTE_FLOW_ACTION_TYPE_SET_MAC_SRC);
923 add_set_flow_action(eth_dst, RTE_FLOW_ACTION_TYPE_SET_MAC_DST);
924
925 if (mask && !is_all_zeros(mask, sizeof *mask)) {
926 VLOG_DBG_RL(&rl, "Unsupported ETHERNET set action");
927 return -1;
928 }
929 } else if (nl_attr_type(sa) == OVS_KEY_ATTR_IPV4) {
930 const struct ovs_key_ipv4 *key = nl_attr_get(sa);
931 const struct ovs_key_ipv4 *mask = masked ? key + 1 : NULL;
932
933 add_set_flow_action(ipv4_src, RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC);
934 add_set_flow_action(ipv4_dst, RTE_FLOW_ACTION_TYPE_SET_IPV4_DST);
935 add_set_flow_action(ipv4_ttl, RTE_FLOW_ACTION_TYPE_SET_TTL);
936
937 if (mask && !is_all_zeros(mask, sizeof *mask)) {
938 VLOG_DBG_RL(&rl, "Unsupported IPv4 set action");
939 return -1;
940 }
941 } else if (nl_attr_type(sa) == OVS_KEY_ATTR_TCP) {
942 const struct ovs_key_tcp *key = nl_attr_get(sa);
943 const struct ovs_key_tcp *mask = masked ? key + 1 : NULL;
944
945 add_set_flow_action(tcp_src, RTE_FLOW_ACTION_TYPE_SET_TP_SRC);
946 add_set_flow_action(tcp_dst, RTE_FLOW_ACTION_TYPE_SET_TP_DST);
947
948 if (mask && !is_all_zeros(mask, sizeof *mask)) {
949 VLOG_DBG_RL(&rl, "Unsupported TCP set action");
950 return -1;
951 }
952 } else if (nl_attr_type(sa) == OVS_KEY_ATTR_UDP) {
953 const struct ovs_key_udp *key = nl_attr_get(sa);
954 const struct ovs_key_udp *mask = masked ? key + 1 : NULL;
955
956 add_set_flow_action(udp_src, RTE_FLOW_ACTION_TYPE_SET_TP_SRC);
957 add_set_flow_action(udp_dst, RTE_FLOW_ACTION_TYPE_SET_TP_DST);
958
959 if (mask && !is_all_zeros(mask, sizeof *mask)) {
960 VLOG_DBG_RL(&rl, "Unsupported UDP set action");
961 return -1;
962 }
963 } else {
964 VLOG_DBG_RL(&rl,
965 "Unsupported set action type %d", nl_attr_type(sa));
966 return -1;
967 }
968 }
969
970 return 0;
971 }
972
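/* Translates the OVS actions in 'nl_actions' into a list of rte_flow
 * actions, starting with a COUNT action and terminated by an END action.
 * Returns -1 if any action cannot be offloaded. */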
973 static int
974 parse_flow_actions(struct netdev *netdev,
975 struct flow_actions *actions,
976 struct nlattr *nl_actions,
977 size_t nl_actions_len,
978 struct offload_info *info)
979 {
980 struct nlattr *nla;
981 size_t left;
982
983 add_count_action(actions);
984 NL_ATTR_FOR_EACH_UNSAFE (nla, left, nl_actions, nl_actions_len) {
985 if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
986 if (add_output_action(netdev, actions, nla, info)) {
987 return -1;
988 }
989 } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_DROP) {
990 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_DROP, NULL);
991 } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET ||
992 nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
993 const struct nlattr *set_actions = nl_attr_get(nla);
994 const size_t set_actions_len = nl_attr_get_size(nla);
995 bool masked = nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED;
996
997 if (parse_set_actions(actions, set_actions, set_actions_len,
998 masked)) {
999 return -1;
1000 }
1001 } else {
1002 VLOG_DBG_RL(&rl, "Unsupported action type %d", nl_attr_type(nla));
1003 return -1;
1004 }
1005 }
1006
1007 if (nl_actions_len == 0) {
1008 VLOG_DBG_RL(&rl, "No actions provided");
1009 return -1;
1010 }
1011
1012 add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
1013 return 0;
1014 }
1015
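/* Attempts a full offload: parses 'nl_actions' and creates a transfer
 * rte_flow for 'patterns'.  Returns NULL if parsing or flow creation
 * fails. */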
1016 static struct rte_flow *
1017 netdev_offload_dpdk_actions(struct netdev *netdev,
1018 struct flow_patterns *patterns,
1019 struct nlattr *nl_actions,
1020 size_t actions_len,
1021 struct offload_info *info)
1022 {
1023 const struct rte_flow_attr flow_attr = { .ingress = 1, .transfer = 1 };
1024 struct flow_actions actions = { .actions = NULL, .cnt = 0 };
1025 struct rte_flow *flow = NULL;
1026 struct rte_flow_error error;
1027 int ret;
1028
1029 ret = parse_flow_actions(netdev, &actions, nl_actions, actions_len, info);
1030 if (ret) {
1031 goto out;
1032 }
1033 flow = netdev_offload_dpdk_flow_create(netdev, &flow_attr, patterns->items,
1034 actions.actions, &error);
1035 out:
1036 free_flow_actions(&actions);
1037 return flow;
1038 }
1039
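/* Offloads a flow to 'netdev'.  Full offload of match and actions is
 * tried first; if that fails, the function falls back to a partial
 * MARK + RSS offload.  On success, the resulting rte_flow is associated
 * with 'ufid'. */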
1040 static int
1041 netdev_offload_dpdk_add_flow(struct netdev *netdev,
1042 const struct match *match,
1043 struct nlattr *nl_actions,
1044 size_t actions_len,
1045 const ovs_u128 *ufid,
1046 struct offload_info *info)
1047 {
1048 struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
1049 bool actions_offloaded = true;
1050 struct rte_flow *flow;
1051 int ret = 0;
1052
1053 ret = parse_flow_match(&patterns, match);
1054 if (ret) {
1055 goto out;
1056 }
1057
1058 flow = netdev_offload_dpdk_actions(netdev, &patterns, nl_actions,
1059 actions_len, info);
1060 if (!flow) {
1061         /* If we failed to offload the rule actions, fall back to
1062          * MARK+RSS actions.
1063          */
1064 flow = netdev_offload_dpdk_mark_rss(&patterns, netdev,
1065 info->flow_mark);
1066 actions_offloaded = false;
1067 }
1068
1069 if (!flow) {
1070 ret = -1;
1071 goto out;
1072 }
1073 ufid_to_rte_flow_associate(ufid, flow, actions_offloaded);
1074 VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT"\n",
1075 netdev_get_name(netdev), flow, UUID_ARGS((struct uuid *)ufid));
1076
1077 out:
1078 free_flow_patterns(&patterns);
1079 return ret;
1080 }
1081
1082 /*
1083 * Check if any unsupported flow patterns are specified.
1084 */
1085 static int
1086 netdev_offload_dpdk_validate_flow(const struct match *match)
1087 {
1088 struct match match_zero_wc;
1089 const struct flow *masks = &match->wc.masks;
1090
1091 /* Create a wc-zeroed version of flow. */
1092 match_init(&match_zero_wc, &match->flow, &match->wc);
1093
1094 if (!is_all_zeros(&match_zero_wc.flow.tunnel,
1095 sizeof match_zero_wc.flow.tunnel)) {
1096 goto err;
1097 }
1098
1099 if (masks->metadata || masks->skb_priority ||
1100 masks->pkt_mark || masks->dp_hash) {
1101 goto err;
1102 }
1103
1104 /* recirc id must be zero. */
1105 if (match_zero_wc.flow.recirc_id) {
1106 goto err;
1107 }
1108
1109 if (masks->ct_state || masks->ct_nw_proto ||
1110 masks->ct_zone || masks->ct_mark ||
1111 !ovs_u128_is_zero(masks->ct_label)) {
1112 goto err;
1113 }
1114
1115 if (masks->conj_id || masks->actset_output) {
1116 goto err;
1117 }
1118
1119 /* Unsupported L2. */
1120 if (!is_all_zeros(masks->mpls_lse, sizeof masks->mpls_lse)) {
1121 goto err;
1122 }
1123
1124 /* Unsupported L3. */
1125 if (masks->ipv6_label || masks->ct_nw_src || masks->ct_nw_dst ||
1126 !is_all_zeros(&masks->ipv6_src, sizeof masks->ipv6_src) ||
1127 !is_all_zeros(&masks->ipv6_dst, sizeof masks->ipv6_dst) ||
1128 !is_all_zeros(&masks->ct_ipv6_src, sizeof masks->ct_ipv6_src) ||
1129 !is_all_zeros(&masks->ct_ipv6_dst, sizeof masks->ct_ipv6_dst) ||
1130 !is_all_zeros(&masks->nd_target, sizeof masks->nd_target) ||
1131 !is_all_zeros(&masks->nsh, sizeof masks->nsh) ||
1132 !is_all_zeros(&masks->arp_sha, sizeof masks->arp_sha) ||
1133 !is_all_zeros(&masks->arp_tha, sizeof masks->arp_tha)) {
1134 goto err;
1135 }
1136
1137 /* If fragmented, then don't HW accelerate - for now. */
1138 if (match_zero_wc.flow.nw_frag) {
1139 goto err;
1140 }
1141
1142 /* Unsupported L4. */
1143 if (masks->igmp_group_ip4 || masks->ct_tp_src || masks->ct_tp_dst) {
1144 goto err;
1145 }
1146
1147 return 0;
1148
1149 err:
1150 VLOG_ERR("cannot HW accelerate this flow due to unsupported protocols");
1151 return -1;
1152 }
1153
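/* Destroys 'rte_flow' on 'netdev' and, on success, removes its ufid
 * mapping. */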
1154 static int
1155 netdev_offload_dpdk_destroy_flow(struct netdev *netdev,
1156 const ovs_u128 *ufid,
1157 struct rte_flow *rte_flow)
1158 {
1159 struct rte_flow_error error;
1160 int ret = netdev_dpdk_rte_flow_destroy(netdev, rte_flow, &error);
1161
1162 if (ret == 0) {
1163 ufid_to_rte_flow_disassociate(ufid);
1164 VLOG_DBG("%s: removed rte flow %p associated with ufid " UUID_FMT "\n",
1165 netdev_get_name(netdev), rte_flow,
1166 UUID_ARGS((struct uuid *)ufid));
1167 } else {
1168 VLOG_ERR("%s: Failed to destroy flow: %s (%u)\n",
1169 netdev_get_name(netdev), error.message, error.type);
1170 }
1171
1172 return ret;
1173 }
1174
1175 static int
1176 netdev_offload_dpdk_flow_put(struct netdev *netdev, struct match *match,
1177 struct nlattr *actions, size_t actions_len,
1178 const ovs_u128 *ufid, struct offload_info *info,
1179 struct dpif_flow_stats *stats)
1180 {
1181 struct ufid_to_rte_flow_data *rte_flow_data;
1182 int ret;
1183
1184 /*
1185      * If an old rte_flow exists, it means this is a flow modification.
1186      * Destroy the old rte flow first before adding a new one.
1187 */
1188 rte_flow_data = ufid_to_rte_flow_data_find(ufid);
1189 if (rte_flow_data && rte_flow_data->rte_flow) {
1190 ret = netdev_offload_dpdk_destroy_flow(netdev, ufid,
1191 rte_flow_data->rte_flow);
1192 if (ret < 0) {
1193 return ret;
1194 }
1195 }
1196
1197 ret = netdev_offload_dpdk_validate_flow(match);
1198 if (ret < 0) {
1199 return ret;
1200 }
1201
1202 if (stats) {
1203 memset(stats, 0, sizeof *stats);
1204 }
1205 return netdev_offload_dpdk_add_flow(netdev, match, actions,
1206 actions_len, ufid, info);
1207 }
1208
1209 static int
1210 netdev_offload_dpdk_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
1211 struct dpif_flow_stats *stats)
1212 {
1213 struct ufid_to_rte_flow_data *rte_flow_data;
1214
1215 rte_flow_data = ufid_to_rte_flow_data_find(ufid);
1216 if (!rte_flow_data || !rte_flow_data->rte_flow) {
1217 return -1;
1218 }
1219
1220 if (stats) {
1221 memset(stats, 0, sizeof *stats);
1222 }
1223 return netdev_offload_dpdk_destroy_flow(netdev, ufid,
1224 rte_flow_data->rte_flow);
1225 }
1226
1227 static int
1228 netdev_offload_dpdk_init_flow_api(struct netdev *netdev)
1229 {
1230 return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
1231 }
1232
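/* Fills 'stats' and 'attrs' for the flow identified by 'ufid'.  For fully
 * offloaded flows the hardware counter is queried and accumulated; for
 * partially offloaded flows the stats are zeroed and dp_layer is reported
 * as "ovs". */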
1233 static int
1234 netdev_offload_dpdk_flow_get(struct netdev *netdev,
1235 struct match *match OVS_UNUSED,
1236 struct nlattr **actions OVS_UNUSED,
1237 const ovs_u128 *ufid,
1238 struct dpif_flow_stats *stats,
1239 struct dpif_flow_attrs *attrs,
1240 struct ofpbuf *buf OVS_UNUSED)
1241 {
1242 struct rte_flow_query_count query = { .reset = 1 };
1243 struct ufid_to_rte_flow_data *rte_flow_data;
1244 struct rte_flow_error error;
1245 int ret = 0;
1246
1247 rte_flow_data = ufid_to_rte_flow_data_find(ufid);
1248 if (!rte_flow_data || !rte_flow_data->rte_flow) {
1249 ret = -1;
1250 goto out;
1251 }
1252
1253 attrs->offloaded = true;
1254 if (!rte_flow_data->actions_offloaded) {
1255 attrs->dp_layer = "ovs";
1256 memset(stats, 0, sizeof *stats);
1257 goto out;
1258 }
1259 attrs->dp_layer = "dpdk";
1260 ret = netdev_dpdk_rte_flow_query_count(netdev, rte_flow_data->rte_flow,
1261 &query, &error);
1262 if (ret) {
1263 VLOG_DBG_RL(&rl, "%s: Failed to query ufid "UUID_FMT" flow: %p\n",
1264 netdev_get_name(netdev), UUID_ARGS((struct uuid *) ufid),
1265 rte_flow_data->rte_flow);
1266 goto out;
1267 }
1268 rte_flow_data->stats.n_packets += (query.hits_set) ? query.hits : 0;
1269 rte_flow_data->stats.n_bytes += (query.bytes_set) ? query.bytes : 0;
1270 if (query.hits_set && query.hits) {
1271 rte_flow_data->stats.used = time_msec();
1272 }
1273 memcpy(stats, &rte_flow_data->stats, sizeof *stats);
1274 out:
1275 return ret;
1276 }
1277
1278 const struct netdev_flow_api netdev_offload_dpdk = {
1279 .type = "dpdk_flow_api",
1280 .flow_put = netdev_offload_dpdk_flow_put,
1281 .flow_del = netdev_offload_dpdk_flow_del,
1282 .init_flow_api = netdev_offload_dpdk_init_flow_api,
1283 .flow_get = netdev_offload_dpdk_flow_get,
1284 };