/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 * Copyright (c) 2019 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include <sys/types.h>
#include <netinet/ip6.h>
#include <rte_flow.h>

#include "cmap.h"
#include "dpif-netdev.h"
#include "netdev-offload-provider.h"
#include "netdev-provider.h"
#include "openvswitch/match.h"
#include "openvswitch/vlog.h"
#include "packets.h"
#include "uuid.h"

VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);

/* Thread-safety
 * =============
 *
 * The API below is NOT thread safe in the following ways:
 *
 * - The caller must make sure that none of these functions is called
 *   simultaneously, even for different 'netdev's.
 *
 * - The caller must make sure that 'netdev' will not be destructed or
 *   deallocated.
 *
 * - The caller must make sure that the 'netdev' configuration will not be
 *   changed.  For example, simultaneous calls of 'netdev_reconfigure()'
 *   for the same 'netdev' are forbidden.
 *
 * For the current implementation, all of the above restrictions can be
 * fulfilled by taking the datapath 'port_mutex' in lib/dpif-netdev.c. */

/*
 * A mapping from ufid to dpdk rte_flow.
 */
static struct cmap ufid_to_rte_flow = CMAP_INITIALIZER;

struct ufid_to_rte_flow_data {
    struct cmap_node node;
    ovs_u128 ufid;
    struct netdev *netdev;
    struct rte_flow *rte_flow;
    bool actions_offloaded;
    struct dpif_flow_stats stats;
};

/* Find rte_flow with @ufid. */
static struct ufid_to_rte_flow_data *
ufid_to_rte_flow_data_find(const ovs_u128 *ufid, bool warn)
{
    size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_to_rte_flow_data *data;

    CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            return data;
        }
    }

    if (warn) {
        VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow",
                  UUID_ARGS((struct uuid *) ufid));
    }

    return NULL;
}

static inline struct ufid_to_rte_flow_data *
ufid_to_rte_flow_associate(const ovs_u128 *ufid, struct netdev *netdev,
                           struct rte_flow *rte_flow, bool actions_offloaded)
{
    size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_to_rte_flow_data *data = xzalloc(sizeof *data);
    struct ufid_to_rte_flow_data *data_prev;

    /*
     * We should not simply overwrite an existing rte flow.
     * It should have been deleted first before being re-added.
     * Thus, if the following assert triggers, something is wrong:
     * the rte_flow was not destroyed.
     */
    data_prev = ufid_to_rte_flow_data_find(ufid, false);
    if (data_prev) {
        ovs_assert(data_prev->rte_flow == NULL);
    }

    data->ufid = *ufid;
    data->netdev = netdev_ref(netdev);
    data->rte_flow = rte_flow;
    data->actions_offloaded = actions_offloaded;

    cmap_insert(&ufid_to_rte_flow,
                CONST_CAST(struct cmap_node *, &data->node), hash);
    return data;
}

static inline void
ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
{
    size_t hash = hash_bytes(&data->ufid, sizeof data->ufid, 0);

    cmap_remove(&ufid_to_rte_flow,
                CONST_CAST(struct cmap_node *, &data->node), hash);
    netdev_close(data->netdev);
    ovsrcu_postpone(free, data);
}
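
/* Note: 'ufid_to_rte_flow' is an RCU-protected cmap, which is why 'data' is
 * freed above via ovsrcu_postpone(): concurrent readers may still hold a
 * pointer to it until the next grace period. */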

/*
 * To avoid individual xrealloc calls for each new element, a 'current_max'
 * is used to keep track of the current allocated number of elements.  It
 * starts at 8 and doubles on each xrealloc call.
 */
struct flow_patterns {
    struct rte_flow_item *items;
    int cnt;
    int current_max;
};

struct flow_actions {
    struct rte_flow_action *actions;
    int cnt;
    int current_max;
};

static void
dump_flow_attr(struct ds *s, const struct rte_flow_attr *attr)
{
    ds_put_format(s, "%s%spriority %"PRIu32" group %"PRIu32" %s",
                  attr->ingress ? "ingress " : "",
                  attr->egress ? "egress " : "", attr->priority, attr->group,
                  attr->transfer ? "transfer " : "");
}

/* Adds one pattern item 'field' with the 'mask' to dynamic string 's' using
 * 'testpmd command'-like format. */
#define DUMP_PATTERN_ITEM(mask, field, fmt, spec_pri, mask_pri) \
    if (is_all_ones(&mask, sizeof mask)) { \
        ds_put_format(s, field " is " fmt " ", spec_pri); \
    } else if (!is_all_zeros(&mask, sizeof mask)) { \
        ds_put_format(s, field " spec " fmt " " field " mask " fmt " ", \
                      spec_pri, mask_pri); \
    }

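/* For example, a fully masked IPv4 source prints as "src is 1.2.3.4 ", a
 * partially masked one as "src spec 1.2.3.4 src mask 255.255.255.0 ", and an
 * all-zero mask prints nothing (the field is ignored). */
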
static void
dump_flow_pattern(struct ds *s, const struct rte_flow_item *item)
{
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        const struct rte_flow_item_eth *eth_spec = item->spec;
        const struct rte_flow_item_eth *eth_mask = item->mask;

        ds_put_cstr(s, "eth ");
        if (eth_spec) {
            if (!eth_mask) {
                eth_mask = &rte_flow_item_eth_mask;
            }
            DUMP_PATTERN_ITEM(eth_mask->src, "src", ETH_ADDR_FMT,
                              ETH_ADDR_BYTES_ARGS(eth_spec->src.addr_bytes),
                              ETH_ADDR_BYTES_ARGS(eth_mask->src.addr_bytes));
            DUMP_PATTERN_ITEM(eth_mask->dst, "dst", ETH_ADDR_FMT,
                              ETH_ADDR_BYTES_ARGS(eth_spec->dst.addr_bytes),
                              ETH_ADDR_BYTES_ARGS(eth_mask->dst.addr_bytes));
            DUMP_PATTERN_ITEM(eth_mask->type, "type", "0x%04"PRIx16,
                              ntohs(eth_spec->type),
                              ntohs(eth_mask->type));
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        const struct rte_flow_item_vlan *vlan_spec = item->spec;
        const struct rte_flow_item_vlan *vlan_mask = item->mask;

        ds_put_cstr(s, "vlan ");
        if (vlan_spec) {
            if (!vlan_mask) {
                vlan_mask = &rte_flow_item_vlan_mask;
            }
            DUMP_PATTERN_ITEM(vlan_mask->inner_type, "inner_type", "0x%"PRIx16,
                              ntohs(vlan_spec->inner_type),
                              ntohs(vlan_mask->inner_type));
            DUMP_PATTERN_ITEM(vlan_mask->tci, "tci", "0x%"PRIx16,
                              ntohs(vlan_spec->tci), ntohs(vlan_mask->tci));
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
        const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
        const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;

        ds_put_cstr(s, "ipv4 ");
        if (ipv4_spec) {
            if (!ipv4_mask) {
                ipv4_mask = &rte_flow_item_ipv4_mask;
            }
            DUMP_PATTERN_ITEM(ipv4_mask->hdr.src_addr, "src", IP_FMT,
                              IP_ARGS(ipv4_spec->hdr.src_addr),
                              IP_ARGS(ipv4_mask->hdr.src_addr));
            DUMP_PATTERN_ITEM(ipv4_mask->hdr.dst_addr, "dst", IP_FMT,
                              IP_ARGS(ipv4_spec->hdr.dst_addr),
                              IP_ARGS(ipv4_mask->hdr.dst_addr));
            DUMP_PATTERN_ITEM(ipv4_mask->hdr.next_proto_id, "proto",
                              "0x%"PRIx8, ipv4_spec->hdr.next_proto_id,
                              ipv4_mask->hdr.next_proto_id);
            DUMP_PATTERN_ITEM(ipv4_mask->hdr.type_of_service, "tos",
                              "0x%"PRIx8, ipv4_spec->hdr.type_of_service,
                              ipv4_mask->hdr.type_of_service);
            DUMP_PATTERN_ITEM(ipv4_mask->hdr.time_to_live, "ttl",
                              "0x%"PRIx8, ipv4_spec->hdr.time_to_live,
                              ipv4_mask->hdr.time_to_live);
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        const struct rte_flow_item_udp *udp_spec = item->spec;
        const struct rte_flow_item_udp *udp_mask = item->mask;

        ds_put_cstr(s, "udp ");
        if (udp_spec) {
            if (!udp_mask) {
                udp_mask = &rte_flow_item_udp_mask;
            }
            DUMP_PATTERN_ITEM(udp_mask->hdr.src_port, "src", "%"PRIu16,
                              ntohs(udp_spec->hdr.src_port),
                              ntohs(udp_mask->hdr.src_port));
            DUMP_PATTERN_ITEM(udp_mask->hdr.dst_port, "dst", "%"PRIu16,
                              ntohs(udp_spec->hdr.dst_port),
                              ntohs(udp_mask->hdr.dst_port));
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        const struct rte_flow_item_sctp *sctp_spec = item->spec;
        const struct rte_flow_item_sctp *sctp_mask = item->mask;

        ds_put_cstr(s, "sctp ");
        if (sctp_spec) {
            if (!sctp_mask) {
                sctp_mask = &rte_flow_item_sctp_mask;
            }
            DUMP_PATTERN_ITEM(sctp_mask->hdr.src_port, "src", "%"PRIu16,
                              ntohs(sctp_spec->hdr.src_port),
                              ntohs(sctp_mask->hdr.src_port));
            DUMP_PATTERN_ITEM(sctp_mask->hdr.dst_port, "dst", "%"PRIu16,
                              ntohs(sctp_spec->hdr.dst_port),
                              ntohs(sctp_mask->hdr.dst_port));
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
        const struct rte_flow_item_icmp *icmp_spec = item->spec;
        const struct rte_flow_item_icmp *icmp_mask = item->mask;

        ds_put_cstr(s, "icmp ");
        if (icmp_spec) {
            if (!icmp_mask) {
                icmp_mask = &rte_flow_item_icmp_mask;
            }
            DUMP_PATTERN_ITEM(icmp_mask->hdr.icmp_type, "icmp_type", "%"PRIu8,
                              icmp_spec->hdr.icmp_type,
                              icmp_mask->hdr.icmp_type);
            DUMP_PATTERN_ITEM(icmp_mask->hdr.icmp_code, "icmp_code", "%"PRIu8,
                              icmp_spec->hdr.icmp_code,
                              icmp_mask->hdr.icmp_code);
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        const struct rte_flow_item_tcp *tcp_spec = item->spec;
        const struct rte_flow_item_tcp *tcp_mask = item->mask;

        ds_put_cstr(s, "tcp ");
        if (tcp_spec) {
            if (!tcp_mask) {
                tcp_mask = &rte_flow_item_tcp_mask;
            }
            DUMP_PATTERN_ITEM(tcp_mask->hdr.src_port, "src", "%"PRIu16,
                              ntohs(tcp_spec->hdr.src_port),
                              ntohs(tcp_mask->hdr.src_port));
            DUMP_PATTERN_ITEM(tcp_mask->hdr.dst_port, "dst", "%"PRIu16,
                              ntohs(tcp_spec->hdr.dst_port),
                              ntohs(tcp_mask->hdr.dst_port));
            DUMP_PATTERN_ITEM(tcp_mask->hdr.tcp_flags, "flags", "0x%"PRIx8,
                              tcp_spec->hdr.tcp_flags,
                              tcp_mask->hdr.tcp_flags);
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
        const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;

        char addr_str[INET6_ADDRSTRLEN];
        char mask_str[INET6_ADDRSTRLEN];
        struct in6_addr addr, mask;

        ds_put_cstr(s, "ipv6 ");
        if (ipv6_spec) {
            if (!ipv6_mask) {
                ipv6_mask = &rte_flow_item_ipv6_mask;
            }
            memcpy(&addr, ipv6_spec->hdr.src_addr, sizeof addr);
            memcpy(&mask, ipv6_mask->hdr.src_addr, sizeof mask);
            ipv6_string_mapped(addr_str, &addr);
            ipv6_string_mapped(mask_str, &mask);
            DUMP_PATTERN_ITEM(mask, "src", "%s", addr_str, mask_str);

            memcpy(&addr, ipv6_spec->hdr.dst_addr, sizeof addr);
            memcpy(&mask, ipv6_mask->hdr.dst_addr, sizeof mask);
            ipv6_string_mapped(addr_str, &addr);
            ipv6_string_mapped(mask_str, &mask);
            DUMP_PATTERN_ITEM(mask, "dst", "%s", addr_str, mask_str);

            DUMP_PATTERN_ITEM(ipv6_mask->hdr.proto, "proto", "%"PRIu8,
                              ipv6_spec->hdr.proto, ipv6_mask->hdr.proto);
            DUMP_PATTERN_ITEM(ipv6_mask->hdr.vtc_flow, "tc", "0x%"PRIx32,
                              ntohl(ipv6_spec->hdr.vtc_flow),
                              ntohl(ipv6_mask->hdr.vtc_flow));
            DUMP_PATTERN_ITEM(ipv6_mask->hdr.hop_limits, "hop", "%"PRIu8,
                              ipv6_spec->hdr.hop_limits,
                              ipv6_mask->hdr.hop_limits);
        }
        ds_put_cstr(s, "/ ");
    } else {
        ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
    }
}

static void
dump_vxlan_encap(struct ds *s, const struct rte_flow_item *items)
{
    const struct rte_flow_item_eth *eth = NULL;
    const struct rte_flow_item_ipv4 *ipv4 = NULL;
    const struct rte_flow_item_ipv6 *ipv6 = NULL;
    const struct rte_flow_item_udp *udp = NULL;
    const struct rte_flow_item_vxlan *vxlan = NULL;

    for (; items && items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
        if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
            eth = items->spec;
        } else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
            ipv4 = items->spec;
        } else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
            ipv6 = items->spec;
        } else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
            udp = items->spec;
        } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
            vxlan = items->spec;
        }
    }

    ds_put_format(s, "set vxlan ip-version %s ",
                  ipv4 ? "ipv4" : ipv6 ? "ipv6" : "ERR");
    if (vxlan) {
        ds_put_format(s, "vni %"PRIu32" ",
                      ntohl(*(ovs_be32 *) vxlan->vni) >> 8);
    }
    if (udp) {
        ds_put_format(s, "udp-src %"PRIu16" udp-dst %"PRIu16" ",
                      ntohs(udp->hdr.src_port), ntohs(udp->hdr.dst_port));
    }
    if (ipv4) {
        ds_put_format(s, "ip-src "IP_FMT" ip-dst "IP_FMT" ",
                      IP_ARGS(ipv4->hdr.src_addr),
                      IP_ARGS(ipv4->hdr.dst_addr));
    }
    if (ipv6) {
        struct in6_addr addr;

        ds_put_cstr(s, "ip-src ");
        memcpy(&addr, ipv6->hdr.src_addr, sizeof addr);
        ipv6_format_mapped(&addr, s);
        ds_put_cstr(s, " ip-dst ");
        memcpy(&addr, ipv6->hdr.dst_addr, sizeof addr);
        ipv6_format_mapped(&addr, s);
        ds_put_cstr(s, " ");
    }
    if (eth) {
        ds_put_format(s, "eth-src "ETH_ADDR_FMT" eth-dst "ETH_ADDR_FMT,
                      ETH_ADDR_BYTES_ARGS(eth->src.addr_bytes),
                      ETH_ADDR_BYTES_ARGS(eth->dst.addr_bytes));
    }
}

static void
dump_flow_action(struct ds *s, struct ds *s_extra,
                 const struct rte_flow_action *actions)
{
    if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
        const struct rte_flow_action_mark *mark = actions->conf;

        ds_put_cstr(s, "mark ");
        if (mark) {
            ds_put_format(s, "id %d ", mark->id);
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
        ds_put_cstr(s, "rss / ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) {
        ds_put_cstr(s, "count / ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
        const struct rte_flow_action_port_id *port_id = actions->conf;

        ds_put_cstr(s, "port_id ");
        if (port_id) {
            ds_put_format(s, "original %d id %d ",
                          port_id->original, port_id->id);
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
        ds_put_cstr(s, "drop / ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ||
               actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_DST) {
        const struct rte_flow_action_set_mac *set_mac = actions->conf;

        char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_DST
                       ? "dst" : "src";

        ds_put_format(s, "set_mac_%s ", dirstr);
        if (set_mac) {
            ds_put_format(s, "mac_addr "ETH_ADDR_FMT" ",
                          ETH_ADDR_BYTES_ARGS(set_mac->mac_addr));
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ||
               actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_DST) {
        const struct rte_flow_action_set_ipv4 *set_ipv4 = actions->conf;
        char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
                       ? "dst" : "src";

        ds_put_format(s, "set_ipv4_%s ", dirstr);
        if (set_ipv4) {
            ds_put_format(s, "ipv4_addr "IP_FMT" ",
                          IP_ARGS(set_ipv4->ipv4_addr));
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_TTL) {
        const struct rte_flow_action_set_ttl *set_ttl = actions->conf;

        ds_put_cstr(s, "set_ttl ");
        if (set_ttl) {
            ds_put_format(s, "ttl_value %d ", set_ttl->ttl_value);
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ||
               actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_DST) {
        const struct rte_flow_action_set_tp *set_tp = actions->conf;
        char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_DST
                       ? "dst" : "src";

        ds_put_format(s, "set_tp_%s ", dirstr);
        if (set_tp) {
            ds_put_format(s, "port %"PRIu16" ", ntohs(set_tp->port));
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
        const struct rte_flow_action_of_push_vlan *of_push_vlan =
            actions->conf;

        ds_put_cstr(s, "of_push_vlan ");
        if (of_push_vlan) {
            ds_put_format(s, "ethertype 0x%"PRIx16" ",
                          ntohs(of_push_vlan->ethertype));
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
        const struct rte_flow_action_of_set_vlan_pcp *of_set_vlan_pcp =
            actions->conf;

        ds_put_cstr(s, "of_set_vlan_pcp ");
        if (of_set_vlan_pcp) {
            ds_put_format(s, "vlan_pcp %"PRIu8" ", of_set_vlan_pcp->vlan_pcp);
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
        const struct rte_flow_action_of_set_vlan_vid *of_set_vlan_vid =
            actions->conf;

        ds_put_cstr(s, "of_set_vlan_vid ");
        if (of_set_vlan_vid) {
            ds_put_format(s, "vlan_vid %"PRIu16" ",
                          ntohs(of_set_vlan_vid->vlan_vid));
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) {
        ds_put_cstr(s, "of_pop_vlan / ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ||
               actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_DST) {
        const struct rte_flow_action_set_ipv6 *set_ipv6 = actions->conf;

        char *dirstr = actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
                       ? "dst" : "src";

        ds_put_format(s, "set_ipv6_%s ", dirstr);
        if (set_ipv6) {
            ds_put_cstr(s, "ipv6_addr ");
            ipv6_format_addr((struct in6_addr *) &set_ipv6->ipv6_addr, s);
            ds_put_cstr(s, " ");
        }
        ds_put_cstr(s, "/ ");
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
        const struct rte_flow_action_raw_encap *raw_encap = actions->conf;

        ds_put_cstr(s, "raw_encap index 0 / ");
        if (raw_encap) {
            ds_put_format(s_extra, "Raw-encap size=%"PRIuSIZE" set "
                          "raw_encap 0 raw pattern is ", raw_encap->size);
            for (int i = 0; i < raw_encap->size; i++) {
                ds_put_format(s_extra, "%02x", raw_encap->data[i]);
            }
            ds_put_cstr(s_extra, " / end_set;");
        }
    } else if (actions->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
        const struct rte_flow_action_vxlan_encap *vxlan_encap = actions->conf;
        const struct rte_flow_item *items = vxlan_encap->definition;

        ds_put_cstr(s, "vxlan_encap / ");
        dump_vxlan_encap(s_extra, items);
        ds_put_cstr(s_extra, ";");
    } else {
        ds_put_format(s, "unknown rte flow action (%d)\n", actions->type);
    }
}

static struct ds *
dump_flow(struct ds *s, struct ds *s_extra,
          const struct rte_flow_attr *attr,
          const struct rte_flow_item *items,
          const struct rte_flow_action *actions)
{
    if (attr) {
        dump_flow_attr(s, attr);
    }
    ds_put_cstr(s, "pattern ");
    while (items && items->type != RTE_FLOW_ITEM_TYPE_END) {
        dump_flow_pattern(s, items++);
    }
    ds_put_cstr(s, "end actions ");
    while (actions && actions->type != RTE_FLOW_ACTION_TYPE_END) {
        dump_flow_action(s, s_extra, actions++);
    }
    ds_put_cstr(s, "end");
    return s;
}

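/* The output of dump_flow() follows the 'testpmd command'-like format used
 * by DUMP_PATTERN_ITEM above, presumably so that the "flow create ..." lines
 * logged below can be replayed in DPDK's testpmd when debugging. */
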
static struct rte_flow *
netdev_offload_dpdk_flow_create(struct netdev *netdev,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item *items,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
    struct ds s_extra = DS_EMPTY_INITIALIZER;
    struct ds s = DS_EMPTY_INITIALIZER;
    struct rte_flow *flow;
    char *extra_str;

    flow = netdev_dpdk_rte_flow_create(netdev, attr, items, actions, error);
    if (flow) {
        if (!VLOG_DROP_DBG(&rl)) {
            dump_flow(&s, &s_extra, attr, items, actions);
            extra_str = ds_cstr(&s_extra);
            VLOG_DBG_RL(&rl, "%s: rte_flow 0x%"PRIxPTR" %s flow create %d %s",
                        netdev_get_name(netdev), (intptr_t) flow, extra_str,
                        netdev_dpdk_get_port_id(netdev), ds_cstr(&s));
        }
    } else {
        enum vlog_level level = VLL_WARN;

        if (error->type == RTE_FLOW_ERROR_TYPE_ACTION) {
            level = VLL_DBG;
        }
        VLOG_RL(&rl, level, "%s: rte_flow creation failed: %d (%s).",
                netdev_get_name(netdev), error->type, error->message);
        if (!vlog_should_drop(&this_module, level, &rl)) {
            dump_flow(&s, &s_extra, attr, items, actions);
            extra_str = ds_cstr(&s_extra);
            VLOG_RL(&rl, level, "%s: Failed flow: %s flow create %d %s",
                    netdev_get_name(netdev), extra_str,
                    netdev_dpdk_get_port_id(netdev), ds_cstr(&s));
        }
    }
    ds_destroy(&s);
    ds_destroy(&s_extra);
    return flow;
}

static void
add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
                 const void *spec, const void *mask)
{
    int cnt = patterns->cnt;

    if (cnt == 0) {
        patterns->current_max = 8;
        patterns->items = xcalloc(patterns->current_max,
                                  sizeof *patterns->items);
    } else if (cnt == patterns->current_max) {
        patterns->current_max *= 2;
        patterns->items = xrealloc(patterns->items, patterns->current_max *
                                   sizeof *patterns->items);
    }

    patterns->items[cnt].type = type;
    patterns->items[cnt].spec = spec;
    patterns->items[cnt].mask = mask;
    patterns->items[cnt].last = NULL;
    patterns->cnt++;
}

static void
add_flow_action(struct flow_actions *actions, enum rte_flow_action_type type,
                const void *conf)
{
    int cnt = actions->cnt;

    if (cnt == 0) {
        actions->current_max = 8;
        actions->actions = xcalloc(actions->current_max,
                                   sizeof *actions->actions);
    } else if (cnt == actions->current_max) {
        actions->current_max *= 2;
        actions->actions = xrealloc(actions->actions, actions->current_max *
                                    sizeof *actions->actions);
    }

    actions->actions[cnt].type = type;
    actions->actions[cnt].conf = conf;
    actions->cnt++;
}

static void
free_flow_patterns(struct flow_patterns *patterns)
{
    int i;

    for (i = 0; i < patterns->cnt; i++) {
        if (patterns->items[i].spec) {
            free(CONST_CAST(void *, patterns->items[i].spec));
        }
        if (patterns->items[i].mask) {
            free(CONST_CAST(void *, patterns->items[i].mask));
        }
    }
    free(patterns->items);
    patterns->items = NULL;
    patterns->cnt = 0;
}

static void
free_flow_actions(struct flow_actions *actions)
{
    int i;

    for (i = 0; i < actions->cnt; i++) {
        if (actions->actions[i].conf) {
            free(CONST_CAST(void *, actions->actions[i].conf));
        }
    }
    free(actions->actions);
    actions->actions = NULL;
    actions->cnt = 0;
}

static int
parse_flow_match(struct flow_patterns *patterns,
                 struct match *match)
{
    struct flow *consumed_masks;
    uint8_t proto = 0;

    consumed_masks = &match->wc.masks;

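    /* As each match field is translated into an rte_flow item below, its
     * mask bits are cleared in 'consumed_masks'.  Any bits still set at the
     * end denote a field this offload layer cannot express, and the flow is
     * rejected (see the final is_all_zeros() check). */
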
    if (!flow_tnl_dst_is_set(&match->flow.tunnel)) {
        memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
    }

    memset(&consumed_masks->in_port, 0, sizeof consumed_masks->in_port);
    /* Recirc id must be zero. */
    if (match->wc.masks.recirc_id & match->flow.recirc_id) {
        return -1;
    }
    consumed_masks->recirc_id = 0;
    consumed_masks->packet_type = 0;

    /* Eth */
    if (match->wc.masks.dl_type ||
        !eth_addr_is_zero(match->wc.masks.dl_src) ||
        !eth_addr_is_zero(match->wc.masks.dl_dst)) {
        struct rte_flow_item_eth *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        memcpy(&spec->dst, &match->flow.dl_dst, sizeof spec->dst);
        memcpy(&spec->src, &match->flow.dl_src, sizeof spec->src);
        spec->type = match->flow.dl_type;

        memcpy(&mask->dst, &match->wc.masks.dl_dst, sizeof mask->dst);
        memcpy(&mask->src, &match->wc.masks.dl_src, sizeof mask->src);
        mask->type = match->wc.masks.dl_type;

        memset(&consumed_masks->dl_dst, 0, sizeof consumed_masks->dl_dst);
        memset(&consumed_masks->dl_src, 0, sizeof consumed_masks->dl_src);
        consumed_masks->dl_type = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, spec, mask);
    }

    /* VLAN */
    if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {
        struct rte_flow_item_vlan *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->tci = match->flow.vlans[0].tci & ~htons(VLAN_CFI);
        mask->tci = match->wc.masks.vlans[0].tci & ~htons(VLAN_CFI);

        /* Match any protocols. */
        mask->inner_type = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VLAN, spec, mask);
    }
    /* For untagged matching, match->wc.masks.vlans[0].tci is 0xFFFF and
     * match->flow.vlans[0].tci is 0.  Consuming is needed outside of the if
     * scope to handle that.
     */
    memset(&consumed_masks->vlans[0], 0, sizeof consumed_masks->vlans[0]);

    /* IP v4 */
    if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
        struct rte_flow_item_ipv4 *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.type_of_service = match->flow.nw_tos;
        spec->hdr.time_to_live = match->flow.nw_ttl;
        spec->hdr.next_proto_id = match->flow.nw_proto;
        spec->hdr.src_addr = match->flow.nw_src;
        spec->hdr.dst_addr = match->flow.nw_dst;

        mask->hdr.type_of_service = match->wc.masks.nw_tos;
        mask->hdr.time_to_live = match->wc.masks.nw_ttl;
        mask->hdr.next_proto_id = match->wc.masks.nw_proto;
        mask->hdr.src_addr = match->wc.masks.nw_src;
        mask->hdr.dst_addr = match->wc.masks.nw_dst;

        consumed_masks->nw_tos = 0;
        consumed_masks->nw_ttl = 0;
        consumed_masks->nw_proto = 0;
        consumed_masks->nw_src = 0;
        consumed_masks->nw_dst = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);

        /* Save proto for L4 protocol setup. */
        proto = spec->hdr.next_proto_id &
                mask->hdr.next_proto_id;
    }
    /* If fragmented, then don't HW accelerate - for now. */
    if (match->wc.masks.nw_frag & match->flow.nw_frag) {
        return -1;
    }
    consumed_masks->nw_frag = 0;

    /* IP v6 */
    if (match->flow.dl_type == htons(ETH_TYPE_IPV6)) {
        struct rte_flow_item_ipv6 *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.proto = match->flow.nw_proto;
        spec->hdr.hop_limits = match->flow.nw_ttl;
        spec->hdr.vtc_flow =
            htonl((uint32_t) match->flow.nw_tos << RTE_IPV6_HDR_TC_SHIFT);
        memcpy(spec->hdr.src_addr, &match->flow.ipv6_src,
               sizeof spec->hdr.src_addr);
        memcpy(spec->hdr.dst_addr, &match->flow.ipv6_dst,
               sizeof spec->hdr.dst_addr);

        mask->hdr.proto = match->wc.masks.nw_proto;
        mask->hdr.hop_limits = match->wc.masks.nw_ttl;
        mask->hdr.vtc_flow =
            htonl((uint32_t) match->wc.masks.nw_tos << RTE_IPV6_HDR_TC_SHIFT);
        memcpy(mask->hdr.src_addr, &match->wc.masks.ipv6_src,
               sizeof mask->hdr.src_addr);
        memcpy(mask->hdr.dst_addr, &match->wc.masks.ipv6_dst,
               sizeof mask->hdr.dst_addr);

        consumed_masks->nw_proto = 0;
        consumed_masks->nw_ttl = 0;
        consumed_masks->nw_tos = 0;
        memset(&consumed_masks->ipv6_src, 0, sizeof consumed_masks->ipv6_src);
        memset(&consumed_masks->ipv6_dst, 0, sizeof consumed_masks->ipv6_dst);

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV6, spec, mask);

        /* Save proto for L4 protocol setup. */
        proto = spec->hdr.proto & mask->hdr.proto;
    }

    if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP &&
        proto != IPPROTO_SCTP && proto != IPPROTO_TCP &&
        (match->wc.masks.tp_src ||
         match->wc.masks.tp_dst ||
         match->wc.masks.tcp_flags)) {
        VLOG_DBG("L4 Protocol (%u) not supported", proto);
        return -1;
    }

    if (proto == IPPROTO_TCP) {
        struct rte_flow_item_tcp *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.src_port = match->flow.tp_src;
        spec->hdr.dst_port = match->flow.tp_dst;
        spec->hdr.data_off = ntohs(match->flow.tcp_flags) >> 8;
        spec->hdr.tcp_flags = ntohs(match->flow.tcp_flags) & 0xff;

        mask->hdr.src_port = match->wc.masks.tp_src;
        mask->hdr.dst_port = match->wc.masks.tp_dst;
        mask->hdr.data_off = ntohs(match->wc.masks.tcp_flags) >> 8;
        mask->hdr.tcp_flags = ntohs(match->wc.masks.tcp_flags) & 0xff;

        consumed_masks->tp_src = 0;
        consumed_masks->tp_dst = 0;
        consumed_masks->tcp_flags = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_TCP, spec, mask);
    } else if (proto == IPPROTO_UDP) {
        struct rte_flow_item_udp *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.src_port = match->flow.tp_src;
        spec->hdr.dst_port = match->flow.tp_dst;

        mask->hdr.src_port = match->wc.masks.tp_src;
        mask->hdr.dst_port = match->wc.masks.tp_dst;

        consumed_masks->tp_src = 0;
        consumed_masks->tp_dst = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
    } else if (proto == IPPROTO_SCTP) {
        struct rte_flow_item_sctp *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.src_port = match->flow.tp_src;
        spec->hdr.dst_port = match->flow.tp_dst;

        mask->hdr.src_port = match->wc.masks.tp_src;
        mask->hdr.dst_port = match->wc.masks.tp_dst;

        consumed_masks->tp_src = 0;
        consumed_masks->tp_dst = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_SCTP, spec, mask);
    } else if (proto == IPPROTO_ICMP) {
        struct rte_flow_item_icmp *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.icmp_type = (uint8_t) ntohs(match->flow.tp_src);
        spec->hdr.icmp_code = (uint8_t) ntohs(match->flow.tp_dst);

        mask->hdr.icmp_type = (uint8_t) ntohs(match->wc.masks.tp_src);
        mask->hdr.icmp_code = (uint8_t) ntohs(match->wc.masks.tp_dst);

        consumed_masks->tp_src = 0;
        consumed_masks->tp_dst = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ICMP, spec, mask);
    }

    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);

    if (!is_all_zeros(consumed_masks, sizeof *consumed_masks)) {
        return -1;
    }
    return 0;
}

static void
add_flow_mark_rss_actions(struct flow_actions *actions,
                          uint32_t flow_mark,
                          const struct netdev *netdev)
{
    struct rte_flow_action_mark *mark;
    struct action_rss_data {
        struct rte_flow_action_rss conf;
        uint16_t queue[0];
    } *rss_data;
    BUILD_ASSERT_DECL(offsetof(struct action_rss_data, conf) == 0);
    int i;

    mark = xzalloc(sizeof *mark);

    mark->id = flow_mark;
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_MARK, mark);

    rss_data = xmalloc(sizeof *rss_data +
                       netdev_n_rxq(netdev) * sizeof rss_data->queue[0]);
    *rss_data = (struct action_rss_data) {
        .conf = (struct rte_flow_action_rss) {
            .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
            .level = 0,
            .types = 0,
            .queue_num = netdev_n_rxq(netdev),
            .queue = rss_data->queue,
            .key_len = 0,
            .key = NULL
        },
    };

    /* Override queue array with default. */
    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        rss_data->queue[i] = i;
    }

    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_RSS, &rss_data->conf);
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
}

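/* In add_flow_mark_rss_actions() above, 'conf' must be the first member of
 * 'struct action_rss_data' (checked by the BUILD_ASSERT_DECL): the RSS
 * action is registered via '&rss_data->conf', and free_flow_actions() later
 * frees that pointer, which releases the whole allocation including the
 * trailing queue array. */
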
static struct rte_flow *
netdev_offload_dpdk_mark_rss(struct flow_patterns *patterns,
                             struct netdev *netdev,
                             uint32_t flow_mark)
{
    struct flow_actions actions = { .actions = NULL, .cnt = 0 };
    const struct rte_flow_attr flow_attr = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0
    };
    struct rte_flow_error error;
    struct rte_flow *flow;

    add_flow_mark_rss_actions(&actions, flow_mark, netdev);

    flow = netdev_offload_dpdk_flow_create(netdev, &flow_attr, patterns->items,
                                           actions.actions, &error);

    free_flow_actions(&actions);
    return flow;
}

static void
add_count_action(struct flow_actions *actions)
{
    struct rte_flow_action_count *count = xzalloc(sizeof *count);

    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_COUNT, count);
}

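/* The COUNT action added above is what enables per-flow HW statistics: the
 * counter is queried later through netdev_dpdk_rte_flow_query_count() in
 * netdev_offload_dpdk_flow_get(). */
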
static int
add_port_id_action(struct flow_actions *actions,
                   struct netdev *outdev)
{
    struct rte_flow_action_port_id *port_id;
    int outdev_id;

    outdev_id = netdev_dpdk_get_port_id(outdev);
    if (outdev_id < 0) {
        return -1;
    }
    port_id = xzalloc(sizeof *port_id);
    port_id->id = outdev_id;
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_PORT_ID, port_id);
    return 0;
}

static int
add_output_action(struct netdev *netdev,
                  struct flow_actions *actions,
                  const struct nlattr *nla)
{
    struct netdev *outdev;
    odp_port_t port;
    int ret = 0;

    port = nl_attr_get_odp_port(nla);
    outdev = netdev_ports_get(port, netdev->dpif_type);
    if (outdev == NULL) {
        VLOG_DBG_RL(&rl, "Cannot find netdev for odp port %"PRIu32, port);
        return -1;
    }
    if (!netdev_flow_api_equals(netdev, outdev) ||
        add_port_id_action(actions, outdev)) {
        VLOG_DBG_RL(&rl, "%s: Output to port \'%s\' cannot be offloaded.",
                    netdev_get_name(netdev), netdev_get_name(outdev));
        ret = -1;
    }
    netdev_close(outdev);
    return ret;
}

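/* Output is offloaded above only when the destination netdev uses the same
 * flow API as the source and has a valid DPDK port id for the PORT_ID
 * action; otherwise the offload is refused. */
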
static int
add_set_flow_action__(struct flow_actions *actions,
                      const void *value, void *mask,
                      const size_t size, const int attr)
{
    void *spec;

    if (mask) {
        /* DPDK does not support partially masked set actions. In such
         * case, fail the offload.
         */
        if (is_all_zeros(mask, size)) {
            return 0;
        }
        if (!is_all_ones(mask, size)) {
            VLOG_DBG_RL(&rl, "Partial mask is not supported");
            return -1;
        }
    }

    spec = xzalloc(size);
    memcpy(spec, value, size);
    add_flow_action(actions, attr, spec);

    /* Clear used mask for later checking. */
    if (mask) {
        memset(mask, 0, size);
    }
    return 0;
}

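/* The asserts below guarantee that each rte_flow set-action configuration
 * struct has exactly the size of the corresponding OVS key field, so that
 * add_set_flow_action__() can copy the value verbatim. */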
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_mac) ==
                  MEMBER_SIZEOF(struct ovs_key_ethernet, eth_src));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_mac) ==
                  MEMBER_SIZEOF(struct ovs_key_ethernet, eth_dst));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv4) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_src));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv4) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_dst));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ttl) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv4, ipv4_ttl));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv6) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv6, ipv6_src));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ipv6) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv6, ipv6_dst));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_ttl) ==
                  MEMBER_SIZEOF(struct ovs_key_ipv6, ipv6_hlimit));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
                  MEMBER_SIZEOF(struct ovs_key_tcp, tcp_src));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
                  MEMBER_SIZEOF(struct ovs_key_tcp, tcp_dst));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
                  MEMBER_SIZEOF(struct ovs_key_udp, udp_src));
BUILD_ASSERT_DECL(sizeof(struct rte_flow_action_set_tp) ==
                  MEMBER_SIZEOF(struct ovs_key_udp, udp_dst));

static int
parse_set_actions(struct flow_actions *actions,
                  const struct nlattr *set_actions,
                  const size_t set_actions_len,
                  bool masked)
{
    const struct nlattr *sa;
    unsigned int sleft;

#define add_set_flow_action(field, type) \
    if (add_set_flow_action__(actions, &key->field, \
                              mask ? CONST_CAST(void *, &mask->field) : NULL, \
                              sizeof key->field, type)) { \
        return -1; \
    }

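    /* For OVS_ACTION_ATTR_SET_MASKED, each nested attribute carries the key
     * immediately followed by an equally sized mask, hence the
     * 'mask = key + 1' pointer arithmetic in the branches below. */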
    NL_ATTR_FOR_EACH_UNSAFE (sa, sleft, set_actions, set_actions_len) {
        if (nl_attr_type(sa) == OVS_KEY_ATTR_ETHERNET) {
            const struct ovs_key_ethernet *key = nl_attr_get(sa);
            const struct ovs_key_ethernet *mask = masked ? key + 1 : NULL;

            add_set_flow_action(eth_src, RTE_FLOW_ACTION_TYPE_SET_MAC_SRC);
            add_set_flow_action(eth_dst, RTE_FLOW_ACTION_TYPE_SET_MAC_DST);

            if (mask && !is_all_zeros(mask, sizeof *mask)) {
                VLOG_DBG_RL(&rl, "Unsupported ETHERNET set action");
                return -1;
            }
        } else if (nl_attr_type(sa) == OVS_KEY_ATTR_IPV4) {
            const struct ovs_key_ipv4 *key = nl_attr_get(sa);
            const struct ovs_key_ipv4 *mask = masked ? key + 1 : NULL;

            add_set_flow_action(ipv4_src, RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC);
            add_set_flow_action(ipv4_dst, RTE_FLOW_ACTION_TYPE_SET_IPV4_DST);
            add_set_flow_action(ipv4_ttl, RTE_FLOW_ACTION_TYPE_SET_TTL);

            if (mask && !is_all_zeros(mask, sizeof *mask)) {
                VLOG_DBG_RL(&rl, "Unsupported IPv4 set action");
                return -1;
            }
        } else if (nl_attr_type(sa) == OVS_KEY_ATTR_IPV6) {
            const struct ovs_key_ipv6 *key = nl_attr_get(sa);
            const struct ovs_key_ipv6 *mask = masked ? key + 1 : NULL;

            add_set_flow_action(ipv6_src, RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC);
            add_set_flow_action(ipv6_dst, RTE_FLOW_ACTION_TYPE_SET_IPV6_DST);
            add_set_flow_action(ipv6_hlimit, RTE_FLOW_ACTION_TYPE_SET_TTL);

            if (mask && !is_all_zeros(mask, sizeof *mask)) {
                VLOG_DBG_RL(&rl, "Unsupported IPv6 set action");
                return -1;
            }
        } else if (nl_attr_type(sa) == OVS_KEY_ATTR_TCP) {
            const struct ovs_key_tcp *key = nl_attr_get(sa);
            const struct ovs_key_tcp *mask = masked ? key + 1 : NULL;

            add_set_flow_action(tcp_src, RTE_FLOW_ACTION_TYPE_SET_TP_SRC);
            add_set_flow_action(tcp_dst, RTE_FLOW_ACTION_TYPE_SET_TP_DST);

            if (mask && !is_all_zeros(mask, sizeof *mask)) {
                VLOG_DBG_RL(&rl, "Unsupported TCP set action");
                return -1;
            }
        } else if (nl_attr_type(sa) == OVS_KEY_ATTR_UDP) {
            const struct ovs_key_udp *key = nl_attr_get(sa);
            const struct ovs_key_udp *mask = masked ? key + 1 : NULL;

            add_set_flow_action(udp_src, RTE_FLOW_ACTION_TYPE_SET_TP_SRC);
            add_set_flow_action(udp_dst, RTE_FLOW_ACTION_TYPE_SET_TP_DST);

            if (mask && !is_all_zeros(mask, sizeof *mask)) {
                VLOG_DBG_RL(&rl, "Unsupported UDP set action");
                return -1;
            }
        } else {
            VLOG_DBG_RL(&rl,
                        "Unsupported set action type %d", nl_attr_type(sa));
            return -1;
        }
    }

    return 0;
}

/* Maximum number of items in struct rte_flow_action_vxlan_encap.
 * ETH / IPv4(6) / UDP / VXLAN / END
 */
#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5

static int
add_vxlan_encap_action(struct flow_actions *actions,
                       const void *header)
{
    const struct eth_header *eth;
    const struct udp_header *udp;
    struct vxlan_data {
        struct rte_flow_action_vxlan_encap conf;
        struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
    } *vxlan_data;
    BUILD_ASSERT_DECL(offsetof(struct vxlan_data, conf) == 0);
    const void *vxlan;
    const void *l3;
    const void *l4;
    int field;

    vxlan_data = xzalloc(sizeof *vxlan_data);
    field = 0;

    eth = header;
    /* Ethernet */
    vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_ETH;
    vxlan_data->items[field].spec = eth;
    vxlan_data->items[field].mask = &rte_flow_item_eth_mask;
    field++;

    l3 = eth + 1;
    /* IP */
    if (eth->eth_type == htons(ETH_TYPE_IP)) {
        /* IPv4 */
        const struct ip_header *ip = l3;

        vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_IPV4;
        vxlan_data->items[field].spec = ip;
        vxlan_data->items[field].mask = &rte_flow_item_ipv4_mask;

        if (ip->ip_proto != IPPROTO_UDP) {
            goto err;
        }
        l4 = (ip + 1);
    } else if (eth->eth_type == htons(ETH_TYPE_IPV6)) {
        const struct ovs_16aligned_ip6_hdr *ip6 = l3;

        vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_IPV6;
        vxlan_data->items[field].spec = ip6;
        vxlan_data->items[field].mask = &rte_flow_item_ipv6_mask;

        if (ip6->ip6_nxt != IPPROTO_UDP) {
            goto err;
        }
        l4 = (ip6 + 1);
    } else {
        goto err;
    }
    field++;

    udp = l4;
    vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_UDP;
    vxlan_data->items[field].spec = udp;
    vxlan_data->items[field].mask = &rte_flow_item_udp_mask;
    field++;

    vxlan = (udp + 1);
    vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_VXLAN;
    vxlan_data->items[field].spec = vxlan;
    vxlan_data->items[field].mask = &rte_flow_item_vxlan_mask;
    field++;

    vxlan_data->items[field].type = RTE_FLOW_ITEM_TYPE_END;

    vxlan_data->conf.definition = vxlan_data->items;

    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, vxlan_data);

    return 0;
err:
    free(vxlan_data);
    return -1;
}

static int
parse_vlan_push_action(struct flow_actions *actions,
                       const struct ovs_action_push_vlan *vlan_push)
{
    struct rte_flow_action_of_push_vlan *rte_push_vlan;
    struct rte_flow_action_of_set_vlan_pcp *rte_vlan_pcp;
    struct rte_flow_action_of_set_vlan_vid *rte_vlan_vid;

    rte_push_vlan = xzalloc(sizeof *rte_push_vlan);
    rte_push_vlan->ethertype = vlan_push->vlan_tpid;
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, rte_push_vlan);

    rte_vlan_pcp = xzalloc(sizeof *rte_vlan_pcp);
    rte_vlan_pcp->vlan_pcp = vlan_tci_to_pcp(vlan_push->vlan_tci);
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
                    rte_vlan_pcp);

    rte_vlan_vid = xzalloc(sizeof *rte_vlan_vid);
    rte_vlan_vid->vlan_vid = htons(vlan_tci_to_vid(vlan_push->vlan_tci));
    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
                    rte_vlan_vid);
    return 0;
}

static int
parse_clone_actions(struct netdev *netdev,
                    struct flow_actions *actions,
                    const struct nlattr *clone_actions,
                    const size_t clone_actions_len)
{
    const struct nlattr *ca;
    unsigned int cleft;

    NL_ATTR_FOR_EACH_UNSAFE (ca, cleft, clone_actions, clone_actions_len) {
        int clone_type = nl_attr_type(ca);

        if (clone_type == OVS_ACTION_ATTR_TUNNEL_PUSH) {
            const struct ovs_action_push_tnl *tnl_push = nl_attr_get(ca);
            struct rte_flow_action_raw_encap *raw_encap;

            if (tnl_push->tnl_type == OVS_VPORT_TYPE_VXLAN &&
                !add_vxlan_encap_action(actions, tnl_push->header)) {
                continue;
            }

            raw_encap = xzalloc(sizeof *raw_encap);
            raw_encap->data = (uint8_t *) tnl_push->header;
            raw_encap->preserve = NULL;
            raw_encap->size = tnl_push->header_len;

            add_flow_action(actions, RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                            raw_encap);
        } else if (clone_type == OVS_ACTION_ATTR_OUTPUT) {
            if (add_output_action(netdev, actions, ca)) {
                return -1;
            }
        } else {
            VLOG_DBG_RL(&rl,
                        "Unsupported nested action inside clone(), "
                        "action type: %d", clone_type);
            return -1;
        }
    }
    return 0;
}

static int
parse_flow_actions(struct netdev *netdev,
                   struct flow_actions *actions,
                   struct nlattr *nl_actions,
                   size_t nl_actions_len)
{
    struct nlattr *nla;
    size_t left;

    add_count_action(actions);
    NL_ATTR_FOR_EACH_UNSAFE (nla, left, nl_actions, nl_actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            if (add_output_action(netdev, actions, nla)) {
                return -1;
            }
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_DROP) {
            add_flow_action(actions, RTE_FLOW_ACTION_TYPE_DROP, NULL);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET ||
                   nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
            const struct nlattr *set_actions = nl_attr_get(nla);
            const size_t set_actions_len = nl_attr_get_size(nla);
            bool masked = nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED;

            if (parse_set_actions(actions, set_actions, set_actions_len,
                                  masked)) {
                return -1;
            }
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
            const struct ovs_action_push_vlan *vlan = nl_attr_get(nla);

            if (parse_vlan_push_action(actions, vlan)) {
                return -1;
            }
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
            add_flow_action(actions, RTE_FLOW_ACTION_TYPE_OF_POP_VLAN, NULL);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CLONE &&
                   left <= NLA_ALIGN(nla->nla_len)) {
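            /* A clone() is offloaded only when it is the last action in the
             * list: 'left' still includes this attribute, so the condition
             * above holds exactly when nothing follows the clone. */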
            const struct nlattr *clone_actions = nl_attr_get(nla);
            size_t clone_actions_len = nl_attr_get_size(nla);

            if (parse_clone_actions(netdev, actions, clone_actions,
                                    clone_actions_len)) {
                return -1;
            }
        } else {
            VLOG_DBG_RL(&rl, "Unsupported action type %d", nl_attr_type(nla));
            return -1;
        }
    }

    if (nl_actions_len == 0) {
        VLOG_DBG_RL(&rl, "No actions provided");
        return -1;
    }

    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
    return 0;
}

static struct rte_flow *
netdev_offload_dpdk_actions(struct netdev *netdev,
                            struct flow_patterns *patterns,
                            struct nlattr *nl_actions,
                            size_t actions_len)
{
    const struct rte_flow_attr flow_attr = { .ingress = 1, .transfer = 1 };
    struct flow_actions actions = { .actions = NULL, .cnt = 0 };
    struct rte_flow *flow = NULL;
    struct rte_flow_error error;
    int ret;

    ret = parse_flow_actions(netdev, &actions, nl_actions, actions_len);
    if (ret) {
        goto out;
    }
    flow = netdev_offload_dpdk_flow_create(netdev, &flow_attr, patterns->items,
                                           actions.actions, &error);
out:
    free_flow_actions(&actions);
    return flow;
}

static struct ufid_to_rte_flow_data *
netdev_offload_dpdk_add_flow(struct netdev *netdev,
                             struct match *match,
                             struct nlattr *nl_actions,
                             size_t actions_len,
                             const ovs_u128 *ufid,
                             struct offload_info *info)
{
    struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
    struct ufid_to_rte_flow_data *flows_data = NULL;
    bool actions_offloaded = true;
    struct rte_flow *flow;

    if (parse_flow_match(&patterns, match)) {
        VLOG_DBG_RL(&rl, "%s: matches of ufid "UUID_FMT" are not supported",
                    netdev_get_name(netdev), UUID_ARGS((struct uuid *) ufid));
        goto out;
    }

    flow = netdev_offload_dpdk_actions(netdev, &patterns, nl_actions,
                                       actions_len);
    if (!flow) {
        /* If we failed to offload the rule actions, fall back to MARK+RSS
         * actions (partial offload): the match is still classified in HW,
         * while the actions are executed in software.
         */
        flow = netdev_offload_dpdk_mark_rss(&patterns, netdev,
                                            info->flow_mark);
        actions_offloaded = false;
    }

    if (!flow) {
        goto out;
    }
    flows_data = ufid_to_rte_flow_associate(ufid, netdev, flow,
                                            actions_offloaded);
    VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT,
             netdev_get_name(netdev), flow, UUID_ARGS((struct uuid *) ufid));

out:
    free_flow_patterns(&patterns);
    return flows_data;
}

static int
netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
{
    struct rte_flow_error error;
    struct rte_flow *rte_flow;
    struct netdev *netdev;
    ovs_u128 *ufid;
    int ret;

    rte_flow = rte_flow_data->rte_flow;
    netdev = rte_flow_data->netdev;
    ufid = &rte_flow_data->ufid;

    ret = netdev_dpdk_rte_flow_destroy(netdev, rte_flow, &error);

    if (ret == 0) {
        ufid_to_rte_flow_disassociate(rte_flow_data);
        VLOG_DBG_RL(&rl, "%s: rte_flow 0x%"PRIxPTR
                    " flow destroy %d ufid " UUID_FMT,
                    netdev_get_name(netdev), (intptr_t) rte_flow,
                    netdev_dpdk_get_port_id(netdev),
                    UUID_ARGS((struct uuid *) ufid));
    } else {
        VLOG_ERR("Failed flow: %s: flow destroy %d ufid " UUID_FMT,
                 netdev_get_name(netdev), netdev_dpdk_get_port_id(netdev),
                 UUID_ARGS((struct uuid *) ufid));
    }

    return ret;
}

static int
netdev_offload_dpdk_flow_put(struct netdev *netdev, struct match *match,
                             struct nlattr *actions, size_t actions_len,
                             const ovs_u128 *ufid, struct offload_info *info,
                             struct dpif_flow_stats *stats)
{
    struct ufid_to_rte_flow_data *rte_flow_data;
    struct dpif_flow_stats old_stats;
    bool modification = false;
    int ret;

    /*
     * If an old rte_flow exists, it means it's a flow modification.
     * Here destroy the old rte flow first before adding a new one.
     * Keep the stats for the newly created rule.
     */
    rte_flow_data = ufid_to_rte_flow_data_find(ufid, false);
    if (rte_flow_data && rte_flow_data->rte_flow) {
        old_stats = rte_flow_data->stats;
        modification = true;
        ret = netdev_offload_dpdk_flow_destroy(rte_flow_data);
        if (ret < 0) {
            return ret;
        }
    }

    rte_flow_data = netdev_offload_dpdk_add_flow(netdev, match, actions,
                                                 actions_len, ufid, info);
    if (!rte_flow_data) {
        return -1;
    }
    if (modification) {
        rte_flow_data->stats = old_stats;
    }
    if (stats) {
        *stats = rte_flow_data->stats;
    }
    return 0;
}

static int
netdev_offload_dpdk_flow_del(struct netdev *netdev OVS_UNUSED,
                             const ovs_u128 *ufid,
                             struct dpif_flow_stats *stats)
{
    struct ufid_to_rte_flow_data *rte_flow_data;

    rte_flow_data = ufid_to_rte_flow_data_find(ufid, true);
    if (!rte_flow_data || !rte_flow_data->rte_flow) {
        return -1;
    }

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }
    return netdev_offload_dpdk_flow_destroy(rte_flow_data);
}

static int
netdev_offload_dpdk_init_flow_api(struct netdev *netdev)
{
    return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
}

static int
netdev_offload_dpdk_flow_get(struct netdev *netdev,
                             struct match *match OVS_UNUSED,
                             struct nlattr **actions OVS_UNUSED,
                             const ovs_u128 *ufid,
                             struct dpif_flow_stats *stats,
                             struct dpif_flow_attrs *attrs,
                             struct ofpbuf *buf OVS_UNUSED)
{
    struct rte_flow_query_count query = { .reset = 1 };
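    /* '.reset = 1' makes the HW counter clear on each read, so every query
     * returns only the delta since the previous one; that delta is
     * accumulated into 'rte_flow_data->stats' below. */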
    struct ufid_to_rte_flow_data *rte_flow_data;
    struct rte_flow_error error;
    int ret = 0;

    rte_flow_data = ufid_to_rte_flow_data_find(ufid, false);
    if (!rte_flow_data || !rte_flow_data->rte_flow) {
        ret = -1;
        goto out;
    }

    attrs->offloaded = true;
    if (!rte_flow_data->actions_offloaded) {
        attrs->dp_layer = "ovs";
        memset(stats, 0, sizeof *stats);
        goto out;
    }
    attrs->dp_layer = "dpdk";
    ret = netdev_dpdk_rte_flow_query_count(netdev, rte_flow_data->rte_flow,
                                           &query, &error);
    if (ret) {
        VLOG_DBG_RL(&rl, "%s: Failed to query ufid "UUID_FMT" flow: %p",
                    netdev_get_name(netdev), UUID_ARGS((struct uuid *) ufid),
                    rte_flow_data->rte_flow);
        goto out;
    }
    rte_flow_data->stats.n_packets += (query.hits_set) ? query.hits : 0;
    rte_flow_data->stats.n_bytes += (query.bytes_set) ? query.bytes : 0;
    if (query.hits_set && query.hits) {
        rte_flow_data->stats.used = time_msec();
    }
    memcpy(stats, &rte_flow_data->stats, sizeof *stats);
out:
    attrs->dp_extra_info = NULL;
    return ret;
}

static int
netdev_offload_dpdk_flow_flush(struct netdev *netdev)
{
    struct ufid_to_rte_flow_data *data;

    CMAP_FOR_EACH (data, node, &ufid_to_rte_flow) {
        if (data->netdev != netdev) {
            continue;
        }

        netdev_offload_dpdk_flow_destroy(data);
    }

    return 0;
}

const struct netdev_flow_api netdev_offload_dpdk = {
    .type = "dpdk_flow_api",
    .flow_put = netdev_offload_dpdk_flow_put,
    .flow_del = netdev_offload_dpdk_flow_del,
    .init_flow_api = netdev_offload_dpdk_init_flow_api,
    .flow_get = netdev_offload_dpdk_flow_get,
    .flow_flush = netdev_offload_dpdk_flow_flush,
};