/*
 * Copyright (c) 2016 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netdev-tc-offloads.h"
#include <errno.h>
#include <linux/if_ether.h>
#include "openvswitch/hmap.h"
#include "openvswitch/match.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/thread.h"
#include "openvswitch/types.h"
#include "openvswitch/vlog.h"
#include "netdev-provider.h"
#include "netlink.h"
#include "netlink-socket.h"
#include "odp-netlink.h"
#include "unaligned.h"
#include "util.h"
#include "hash.h"
#include "dpif.h"
#include "tc.h"
#include "netdev-linux.h"

VLOG_DEFINE_THIS_MODULE(netdev_tc_offloads);

static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);

static struct hmap ufid_tc = HMAP_INITIALIZER(&ufid_tc);
static struct ovs_mutex ufid_lock = OVS_MUTEX_INITIALIZER;

/**
 * struct ufid_tc_data - data entry for ufid_tc hmap.
 * @ufid_node: Element in @ufid_tc hash table by ufid key.
 * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
 * @ufid: ufid assigned to the flow.
 * @prio: tc priority.
 * @handle: tc handle.
 * @ifindex: netdev ifindex.
 * @netdev: netdev associated with the tc rule.
 */
struct ufid_tc_data {
    struct hmap_node ufid_node;
    struct hmap_node tc_node;
    ovs_u128 ufid;
    uint16_t prio;
    uint32_t handle;
    int ifindex;
    struct netdev *netdev;
};

/* Remove matching ufid entry from ufid_tc hashmap. */
static void
del_ufid_tc_mapping(const ovs_u128 *ufid)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_tc_data *data;

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            break;
        }
    }

    if (!data) {
        ovs_mutex_unlock(&ufid_lock);
        return;
    }

    hmap_remove(&ufid_tc, &data->ufid_node);
    hmap_remove(&ufid_tc, &data->tc_node);
    netdev_close(data->netdev);
    free(data);
    ovs_mutex_unlock(&ufid_lock);
}

/* Add ufid entry to ufid_tc hashmap.
 * If an entry for this ufid already exists, it is replaced. */
static void
add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
                    struct netdev *netdev, int ifindex)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
    struct ufid_tc_data *new_data = xzalloc(sizeof *new_data);

    del_ufid_tc_mapping(ufid);

    new_data->ufid = *ufid;
    new_data->prio = prio;
    new_data->handle = handle;
    new_data->netdev = netdev_ref(netdev);
    new_data->ifindex = ifindex;

    ovs_mutex_lock(&ufid_lock);
    hmap_insert(&ufid_tc, &new_data->ufid_node, ufid_hash);
    hmap_insert(&ufid_tc, &new_data->tc_node, tc_hash);
    ovs_mutex_unlock(&ufid_lock);
}

/* Get the tc handle for a ufid from the ufid_tc hashmap.
 *
 * If the netdev output parameter is not NULL, then on success the
 * associated netdev is returned through it with a reference taken.
 * The caller is then responsible for closing the netdev.
 *
 * Returns the handle on success, filling in prio and netdev for that
 * ufid.  Otherwise returns 0.
 */
static int
get_ufid_tc_mapping(const ovs_u128 *ufid, int *prio, struct netdev **netdev)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_tc_data *data;
    int handle = 0;

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            if (prio) {
                *prio = data->prio;
            }
            if (netdev) {
                *netdev = netdev_ref(data->netdev);
            }
            handle = data->handle;
            break;
        }
    }
    ovs_mutex_unlock(&ufid_lock);

    return handle;
}

/* Find ufid entry in ufid_tc hashmap using prio, handle and netdev.
 * The result is saved in ufid.
 *
 * Returns true on success.
 */
static bool
find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
{
    int ifindex = netdev_get_ifindex(netdev);
    struct ufid_tc_data *data;
    size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, tc_node, tc_hash, &ufid_tc) {
        if (data->prio == prio && data->handle == handle
            && data->ifindex == ifindex) {
            *ufid = data->ufid;
            break;
        }
    }
    ovs_mutex_unlock(&ufid_lock);

    return (data != NULL);
}

struct prio_map_data {
    struct hmap_node node;
    struct tc_flower_key mask;
    ovs_be16 protocol;
    uint16_t prio;
};

/* Get a free prio for a tc flower filter.
 * If a prio is already allocated for this mask/eth_type combination,
 * return it.  Otherwise assign a new one.
 *
 * Returns the prio on success or 0 if we are out of prios.
 */
static uint16_t
get_prio_for_tc_flower(struct tc_flower *flower)
{
    static struct hmap prios = HMAP_INITIALIZER(&prios);
    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
    static uint16_t last_prio = 0;
    size_t key_len = sizeof(struct tc_flower_key);
    size_t hash = hash_bytes(&flower->mask, key_len,
                             (OVS_FORCE uint32_t) flower->key.eth_type);
    struct prio_map_data *data;
    struct prio_map_data *new_data;

    /* We can use the same prio for the same mask/eth_type combination but
     * need a different prio otherwise: the flower classifier rejects
     * filters that share a prio but differ in mask/eth_type. */
    ovs_mutex_lock(&prios_lock);
    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
        if (!memcmp(&flower->mask, &data->mask, key_len)
            && data->protocol == flower->key.eth_type) {
            ovs_mutex_unlock(&prios_lock);
            return data->prio;
        }
    }

    if (last_prio == UINT16_MAX) {
        /* last_prio can overflow only if there are many different kinds of
         * flows, which shouldn't happen organically. */
        ovs_mutex_unlock(&prios_lock);
        return 0;
    }

    new_data = xzalloc(sizeof *new_data);
    memcpy(&new_data->mask, &flower->mask, key_len);
    new_data->prio = ++last_prio;
    new_data->protocol = flower->key.eth_type;
    hmap_insert(&prios, &new_data->node, hash);
    ovs_mutex_unlock(&prios_lock);

    return new_data->prio;
}

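/* Flushes all offloaded tc filters on the given netdev.  Returns 0 on
 * success or a positive errno value on failure. */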
int
netdev_tc_flow_flush(struct netdev *netdev)
{
    int ifindex = netdev_get_ifindex(netdev);

    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    return tc_flush(ifindex);
}

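/* Starts a tc flower dump on the given netdev, returning the dump state
 * in '*dump_out'.  The caller must eventually release it with
 * netdev_tc_flow_dump_destroy(). */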
int
netdev_tc_flow_dump_create(struct netdev *netdev,
                           struct netdev_flow_dump **dump_out)
{
    struct netdev_flow_dump *dump;
    int ifindex;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    dump = xzalloc(sizeof *dump);
    dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
    dump->netdev = netdev_ref(netdev);
    tc_dump_flower_start(ifindex, dump->nl_dump);

    *dump_out = dump;

    return 0;
}

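/* Releases the netlink dump state and the netdev reference taken by
 * netdev_tc_flow_dump_create(). */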
int
netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
{
    nl_dump_done(dump->nl_dump);
    netdev_close(dump->netdev);
    free(dump->nl_dump);
    free(dump);
    return 0;
}

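/* Translates a dumped tc flower filter into an OVS match and a set of odp
 * actions (built into 'buf'), and fills 'stats' if it is nonnull.
 * Returns 0 on success or ENOENT if the filter's output ifindex cannot be
 * mapped to an odp port. */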
static int
parse_tc_flower_to_match(struct tc_flower *flower,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         struct ofpbuf *buf)
{
    size_t act_off;
    struct tc_flower_key *key = &flower->key;
    struct tc_flower_key *mask = &flower->mask;
    odp_port_t outport = 0;

    if (flower->ifindex_out) {
        outport = netdev_ifindex_to_odp_port(flower->ifindex_out);
        if (!outport) {
            return ENOENT;
        }
    }

    ofpbuf_clear(buf);

    match_init_catchall(match);
    match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
    match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);

    if (key->eth_type == htons(ETH_TYPE_VLAN)) {
        match_set_dl_vlan(match, htons(key->vlan_id));
        match_set_dl_vlan_pcp(match, key->vlan_prio);
        match_set_dl_type(match, key->encap_eth_type);
        flow_fix_vlan_tpid(&match->flow);
    } else {
        match_set_dl_type(match, key->eth_type);
    }

    if (key->ip_proto && is_ip_any(&match->flow)) {
        match_set_nw_proto(match, key->ip_proto);
    }

    match_set_nw_src_masked(match, key->ipv4.ipv4_src, mask->ipv4.ipv4_src);
    match_set_nw_dst_masked(match, key->ipv4.ipv4_dst, mask->ipv4.ipv4_dst);

    match_set_ipv6_src_masked(match,
                              &key->ipv6.ipv6_src, &mask->ipv6.ipv6_src);
    match_set_ipv6_dst_masked(match,
                              &key->ipv6.ipv6_dst, &mask->ipv6.ipv6_dst);

    match_set_tp_dst_masked(match, key->dst_port, mask->dst_port);
    match_set_tp_src_masked(match, key->src_port, mask->src_port);

    if (flower->tunnel.tunnel) {
        match_set_tun_id(match, flower->tunnel.id);
        if (flower->tunnel.ipv4.ipv4_dst) {
            match_set_tun_src(match, flower->tunnel.ipv4.ipv4_src);
            match_set_tun_dst(match, flower->tunnel.ipv4.ipv4_dst);
        } else if (!is_all_zeros(&flower->tunnel.ipv6.ipv6_dst,
                                 sizeof flower->tunnel.ipv6.ipv6_dst)) {
            match_set_tun_ipv6_src(match, &flower->tunnel.ipv6.ipv6_src);
            match_set_tun_ipv6_dst(match, &flower->tunnel.ipv6.ipv6_dst);
        }
        if (flower->tunnel.tp_dst) {
            match_set_tun_tp_dst(match, flower->tunnel.tp_dst);
        }
    }

    act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
    {
        if (flower->vlan_pop) {
            nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN);
        }

        if (flower->vlan_push_id || flower->vlan_push_prio) {
            struct ovs_action_push_vlan *push;
            push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN,
                                          sizeof *push);

            push->vlan_tpid = htons(ETH_TYPE_VLAN);
            push->vlan_tci = htons(flower->vlan_push_id
                                   | (flower->vlan_push_prio << 13)
                                   | VLAN_CFI);
        }

        if (flower->set.set) {
            size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET);
            size_t tunnel_offset =
                nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL);

            nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, flower->set.id);
            if (flower->set.ipv4.ipv4_src) {
                nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
                                flower->set.ipv4.ipv4_src);
            }
            if (flower->set.ipv4.ipv4_dst) {
                nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
                                flower->set.ipv4.ipv4_dst);
            }
            if (!is_all_zeros(&flower->set.ipv6.ipv6_src,
                              sizeof flower->set.ipv6.ipv6_src)) {
                nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
                                    &flower->set.ipv6.ipv6_src);
            }
            if (!is_all_zeros(&flower->set.ipv6.ipv6_dst,
                              sizeof flower->set.ipv6.ipv6_dst)) {
                nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
                                    &flower->set.ipv6.ipv6_dst);
            }
            nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST,
                            flower->set.tp_dst);

            nl_msg_end_nested(buf, tunnel_offset);
            nl_msg_end_nested(buf, set_offset);
        }

        if (flower->ifindex_out > 0) {
            nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
        }
    }
    nl_msg_end_nested(buf, act_off);

    *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));

    if (stats) {
        memset(stats, 0, sizeof *stats);
        stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
        stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
        stats->used = flower->lastused;
    }

    return 0;
}

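/* Fetches the next tc filter from the dump and converts it into a match,
 * actions, stats and ufid.  Filters that cannot be parsed, or whose ufid
 * cannot be recovered, are skipped.  Returns true if a flow was filled in,
 * false when the dump is exhausted. */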
bool
netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         ovs_u128 *ufid,
                         struct ofpbuf *rbuffer,
                         struct ofpbuf *wbuffer)
{
    struct ofpbuf nl_flow;

    while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
        struct tc_flower flower;
        struct netdev *netdev = dump->netdev;

        if (parse_netlink_to_tc_flower(&nl_flow, &flower)) {
            continue;
        }

        if (parse_tc_flower_to_match(&flower, match, actions, stats,
                                     wbuffer)) {
            continue;
        }

        if (flower.act_cookie.len) {
            *ufid = *((ovs_u128 *) flower.act_cookie.data);
        } else if (!find_ufid(flower.prio, flower.handle, netdev, ufid)) {
            continue;
        }

        match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
        match->flow.in_port.odp_port = dump->port;

        return true;
    }

    return false;
}

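/* Parses an OVS_ACTION_ATTR_SET netlink attribute into the tunnel set
 * fields of 'flower'.  Only OVS_KEY_ATTR_TUNNEL is supported; any other
 * set action yields EOPNOTSUPP. */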
static int
parse_put_flow_set_action(struct tc_flower *flower, const struct nlattr *set,
                          size_t set_len)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct nlattr *set_attr;
    size_t set_left;

    NL_ATTR_FOR_EACH_UNSAFE(set_attr, set_left, set, set_len) {
        if (nl_attr_type(set_attr) == OVS_KEY_ATTR_TUNNEL) {
            const struct nlattr *tunnel = nl_attr_get(set_attr);
            const size_t tunnel_len = nl_attr_get_size(set_attr);
            const struct nlattr *tun_attr;
            size_t tun_left;

            flower->set.set = true;
            NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
                switch (nl_attr_type(tun_attr)) {
                case OVS_TUNNEL_KEY_ATTR_ID:
                    flower->set.id = nl_attr_get_be64(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
                    flower->set.ipv4.ipv4_src = nl_attr_get_be32(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
                    flower->set.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
                    flower->set.ipv6.ipv6_src =
                        nl_attr_get_in6_addr(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
                    flower->set.ipv6.ipv6_dst =
                        nl_attr_get_in6_addr(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_TP_SRC:
                    flower->set.tp_src = nl_attr_get_be16(tun_attr);
                    break;
                case OVS_TUNNEL_KEY_ATTR_TP_DST:
                    flower->set.tp_dst = nl_attr_get_be16(tun_attr);
                    break;
                }
            }
        } else {
            VLOG_DBG_RL(&rl, "unsupported set action type: %d",
                        nl_attr_type(set_attr));
            return EOPNOTSUPP;
        }
    }
    return 0;
}

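/* Checks the match against attributes that tc flower cannot offload.
 * Returns 0 if every masked attribute is supported, EOPNOTSUPP
 * otherwise. */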
static int
test_key_and_mask(struct match *match)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;

    if (mask->pkt_mark) {
        VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->recirc_id && key->recirc_id) {
        VLOG_DBG_RL(&rl, "offloading attribute recirc_id isn't supported");
        return EOPNOTSUPP;
    }
    mask->recirc_id = 0;

    if (mask->dp_hash) {
        VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->conj_id) {
        VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->skb_priority) {
        VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->actset_output) {
        VLOG_DBG_RL(&rl,
                    "offloading attribute actset_output isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_state) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_state isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_zone) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_mark) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_mark isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->packet_type && key->packet_type) {
        VLOG_DBG_RL(&rl, "offloading attribute packet_type isn't supported");
        return EOPNOTSUPP;
    }
    mask->packet_type = 0;

    if (!ovs_u128_is_zero(mask->ct_label)) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_label isn't supported");
        return EOPNOTSUPP;
    }

    for (int i = 0; i < FLOW_N_REGS; i++) {
        if (mask->regs[i]) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute regs[%d] isn't supported", i);
            return EOPNOTSUPP;
        }
    }

    if (mask->metadata) {
        VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_tos) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_ttl) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_ttl isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_frag) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_frag isn't supported");
        return EOPNOTSUPP;
    }

    for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
        if (mask->mpls_lse[i]) {
            VLOG_DBG_RL(&rl, "offloading attribute mpls_lse isn't supported");
            return EOPNOTSUPP;
        }
    }

    if (key->dl_type == htons(ETH_TYPE_IP) &&
        key->nw_proto == IPPROTO_ICMP) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_code isn't supported");
            return EOPNOTSUPP;
        }
    } else if (key->dl_type == htons(ETH_TYPE_IP) &&
               key->nw_proto == IPPROTO_IGMP) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_code isn't supported");
            return EOPNOTSUPP;
        }
    } else if (key->dl_type == htons(ETH_TYPE_IPV6) &&
               key->nw_proto == IPPROTO_ICMPV6) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_code isn't supported");
            return EOPNOTSUPP;
        }
    }

    if (is_ip_any(key) && key->nw_proto == IPPROTO_TCP && mask->tcp_flags) {
        VLOG_DBG_RL(&rl, "offloading attribute tcp_flags isn't supported");
        return EOPNOTSUPP;
    }

    if (!is_all_zeros(mask, sizeof *mask)) {
        VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
        return EOPNOTSUPP;
    }

    return 0;
}

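/* Offloads the given flow as a tc flower filter on the netdev's ingress
 * qdisc, replacing any existing filter for the same ufid.  The ufid is
 * stored in the filter's action cookie so later dumps can recover it.
 * Returns 0 on success or a positive errno value on failure. */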
int
netdev_tc_flow_put(struct netdev *netdev, struct match *match,
                   struct nlattr *actions, size_t actions_len,
                   const ovs_u128 *ufid, struct offload_info *info,
                   struct dpif_flow_stats *stats OVS_UNUSED)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    struct tc_flower flower;
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;
    const struct flow_tnl *tnl = &match->flow.tunnel;
    struct nlattr *nla;
    size_t left;
    int prio = 0;
    int handle;
    int ifindex;
    int err;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    memset(&flower, 0, sizeof flower);

    if (tnl->tun_id) {
        VLOG_DBG_RL(&rl,
                    "tunnel: id %#" PRIx64 " src " IP_FMT
                    " dst " IP_FMT " tp_src %d tp_dst %d",
                    ntohll(tnl->tun_id),
                    IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
                    ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
        flower.tunnel.id = tnl->tun_id;
        flower.tunnel.ipv4.ipv4_src = tnl->ip_src;
        flower.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
        flower.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
        flower.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
        flower.tunnel.tp_src = tnl->tp_src;
        flower.tunnel.tp_dst = tnl->tp_dst;
        flower.tunnel.tunnel = true;

        memset(&mask->tunnel, 0, sizeof mask->tunnel);
    }

    flower.key.eth_type = key->dl_type;
    flower.mask.eth_type = mask->dl_type;

    if (mask->vlans[0].tci) {
        ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
        ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
        ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);

        if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
            && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
            && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
            && (vid_mask || pcp_mask)) {
            if (vid_mask) {
                flower.key.vlan_id = vlan_tci_to_vid(key->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_id: %d\n", flower.key.vlan_id);
            }
            if (pcp_mask) {
                flower.key.vlan_prio = vlan_tci_to_pcp(key->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_prio: %d\n", flower.key.vlan_prio);
            }
            flower.key.encap_eth_type = flower.key.eth_type;
            flower.key.eth_type = htons(ETH_TYPE_VLAN);
        } else if (mask->vlans[0].tci == htons(0xffff) &&
                   ntohs(key->vlans[0].tci) == 0) {
            /* exact && no vlan */
        } else {
            /* partial mask */
            return EOPNOTSUPP;
        }
    } else if (mask->vlans[1].tci) {
        return EOPNOTSUPP;
    }
    memset(mask->vlans, 0, sizeof mask->vlans);

    flower.key.dst_mac = key->dl_dst;
    flower.mask.dst_mac = mask->dl_dst;
    flower.key.src_mac = key->dl_src;
    flower.mask.src_mac = mask->dl_src;
    memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
    memset(&mask->dl_src, 0, sizeof mask->dl_src);
    mask->dl_type = 0;
    mask->in_port.odp_port = 0;

    if (is_ip_any(key)) {
        flower.key.ip_proto = key->nw_proto;
        flower.mask.ip_proto = mask->nw_proto;

        if (key->nw_proto == IPPROTO_TCP || key->nw_proto == IPPROTO_UDP) {
            flower.key.dst_port = key->tp_dst;
            flower.mask.dst_port = mask->tp_dst;
            flower.key.src_port = key->tp_src;
            flower.mask.src_port = mask->tp_src;
            mask->tp_src = 0;
            mask->tp_dst = 0;
        }

        mask->nw_frag = 0;
        mask->nw_tos = 0;
        mask->nw_proto = 0;

        if (key->dl_type == htons(ETH_P_IP)) {
            flower.key.ipv4.ipv4_src = key->nw_src;
            flower.mask.ipv4.ipv4_src = mask->nw_src;
            flower.key.ipv4.ipv4_dst = key->nw_dst;
            flower.mask.ipv4.ipv4_dst = mask->nw_dst;
            mask->nw_src = 0;
            mask->nw_dst = 0;
        } else if (key->dl_type == htons(ETH_P_IPV6)) {
            flower.key.ipv6.ipv6_src = key->ipv6_src;
            flower.mask.ipv6.ipv6_src = mask->ipv6_src;
            flower.key.ipv6.ipv6_dst = key->ipv6_dst;
            flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
            memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
            memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
        }
    }

    err = test_key_and_mask(match);
    if (err) {
        return err;
    }

    NL_ATTR_FOR_EACH(nla, left, actions, actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            odp_port_t port = nl_attr_get_odp_port(nla);
            struct netdev *outdev = netdev_ports_get(port,
                                                     info->port_hmap_obj);

            flower.ifindex_out = netdev_get_ifindex(outdev);
            flower.set.tp_dst = info->tp_dst_port;
            netdev_close(outdev);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
            const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla);

            flower.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci);
            flower.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
            flower.vlan_pop = 1;
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) {
            const struct nlattr *set = nl_attr_get(nla);
            const size_t set_len = nl_attr_get_size(nla);

            err = parse_put_flow_set_action(&flower, set, set_len);
            if (err) {
                return err;
            }
        } else {
            VLOG_DBG_RL(&rl, "unsupported put action type: %d",
                        nl_attr_type(nla));
            return EOPNOTSUPP;
        }
    }

    handle = get_ufid_tc_mapping(ufid, &prio, NULL);
    if (handle && prio) {
        VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
        tc_del_filter(ifindex, prio, handle);
    }

    if (!prio) {
        prio = get_prio_for_tc_flower(&flower);
        if (prio == 0) {
            VLOG_ERR_RL(&rl, "couldn't get tc prio: %s",
                        ovs_strerror(ENOSPC));
            return ENOSPC;
        }
    }

    flower.act_cookie.data = ufid;
    flower.act_cookie.len = sizeof *ufid;

    err = tc_replace_flower(ifindex, prio, handle, &flower);
    if (!err) {
        add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev,
                            ifindex);
    }

    return err;
}

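/* Looks up the tc filter mapped to the given ufid and converts it back
 * into a match, actions and stats.  Returns 0 on success, ENOENT if the
 * ufid is unknown, or another positive errno value on failure. */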
int
netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
                   struct match *match,
                   struct nlattr **actions,
                   const ovs_u128 *ufid,
                   struct dpif_flow_stats *stats,
                   struct ofpbuf *buf)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    struct netdev *dev;
    struct tc_flower flower;
    odp_port_t in_port;
    int prio = 0;
    int ifindex;
    int handle;
    int err;

    handle = get_ufid_tc_mapping(ufid, &prio, &dev);
    if (!handle) {
        return ENOENT;
    }

    ifindex = netdev_get_ifindex(dev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(dev), ovs_strerror(-ifindex));
        netdev_close(dev);
        return -ifindex;
    }

    VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d)",
                netdev_get_name(dev), prio, handle);
    err = tc_get_flower(ifindex, prio, handle, &flower);
    netdev_close(dev);
    if (err) {
        VLOG_ERR_RL(&error_rl,
                    "flow get failed (dev %s prio %d handle %d): %s",
                    netdev_get_name(dev), prio, handle, ovs_strerror(err));
        return err;
    }

    in_port = netdev_ifindex_to_odp_port(ifindex);
    parse_tc_flower_to_match(&flower, match, actions, stats, buf);

    match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
    match->flow.in_port.odp_port = in_port;

    return 0;
}

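/* Deletes the tc filter mapped to the given ufid and removes its ufid_tc
 * entry.  Returns 0 on success, ENOENT if the ufid is unknown, or the
 * error from the tc filter deletion. */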
int
netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
                   const ovs_u128 *ufid,
                   struct dpif_flow_stats *stats)
{
    struct netdev *dev;
    int prio = 0;
    int ifindex;
    int handle;
    int error;

    handle = get_ufid_tc_mapping(ufid, &prio, &dev);
    if (!handle) {
        return ENOENT;
    }

    ifindex = netdev_get_ifindex(dev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(dev), ovs_strerror(-ifindex));
        netdev_close(dev);
        return -ifindex;
    }

    error = tc_del_filter(ifindex, prio, handle);
    del_ufid_tc_mapping(ufid);

    netdev_close(dev);

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }
    return error;
}

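/* Prepares the given netdev for tc offloading by adding an ingress qdisc
 * to attach flower filters to.  Returns 0 on success (an already existing
 * ingress qdisc counts as success) or a positive errno value on
 * failure. */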
int
netdev_tc_init_flow_api(struct netdev *netdev)
{
    int ifindex;
    int error;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    error = tc_add_del_ingress_qdisc(ifindex, true);

    if (error && error != EEXIST) {
        VLOG_ERR("failed adding ingress qdisc required for offloading: %s",
                 ovs_strerror(error));
        return error;
    }

    VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev));

    return 0;
}