/*
 * Copyright (c) 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include <errno.h>
#include <stdbool.h>

#include "bitmap.h"
#include "compiler.h"
#include "openvswitch/hmap.h"
#include "openvswitch/match.h"
#include "nx-match.h"
#include "odp-netlink.h"
#include "openvswitch/ofp-match.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "tun-metadata.h"
#include "util.h"

struct tun_meta_entry {
    struct hmap_node node;      /* In struct tun_table's key_hmap. */
    struct tun_metadata_loc loc;
    uint32_t key;               /* (class << 8) | type. */
    bool valid;                 /* True if allocated to a class and type. */
};

/* Maps from TLV option class+type to positions in a struct tun_metadata's
 * 'opts' array. */
struct tun_table {
    /* TUN_METADATA<i> is stored in element <i>. */
    struct tun_meta_entry entries[TUN_METADATA_NUM_OPTS];

    /* Each bit represents 4 bytes of space, 0-bits are free space. */
    unsigned long alloc_map[BITMAP_N_LONGS(TUN_METADATA_TOT_OPT_SIZE / 4)];

    /* The valid elements in entries[], indexed by class+type. */
    struct hmap key_hmap;
};
BUILD_ASSERT_DECL(TUN_METADATA_TOT_OPT_SIZE % 4 == 0);

static enum ofperr tun_metadata_add_entry(struct tun_table *map, uint8_t idx,
                                          uint16_t opt_class, uint8_t type,
                                          uint8_t len);
static void tun_metadata_del_entry(struct tun_table *map, uint8_t idx);
static void memcpy_to_metadata(struct tun_metadata *dst, const void *src,
                               const struct tun_metadata_loc *,
                               unsigned int idx);
static void memcpy_from_metadata(void *dst, const struct tun_metadata *src,
                                 const struct tun_metadata_loc *);

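/* Helpers for the lookup key used to index 'key_hmap': the TLV option class
 * (kept in network byte order, without conversion) occupies the upper bits
 * and the option type the low 8 bits. */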
static uint32_t
tun_meta_key(ovs_be16 class, uint8_t type)
{
    return (OVS_FORCE uint16_t)class << 8 | type;
}

static ovs_be16
tun_key_class(uint32_t key)
{
    return (OVS_FORCE ovs_be16)(key >> 8);
}

static uint8_t
tun_key_type(uint32_t key)
{
    return key & 0xff;
}

/* Returns a newly allocated tun_table.  If 'old_map' is nonnull then the new
 * tun_table is a deep copy of the old one. */
struct tun_table *
tun_metadata_alloc(const struct tun_table *old_map)
{
    struct tun_table *new_map;

    new_map = xzalloc(sizeof *new_map);

    if (old_map) {
        struct tun_meta_entry *entry;

        *new_map = *old_map;
        hmap_init(&new_map->key_hmap);

        HMAP_FOR_EACH (entry, node, &old_map->key_hmap) {
            struct tun_meta_entry *new_entry;
            struct tun_metadata_loc_chain *chain;

            new_entry = &new_map->entries[entry - old_map->entries];
            hmap_insert(&new_map->key_hmap, &new_entry->node, entry->node.hash);

            chain = &new_entry->loc.c;
            while (chain->next) {
                chain->next = xmemdup(chain->next, sizeof *chain->next);
                chain = chain->next;
            }
        }
    } else {
        hmap_init(&new_map->key_hmap);
    }

    return new_map;
}

/* Frees 'map' and all the memory it owns. */
void
tun_metadata_free(struct tun_table *map)
{
    struct tun_meta_entry *entry;

    if (!map) {
        return;
    }

    HMAP_FOR_EACH (entry, node, &map->key_hmap) {
        tun_metadata_del_entry(map, entry - map->entries);
    }

    hmap_destroy(&map->key_hmap);
    free(map);
}

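/* Schedules 'tab' to be freed with tun_metadata_free() after the next RCU
 * grace period, once no reader can still hold a reference to it. */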
void
tun_metadata_postpone_free(struct tun_table *tab)
{
    ovsrcu_postpone(tun_metadata_free, tab);
}

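/* Applies the TLV table modification in 'ttm' to 'old_tab', building the
 * resulting table in '*new_tab'.  Returns 0 on success or an OpenFlow error
 * code on failure, in which case '*new_tab' is set to NULL.  'old_tab' itself
 * is never modified.
 *
 * A minimal caller sketch (hypothetical names, assuming the active table is
 * published under RCU):
 *
 *     struct tun_table *new_tab;
 *     enum ofperr err = tun_metadata_table_mod(ttm, old_tab, &new_tab);
 *     if (!err) {
 *         ovsrcu_set(&active_tab, new_tab);      // publish the new table
 *         tun_metadata_postpone_free(old_tab);   // reclaim after grace period
 *     }
 */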
enum ofperr
tun_metadata_table_mod(struct ofputil_tlv_table_mod *ttm,
                       const struct tun_table *old_tab,
                       struct tun_table **new_tab)
{
    struct ofputil_tlv_map *ofp_map;
    enum ofperr err = 0;

    switch (ttm->command) {
    case NXTTMC_ADD:
        *new_tab = tun_metadata_alloc(old_tab);

        LIST_FOR_EACH (ofp_map, list_node, &ttm->mappings) {
            err = tun_metadata_add_entry(*new_tab, ofp_map->index,
                                         ofp_map->option_class,
                                         ofp_map->option_type,
                                         ofp_map->option_len);
            if (err) {
                tun_metadata_free(*new_tab);
                *new_tab = NULL;
                return err;
            }
        }
        break;

    case NXTTMC_DELETE:
        *new_tab = tun_metadata_alloc(old_tab);

        LIST_FOR_EACH (ofp_map, list_node, &ttm->mappings) {
            tun_metadata_del_entry(*new_tab, ofp_map->index);
        }
        break;

    case NXTTMC_CLEAR:
        *new_tab = tun_metadata_alloc(NULL);
        break;

    default:
        OVS_NOT_REACHED();
    }

    return 0;
}

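/* Fills 'ttr' with the contents of 'tun_table': the table limits plus one
 * freshly allocated ofputil_tlv_map for each valid entry.  The caller takes
 * ownership of the mappings list. */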
void
tun_metadata_table_request(const struct tun_table *tun_table,
                           struct ofputil_tlv_table_reply *ttr)
{
    int i;

    ttr->max_option_space = TUN_METADATA_TOT_OPT_SIZE;
    ttr->max_fields = TUN_METADATA_NUM_OPTS;
    ovs_list_init(&ttr->mappings);

    for (i = 0; i < TUN_METADATA_NUM_OPTS; i++) {
        const struct tun_meta_entry *entry = &tun_table->entries[i];
        struct ofputil_tlv_map *map;

        if (!entry->valid) {
            continue;
        }

        map = xmalloc(sizeof *map);
        map->option_class = ntohs(tun_key_class(entry->key));
        map->option_type = tun_key_type(entry->key);
        map->option_len = entry->loc.len;
        map->index = i;

        ovs_list_push_back(&ttr->mappings, &map->list_node);
    }
}

/* Copies the value of field 'mf' from 'tnl' (which must be in non-UDPIF
 * format) into 'value'.
 *
 * 'mf' must be an MFF_TUN_METADATA* field.
 *
 * This uses the tunnel metadata mapping table created by tun_metadata_alloc().
 * If no such table has been created or if 'mf' hasn't been allocated in it yet,
 * this just zeros 'value'. */
void
tun_metadata_read(const struct flow_tnl *tnl,
                  const struct mf_field *mf, union mf_value *value)
{
    const struct tun_table *map = tnl->metadata.tab;
    unsigned int idx = mf->id - MFF_TUN_METADATA0;
    const struct tun_metadata_loc *loc;

    if (!map) {
        memset(value->tun_metadata, 0, mf->n_bytes);
        return;
    }

    loc = &map->entries[idx].loc;

    memset(value->tun_metadata, 0, mf->n_bytes - loc->len);
    memcpy_from_metadata(value->tun_metadata + mf->n_bytes - loc->len,
                         &tnl->metadata, loc);
}

/* Copies 'value' into field 'mf' in 'tnl' (in non-UDPIF format).
 *
 * 'mf' must be an MFF_TUN_METADATA* field.
 *
 * This uses the tunnel metadata mapping table created by tun_metadata_alloc().
 * If no such table has been created or if 'mf' hasn't been allocated in it yet,
 * this function does nothing. */
void
tun_metadata_write(struct flow_tnl *tnl,
                   const struct mf_field *mf, const union mf_value *value)
{
    const struct tun_table *map = tnl->metadata.tab;
    unsigned int idx = mf->id - MFF_TUN_METADATA0;
    const struct tun_metadata_loc *loc;

    if (!map || !map->entries[idx].valid) {
        return;
    }

    loc = &map->entries[idx].loc;
    memcpy_to_metadata(&tnl->metadata,
                       value->tun_metadata + mf->n_bytes - loc->len, loc, idx);
}

/* Deletes field 'mf' in 'tnl' (in non-UDPIF format).
 * 'mf' must be an MFF_TUN_METADATA* field.
 */
void
tun_metadata_delete(struct flow_tnl *tnl, const struct mf_field *mf)
{
    unsigned int idx;

    if (tnl->flags & FLOW_TNL_F_UDPIF) {
        return;
    }

    idx = mf->id - MFF_TUN_METADATA0;
    ovs_assert(idx < TUN_METADATA_NUM_OPTS);
    ULLONG_SET0(tnl->metadata.present.map, idx);
}

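/* Returns the location of tunnel metadata index 'idx' for 'match'.  With a
 * switch-level mapping table 'map', this is simply the mapped location (or
 * NULL if 'idx' is not allocated).  Without one, space is allocated on demand
 * from the match-local table in 'match->tun_md'; if there is no room left or
 * the field was already set, returns NULL and, if 'err_str' is nonnull, an
 * error message in '*err_str'. */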
static const struct tun_metadata_loc *
metadata_loc_from_match(const struct tun_table *map, struct match *match,
                        const char *name, unsigned int idx,
                        unsigned int field_len, bool masked, char **err_str)
{
    ovs_assert(idx < TUN_METADATA_NUM_OPTS);

    if (err_str) {
        *err_str = NULL;
    }

    if (map) {
        if (map->entries[idx].valid) {
            return &map->entries[idx].loc;
        } else {
            return NULL;
        }
    }

    if (match->tun_md.alloc_offset + field_len > TUN_METADATA_TOT_OPT_SIZE) {
        if (err_str) {
            *err_str = xasprintf("field %s exceeds maximum size for tunnel "
                                 "metadata (used %d, max %d)", name,
                                 match->tun_md.alloc_offset + field_len,
                                 TUN_METADATA_TOT_OPT_SIZE);
        }

        return NULL;
    }

    if (ULLONG_GET(match->wc.masks.tunnel.metadata.present.map, idx)) {
        if (err_str) {
            *err_str = xasprintf("field %s set multiple times", name);
        }

        return NULL;
    }

    match->tun_md.entry[idx].loc.len = field_len;
    match->tun_md.entry[idx].loc.c.offset = match->tun_md.alloc_offset;
    match->tun_md.entry[idx].loc.c.len = field_len;
    match->tun_md.entry[idx].loc.c.next = NULL;
    match->tun_md.entry[idx].masked = masked;
    match->tun_md.alloc_offset += field_len;
    match->tun_md.valid = true;

    return &match->tun_md.entry[idx].loc;
}

/* Makes 'match' match 'value'/'mask' on field 'mf'.
 *
 * 'mf' must be an MFF_TUN_METADATA* field.  'match' must be in non-UDPIF
 * format.
 *
 * If there is a tunnel metadata mapping table associated with the switch,
 * this function is effective only if there is already a mapping for 'mf'.
 * Otherwise, the metadata mapping table integrated into 'match' is used,
 * adding 'mf' to its mapping table if it isn't already mapped (and if there
 * is room).  If 'mf' isn't or can't be mapped, this function returns without
 * modifying 'match'.
 *
 * 'value' may be NULL; if so, then 'mf' is made to match on an all-zeros
 * value.
 *
 * 'mask' may be NULL; if so, then 'mf' is made exact-match.
 *
 * If non-NULL, 'err_str' returns a malloc'ed string describing any errors
 * with the request or NULL if there is no error.  The caller is responsible
 * for freeing the string.
 */
void
tun_metadata_set_match(const struct mf_field *mf, const union mf_value *value,
                       const union mf_value *mask, struct match *match,
                       char **err_str)
{
    const struct tun_table *map = match->flow.tunnel.metadata.tab;
    const struct tun_metadata_loc *loc;
    unsigned int idx = mf->id - MFF_TUN_METADATA0;
    unsigned int field_len;
    bool is_masked;
    unsigned int data_offset;
    union mf_value data;

    field_len = mf_field_len(mf, value, mask, &is_masked);
    loc = metadata_loc_from_match(map, match, mf->name, idx, field_len,
                                  is_masked, err_str);
    if (!loc) {
        return;
    }

    data_offset = mf->n_bytes - loc->len;

    if (!value) {
        memset(data.tun_metadata, 0, loc->len);
    } else if (!mask) {
        memcpy(data.tun_metadata, value->tun_metadata + data_offset, loc->len);
    } else {
        int i;
        for (i = 0; i < loc->len; i++) {
            data.tun_metadata[i] = value->tun_metadata[data_offset + i] &
                                   mask->tun_metadata[data_offset + i];
        }
    }
    memcpy_to_metadata(&match->flow.tunnel.metadata, data.tun_metadata,
                       loc, idx);

    if (!value) {
        memset(data.tun_metadata, 0, loc->len);
    } else if (!mask) {
        memset(data.tun_metadata, 0xff, loc->len);
    } else {
        memcpy(data.tun_metadata, mask->tun_metadata + data_offset, loc->len);
    }
    memcpy_to_metadata(&match->wc.masks.tunnel.metadata, data.tun_metadata,
                       loc, idx);
}

/* Copies all MFF_TUN_METADATA* fields from 'tnl' to 'flow_metadata'.  This
 * is called during action translation and therefore 'tnl' must be in
 * non-udpif format. */
void
tun_metadata_get_fmd(const struct flow_tnl *tnl, struct match *flow_metadata)
{
    int i;

    ULLONG_FOR_EACH_1 (i, tnl->metadata.present.map) {
        union mf_value opts;
        const struct tun_metadata_loc *old_loc = &tnl->metadata.tab->entries[i].loc;
        const struct tun_metadata_loc *new_loc;

        new_loc = metadata_loc_from_match(NULL, flow_metadata, NULL, i,
                                          old_loc->len, false, NULL);

        memcpy_from_metadata(opts.tun_metadata, &tnl->metadata, old_loc);
        memcpy_to_metadata(&flow_metadata->flow.tunnel.metadata,
                           opts.tun_metadata, new_loc, i);

        memset(opts.tun_metadata, 0xff, old_loc->len);
        memcpy_to_metadata(&flow_metadata->wc.masks.tunnel.metadata,
                           opts.tun_metadata, new_loc, i);
    }
}

static uint32_t
tun_meta_hash(uint32_t key)
{
    return hash_int(key, 0);
}

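/* Returns the entry in 'hmap' whose packed class+type equals 'key', or NULL
 * if there is no such entry. */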
static struct tun_meta_entry *
tun_meta_find_key(const struct hmap *hmap, uint32_t key)
{
    struct tun_meta_entry *entry;

    HMAP_FOR_EACH_IN_BUCKET (entry, node, tun_meta_hash(key), hmap) {
        if (entry->key == key) {
            return entry;
        }
    }
    return NULL;
}

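/* Copies option data from the flat buffer 'src' into the (possibly chained,
 * discontiguous) location 'loc' within 'dst' and marks index 'idx' as
 * present. */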
static void
memcpy_to_metadata(struct tun_metadata *dst, const void *src,
                   const struct tun_metadata_loc *loc, unsigned int idx)
{
    const struct tun_metadata_loc_chain *chain = &loc->c;
    int addr = 0;

    while (chain) {
        memcpy(dst->opts.u8 + chain->offset, (uint8_t *)src + addr,
               chain->len);
        addr += chain->len;
        chain = chain->next;
    }

    ULLONG_SET1(dst->present.map, idx);
}

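/* Copies the option data described by 'loc' out of 'src' into the flat
 * buffer 'dst'. */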
static void
memcpy_from_metadata(void *dst, const struct tun_metadata *src,
                     const struct tun_metadata_loc *loc)
{
    const struct tun_metadata_loc_chain *chain = &loc->c;
    int addr = 0;

    while (chain) {
        memcpy((uint8_t *)dst + addr, src->opts.u8 + chain->offset,
               chain->len);
        addr += chain->len;
        chain = chain->next;
    }
}

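/* Allocates space for one link of a location chain: up to 'len' bytes from
 * 'map''s allocation bitmap, in 4-byte units.  Prefers a single contiguous
 * region of the full size but falls back to the largest free region found,
 * so the caller may need to allocate further links for the remainder.
 * Returns 0 on success or ENOSPC if no free space remains. */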
static int
tun_metadata_alloc_chain(struct tun_table *map, uint8_t len,
                         struct tun_metadata_loc_chain *loc)
{
    int alloc_len = len / 4;
    int scan_start = 0;
    int scan_end = TUN_METADATA_TOT_OPT_SIZE / 4;
    int pos_start, pos_end, pos_len;
    int best_start = 0, best_len = 0;

    while (true) {
        pos_start = bitmap_scan(map->alloc_map, 0, scan_start, scan_end);
        if (pos_start == scan_end) {
            break;
        }

        pos_end = bitmap_scan(map->alloc_map, 1, pos_start,
                              MIN(pos_start + alloc_len, scan_end));
        pos_len = pos_end - pos_start;
        if (pos_len == alloc_len) {
            goto found;
        }

        if (pos_len > best_len) {
            best_start = pos_start;
            best_len = pos_len;
        }
        scan_start = pos_end + 1;
    }

    if (best_len == 0) {
        return ENOSPC;
    }

    pos_start = best_start;
    pos_len = best_len;

found:
    bitmap_set_multiple(map->alloc_map, pos_start, pos_len, 1);
    loc->offset = pos_start * 4;
    loc->len = pos_len * 4;

    return 0;
}

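/* Maps TUN_METADATA<idx> to TLV option 'opt_class'/'type' with a length of
 * 'len' bytes, allocating space for the option data in 'map'.  Returns 0 on
 * success, or an OpenFlow error if 'idx' is already mapped, the class/type
 * pair is already in use, or the option table is out of space. */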
static enum ofperr
tun_metadata_add_entry(struct tun_table *map, uint8_t idx, uint16_t opt_class,
                       uint8_t type, uint8_t len)
{
    struct tun_meta_entry *entry;
    struct tun_metadata_loc_chain *cur_chain, *prev_chain;

    ovs_assert(idx < TUN_METADATA_NUM_OPTS);

    entry = &map->entries[idx];
    if (entry->valid) {
        return OFPERR_NXTTMFC_ALREADY_MAPPED;
    }

    entry->key = tun_meta_key(htons(opt_class), type);
    if (tun_meta_find_key(&map->key_hmap, entry->key)) {
        return OFPERR_NXTTMFC_DUP_ENTRY;
    }

    entry->valid = true;
    hmap_insert(&map->key_hmap, &entry->node,
                tun_meta_hash(entry->key));

    entry->loc.len = len;
    cur_chain = &entry->loc.c;
    memset(cur_chain, 0, sizeof *cur_chain);
    prev_chain = NULL;

    while (len) {
        int err;

        if (!cur_chain) {
            cur_chain = xzalloc(sizeof *cur_chain);
            prev_chain->next = cur_chain;
        }

        err = tun_metadata_alloc_chain(map, len, cur_chain);
        if (err) {
            tun_metadata_del_entry(map, idx);
            return OFPERR_NXTTMFC_TABLE_FULL;
        }

        len -= cur_chain->len;

        prev_chain = cur_chain;
        cur_chain = NULL;
    }

    return 0;
}

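/* Removes the mapping for TUN_METADATA<idx> from 'map', releasing the space
 * it occupied in the allocation bitmap and freeing any extra location-chain
 * links.  A no-op if 'idx' is out of range or not currently mapped. */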
static void
tun_metadata_del_entry(struct tun_table *map, uint8_t idx)
{
    struct tun_meta_entry *entry;
    struct tun_metadata_loc_chain *chain;

    if (idx >= TUN_METADATA_NUM_OPTS) {
        return;
    }

    entry = &map->entries[idx];
    if (!entry->valid) {
        return;
    }

    chain = &entry->loc.c;
    while (chain) {
        struct tun_metadata_loc_chain *next = chain->next;

        bitmap_set_multiple(map->alloc_map, chain->offset / 4,
                            chain->len / 4, 0);
        if (chain != &entry->loc.c) {
            free(chain);
        }
        chain = next;
    }

    entry->valid = false;
    hmap_remove(&map->key_hmap, &entry->node);
    memset(&entry->loc, 0, sizeof entry->loc);
}

/* Converts from Geneve netlink attributes in 'attr' to tunnel metadata
 * in 'tun'.  In reality, there is very little conversion done since we are
 * just copying over the tunnel options in the form that they were received
 * on the wire.  By always using UDPIF format, this allows us to process the
 * flow key without any knowledge of the mapping table.  We can do the
 * conversion later if necessary. */
void
tun_metadata_from_geneve_nlattr(const struct nlattr *attr, bool is_mask,
                                struct flow_tnl *tun)
{
    int attr_len = nl_attr_get_size(attr);

    memcpy(tun->metadata.opts.gnv, nl_attr_get(attr), attr_len);
    tun->flags |= FLOW_TNL_F_UDPIF;

    if (!is_mask) {
        tun->metadata.present.len = attr_len;
    } else {
        /* We need to exact match on the length so we don't
         * accidentally match on sets of options that are the same
         * at the beginning but with additional options after. */
        tun->metadata.present.len = 0xff;
    }
}

/* Converts from the flat Geneve options representation extracted directly
 * from the tunnel header to the representation that maps options to
 * pre-allocated locations.  The original version (in UDPIF form) is passed
 * in 'src' and the translated form is stored in 'dst'.  To handle masks, the
 * flow must also be passed in through 'flow' (in the original, raw form). */
int
tun_metadata_from_geneve_udpif(const struct tun_table *tun_tab,
                               const struct flow_tnl *flow,
                               const struct flow_tnl *src,
                               struct flow_tnl *dst)
{
    const struct geneve_opt *opt = src->metadata.opts.gnv;
    const struct geneve_opt *flow_opt = flow->metadata.opts.gnv;
    int opts_len = flow->metadata.present.len;

    dst->metadata.tab = tun_tab;
    dst->flags = src->flags & ~FLOW_TNL_F_UDPIF;
    dst->metadata.present.map = 0;

    while (opts_len > 0) {
        int len;
        struct tun_meta_entry *entry;

        if (opts_len < sizeof(*opt)) {
            return EINVAL;
        }

        len = sizeof(*opt) + flow_opt->length * 4;
        if (len > opts_len) {
            return EINVAL;
        }

        entry = tun_meta_find_key(&tun_tab->key_hmap,
                                  tun_meta_key(flow_opt->opt_class,
                                               flow_opt->type));
        if (entry) {
            if (entry->loc.len == flow_opt->length * 4) {
                memcpy_to_metadata(&dst->metadata, opt + 1, &entry->loc,
                                   entry - tun_tab->entries);
            } else {
                return EINVAL;
            }
        } else if (flow_opt->type & GENEVE_CRIT_OPT_TYPE) {
            return EINVAL;
        }

        opt = opt + len / sizeof(*opt);
        flow_opt = flow_opt + len / sizeof(*opt);
        opts_len -= len;
    }

    return 0;
}

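/* Appends one Geneve option to 'b' for every option present in 'flow',
 * copying the data out of its mapped location.  Sets '*crit_opt' if any of
 * the emitted options has the critical bit set. */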
static void
tun_metadata_to_geneve__(const struct tun_metadata *flow, struct ofpbuf *b,
                         bool *crit_opt)
{
    int i;

    *crit_opt = false;

    ULLONG_FOR_EACH_1 (i, flow->present.map) {
        const struct tun_meta_entry *entry = &flow->tab->entries[i];
        struct geneve_opt *opt;

        opt = ofpbuf_put_uninit(b, sizeof *opt + entry->loc.len);

        opt->opt_class = tun_key_class(entry->key);
        opt->type = tun_key_type(entry->key);
        opt->length = entry->loc.len / 4;
        opt->r1 = 0;
        opt->r2 = 0;
        opt->r3 = 0;

        memcpy_from_metadata(opt + 1, flow, &entry->loc);
        *crit_opt |= !!(opt->type & GENEVE_CRIT_OPT_TYPE);
    }
}

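/* Encodes the options present in 'flow' (non-UDPIF format) into 'b' as a
 * nested OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS attribute.  Does nothing if no
 * options are present. */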
static void
tun_metadata_to_geneve_nlattr_flow(const struct flow_tnl *flow,
                                   struct ofpbuf *b)
{
    size_t nlattr_offset;
    bool crit_opt;

    if (!flow->metadata.present.map) {
        return;
    }

    /* For all intents and purposes, the Geneve options are nested
     * attributes even if this doesn't show up directly to netlink.  It's
     * similar enough that we can use the same mechanism. */
    nlattr_offset = nl_msg_start_nested(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);

    tun_metadata_to_geneve__(&flow->metadata, b, &crit_opt);

    nl_msg_end_nested(b, nlattr_offset);
}

/* Converts from processed tunnel metadata information (in non-udpif
 * format) in 'flow' to a stream of Geneve options suitable for
 * transmission in 'opts'.  Additionally returns whether there were
 * any critical options in 'crit_opt' as well as the total length of
 * data. */
int
tun_metadata_to_geneve_header(const struct flow_tnl *flow,
                              struct geneve_opt *opts, bool *crit_opt)
{
    struct ofpbuf b;

    ofpbuf_use_stack(&b, opts, TLV_TOT_OPT_SIZE);
    tun_metadata_to_geneve__(&flow->metadata, &b, crit_opt);

    return b.size;
}

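/* Rewrites the Geneve options in 'opt' ('opts_len' bytes, a copy of the
 * flow's options) into mask form: each option's data is replaced by the mask
 * stored in 'mask' (or zeros if the option is not mapped in 'flow''s table)
 * and the option headers are set to exact-match values. */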
static void
tun_metadata_to_geneve_mask__(const struct tun_metadata *flow,
                              const struct tun_metadata *mask,
                              struct geneve_opt *opt, int opts_len)
{
    /* All of these options have already been validated, so no need
     * for sanity checking. */
    while (opts_len > 0) {
        struct tun_meta_entry *entry;
        int len = sizeof(*opt) + opt->length * 4;

        entry = tun_meta_find_key(&flow->tab->key_hmap,
                                  tun_meta_key(opt->opt_class, opt->type));
        if (entry) {
            memcpy_from_metadata(opt + 1, mask, &entry->loc);
        } else {
            memset(opt + 1, 0, opt->length * 4);
        }

        opt->opt_class = htons(0xffff);
        opt->type = 0xff;
        opt->length = 0x1f;
        opt->r1 = 0;
        opt->r2 = 0;
        opt->r3 = 0;

        opt = opt + len / sizeof(*opt);
        opts_len -= len;
    }
}

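/* Generates the mask counterpart of the Geneve options attribute: locates
 * the options in the original netlink flow 'key', appends a copy of them to
 * 'b', and then rewrites that copy in place with mask data taken from 'mask'
 * (using 'flow' to interpret the option layout). */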
static void
tun_metadata_to_geneve_nlattr_mask(const struct ofpbuf *key,
                                   const struct flow_tnl *mask,
                                   const struct flow_tnl *flow,
                                   struct ofpbuf *b)
{
    const struct nlattr *tnl_key, *geneve_key;
    struct nlattr *geneve_mask;
    struct geneve_opt *opt;
    int opts_len;

    if (!key) {
        return;
    }

    tnl_key = nl_attr_find__(key->data, key->size, OVS_KEY_ATTR_TUNNEL);
    if (!tnl_key) {
        return;
    }

    geneve_key = nl_attr_find_nested(tnl_key, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
    if (!geneve_key) {
        return;
    }

    geneve_mask = ofpbuf_tail(b);
    nl_msg_put(b, geneve_key, geneve_key->nla_len);

    opt = CONST_CAST(struct geneve_opt *, nl_attr_get(geneve_mask));
    opts_len = nl_attr_get_size(geneve_mask);

    tun_metadata_to_geneve_mask__(&flow->metadata, &mask->metadata,
                                  opt, opts_len);
}

/* Converts from the tunnel metadata in 'tun' to netlink attributes stored
 * in 'b'.  Either UDPIF or non-UDPIF input forms are accepted.
 *
 * To assist with parsing, it is necessary to also pass in the tunnel metadata
 * from the flow in 'flow' as well as the original netlink form of the flow in
 * 'key'. */
void
tun_metadata_to_geneve_nlattr(const struct flow_tnl *tun,
                              const struct flow_tnl *flow,
                              const struct ofpbuf *key,
                              struct ofpbuf *b)
{
    bool is_mask = tun != flow;

    if (!(flow->flags & FLOW_TNL_F_UDPIF)) {
        if (!is_mask) {
            tun_metadata_to_geneve_nlattr_flow(tun, b);
        } else {
            tun_metadata_to_geneve_nlattr_mask(key, tun, flow, b);
        }
    } else if (flow->metadata.present.len || is_mask) {
        nl_msg_put_unspec(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
                          tun->metadata.opts.gnv,
                          flow->metadata.present.len);
    }
}

/* Converts 'mask_src' (in non-UDPIF format) to a series of masked options in
 * 'dst'.  'flow_src' (also in non-UDPIF format) and the original set of
 * options 'flow_src_opt'/'opts_len' are needed as a guide to interpret the
 * mask data. */
void
tun_metadata_to_geneve_udpif_mask(const struct flow_tnl *flow_src,
                                  const struct flow_tnl *mask_src,
                                  const struct geneve_opt *flow_src_opt,
                                  int opts_len, struct geneve_opt *dst)
{
    memcpy(dst, flow_src_opt, opts_len);
    tun_metadata_to_geneve_mask__(&flow_src->metadata,
                                  &mask_src->metadata, dst, opts_len);
}

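/* Returns the location to read tunnel metadata index 'idx' from for 'match',
 * preferring the match-local allocation table when it is valid and otherwise
 * the switch-level 'map'.  Sets '*is_masked' according to whether the field
 * is matched with a mask rather than exactly. */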
static const struct tun_metadata_loc *
metadata_loc_from_match_read(const struct tun_table *map,
                             const struct match *match, unsigned int idx,
                             const struct flow_tnl *mask, bool *is_masked)
{
    union mf_value mask_opts;

    if (match->tun_md.valid) {
        *is_masked = match->tun_md.entry[idx].masked;
        return &match->tun_md.entry[idx].loc;
    }

    memcpy_from_metadata(mask_opts.tun_metadata, &mask->metadata,
                         &map->entries[idx].loc);

    *is_masked = map->entries[idx].loc.len == 0 ||
                 !is_all_ones(mask_opts.tun_metadata,
                              map->entries[idx].loc.len);
    return &map->entries[idx].loc;
}

/* Generates NXM formatted matches in 'b' based on the contents of 'match'.
 * 'match' must be in non-udpif format. */
void
tun_metadata_to_nx_match(struct ofpbuf *b, enum ofp_version oxm,
                         const struct match *match)
{
    int i;

    ULLONG_FOR_EACH_1 (i, match->wc.masks.tunnel.metadata.present.map) {
        const struct tun_metadata_loc *loc;
        bool is_masked;
        union mf_value opts;
        union mf_value mask_opts;

        loc = metadata_loc_from_match_read(match->flow.tunnel.metadata.tab,
                                           match, i, &match->wc.masks.tunnel,
                                           &is_masked);
        memcpy_from_metadata(opts.tun_metadata, &match->flow.tunnel.metadata,
                             loc);
        memcpy_from_metadata(mask_opts.tun_metadata,
                             &match->wc.masks.tunnel.metadata, loc);
        nxm_put_entry_raw(b, MFF_TUN_METADATA0 + i, oxm, opts.tun_metadata,
                          is_masked ? mask_opts.tun_metadata : NULL, loc->len);
    }
}

/* Formats the tunnel metadata matches in 'match' into 's'.  'match' must be
 * in non-udpif format. */
void
tun_metadata_match_format(struct ds *s, const struct match *match)
{
    int i;

    if (match->flow.tunnel.flags & FLOW_TNL_F_UDPIF ||
        (!match->flow.tunnel.metadata.tab && !match->tun_md.valid)) {
        return;
    }

    ULLONG_FOR_EACH_1 (i, match->wc.masks.tunnel.metadata.present.map) {
        const struct tun_metadata_loc *loc;
        bool is_masked;
        union mf_value opts, mask_opts;

        loc = metadata_loc_from_match_read(match->flow.tunnel.metadata.tab,
                                           match, i, &match->wc.masks.tunnel,
                                           &is_masked);

        ds_put_format(s, "tun_metadata%u", i);
        memcpy_from_metadata(mask_opts.tun_metadata,
                             &match->wc.masks.tunnel.metadata, loc);

        if (!ULLONG_GET(match->flow.tunnel.metadata.present.map, i)) {
            /* Indicate that we are matching on the field being not present. */
            ds_put_cstr(s, "=NP");
        } else if (!(is_masked &&
                     is_all_zeros(mask_opts.tun_metadata, loc->len))) {
            ds_put_char(s, '=');

            memcpy_from_metadata(opts.tun_metadata,
                                 &match->flow.tunnel.metadata, loc);
            ds_put_hex(s, opts.tun_metadata, loc->len);

            if (!is_all_ones(mask_opts.tun_metadata, loc->len)) {
                ds_put_char(s, '/');
                ds_put_hex(s, mask_opts.tun_metadata, loc->len);
            }
        }
        ds_put_char(s, ',');
    }
}

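/* Returns a heap-allocated copy of 'src', or NULL if 'src' is null or has no
 * valid allocations. */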
struct tun_metadata_allocation *
tun_metadata_allocation_clone(const struct tun_metadata_allocation *src)
{
    return src && src->valid ? xmemdup(src, sizeof *src) : NULL;
}

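/* Copies 'src' into 'dst', or zeroes 'dst' if 'src' is null or has no valid
 * allocations. */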
void
tun_metadata_allocation_copy(struct tun_metadata_allocation *dst,
                             const struct tun_metadata_allocation *src)
{
    if (src && src->valid) {
        *dst = *src;
    } else {
        memset(dst, 0, sizeof *dst);
    }
}