ofproto/ofproto-dpif-ipfix.c
1 /*
2 * Copyright (c) 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "ofproto-dpif-ipfix.h"
19 #include <sys/time.h>
20 #include "byte-order.h"
21 #include "collectors.h"
22 #include "flow.h"
23 #include "hash.h"
24 #include "hmap.h"
25 #include "list.h"
26 #include "ofpbuf.h"
27 #include "ofproto.h"
28 #include "ofproto-dpif.h"
29 #include "dp-packet.h"
30 #include "packets.h"
31 #include "poll-loop.h"
32 #include "sset.h"
33 #include "util.h"
34 #include "timeval.h"
35 #include "util.h"
36 #include "openvswitch/vlog.h"
37
38 VLOG_DEFINE_THIS_MODULE(ipfix);
39
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
41 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
42
43 /* Cf. IETF RFC 5101 Section 10.3.4. */
44 #define IPFIX_DEFAULT_COLLECTOR_PORT 4739
45
46 /* The standard layer2SegmentId (ID 351) element is included in vDS to send
47 * the VxLAN tunnel's VNI. It is 64 bits long; the most significant byte is
48 * used to indicate the type of tunnel (0x01 = VxLAN, 0x02 = GRE) and the three
49 * least significant bytes hold the value of the layer 2 overlay network
50 * segment identifier: a 24-bit VxLAN tunnel's VNI or a 24-bit GRE tunnel's
51 * TNI. This is not compatible with STT, as implemented in OVS, because
52 * its tunnel IDs are 64 bits long.
53 *
54 * Two new enterprise information elements are defined which are similar to
55 * layer2SegmentId but support 64-bit IDs:
56 * tunnelType (ID 891) and tunnelKey (ID 892).
57 *
58 * The enum dpif_ipfix_tunnel_type declares the types supported in the
59 * tunnelType element.
60 * The number of ipfix tunnel types includes two reserved types: 0x04 and 0x06.
61 */
62 enum dpif_ipfix_tunnel_type {
63 DPIF_IPFIX_TUNNEL_UNKNOWN = 0x00,
64 DPIF_IPFIX_TUNNEL_VXLAN = 0x01,
65 DPIF_IPFIX_TUNNEL_GRE = 0x02,
66 DPIF_IPFIX_TUNNEL_LISP = 0x03,
67 DPIF_IPFIX_TUNNEL_STT = 0x04,
68 DPIF_IPFIX_TUNNEL_IPSEC_GRE = 0x05,
69 DPIF_IPFIX_TUNNEL_GENEVE = 0x07,
70 NUM_DPIF_IPFIX_TUNNEL
71 };
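/* Illustrative sketch (not part of the upstream source): packing a tunnel
 * type and a 24-bit segment ID into the standard layer2SegmentId layout
 * described above.  The helper name and example values are hypothetical. */
#if 0
static uint64_t
example_pack_layer2_segment_id(uint8_t tunnel_type, uint32_t segment_id)
{
    /* E.g. tunnel_type = DPIF_IPFIX_TUNNEL_VXLAN (0x01) and
     * segment_id = 0x123456 yield 0x0100000000123456. */
    return ((uint64_t) tunnel_type << 56) | (segment_id & 0xffffff);
}
#endif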
72
73 struct dpif_ipfix_port {
74 struct hmap_node hmap_node; /* In struct dpif_ipfix's "tunnel_ports" hmap. */
75 struct ofport *ofport; /* To retrieve port stats. */
76 odp_port_t odp_port;
77 enum dpif_ipfix_tunnel_type tunnel_type;
78 uint8_t tunnel_key_length;
79 };
80
81 struct dpif_ipfix_exporter {
82 struct collectors *collectors;
83 uint32_t seq_number;
84 time_t last_template_set_time;
85 struct hmap cache_flow_key_map; /* ipfix_flow_cache_entry. */
86 struct ovs_list cache_flow_start_timestamp_list; /* ipfix_flow_cache_entry. */
87 uint32_t cache_active_timeout; /* In seconds. */
88 uint32_t cache_max_flows;
89 };
90
91 struct dpif_ipfix_bridge_exporter {
92 struct dpif_ipfix_exporter exporter;
93 struct ofproto_ipfix_bridge_exporter_options *options;
94 uint32_t probability;
95 };
96
97 struct dpif_ipfix_flow_exporter {
98 struct dpif_ipfix_exporter exporter;
99 struct ofproto_ipfix_flow_exporter_options *options;
100 };
101
102 struct dpif_ipfix_flow_exporter_map_node {
103 struct hmap_node node;
104 struct dpif_ipfix_flow_exporter exporter;
105 };
106
107 struct dpif_ipfix {
108 struct dpif_ipfix_bridge_exporter bridge_exporter;
109 struct hmap flow_exporter_map; /* dpif_ipfix_flow_exporter_map_node. */
110 struct hmap tunnel_ports; /* Contains "struct dpif_ipfix_port"s.
111 * It makes tunnel port lookups faster in
112 * sampling upcalls. */
113 struct ovs_refcount ref_cnt;
114 };
115
116 #define IPFIX_VERSION 0x000a
117
118 /* When using UDP, IPFIX Template Records must be re-sent regularly.
119 * The standard default interval is 10 minutes (600 seconds).
120 * Cf. IETF RFC 5101 Section 10.3.6. */
121 #define IPFIX_TEMPLATE_INTERVAL 600
122
123 /* Cf. IETF RFC 5101 Section 3.1. */
124 OVS_PACKED(
125 struct ipfix_header {
126 ovs_be16 version; /* IPFIX_VERSION. */
127 ovs_be16 length; /* Length in bytes including this header. */
128 ovs_be32 export_time; /* Seconds since the epoch. */
129 ovs_be32 seq_number; /* Message sequence number. */
130 ovs_be32 obs_domain_id; /* Observation Domain ID. */
131 });
132 BUILD_ASSERT_DECL(sizeof(struct ipfix_header) == 16);
133
134 #define IPFIX_SET_ID_TEMPLATE 2
135 #define IPFIX_SET_ID_OPTION_TEMPLATE 3
136
137 /* Cf. IETF RFC 5101 Section 3.3.2. */
138 OVS_PACKED(
139 struct ipfix_set_header {
140 ovs_be16 set_id; /* IPFIX_SET_ID_* or valid template ID for Data Sets. */
141 ovs_be16 length; /* Length of the set in bytes including header. */
142 });
143 BUILD_ASSERT_DECL(sizeof(struct ipfix_set_header) == 4);
144
145 /* Alternatives for templates at each layer. A template is defined by
146 * a combination of one value for each layer. */
147 enum ipfix_proto_l2 {
148 IPFIX_PROTO_L2_ETH = 0, /* No VLAN. */
149 IPFIX_PROTO_L2_VLAN,
150 NUM_IPFIX_PROTO_L2
151 };
152 enum ipfix_proto_l3 {
153 IPFIX_PROTO_L3_UNKNOWN = 0,
154 IPFIX_PROTO_L3_IPV4,
155 IPFIX_PROTO_L3_IPV6,
156 NUM_IPFIX_PROTO_L3
157 };
158 enum ipfix_proto_l4 {
159 IPFIX_PROTO_L4_UNKNOWN = 0,
160 IPFIX_PROTO_L4_TCP_UDP_SCTP,
161 IPFIX_PROTO_L4_ICMP,
162 NUM_IPFIX_PROTO_L4
163 };
164 enum ipfix_proto_tunnel {
165 IPFIX_PROTO_NOT_TUNNELED = 0,
166 IPFIX_PROTO_TUNNELED, /* Supports GRE, IPsec GRE, VXLAN, LISP, Geneve and STT. */
167 NUM_IPFIX_PROTO_TUNNEL
168 };
169
170 /* Any Template ID > 255 is usable for Template Records. */
171 #define IPFIX_TEMPLATE_ID_MIN 256
172
173 /* Cf. IETF RFC 5101 Section 3.4.1. */
174 OVS_PACKED(
175 struct ipfix_template_record_header {
176 ovs_be16 template_id;
177 ovs_be16 field_count;
178 });
179 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_record_header) == 4);
180
181 enum ipfix_entity_id {
182 /* standard IPFIX elements */
183 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ID_##ENUM = ID,
184 #include "ofproto/ipfix-entities.def"
185 /* non-standard IPFIX elements */
186 #define IPFIX_SET_ENTERPRISE(v) (((v) | 0x8000))
187 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
188 IPFIX_ENTITY_ID_##ENUM = IPFIX_SET_ENTERPRISE(ID),
189 #include "ofproto/ipfix-enterprise-entities.def"
190 };
191
192 enum ipfix_entity_size {
193 /* standard IPFIX elements */
194 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_SIZE_##ENUM = SIZE,
195 #include "ofproto/ipfix-entities.def"
196 /* non-standard IPFIX elements */
197 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
198 IPFIX_ENTITY_SIZE_##ENUM = SIZE,
199 #include "ofproto/ipfix-enterprise-entities.def"
200 };
201
202 enum ipfix_entity_enterprise {
203 /* standard IPFIX elements */
204 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ENTERPRISE_##ENUM = 0,
205 #include "ofproto/ipfix-entities.def"
206 /* non-standard IPFIX elements */
207 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
208 IPFIX_ENTITY_ENTERPRISE_##ENUM = ENTERPRISE,
209 #include "ofproto/ipfix-enterprise-entities.def"
210 };
211
212 OVS_PACKED(
213 struct ipfix_template_field_specifier {
214 ovs_be16 element_id; /* IPFIX_ENTITY_ID_*. */
215 ovs_be16 field_length; /* Length of the field's value, in bytes.
216 * For a Variable-Length element, this is 65535.
217 */
218 ovs_be32 enterprise; /* Enterprise number */
219 });
220 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_field_specifier) == 8);
221
222 /* Cf. IETF RFC 5102 Section 5.11.6. */
223 enum ipfix_flow_direction {
224 INGRESS_FLOW = 0x00,
225 EGRESS_FLOW = 0x01
226 };
227
228 /* Part of data record flow key for common metadata and Ethernet entities. */
229 OVS_PACKED(
230 struct ipfix_data_record_flow_key_common {
231 ovs_be32 observation_point_id; /* OBSERVATION_POINT_ID */
232 uint8_t flow_direction; /* FLOW_DIRECTION */
233 struct eth_addr source_mac_address; /* SOURCE_MAC_ADDRESS */
234 struct eth_addr destination_mac_address; /* DESTINATION_MAC_ADDRESS */
235 ovs_be16 ethernet_type; /* ETHERNET_TYPE */
236 uint8_t ethernet_header_length; /* ETHERNET_HEADER_LENGTH */
237 });
238 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_common) == 20);
239
240 /* Part of data record flow key for VLAN entities. */
241 OVS_PACKED(
242 struct ipfix_data_record_flow_key_vlan {
243 ovs_be16 vlan_id; /* VLAN_ID */
244 ovs_be16 dot1q_vlan_id; /* DOT1Q_VLAN_ID */
245 uint8_t dot1q_priority; /* DOT1Q_PRIORITY */
246 });
247 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_vlan) == 5);
248
249 /* Part of data record flow key for IP entities. */
250 /* XXX: Replace IP_TTL with MINIMUM_TTL and MAXIMUM_TTL? */
251 OVS_PACKED(
252 struct ipfix_data_record_flow_key_ip {
253 uint8_t ip_version; /* IP_VERSION */
254 uint8_t ip_ttl; /* IP_TTL */
255 uint8_t protocol_identifier; /* PROTOCOL_IDENTIFIER */
256 uint8_t ip_diff_serv_code_point; /* IP_DIFF_SERV_CODE_POINT */
257 uint8_t ip_precedence; /* IP_PRECEDENCE */
258 uint8_t ip_class_of_service; /* IP_CLASS_OF_SERVICE */
259 });
260 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ip) == 6);
261
262 /* Part of data record flow key for IPv4 entities. */
263 OVS_PACKED(
264 struct ipfix_data_record_flow_key_ipv4 {
265 ovs_be32 source_ipv4_address; /* SOURCE_IPV4_ADDRESS */
266 ovs_be32 destination_ipv4_address; /* DESTINATION_IPV4_ADDRESS */
267 });
268 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv4) == 8);
269
270 /* Part of data record flow key for IPv6 entities. */
271 OVS_PACKED(
272 struct ipfix_data_record_flow_key_ipv6 {
273 uint8_t source_ipv6_address[16]; /* SOURCE_IPV6_ADDRESS */
274 uint8_t destination_ipv6_address[16]; /* DESTINATION_IPV6_ADDRESS */
275 ovs_be32 flow_label_ipv6; /* FLOW_LABEL_IPV6 */
276 });
277 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv6) == 36);
278
279 /* Part of data record flow key for TCP/UDP/SCTP entities. */
280 OVS_PACKED(
281 struct ipfix_data_record_flow_key_transport {
282 ovs_be16 source_transport_port; /* SOURCE_TRANSPORT_PORT */
283 ovs_be16 destination_transport_port; /* DESTINATION_TRANSPORT_PORT */
284 });
285 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_transport) == 4);
286
287 /* Part of data record flow key for ICMP entities. */
288 OVS_PACKED(
289 struct ipfix_data_record_flow_key_icmp {
290 uint8_t icmp_type; /* ICMP_TYPE_IPV4 / ICMP_TYPE_IPV6 */
291 uint8_t icmp_code; /* ICMP_CODE_IPV4 / ICMP_CODE_IPV6 */
292 });
293 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_icmp) == 2);
294
295 /* For a tunnel type layered on top of IPsec, the protocol identifier of
296 * the upper tunnel type is used.
297 */
298 static uint8_t tunnel_protocol[NUM_DPIF_IPFIX_TUNNEL] = {
299 0, /* reserved */
300 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_VXLAN */
301 IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_GRE */
302 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_LISP */
303 IPPROTO_TCP, /* DPIF_IPFIX_TUNNEL_STT */
304 IPPROTO_GRE, /* DPIF_IPFIX_TUNNEL_IPSEC_GRE */
305 0, /* reserved */
306 IPPROTO_UDP, /* DPIF_IPFIX_TUNNEL_GENEVE */
307 };
308
309 OVS_PACKED(
310 struct ipfix_data_record_flow_key_tunnel {
311 ovs_be32 tunnel_source_ipv4_address; /* TUNNEL_SOURCE_IPV4_ADDRESS */
312 ovs_be32 tunnel_destination_ipv4_address; /* TUNNEL_DESTINATION_IPV4_ADDRESS */
313 uint8_t tunnel_protocol_identifier; /* TUNNEL_PROTOCOL_IDENTIFIER */
314 ovs_be16 tunnel_source_transport_port; /* TUNNEL_SOURCE_TRANSPORT_PORT */
315 ovs_be16 tunnel_destination_transport_port; /* TUNNEL_DESTINATION_TRANSPORT_PORT */
316 uint8_t tunnel_type; /* TUNNEL_TYPE */
317 uint8_t tunnel_key_length; /* length of TUNNEL_KEY */
318 uint8_t tunnel_key[]; /* data of TUNNEL_KEY */
319 });
320 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_tunnel) == 15);
321
322 /* Cf. IETF RFC 5102 Section 5.11.3. */
323 enum ipfix_flow_end_reason {
324 IDLE_TIMEOUT = 0x01,
325 ACTIVE_TIMEOUT = 0x02,
326 END_OF_FLOW_DETECTED = 0x03,
327 FORCED_END = 0x04,
328 LACK_OF_RESOURCES = 0x05
329 };
330
331 /* Part of data record for common aggregated elements. */
332 OVS_PACKED(
333 struct ipfix_data_record_aggregated_common {
334 ovs_be32 flow_start_delta_microseconds; /* FLOW_START_DELTA_MICROSECONDS */
335 ovs_be32 flow_end_delta_microseconds; /* FLOW_END_DELTA_MICROSECONDS */
336 ovs_be64 packet_delta_count; /* PACKET_DELTA_COUNT */
337 ovs_be64 layer2_octet_delta_count; /* LAYER2_OCTET_DELTA_COUNT */
338 uint8_t flow_end_reason; /* FLOW_END_REASON */
339 });
340 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_common) == 25);
341
342 /* Part of data record for IP aggregated elements. */
343 OVS_PACKED(
344 struct ipfix_data_record_aggregated_ip {
345 ovs_be64 octet_delta_count; /* OCTET_DELTA_COUNT */
346 ovs_be64 octet_delta_sum_of_squares; /* OCTET_DELTA_SUM_OF_SQUARES */
347 ovs_be64 minimum_ip_total_length; /* MINIMUM_IP_TOTAL_LENGTH */
348 ovs_be64 maximum_ip_total_length; /* MAXIMUM_IP_TOTAL_LENGTH */
349 });
350 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_ip) == 32);
351
352 /*
353 * Supported tunnel keys:
354 * VxLAN: 24-bit VNI,
355 * GRE: 32-bit key,
356 * LISP: 24-bit instance ID, Geneve: 24-bit VNI,
357 * STT: 64-bit key.
358 */
359 #define MAX_TUNNEL_KEY_LEN 8
360
361 #define MAX_FLOW_KEY_LEN \
362 (sizeof(struct ipfix_data_record_flow_key_common) \
363 + sizeof(struct ipfix_data_record_flow_key_vlan) \
364 + sizeof(struct ipfix_data_record_flow_key_ip) \
365 + MAX(sizeof(struct ipfix_data_record_flow_key_ipv4), \
366 sizeof(struct ipfix_data_record_flow_key_ipv6)) \
367 + MAX(sizeof(struct ipfix_data_record_flow_key_icmp), \
368 sizeof(struct ipfix_data_record_flow_key_transport)) \
369 + sizeof(struct ipfix_data_record_flow_key_tunnel) \
370 + MAX_TUNNEL_KEY_LEN)
371
372 #define MAX_DATA_RECORD_LEN \
373 (MAX_FLOW_KEY_LEN \
374 + sizeof(struct ipfix_data_record_aggregated_common) \
375 + sizeof(struct ipfix_data_record_aggregated_ip))
376
377 /* Max length of a data set. To simplify the implementation, each
378 * data record is sent in a separate data set, so each data set
379 * contains at most one data record. */
380 #define MAX_DATA_SET_LEN \
381 (sizeof(struct ipfix_set_header) \
382 + MAX_DATA_RECORD_LEN)
383
384 /* Max length of an IPFIX message. Arbitrarily set to accommodate low
385 * MTU. */
386 #define MAX_MESSAGE_LEN 1024
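/* Illustrative size arithmetic (derived from the BUILD_ASSERTs above, not
 * stated in the upstream source):
 *   MAX_FLOW_KEY_LEN    = 20 + 5 + 6 + MAX(8, 36) + MAX(2, 4) + 15 + 8 = 94
 *   MAX_DATA_RECORD_LEN = 94 + 25 + 32                                 = 151
 *   MAX_DATA_SET_LEN    = 4 + 151                                      = 155
 * so one data set plus the 16-byte IPFIX header fits comfortably within
 * MAX_MESSAGE_LEN (1024). */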
387
388 /* Cache structures. */
389
390 /* Flow key. */
391 struct ipfix_flow_key {
392 uint32_t obs_domain_id;
393 uint16_t template_id;
394 size_t flow_key_msg_part_size;
395 uint64_t flow_key_msg_part[DIV_ROUND_UP(MAX_FLOW_KEY_LEN, 8)];
396 };
397
398 /* Flow cache entry. */
399 struct ipfix_flow_cache_entry {
400 struct hmap_node flow_key_map_node;
401 struct ovs_list cache_flow_start_timestamp_list_node;
402 struct ipfix_flow_key flow_key;
403 /* Common aggregated elements. */
404 uint64_t flow_start_timestamp_usec;
405 uint64_t flow_end_timestamp_usec;
406 uint64_t packet_delta_count;
407 uint64_t layer2_octet_delta_count;
408 uint64_t octet_delta_count;
409 uint64_t octet_delta_sum_of_squares; /* 0 if not IP. */
410 uint16_t minimum_ip_total_length; /* 0 if not IP. */
411 uint16_t maximum_ip_total_length; /* 0 if not IP. */
412 };
413
414 static void dpif_ipfix_cache_expire(struct dpif_ipfix_exporter *, bool,
415 const uint64_t, const uint32_t);
416
417 static void get_export_time_now(uint64_t *, uint32_t *);
418
419 static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *, bool);
420
421 static bool
422 ofproto_ipfix_bridge_exporter_options_equal(
423 const struct ofproto_ipfix_bridge_exporter_options *a,
424 const struct ofproto_ipfix_bridge_exporter_options *b)
425 {
426 return (a->obs_domain_id == b->obs_domain_id
427 && a->obs_point_id == b->obs_point_id
428 && a->sampling_rate == b->sampling_rate
429 && a->cache_active_timeout == b->cache_active_timeout
430 && a->cache_max_flows == b->cache_max_flows
431 && a->enable_tunnel_sampling == b->enable_tunnel_sampling
432 && a->enable_input_sampling == b->enable_input_sampling
433 && a->enable_output_sampling == b->enable_output_sampling
434 && sset_equals(&a->targets, &b->targets));
435 }
436
437 static struct ofproto_ipfix_bridge_exporter_options *
438 ofproto_ipfix_bridge_exporter_options_clone(
439 const struct ofproto_ipfix_bridge_exporter_options *old)
440 {
441 struct ofproto_ipfix_bridge_exporter_options *new =
442 xmemdup(old, sizeof *old);
443 sset_clone(&new->targets, &old->targets);
444 return new;
445 }
446
447 static void
448 ofproto_ipfix_bridge_exporter_options_destroy(
449 struct ofproto_ipfix_bridge_exporter_options *options)
450 {
451 if (options) {
452 sset_destroy(&options->targets);
453 free(options);
454 }
455 }
456
457 static bool
458 ofproto_ipfix_flow_exporter_options_equal(
459 const struct ofproto_ipfix_flow_exporter_options *a,
460 const struct ofproto_ipfix_flow_exporter_options *b)
461 {
462 return (a->collector_set_id == b->collector_set_id
463 && a->cache_active_timeout == b->cache_active_timeout
464 && a->cache_max_flows == b->cache_max_flows
465 && sset_equals(&a->targets, &b->targets));
466 }
467
468 static struct ofproto_ipfix_flow_exporter_options *
469 ofproto_ipfix_flow_exporter_options_clone(
470 const struct ofproto_ipfix_flow_exporter_options *old)
471 {
472 struct ofproto_ipfix_flow_exporter_options *new =
473 xmemdup(old, sizeof *old);
474 sset_clone(&new->targets, &old->targets);
475 return new;
476 }
477
478 static void
479 ofproto_ipfix_flow_exporter_options_destroy(
480 struct ofproto_ipfix_flow_exporter_options *options)
481 {
482 if (options) {
483 sset_destroy(&options->targets);
484 free(options);
485 }
486 }
487
488 static void
489 dpif_ipfix_exporter_init(struct dpif_ipfix_exporter *exporter)
490 {
491 exporter->collectors = NULL;
492 exporter->seq_number = 1;
493 exporter->last_template_set_time = TIME_MIN;
494 hmap_init(&exporter->cache_flow_key_map);
495 list_init(&exporter->cache_flow_start_timestamp_list);
496 exporter->cache_active_timeout = 0;
497 exporter->cache_max_flows = 0;
498 }
499
500 static void
501 dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter *exporter)
502 {
503 /* Flush the cache with flow end reason "forced end." */
504 dpif_ipfix_cache_expire_now(exporter, true);
505
506 collectors_destroy(exporter->collectors);
507 exporter->collectors = NULL;
508 exporter->seq_number = 1;
509 exporter->last_template_set_time = TIME_MIN;
510 exporter->cache_active_timeout = 0;
511 exporter->cache_max_flows = 0;
512 }
513
514 static void
515 dpif_ipfix_exporter_destroy(struct dpif_ipfix_exporter *exporter)
516 {
517 dpif_ipfix_exporter_clear(exporter);
518 hmap_destroy(&exporter->cache_flow_key_map);
519 }
520
521 static bool
522 dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter *exporter,
523 const struct sset *targets,
524 const uint32_t cache_active_timeout,
525 const uint32_t cache_max_flows)
526 {
527 collectors_destroy(exporter->collectors);
528 collectors_create(targets, IPFIX_DEFAULT_COLLECTOR_PORT,
529 &exporter->collectors);
530 if (exporter->collectors == NULL) {
531 VLOG_WARN_RL(&rl, "no collectors could be initialized, "
532 "IPFIX exporter disabled");
533 dpif_ipfix_exporter_clear(exporter);
534 return false;
535 }
536 exporter->cache_active_timeout = cache_active_timeout;
537 exporter->cache_max_flows = cache_max_flows;
538 return true;
539 }
540
541 static struct dpif_ipfix_port *
542 dpif_ipfix_find_port(const struct dpif_ipfix *di,
543 odp_port_t odp_port) OVS_REQUIRES(mutex)
544 {
545 struct dpif_ipfix_port *dip;
546
547 HMAP_FOR_EACH_IN_BUCKET (dip, hmap_node, hash_odp_port(odp_port),
548 &di->tunnel_ports) {
549 if (dip->odp_port == odp_port) {
550 return dip;
551 }
552 }
553 return NULL;
554 }
555
556 static void
557 dpif_ipfix_del_port(struct dpif_ipfix *di,
558 struct dpif_ipfix_port *dip)
559 OVS_REQUIRES(mutex)
560 {
561 hmap_remove(&di->tunnel_ports, &dip->hmap_node);
562 free(dip);
563 }
564
565 void
566 dpif_ipfix_add_tunnel_port(struct dpif_ipfix *di, struct ofport *ofport,
567 odp_port_t odp_port) OVS_EXCLUDED(mutex)
568 {
569 struct dpif_ipfix_port *dip;
570 const char *type;
571
572 ovs_mutex_lock(&mutex);
573 dip = dpif_ipfix_find_port(di, odp_port);
574 if (dip) {
575 dpif_ipfix_del_port(di, dip);
576 }
577
578 type = netdev_get_type(ofport->netdev);
579 if (type == NULL) {
580 goto out;
581 }
582
583 /* Add to table of tunnel ports. */
584 dip = xmalloc(sizeof *dip);
585 dip->ofport = ofport;
586 dip->odp_port = odp_port;
587 if (strcmp(type, "gre") == 0) {
588 /* 32-bit key gre */
589 dip->tunnel_type = DPIF_IPFIX_TUNNEL_GRE;
590 dip->tunnel_key_length = 4;
591 } else if (strcmp(type, "ipsec_gre") == 0) {
592 /* 32-bit key ipsec_gre */
593 dip->tunnel_type = DPIF_IPFIX_TUNNEL_IPSEC_GRE;
594 dip->tunnel_key_length = 4;
595 } else if (strcmp(type, "vxlan") == 0) {
596 dip->tunnel_type = DPIF_IPFIX_TUNNEL_VXLAN;
597 dip->tunnel_key_length = 3;
598 } else if (strcmp(type, "lisp") == 0) {
599 dip->tunnel_type = DPIF_IPFIX_TUNNEL_LISP;
600 dip->tunnel_key_length = 3;
601 } else if (strcmp(type, "geneve") == 0) {
602 dip->tunnel_type = DPIF_IPFIX_TUNNEL_GENEVE;
603 dip->tunnel_key_length = 3;
604 } else if (strcmp(type, "stt") == 0) {
605 dip->tunnel_type = DPIF_IPFIX_TUNNEL_STT;
606 dip->tunnel_key_length = 8;
607 } else {
608 free(dip);
609 goto out;
610 }
611 hmap_insert(&di->tunnel_ports, &dip->hmap_node, hash_odp_port(odp_port));
612
613 out:
614 ovs_mutex_unlock(&mutex);
615 }
616
617 void
618 dpif_ipfix_del_tunnel_port(struct dpif_ipfix *di, odp_port_t odp_port)
619 OVS_EXCLUDED(mutex)
620 {
621 struct dpif_ipfix_port *dip;
622 ovs_mutex_lock(&mutex);
623 dip = dpif_ipfix_find_port(di, odp_port);
624 if (dip) {
625 dpif_ipfix_del_port(di, dip);
626 }
627 ovs_mutex_unlock(&mutex);
628 }
629
630 bool
631 dpif_ipfix_get_tunnel_port(const struct dpif_ipfix *di, odp_port_t odp_port)
632 OVS_EXCLUDED(mutex)
633 {
634 struct dpif_ipfix_port *dip;
635 ovs_mutex_lock(&mutex);
636 dip = dpif_ipfix_find_port(di, odp_port);
637 ovs_mutex_unlock(&mutex);
638 return dip != NULL;
639 }
640
641 static void
642 dpif_ipfix_bridge_exporter_init(struct dpif_ipfix_bridge_exporter *exporter)
643 {
644 dpif_ipfix_exporter_init(&exporter->exporter);
645 exporter->options = NULL;
646 exporter->probability = 0;
647 }
648
649 static void
650 dpif_ipfix_bridge_exporter_clear(struct dpif_ipfix_bridge_exporter *exporter)
651 {
652 dpif_ipfix_exporter_clear(&exporter->exporter);
653 ofproto_ipfix_bridge_exporter_options_destroy(exporter->options);
654 exporter->options = NULL;
655 exporter->probability = 0;
656 }
657
658 static void
659 dpif_ipfix_bridge_exporter_destroy(struct dpif_ipfix_bridge_exporter *exporter)
660 {
661 dpif_ipfix_bridge_exporter_clear(exporter);
662 dpif_ipfix_exporter_destroy(&exporter->exporter);
663 }
664
665 static void
666 dpif_ipfix_bridge_exporter_set_options(
667 struct dpif_ipfix_bridge_exporter *exporter,
668 const struct ofproto_ipfix_bridge_exporter_options *options)
669 {
670 bool options_changed;
671
672 if (!options || sset_is_empty(&options->targets)) {
673 /* No point in doing any work if there are no targets. */
674 dpif_ipfix_bridge_exporter_clear(exporter);
675 return;
676 }
677
678 options_changed = (
679 !exporter->options
680 || !ofproto_ipfix_bridge_exporter_options_equal(
681 options, exporter->options));
682
683 /* Configure collectors if options have changed or if we're
684 * shortchanged in collectors (which indicates that opening one or
685 * more of the configured collectors failed, so that we should
686 * retry). */
687 if (options_changed
688 || collectors_count(exporter->exporter.collectors)
689 < sset_count(&options->targets)) {
690 if (!dpif_ipfix_exporter_set_options(
691 &exporter->exporter, &options->targets,
692 options->cache_active_timeout, options->cache_max_flows)) {
693 return;
694 }
695 }
696
697 /* Avoid reconfiguring if options didn't change. */
698 if (!options_changed) {
699 return;
700 }
701
702 ofproto_ipfix_bridge_exporter_options_destroy(exporter->options);
703 exporter->options = ofproto_ipfix_bridge_exporter_options_clone(options);
704 exporter->probability =
705 MAX(1, UINT32_MAX / exporter->options->sampling_rate);
706
707 /* Run over the cache as some entries might have expired after
708 * changing the timeouts. */
709 dpif_ipfix_cache_expire_now(&exporter->exporter, false);
710 }
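/* Worked example (the sampling_rate value is assumed, not taken from the
 * source): with sampling_rate == 400, probability == MAX(1, UINT32_MAX / 400)
 * == 10737418, i.e. roughly UINT32_MAX / 400, which corresponds to sampling
 * about one packet in 400.  dpif_ipfix_bridge_sample() later inverts this as
 * UINT32_MAX / probability (== 400 here) to estimate how many packets each
 * sample represents. */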
711
712 static struct dpif_ipfix_flow_exporter_map_node*
713 dpif_ipfix_find_flow_exporter_map_node(
714 const struct dpif_ipfix *di, const uint32_t collector_set_id)
715 OVS_REQUIRES(mutex)
716 {
717 struct dpif_ipfix_flow_exporter_map_node *exporter_node;
718
719 HMAP_FOR_EACH_WITH_HASH (exporter_node, node,
720 hash_int(collector_set_id, 0),
721 &di->flow_exporter_map) {
722 if (exporter_node->exporter.options->collector_set_id
723 == collector_set_id) {
724 return exporter_node;
725 }
726 }
727
728 return NULL;
729 }
730
731 static void
732 dpif_ipfix_flow_exporter_init(struct dpif_ipfix_flow_exporter *exporter)
733 {
734 dpif_ipfix_exporter_init(&exporter->exporter);
735 exporter->options = NULL;
736 }
737
738 static void
739 dpif_ipfix_flow_exporter_clear(struct dpif_ipfix_flow_exporter *exporter)
740 {
741 dpif_ipfix_exporter_clear(&exporter->exporter);
742 ofproto_ipfix_flow_exporter_options_destroy(exporter->options);
743 exporter->options = NULL;
744 }
745
746 static void
747 dpif_ipfix_flow_exporter_destroy(struct dpif_ipfix_flow_exporter *exporter)
748 {
749 dpif_ipfix_flow_exporter_clear(exporter);
750 dpif_ipfix_exporter_destroy(&exporter->exporter);
751 }
752
753 static bool
754 dpif_ipfix_flow_exporter_set_options(
755 struct dpif_ipfix_flow_exporter *exporter,
756 const struct ofproto_ipfix_flow_exporter_options *options)
757 {
758 bool options_changed;
759
760 if (sset_is_empty(&options->targets)) {
761 /* No point in doing any work if there are no targets. */
762 dpif_ipfix_flow_exporter_clear(exporter);
763 return true;
764 }
765
766 options_changed = (
767 !exporter->options
768 || !ofproto_ipfix_flow_exporter_options_equal(
769 options, exporter->options));
770
771 /* Configure collectors if options have changed or if we're
772 * shortchanged in collectors (which indicates that opening one or
773 * more of the configured collectors failed, so that we should
774 * retry). */
775 if (options_changed
776 || collectors_count(exporter->exporter.collectors)
777 < sset_count(&options->targets)) {
778 if (!dpif_ipfix_exporter_set_options(
779 &exporter->exporter, &options->targets,
780 options->cache_active_timeout, options->cache_max_flows)) {
781 return false;
782 }
783 }
784
785 /* Avoid reconfiguring if options didn't change. */
786 if (!options_changed) {
787 return true;
788 }
789
790 ofproto_ipfix_flow_exporter_options_destroy(exporter->options);
791 exporter->options = ofproto_ipfix_flow_exporter_options_clone(options);
792
793 /* Run over the cache as some entries might have expired after
794 * changing the timeouts. */
795 dpif_ipfix_cache_expire_now(&exporter->exporter, false);
796
797 return true;
798 }
799
800 void
801 dpif_ipfix_set_options(
802 struct dpif_ipfix *di,
803 const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
804 const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
805 size_t n_flow_exporters_options) OVS_EXCLUDED(mutex)
806 {
807 int i;
808 struct ofproto_ipfix_flow_exporter_options *options;
809 struct dpif_ipfix_flow_exporter_map_node *node, *next;
810 size_t n_broken_flow_exporters_options = 0;
811
812 ovs_mutex_lock(&mutex);
813 dpif_ipfix_bridge_exporter_set_options(&di->bridge_exporter,
814 bridge_exporter_options);
815
816 /* Add new flow exporters and update current flow exporters. */
817 options = (struct ofproto_ipfix_flow_exporter_options *)
818 flow_exporters_options;
819 for (i = 0; i < n_flow_exporters_options; i++) {
820 node = dpif_ipfix_find_flow_exporter_map_node(
821 di, options->collector_set_id);
822 if (!node) {
823 node = xzalloc(sizeof *node);
824 dpif_ipfix_flow_exporter_init(&node->exporter);
825 hmap_insert(&di->flow_exporter_map, &node->node,
826 hash_int(options->collector_set_id, 0));
827 }
828 if (!dpif_ipfix_flow_exporter_set_options(&node->exporter, options)) {
829 n_broken_flow_exporters_options++;
830 }
831 options++;
832 }
833
834 ovs_assert(hmap_count(&di->flow_exporter_map) >=
835 (n_flow_exporters_options - n_broken_flow_exporters_options));
836
837 /* Remove dropped flow exporters, if any need to be removed. */
838 if (hmap_count(&di->flow_exporter_map) > n_flow_exporters_options) {
839 HMAP_FOR_EACH_SAFE (node, next, node, &di->flow_exporter_map) {
840 /* This is slow but doesn't take any extra memory, and
841 * this table is not supposed to contain many rows anyway. */
842 options = (struct ofproto_ipfix_flow_exporter_options *)
843 flow_exporters_options;
844 for (i = 0; i < n_flow_exporters_options; i++) {
845 if (node->exporter.options->collector_set_id
846 == options->collector_set_id) {
847 break;
848 }
849 options++;
850 }
851 if (i == n_flow_exporters_options) { /* Not found. */
852 hmap_remove(&di->flow_exporter_map, &node->node);
853 dpif_ipfix_flow_exporter_destroy(&node->exporter);
854 free(node);
855 }
856 }
857 }
858
859 ovs_assert(hmap_count(&di->flow_exporter_map) ==
860 (n_flow_exporters_options - n_broken_flow_exporters_options));
861 ovs_mutex_unlock(&mutex);
862 }
863
864 struct dpif_ipfix *
865 dpif_ipfix_create(void)
866 {
867 struct dpif_ipfix *di;
868 di = xzalloc(sizeof *di);
869 dpif_ipfix_bridge_exporter_init(&di->bridge_exporter);
870 hmap_init(&di->flow_exporter_map);
871 hmap_init(&di->tunnel_ports);
872 ovs_refcount_init(&di->ref_cnt);
873 return di;
874 }
875
876 struct dpif_ipfix *
877 dpif_ipfix_ref(const struct dpif_ipfix *di_)
878 {
879 struct dpif_ipfix *di = CONST_CAST(struct dpif_ipfix *, di_);
880 if (di) {
881 ovs_refcount_ref(&di->ref_cnt);
882 }
883 return di;
884 }
885
886 uint32_t
887 dpif_ipfix_get_bridge_exporter_probability(const struct dpif_ipfix *di)
888 OVS_EXCLUDED(mutex)
889 {
890 uint32_t ret;
891 ovs_mutex_lock(&mutex);
892 ret = di->bridge_exporter.probability;
893 ovs_mutex_unlock(&mutex);
894 return ret;
895 }
896
897 bool
898 dpif_ipfix_get_bridge_exporter_input_sampling(const struct dpif_ipfix *di)
899 OVS_EXCLUDED(mutex)
900 {
901 bool ret = true;
902 ovs_mutex_lock(&mutex);
903 if (di->bridge_exporter.options) {
904 ret = di->bridge_exporter.options->enable_input_sampling;
905 }
906 ovs_mutex_unlock(&mutex);
907 return ret;
908 }
909
910 bool
911 dpif_ipfix_get_bridge_exporter_output_sampling(const struct dpif_ipfix *di)
912 OVS_EXCLUDED(mutex)
913 {
914 bool ret = true;
915 ovs_mutex_lock(&mutex);
916 if (di->bridge_exporter.options) {
917 ret = di->bridge_exporter.options->enable_output_sampling;
918 }
919 ovs_mutex_unlock(&mutex);
920 return ret;
921 }
922
923 bool
924 dpif_ipfix_get_bridge_exporter_tunnel_sampling(const struct dpif_ipfix *di)
925 OVS_EXCLUDED(mutex)
926 {
927 bool ret = false;
928 ovs_mutex_lock(&mutex);
929 if (di->bridge_exporter.options) {
930 ret = di->bridge_exporter.options->enable_tunnel_sampling;
931 }
932 ovs_mutex_unlock(&mutex);
933 return ret;
934 }
935
936 static void
937 dpif_ipfix_clear(struct dpif_ipfix *di) OVS_REQUIRES(mutex)
938 {
939 struct dpif_ipfix_flow_exporter_map_node *exp_node, *exp_next;
940 struct dpif_ipfix_port *dip, *next;
941
942 dpif_ipfix_bridge_exporter_clear(&di->bridge_exporter);
943
944 HMAP_FOR_EACH_SAFE (exp_node, exp_next, node, &di->flow_exporter_map) {
945 hmap_remove(&di->flow_exporter_map, &exp_node->node);
946 dpif_ipfix_flow_exporter_destroy(&exp_node->exporter);
947 free(exp_node);
948 }
949
950 HMAP_FOR_EACH_SAFE (dip, next, hmap_node, &di->tunnel_ports) {
951 dpif_ipfix_del_port(di, dip);
952 }
953 }
954
955 void
956 dpif_ipfix_unref(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
957 {
958 if (di && ovs_refcount_unref_relaxed(&di->ref_cnt) == 1) {
959 ovs_mutex_lock(&mutex);
960 dpif_ipfix_clear(di);
961 dpif_ipfix_bridge_exporter_destroy(&di->bridge_exporter);
962 hmap_destroy(&di->flow_exporter_map);
963 hmap_destroy(&di->tunnel_ports);
964 free(di);
965 ovs_mutex_unlock(&mutex);
966 }
967 }
968
969 static void
970 ipfix_init_header(uint32_t export_time_sec, uint32_t seq_number,
971 uint32_t obs_domain_id, struct dp_packet *msg)
972 {
973 struct ipfix_header *hdr;
974
975 hdr = dp_packet_put_zeros(msg, sizeof *hdr);
976 hdr->version = htons(IPFIX_VERSION);
977 hdr->length = htons(sizeof *hdr); /* Updated in ipfix_send_msg. */
978 hdr->export_time = htonl(export_time_sec);
979 hdr->seq_number = htonl(seq_number);
980 hdr->obs_domain_id = htonl(obs_domain_id);
981 }
982
983 static void
984 ipfix_send_msg(const struct collectors *collectors, struct dp_packet *msg)
985 {
986 struct ipfix_header *hdr;
987
988 /* Adjust the length in the header. */
989 hdr = dp_packet_data(msg);
990 hdr->length = htons(dp_packet_size(msg));
991
992 collectors_send(collectors, dp_packet_data(msg), dp_packet_size(msg));
993 dp_packet_set_size(msg, 0);
994 }
995
996 static uint16_t
997 ipfix_get_template_id(enum ipfix_proto_l2 l2, enum ipfix_proto_l3 l3,
998 enum ipfix_proto_l4 l4, enum ipfix_proto_tunnel tunnel)
999 {
1000 uint16_t template_id;
1001 template_id = l2;
1002 template_id = template_id * NUM_IPFIX_PROTO_L3 + l3;
1003 template_id = template_id * NUM_IPFIX_PROTO_L4 + l4;
1004 template_id = template_id * NUM_IPFIX_PROTO_TUNNEL + tunnel;
1005 return IPFIX_TEMPLATE_ID_MIN + template_id;
1006 }
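/* Worked examples (not part of the upstream source): the mapping above is a
 * mixed-radix encoding, so (ETH, UNKNOWN, UNKNOWN, NOT_TUNNELED) yields
 * template ID 256, while (VLAN, IPV4, TCP_UDP_SCTP, TUNNELED) yields
 * ((1 * 3 + 1) * 3 + 1) * 2 + 1 + 256 = 283.  All IDs fall within
 * [256, 291], above the Set IDs reserved for other purposes (0-255). */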
1007
1008 static void
1009 ipfix_define_template_entity(enum ipfix_entity_id id,
1010 enum ipfix_entity_size size,
1011 enum ipfix_entity_enterprise enterprise,
1012 struct dp_packet *msg)
1013 {
1014 struct ipfix_template_field_specifier *field;
1015 size_t field_size;
1016
1017 if (enterprise) {
1018 field_size = sizeof *field;
1019 } else {
1020 /* No enterprise number */
1021 field_size = sizeof *field - sizeof(ovs_be32);
1022 }
1023 field = dp_packet_put_zeros(msg, field_size);
1024 field->element_id = htons(id);
1025 if (size) {
1026 field->field_length = htons(size);
1027 } else {
1028 /* RFC 5101, Section 7. Variable-Length Information Element */
1029 field->field_length = OVS_BE16_MAX;
1030 }
1031 if (enterprise) {
1032 field->enterprise = htonl(enterprise);
1033 }
1034
1035 }
1036
1037 static uint16_t
1038 ipfix_define_template_fields(enum ipfix_proto_l2 l2, enum ipfix_proto_l3 l3,
1039 enum ipfix_proto_l4 l4, enum ipfix_proto_tunnel tunnel,
1040 struct dp_packet *msg)
1041 {
1042 uint16_t count = 0;
1043
1044 #define DEF(ID) \
1045 { \
1046 ipfix_define_template_entity(IPFIX_ENTITY_ID_##ID, \
1047 IPFIX_ENTITY_SIZE_##ID, \
1048 IPFIX_ENTITY_ENTERPRISE_##ID, msg); \
1049 count++; \
1050 }
1051
1052 /* 1. Flow key. */
1053
1054 DEF(OBSERVATION_POINT_ID);
1055 DEF(FLOW_DIRECTION);
1056
1057 /* Common Ethernet entities. */
1058 DEF(SOURCE_MAC_ADDRESS);
1059 DEF(DESTINATION_MAC_ADDRESS);
1060 DEF(ETHERNET_TYPE);
1061 DEF(ETHERNET_HEADER_LENGTH);
1062
1063 if (l2 == IPFIX_PROTO_L2_VLAN) {
1064 DEF(VLAN_ID);
1065 DEF(DOT1Q_VLAN_ID);
1066 DEF(DOT1Q_PRIORITY);
1067 }
1068
1069 if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
1070 DEF(IP_VERSION);
1071 DEF(IP_TTL);
1072 DEF(PROTOCOL_IDENTIFIER);
1073 DEF(IP_DIFF_SERV_CODE_POINT);
1074 DEF(IP_PRECEDENCE);
1075 DEF(IP_CLASS_OF_SERVICE);
1076
1077 if (l3 == IPFIX_PROTO_L3_IPV4) {
1078 DEF(SOURCE_IPV4_ADDRESS);
1079 DEF(DESTINATION_IPV4_ADDRESS);
1080 if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
1081 DEF(SOURCE_TRANSPORT_PORT);
1082 DEF(DESTINATION_TRANSPORT_PORT);
1083 } else if (l4 == IPFIX_PROTO_L4_ICMP) {
1084 DEF(ICMP_TYPE_IPV4);
1085 DEF(ICMP_CODE_IPV4);
1086 }
1087 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1088 DEF(SOURCE_IPV6_ADDRESS);
1089 DEF(DESTINATION_IPV6_ADDRESS);
1090 DEF(FLOW_LABEL_IPV6);
1091 if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
1092 DEF(SOURCE_TRANSPORT_PORT);
1093 DEF(DESTINATION_TRANSPORT_PORT);
1094 } else if (l4 == IPFIX_PROTO_L4_ICMP) {
1095 DEF(ICMP_TYPE_IPV6);
1096 DEF(ICMP_CODE_IPV6);
1097 }
1098 }
1099 }
1100
1101 if (tunnel != IPFIX_PROTO_NOT_TUNNELED) {
1102 DEF(TUNNEL_SOURCE_IPV4_ADDRESS);
1103 DEF(TUNNEL_DESTINATION_IPV4_ADDRESS);
1104 DEF(TUNNEL_PROTOCOL_IDENTIFIER);
1105 DEF(TUNNEL_SOURCE_TRANSPORT_PORT);
1106 DEF(TUNNEL_DESTINATION_TRANSPORT_PORT);
1107 DEF(TUNNEL_TYPE);
1108 DEF(TUNNEL_KEY);
1109 }
1110
1111 /* 2. Flow aggregated data. */
1112
1113 DEF(FLOW_START_DELTA_MICROSECONDS);
1114 DEF(FLOW_END_DELTA_MICROSECONDS);
1115 DEF(PACKET_DELTA_COUNT);
1116 DEF(LAYER2_OCTET_DELTA_COUNT);
1117 DEF(FLOW_END_REASON);
1118
1119 if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
1120 DEF(OCTET_DELTA_COUNT);
1121 DEF(OCTET_DELTA_SUM_OF_SQUARES);
1122 DEF(MINIMUM_IP_TOTAL_LENGTH);
1123 DEF(MAXIMUM_IP_TOTAL_LENGTH);
1124 }
1125
1126
1127 #undef DEF
1128
1129 return count;
1130 }
1131
1132 static void
1133 ipfix_init_template_msg(void *msg_stub, uint32_t export_time_sec,
1134 uint32_t seq_number, uint32_t obs_domain_id,
1135 struct dp_packet *msg, size_t *set_hdr_offset)
1136 {
1137 struct ipfix_set_header *set_hdr;
1138
1139 dp_packet_use_stub(msg, msg_stub, sizeof msg_stub);
1140
1141 ipfix_init_header(export_time_sec, seq_number, obs_domain_id, msg);
1142 *set_hdr_offset = dp_packet_size(msg);
1143
1144 /* Add a Template Set. */
1145 set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
1146 set_hdr->set_id = htons(IPFIX_SET_ID_TEMPLATE);
1147 }
1148
1149 static void
1150 ipfix_send_template_msg(const struct collectors *collectors,
1151 struct dp_packet *msg, size_t set_hdr_offset)
1152 {
1153 struct ipfix_set_header *set_hdr;
1154
1155 /* Send template message. */
1156 set_hdr = (struct ipfix_set_header*)
1157 ((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
1158 set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);
1159
1160 ipfix_send_msg(collectors, msg);
1161
1162 dp_packet_uninit(msg);
1163 }
1164
1165 static void
1166 ipfix_send_template_msgs(struct dpif_ipfix_exporter *exporter,
1167 uint32_t export_time_sec, uint32_t obs_domain_id)
1168 {
1169 uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
1170 struct dp_packet msg;
1171 size_t set_hdr_offset, tmpl_hdr_offset;
1172 struct ipfix_template_record_header *tmpl_hdr;
1173 uint16_t field_count;
1174 enum ipfix_proto_l2 l2;
1175 enum ipfix_proto_l3 l3;
1176 enum ipfix_proto_l4 l4;
1177 enum ipfix_proto_tunnel tunnel;
1178
1179 ipfix_init_template_msg(msg_stub, export_time_sec, exporter->seq_number,
1180 obs_domain_id, &msg, &set_hdr_offset);
1181 /* Define one template for each possible combination of
1182 * protocols. */
1183 for (l2 = 0; l2 < NUM_IPFIX_PROTO_L2; l2++) {
1184 for (l3 = 0; l3 < NUM_IPFIX_PROTO_L3; l3++) {
1185 for (l4 = 0; l4 < NUM_IPFIX_PROTO_L4; l4++) {
1186 if (l3 == IPFIX_PROTO_L3_UNKNOWN &&
1187 l4 != IPFIX_PROTO_L4_UNKNOWN) {
1188 continue;
1189 }
1190 for (tunnel = 0; tunnel < NUM_IPFIX_PROTO_TUNNEL; tunnel++) {
1191 /* When the size of the template packet reaches
1192 * MAX_MESSAGE_LEN (1024), send it out, and then
1193 * reinitialize the msg to construct a new packet
1194 * for the remaining templates.
1195 */
1196 if (dp_packet_size(&msg) >= MAX_MESSAGE_LEN) {
1197 /* Send template message. */
1198 ipfix_send_template_msg(exporter->collectors,
1199 &msg, set_hdr_offset);
1200
1201 /* Reinitialize the template msg. */
1202 ipfix_init_template_msg(msg_stub, export_time_sec,
1203 exporter->seq_number,
1204 obs_domain_id, &msg,
1205 &set_hdr_offset);
1206 }
1207
1208 tmpl_hdr_offset = dp_packet_size(&msg);
1209 tmpl_hdr = dp_packet_put_zeros(&msg, sizeof *tmpl_hdr);
1210 tmpl_hdr->template_id = htons(
1211 ipfix_get_template_id(l2, l3, l4, tunnel));
1212 field_count =
1213 ipfix_define_template_fields(l2, l3, l4, tunnel, &msg);
1214 tmpl_hdr = (struct ipfix_template_record_header*)
1215 ((uint8_t*)dp_packet_data(&msg) + tmpl_hdr_offset);
1216 tmpl_hdr->field_count = htons(field_count);
1217 }
1218 }
1219 }
1220 }
1221
1222 /* Send template message. */
1223 ipfix_send_template_msg(exporter->collectors, &msg, set_hdr_offset);
1224
1225 /* XXX: Add Options Template Sets, at least to define a Flow Keys
1226 * Option Template. */
1227
1228 }
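/* For reference (not stated in the upstream source): the nested loops above
 * emit one template per valid protocol combination.  The (l3, l4) pairs
 * reduce to 1 + 3 + 3 = 7, since l4 must be UNKNOWN when l3 is UNKNOWN, so a
 * full refresh defines 2 (l2) * 7 * 2 (tunnel) = 28 Template Records, split
 * across several messages when MAX_MESSAGE_LEN is exceeded. */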
1229
1230 static inline uint32_t
1231 ipfix_hash_flow_key(const struct ipfix_flow_key *flow_key, uint32_t basis)
1232 {
1233 uint32_t hash;
1234 hash = hash_int(flow_key->obs_domain_id, basis);
1235 hash = hash_int(flow_key->template_id, hash);
1236 hash = hash_bytes(flow_key->flow_key_msg_part,
1237 flow_key->flow_key_msg_part_size, hash);
1238 return hash;
1239 }
1240
1241 static bool
1242 ipfix_flow_key_equal(const struct ipfix_flow_key *a,
1243 const struct ipfix_flow_key *b)
1244 {
1245 /* The template ID determines the flow key size, so there is no need
1246 * to compare it. */
1247 return (a->obs_domain_id == b->obs_domain_id
1248 && a->template_id == b->template_id
1249 && memcmp(a->flow_key_msg_part, b->flow_key_msg_part,
1250 a->flow_key_msg_part_size) == 0);
1251 }
1252
1253 static struct ipfix_flow_cache_entry*
1254 ipfix_cache_find_entry(const struct dpif_ipfix_exporter *exporter,
1255 const struct ipfix_flow_key *flow_key)
1256 {
1257 struct ipfix_flow_cache_entry *entry;
1258
1259 HMAP_FOR_EACH_WITH_HASH (entry, flow_key_map_node,
1260 ipfix_hash_flow_key(flow_key, 0),
1261 &exporter->cache_flow_key_map) {
1262 if (ipfix_flow_key_equal(&entry->flow_key, flow_key)) {
1263 return entry;
1264 }
1265 }
1266
1267 return NULL;
1268 }
1269
1270 static bool
1271 ipfix_cache_next_timeout_msec(const struct dpif_ipfix_exporter *exporter,
1272 long long int *next_timeout_msec)
1273 {
1274 struct ipfix_flow_cache_entry *entry;
1275
1276 LIST_FOR_EACH (entry, cache_flow_start_timestamp_list_node,
1277 &exporter->cache_flow_start_timestamp_list) {
1278 *next_timeout_msec = entry->flow_start_timestamp_usec / 1000LL
1279 + 1000LL * exporter->cache_active_timeout;
1280 return true;
1281 }
1282
1283 return false;
1284 }
1285
1286 static void
1287 ipfix_cache_aggregate_entries(struct ipfix_flow_cache_entry *from_entry,
1288 struct ipfix_flow_cache_entry *to_entry)
1289 {
1290 uint64_t *to_start, *to_end, *from_start, *from_end;
1291 uint16_t *to_min_len, *to_max_len, *from_min_len, *from_max_len;
1292
1293 to_start = &to_entry->flow_start_timestamp_usec;
1294 to_end = &to_entry->flow_end_timestamp_usec;
1295 from_start = &from_entry->flow_start_timestamp_usec;
1296 from_end = &from_entry->flow_end_timestamp_usec;
1297
1298 if (*to_start > *from_start) {
1299 *to_start = *from_start;
1300 }
1301 if (*to_end < *from_end) {
1302 *to_end = *from_end;
1303 }
1304
1305 to_entry->packet_delta_count += from_entry->packet_delta_count;
1306 to_entry->layer2_octet_delta_count += from_entry->layer2_octet_delta_count;
1307
1308 to_entry->octet_delta_count += from_entry->octet_delta_count;
1309 to_entry->octet_delta_sum_of_squares +=
1310 from_entry->octet_delta_sum_of_squares;
1311
1312 to_min_len = &to_entry->minimum_ip_total_length;
1313 to_max_len = &to_entry->maximum_ip_total_length;
1314 from_min_len = &from_entry->minimum_ip_total_length;
1315 from_max_len = &from_entry->maximum_ip_total_length;
1316
1317 if (!*to_min_len || (*from_min_len && *to_min_len > *from_min_len)) {
1318 *to_min_len = *from_min_len;
1319 }
1320 if (*to_max_len < *from_max_len) {
1321 *to_max_len = *from_max_len;
1322 }
1323 }
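/* Worked example (values assumed, not taken from the source): aggregating a
 * new entry with packet_delta_count 400, octet_delta_count 60000 and IP
 * total length 150 into an existing entry with packet_delta_count 400,
 * octet_delta_count 20000 and min/max IP total length 50 yields
 * packet_delta_count 800, octet_delta_count 80000, a minimum of 50 and a
 * maximum of 150; the earlier start and later end timestamps are kept. */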
1324
1325 /* Add an entry into a flow cache. The entry is either aggregated into
1326 * an existing entry with the same flow key and free()d, or it is
1327 * inserted into the cache. */
1328 static void
1329 ipfix_cache_update(struct dpif_ipfix_exporter *exporter,
1330 struct ipfix_flow_cache_entry *entry)
1331 {
1332 struct ipfix_flow_cache_entry *old_entry;
1333
1334 old_entry = ipfix_cache_find_entry(exporter, &entry->flow_key);
1335
1336 if (old_entry == NULL) {
1337 hmap_insert(&exporter->cache_flow_key_map, &entry->flow_key_map_node,
1338 ipfix_hash_flow_key(&entry->flow_key, 0));
1339
1340 /* As the latest entry added into the cache, it should
1341 * logically have the highest flow_start_timestamp_usec, so
1342 * append it at the tail. */
1343 list_push_back(&exporter->cache_flow_start_timestamp_list,
1344 &entry->cache_flow_start_timestamp_list_node);
1345
1346 /* Enforce exporter->cache_max_flows limit. */
1347 if (hmap_count(&exporter->cache_flow_key_map)
1348 > exporter->cache_max_flows) {
1349 dpif_ipfix_cache_expire_now(exporter, false);
1350 }
1351 } else {
1352 ipfix_cache_aggregate_entries(entry, old_entry);
1353 free(entry);
1354 }
1355 }
1356
1357 static void
1358 ipfix_cache_entry_init(struct ipfix_flow_cache_entry *entry,
1359 const struct dp_packet *packet, const struct flow *flow,
1360 uint64_t packet_delta_count, uint32_t obs_domain_id,
1361 uint32_t obs_point_id, odp_port_t output_odp_port,
1362 const struct dpif_ipfix_port *tunnel_port,
1363 const struct flow_tnl *tunnel_key)
1364 {
1365 struct ipfix_flow_key *flow_key;
1366 struct dp_packet msg;
1367 enum ipfix_proto_l2 l2;
1368 enum ipfix_proto_l3 l3;
1369 enum ipfix_proto_l4 l4;
1370 enum ipfix_proto_tunnel tunnel = IPFIX_PROTO_NOT_TUNNELED;
1371 uint8_t ethernet_header_length;
1372 uint16_t ethernet_total_length;
1373
1374 flow_key = &entry->flow_key;
1375 dp_packet_use_stub(&msg, flow_key->flow_key_msg_part,
1376 sizeof flow_key->flow_key_msg_part);
1377
1378 /* Choose the right template ID matching the protocols in the
1379 * sampled packet. */
1380 l2 = (flow->vlan_tci == 0) ? IPFIX_PROTO_L2_ETH : IPFIX_PROTO_L2_VLAN;
1381
1382 switch(ntohs(flow->dl_type)) {
1383 case ETH_TYPE_IP:
1384 l3 = IPFIX_PROTO_L3_IPV4;
1385 switch(flow->nw_proto) {
1386 case IPPROTO_TCP:
1387 case IPPROTO_UDP:
1388 case IPPROTO_SCTP:
1389 l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
1390 break;
1391 case IPPROTO_ICMP:
1392 l4 = IPFIX_PROTO_L4_ICMP;
1393 break;
1394 default:
1395 l4 = IPFIX_PROTO_L4_UNKNOWN;
1396 }
1397 break;
1398 case ETH_TYPE_IPV6:
1399 l3 = IPFIX_PROTO_L3_IPV6;
1400 switch(flow->nw_proto) {
1401 case IPPROTO_TCP:
1402 case IPPROTO_UDP:
1403 case IPPROTO_SCTP:
1404 l4 = IPFIX_PROTO_L4_TCP_UDP_SCTP;
1405 break;
1406 case IPPROTO_ICMPV6:
1407 l4 = IPFIX_PROTO_L4_ICMP;
1408 break;
1409 default:
1410 l4 = IPFIX_PROTO_L4_UNKNOWN;
1411 }
1412 break;
1413 default:
1414 l3 = IPFIX_PROTO_L3_UNKNOWN;
1415 l4 = IPFIX_PROTO_L4_UNKNOWN;
1416 }
1417
1418 if (tunnel_port && tunnel_key) {
1419 tunnel = IPFIX_PROTO_TUNNELED;
1420 }
1421
1422 flow_key->obs_domain_id = obs_domain_id;
1423 flow_key->template_id = ipfix_get_template_id(l2, l3, l4, tunnel);
1424
1425 /* The fields defined in the ipfix_data_record_* structs and sent
1426 * below must match exactly the templates defined in
1427 * ipfix_define_template_fields. */
1428
1429 ethernet_header_length = (l2 == IPFIX_PROTO_L2_VLAN)
1430 ? VLAN_ETH_HEADER_LEN : ETH_HEADER_LEN;
1431 ethernet_total_length = dp_packet_size(packet);
1432
1433 /* Common Ethernet entities. */
1434 {
1435 struct ipfix_data_record_flow_key_common *data_common;
1436
1437 data_common = dp_packet_put_zeros(&msg, sizeof *data_common);
1438 data_common->observation_point_id = htonl(obs_point_id);
1439 data_common->flow_direction =
1440 (output_odp_port == ODPP_NONE) ? INGRESS_FLOW : EGRESS_FLOW;
1441 data_common->source_mac_address = flow->dl_src;
1442 data_common->destination_mac_address = flow->dl_dst;
1443 data_common->ethernet_type = flow->dl_type;
1444 data_common->ethernet_header_length = ethernet_header_length;
1445 }
1446
1447 if (l2 == IPFIX_PROTO_L2_VLAN) {
1448 struct ipfix_data_record_flow_key_vlan *data_vlan;
1449 uint16_t vlan_id = vlan_tci_to_vid(flow->vlan_tci);
1450 uint8_t priority = vlan_tci_to_pcp(flow->vlan_tci);
1451
1452 data_vlan = dp_packet_put_zeros(&msg, sizeof *data_vlan);
1453 data_vlan->vlan_id = htons(vlan_id);
1454 data_vlan->dot1q_vlan_id = htons(vlan_id);
1455 data_vlan->dot1q_priority = priority;
1456 }
1457
1458 if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
1459 struct ipfix_data_record_flow_key_ip *data_ip;
1460
1461 data_ip = dp_packet_put_zeros(&msg, sizeof *data_ip);
1462 data_ip->ip_version = (l3 == IPFIX_PROTO_L3_IPV4) ? 4 : 6;
1463 data_ip->ip_ttl = flow->nw_ttl;
1464 data_ip->protocol_identifier = flow->nw_proto;
1465 data_ip->ip_diff_serv_code_point = flow->nw_tos >> 2;
1466 data_ip->ip_precedence = flow->nw_tos >> 5;
1467 data_ip->ip_class_of_service = flow->nw_tos;
1468
1469 if (l3 == IPFIX_PROTO_L3_IPV4) {
1470 struct ipfix_data_record_flow_key_ipv4 *data_ipv4;
1471
1472 data_ipv4 = dp_packet_put_zeros(&msg, sizeof *data_ipv4);
1473 data_ipv4->source_ipv4_address = flow->nw_src;
1474 data_ipv4->destination_ipv4_address = flow->nw_dst;
1475 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1476 struct ipfix_data_record_flow_key_ipv6 *data_ipv6;
1477
1478 data_ipv6 = dp_packet_put_zeros(&msg, sizeof *data_ipv6);
1479 memcpy(data_ipv6->source_ipv6_address, &flow->ipv6_src,
1480 sizeof flow->ipv6_src);
1481 memcpy(data_ipv6->destination_ipv6_address, &flow->ipv6_dst,
1482 sizeof flow->ipv6_dst);
1483 data_ipv6->flow_label_ipv6 = flow->ipv6_label;
1484 }
1485 }
1486
1487 if (l4 == IPFIX_PROTO_L4_TCP_UDP_SCTP) {
1488 struct ipfix_data_record_flow_key_transport *data_transport;
1489
1490 data_transport = dp_packet_put_zeros(&msg, sizeof *data_transport);
1491 data_transport->source_transport_port = flow->tp_src;
1492 data_transport->destination_transport_port = flow->tp_dst;
1493 } else if (l4 == IPFIX_PROTO_L4_ICMP) {
1494 struct ipfix_data_record_flow_key_icmp *data_icmp;
1495
1496 data_icmp = dp_packet_put_zeros(&msg, sizeof *data_icmp);
1497 data_icmp->icmp_type = ntohs(flow->tp_src) & 0xff;
1498 data_icmp->icmp_code = ntohs(flow->tp_dst) & 0xff;
1499 }
1500
1501 if (tunnel == IPFIX_PROTO_TUNNELED) {
1502 struct ipfix_data_record_flow_key_tunnel *data_tunnel;
1503 const uint8_t *tun_id;
1504
1505 data_tunnel = dp_packet_put_zeros(&msg, sizeof *data_tunnel +
1506 tunnel_port->tunnel_key_length);
1507 data_tunnel->tunnel_source_ipv4_address = tunnel_key->ip_src;
1508 data_tunnel->tunnel_destination_ipv4_address = tunnel_key->ip_dst;
1509 /* The tunnel_protocol_identifier is taken from the tunnel_protocol
1510 * array, which holds the protocol identifier of each tunnel type.
1511 * For a tunnel type layered on top of IPsec, the protocol identifier
1512 * of the upper tunnel type is used, and tp_src and tp_dst are set
1513 * according to that protocol identifier.
1514 * E.g.:
1515 * The protocol identifier of DPIF_IPFIX_TUNNEL_IPSEC_GRE is IPPROTO_GRE,
1516 * and both tp_src and tp_dst are zero.
1517 */
1518 data_tunnel->tunnel_protocol_identifier =
1519 tunnel_protocol[tunnel_port->tunnel_type];
1520 data_tunnel->tunnel_source_transport_port = tunnel_key->tp_src;
1521 data_tunnel->tunnel_destination_transport_port = tunnel_key->tp_dst;
1522 data_tunnel->tunnel_type = tunnel_port->tunnel_type;
1523 data_tunnel->tunnel_key_length = tunnel_port->tunnel_key_length;
1524 /* tun_id is in network order, and the tunnel key occupies its low-order bytes. */
1525 tun_id = (const uint8_t *) &tunnel_key->tun_id;
1526 memcpy(data_tunnel->tunnel_key,
1527 &tun_id[8 - tunnel_port->tunnel_key_length],
1528 tunnel_port->tunnel_key_length);
1529 }
1530
1531 flow_key->flow_key_msg_part_size = dp_packet_size(&msg);
1532
1533 {
1534 struct timeval now;
1535 uint64_t layer2_octet_delta_count;
1536
1537 /* Calculate the total matched octet count by assuming, as an
1538 * approximation, that all matched packets have the same
1539 * length. */
1540 layer2_octet_delta_count = packet_delta_count * ethernet_total_length;
1541
1542 xgettimeofday(&now);
1543 entry->flow_end_timestamp_usec = now.tv_usec + 1000000LL * now.tv_sec;
1544 entry->flow_start_timestamp_usec = entry->flow_end_timestamp_usec;
1545 entry->packet_delta_count = packet_delta_count;
1546 entry->layer2_octet_delta_count = layer2_octet_delta_count;
1547 }
1548
1549 if (l3 != IPFIX_PROTO_L3_UNKNOWN) {
1550 uint16_t ip_total_length =
1551 ethernet_total_length - ethernet_header_length;
1552 uint64_t octet_delta_count;
1553
1554 /* Calculate the total matched octet count by assuming, as an
1555 * approximation, that all matched packets have the same
1556 * length. */
1557 octet_delta_count = packet_delta_count * ip_total_length;
1558
1559 entry->octet_delta_count = octet_delta_count;
1560 entry->octet_delta_sum_of_squares = octet_delta_count * ip_total_length;
1561 entry->minimum_ip_total_length = ip_total_length;
1562 entry->maximum_ip_total_length = ip_total_length;
1563 } else {
1564 entry->octet_delta_sum_of_squares = 0;
1565 entry->minimum_ip_total_length = 0;
1566 entry->maximum_ip_total_length = 0;
1567 }
1568 }
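/* Illustrative byte layout for the tunnel key copy in
 * ipfix_cache_entry_init() above (the VNI value is assumed, not taken from
 * the source): for a VXLAN port, tunnel_key_length == 3, so with VNI
 * 0x123456 the 8-byte network-order tun_id is 00 00 00 00 00 12 34 56 and
 * the memcpy copies its last three bytes, 12 34 56, into tunnel_key. */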
1569
1570 /* Send each single data record in its own data set, to simplify the
1571 * implementation by avoiding having to group record by template ID
1572 * before sending. */
1573 static void
1574 ipfix_put_data_set(uint32_t export_time_sec,
1575 struct ipfix_flow_cache_entry *entry,
1576 enum ipfix_flow_end_reason flow_end_reason,
1577 struct dp_packet *msg)
1578 {
1579 size_t set_hdr_offset;
1580 struct ipfix_set_header *set_hdr;
1581
1582 set_hdr_offset = dp_packet_size(msg);
1583
1584 /* Put a Data Set. */
1585 set_hdr = dp_packet_put_zeros(msg, sizeof *set_hdr);
1586 set_hdr->set_id = htons(entry->flow_key.template_id);
1587
1588 /* Copy the flow key part of the data record. */
1589
1590 dp_packet_put(msg, entry->flow_key.flow_key_msg_part,
1591 entry->flow_key.flow_key_msg_part_size);
1592
1593 /* Put the non-key part of the data record. */
1594
1595 {
1596 struct ipfix_data_record_aggregated_common *data_aggregated_common;
1597 uint64_t export_time_usec, flow_start_delta_usec, flow_end_delta_usec;
1598
1599 /* Calculate the negative deltas relative to the export time
1600 * in seconds sent in the header, not the exact export
1601 * time. */
1602 export_time_usec = 1000000LL * export_time_sec;
1603 flow_start_delta_usec = export_time_usec
1604 - entry->flow_start_timestamp_usec;
1605 flow_end_delta_usec = export_time_usec
1606 - entry->flow_end_timestamp_usec;
1607
1608 data_aggregated_common = dp_packet_put_zeros(
1609 msg, sizeof *data_aggregated_common);
1610 data_aggregated_common->flow_start_delta_microseconds = htonl(
1611 flow_start_delta_usec);
1612 data_aggregated_common->flow_end_delta_microseconds = htonl(
1613 flow_end_delta_usec);
1614 data_aggregated_common->packet_delta_count = htonll(
1615 entry->packet_delta_count);
1616 data_aggregated_common->layer2_octet_delta_count = htonll(
1617 entry->layer2_octet_delta_count);
1618 data_aggregated_common->flow_end_reason = flow_end_reason;
1619 }
1620
1621 if (entry->octet_delta_sum_of_squares) { /* IP packet. */
1622 struct ipfix_data_record_aggregated_ip *data_aggregated_ip;
1623
1624 data_aggregated_ip = dp_packet_put_zeros(
1625 msg, sizeof *data_aggregated_ip);
1626 data_aggregated_ip->octet_delta_count = htonll(
1627 entry->octet_delta_count);
1628 data_aggregated_ip->octet_delta_sum_of_squares = htonll(
1629 entry->octet_delta_sum_of_squares);
1630 data_aggregated_ip->minimum_ip_total_length = htonll(
1631 entry->minimum_ip_total_length);
1632 data_aggregated_ip->maximum_ip_total_length = htonll(
1633 entry->maximum_ip_total_length);
1634 }
1635
1636 set_hdr = (struct ipfix_set_header*)((uint8_t*)dp_packet_data(msg) + set_hdr_offset);
1637 set_hdr->length = htons(dp_packet_size(msg) - set_hdr_offset);
1638 }
1639
1640 /* Send an IPFIX message with a single data record. */
1641 static void
1642 ipfix_send_data_msg(struct dpif_ipfix_exporter *exporter,
1643 uint32_t export_time_sec,
1644 struct ipfix_flow_cache_entry *entry,
1645 enum ipfix_flow_end_reason flow_end_reason)
1646 {
1647 uint64_t msg_stub[DIV_ROUND_UP(MAX_MESSAGE_LEN, 8)];
1648 struct dp_packet msg;
1649 dp_packet_use_stub(&msg, msg_stub, sizeof msg_stub);
1650
1651 ipfix_init_header(export_time_sec, exporter->seq_number++,
1652 entry->flow_key.obs_domain_id, &msg);
1653 ipfix_put_data_set(export_time_sec, entry, flow_end_reason, &msg);
1654 ipfix_send_msg(exporter->collectors, &msg);
1655
1656 dp_packet_uninit(&msg);
1657 }
1658
1659 static void
1660 dpif_ipfix_sample(struct dpif_ipfix_exporter *exporter,
1661 const struct dp_packet *packet, const struct flow *flow,
1662 uint64_t packet_delta_count, uint32_t obs_domain_id,
1663 uint32_t obs_point_id, odp_port_t output_odp_port,
1664 const struct dpif_ipfix_port *tunnel_port,
1665 const struct flow_tnl *tunnel_key)
1666 {
1667 struct ipfix_flow_cache_entry *entry;
1668
1669 /* Create a flow cache entry from the sample. */
1670 entry = xmalloc(sizeof *entry);
1671 ipfix_cache_entry_init(entry, packet, flow, packet_delta_count,
1672 obs_domain_id, obs_point_id,
1673 output_odp_port, tunnel_port, tunnel_key);
1674 ipfix_cache_update(exporter, entry);
1675 }
1676
1677 static bool
1678 bridge_exporter_enabled(struct dpif_ipfix *di)
1679 {
1680 return di->bridge_exporter.probability > 0;
1681 }
1682
1683 void
1684 dpif_ipfix_bridge_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
1685 const struct flow *flow,
1686 odp_port_t input_odp_port, odp_port_t output_odp_port,
1687 const struct flow_tnl *output_tunnel_key)
1688 OVS_EXCLUDED(mutex)
1689 {
1690 uint64_t packet_delta_count;
1691 const struct flow_tnl *tunnel_key = NULL;
1692 struct dpif_ipfix_port * tunnel_port = NULL;
1693
1694 ovs_mutex_lock(&mutex);
1695 /* Use the sampling probability as an approximation of the number
1696 * of matched packets. */
1697 packet_delta_count = UINT32_MAX / di->bridge_exporter.probability;
1698 if (di->bridge_exporter.options->enable_tunnel_sampling) {
1699 if (output_odp_port == ODPP_NONE && flow->tunnel.ip_dst) {
1700 /* Input tunnel. */
1701 tunnel_key = &flow->tunnel;
1702 tunnel_port = dpif_ipfix_find_port(di, input_odp_port);
1703 }
1704 if (output_odp_port != ODPP_NONE && output_tunnel_key) {
1705 /* Output tunnel, output_tunnel_key must be valid. */
1706 tunnel_key = output_tunnel_key;
1707 tunnel_port = dpif_ipfix_find_port(di, output_odp_port);
1708 }
1709 }
1710 dpif_ipfix_sample(&di->bridge_exporter.exporter, packet, flow,
1711 packet_delta_count,
1712 di->bridge_exporter.options->obs_domain_id,
1713 di->bridge_exporter.options->obs_point_id,
1714 output_odp_port, tunnel_port, tunnel_key);
1715 ovs_mutex_unlock(&mutex);
1716 }
1717
1718 void
1719 dpif_ipfix_flow_sample(struct dpif_ipfix *di, const struct dp_packet *packet,
1720 const struct flow *flow, uint32_t collector_set_id,
1721 uint16_t probability, uint32_t obs_domain_id,
1722 uint32_t obs_point_id) OVS_EXCLUDED(mutex)
1723 {
1724 struct dpif_ipfix_flow_exporter_map_node *node;
1725 /* Use the sampling probability as an approximation of the number
1726 * of matched packets. */
1727 uint64_t packet_delta_count = USHRT_MAX / probability;
1728
1729 ovs_mutex_lock(&mutex);
1730 node = dpif_ipfix_find_flow_exporter_map_node(di, collector_set_id);
1731 if (node) {
1732 dpif_ipfix_sample(&node->exporter.exporter, packet, flow,
1733 packet_delta_count, obs_domain_id, obs_point_id,
1734 ODPP_NONE, NULL, NULL);
1735 }
1736 ovs_mutex_unlock(&mutex);
1737 }
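/* Worked example (the probability value is assumed, not taken from the
 * source): probability is a 16-bit value, so with probability == 655
 * (about 1% of USHRT_MAX), packet_delta_count == USHRT_MAX / 655 == 100,
 * i.e. each sampled packet is taken to represent roughly 100 matched
 * packets. */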
1738
1739 static void
1740 dpif_ipfix_cache_expire(struct dpif_ipfix_exporter *exporter,
1741 bool forced_end, const uint64_t export_time_usec,
1742 const uint32_t export_time_sec)
1743 {
1744 struct ipfix_flow_cache_entry *entry, *next_entry;
1745 uint64_t max_flow_start_timestamp_usec;
1746 bool template_msg_sent = false;
1747 enum ipfix_flow_end_reason flow_end_reason;
1748
1749 if (list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
1750 return;
1751 }
1752
1753 max_flow_start_timestamp_usec = export_time_usec -
1754 1000000LL * exporter->cache_active_timeout;
1755
1756 LIST_FOR_EACH_SAFE (entry, next_entry, cache_flow_start_timestamp_list_node,
1757 &exporter->cache_flow_start_timestamp_list) {
1758 if (forced_end) {
1759 flow_end_reason = FORCED_END;
1760 } else if (entry->flow_start_timestamp_usec
1761 <= max_flow_start_timestamp_usec) {
1762 flow_end_reason = ACTIVE_TIMEOUT;
1763 } else if (hmap_count(&exporter->cache_flow_key_map)
1764 > exporter->cache_max_flows) {
1765 /* Enforce exporter->cache_max_flows. */
1766 flow_end_reason = LACK_OF_RESOURCES;
1767 } else {
1768 /* Remaining flows haven't expired yet. */
1769 break;
1770 }
1771
1772 list_remove(&entry->cache_flow_start_timestamp_list_node);
1773 hmap_remove(&exporter->cache_flow_key_map,
1774 &entry->flow_key_map_node);
1775
1776 if (!template_msg_sent
1777 && (exporter->last_template_set_time + IPFIX_TEMPLATE_INTERVAL)
1778 <= export_time_sec) {
1779 ipfix_send_template_msgs(exporter, export_time_sec,
1780 entry->flow_key.obs_domain_id);
1781 exporter->last_template_set_time = export_time_sec;
1782 template_msg_sent = true;
1783 }
1784
1785 /* XXX: Group multiple data records for the same obs domain id
1786 * into the same message. */
1787 ipfix_send_data_msg(exporter, export_time_sec, entry, flow_end_reason);
1788 free(entry);
1789 }
1790 }
1791
1792 static void
1793 get_export_time_now(uint64_t *export_time_usec, uint32_t *export_time_sec)
1794 {
1795 struct timeval export_time;
1796 xgettimeofday(&export_time);
1797
1798 *export_time_usec = export_time.tv_usec + 1000000LL * export_time.tv_sec;
1799
1800 /* The IPFIX start and end deltas are negative deltas relative to
1801 * the export time, so round the export time up to the next whole
1802 * second so that those deltas remain non-negative. */
1803 if (export_time.tv_usec == 0) {
1804 *export_time_sec = export_time.tv_sec;
1805 } else {
1806 *export_time_sec = export_time.tv_sec + 1;
1807 }
1808 }
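/* Worked example (timestamps assumed, not taken from the source): for
 * tv_sec == 1000 and tv_usec == 500000, export_time_sec is rounded up to
 * 1001, so a flow ending at that instant gets
 * flow_end_delta_microseconds = 1001 * 1000000 - 1000500000 = 500000,
 * keeping the exported deltas non-negative. */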
1809
1810 static void
1811 dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *exporter,
1812 bool forced_end)
1813 {
1814 uint64_t export_time_usec;
1815 uint32_t export_time_sec;
1816
1817 get_export_time_now(&export_time_usec, &export_time_sec);
1818 dpif_ipfix_cache_expire(exporter, forced_end, export_time_usec,
1819 export_time_sec);
1820 }
1821
1822 void
1823 dpif_ipfix_run(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
1824 {
1825 uint64_t export_time_usec;
1826 uint32_t export_time_sec;
1827 struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;
1828
1829 ovs_mutex_lock(&mutex);
1830 get_export_time_now(&export_time_usec, &export_time_sec);
1831 if (bridge_exporter_enabled(di)) {
1832 dpif_ipfix_cache_expire(
1833 &di->bridge_exporter.exporter, false, export_time_usec,
1834 export_time_sec);
1835 }
1836 HMAP_FOR_EACH (flow_exporter_node, node, &di->flow_exporter_map) {
1837 dpif_ipfix_cache_expire(
1838 &flow_exporter_node->exporter.exporter, false, export_time_usec,
1839 export_time_sec);
1840 }
1841 ovs_mutex_unlock(&mutex);
1842 }
1843
1844 void
1845 dpif_ipfix_wait(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
1846 {
1847 long long int next_timeout_msec = LLONG_MAX;
1848 struct dpif_ipfix_flow_exporter_map_node *flow_exporter_node;
1849
1850 ovs_mutex_lock(&mutex);
1851 if (bridge_exporter_enabled(di)) {
1852 if (ipfix_cache_next_timeout_msec(
1853 &di->bridge_exporter.exporter, &next_timeout_msec)) {
1854 poll_timer_wait_until(next_timeout_msec);
1855 }
1856 }
1857 HMAP_FOR_EACH (flow_exporter_node, node, &di->flow_exporter_map) {
1858 if (ipfix_cache_next_timeout_msec(
1859 &flow_exporter_node->exporter.exporter, &next_timeout_msec)) {
1860 poll_timer_wait_until(next_timeout_msec);
1861 }
1862 }
1863 ovs_mutex_unlock(&mutex);
1864 }