2 * Copyright (c) 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "ofproto-dpif-ipfix.h"
20 #include "byte-order.h"
21 #include "collectors.h"
26 #include "openvswitch/list.h"
27 #include "openvswitch/ofpbuf.h"
29 #include "ofproto-dpif.h"
30 #include "dp-packet.h"
32 #include "poll-loop.h"
37 #include "openvswitch/vlog.h"
39 VLOG_DEFINE_THIS_MODULE(ipfix
);
41 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
42 static struct ovs_mutex mutex
= OVS_MUTEX_INITIALIZER
;
44 /* Cf. IETF RFC 5101 Section 10.3.4. */
45 #define IPFIX_DEFAULT_COLLECTOR_PORT 4739
47 /* Cf. IETF RFC 5881 Section 8. */
48 #define BFD_CONTROL_DEST_PORT 3784
49 #define BFD_ECHO_DEST_PORT 3785
51 /* The standard layer2SegmentId (ID 351) element is included in vDS to send
52 * the VxLAN tunnel's VNI. It is 64-bit long, the most significant byte is
53 * used to indicate the type of tunnel (0x01 = VxLAN, 0x02 = GRE) and the three
54 * least significant bytes hold the value of the layer 2 overlay network
55 * segment identifier: a 24-bit VxLAN tunnel's VNI or a 24-bit GRE tunnel's
56 * TNI. This is not compatible with STT, as implemented in OVS, as
57 * its tunnel IDs are 64-bit.
59 * Two new enterprise information elements are defined which are similar to
60 * layer2SegmentId but support 64-bit IDs:
61 * tunnelType (ID 891) and tunnelKey (ID 892).
63 * The enum dpif_ipfix_tunnel_type is to declare the types supported in the
65 * The number of ipfix tunnel types includes two reserved types: 0x04 and 0x06.
/* Tunnel type codes exported in the enterprise tunnelType element (ID 891).
 * The numeric values are part of the wire format and must not change.  0x06
 * is a reserved code point and is skipped (cf. the comment above).
 * NUM_DPIF_IPFIX_TUNNEL sizes the tunnel_protocol[] table below.
 * NOTE(review): the closing brace and terminator were reconstructed from a
 * lossy extraction; NUM_DPIF_IPFIX_TUNNEL is referenced by tunnel_protocol[]. */
enum dpif_ipfix_tunnel_type {
    DPIF_IPFIX_TUNNEL_UNKNOWN = 0x00,
    DPIF_IPFIX_TUNNEL_VXLAN = 0x01,
    DPIF_IPFIX_TUNNEL_GRE = 0x02,
    DPIF_IPFIX_TUNNEL_LISP = 0x03,
    DPIF_IPFIX_TUNNEL_STT = 0x04,
    DPIF_IPFIX_TUNNEL_IPSEC_GRE = 0x05,
    DPIF_IPFIX_TUNNEL_GENEVE = 0x07,
    NUM_DPIF_IPFIX_TUNNEL
};
78 struct dpif_ipfix_port
{
79 struct hmap_node hmap_node
; /* In struct dpif_ipfix's "tunnel_ports" hmap. */
80 struct ofport
*ofport
; /* To retrieve port stats. */
82 enum dpif_ipfix_tunnel_type tunnel_type
;
83 uint8_t tunnel_key_length
;
86 struct dpif_ipfix_exporter
{
87 struct collectors
*collectors
;
89 time_t last_template_set_time
;
90 struct hmap cache_flow_key_map
; /* ipfix_flow_cache_entry. */
91 struct ovs_list cache_flow_start_timestamp_list
; /* ipfix_flow_cache_entry. */
92 uint32_t cache_active_timeout
; /* In seconds. */
93 uint32_t cache_max_flows
;
96 struct dpif_ipfix_bridge_exporter
{
97 struct dpif_ipfix_exporter exporter
;
98 struct ofproto_ipfix_bridge_exporter_options
*options
;
102 struct dpif_ipfix_flow_exporter
{
103 struct dpif_ipfix_exporter exporter
;
104 struct ofproto_ipfix_flow_exporter_options
*options
;
107 struct dpif_ipfix_flow_exporter_map_node
{
108 struct hmap_node node
;
109 struct dpif_ipfix_flow_exporter exporter
;
113 struct dpif_ipfix_bridge_exporter bridge_exporter
;
114 struct hmap flow_exporter_map
; /* dpif_ipfix_flow_exporter_map_node. */
115 struct hmap tunnel_ports
; /* Contains "struct dpif_ipfix_port"s.
116 * It makes tunnel port lookups faster in
117 * sampling upcalls. */
118 struct ovs_refcount ref_cnt
;
#define IPFIX_VERSION 0x000a

/* When using UDP, IPFIX Template Records must be re-sent regularly.
 * The standard default interval is 10 minutes (600 seconds).
 * Cf. IETF RFC 5101 Section 10.3.6. */
#define IPFIX_TEMPLATE_INTERVAL 600
128 /* Cf. IETF RFC 5101 Section 3.1. */
130 struct ipfix_header
{
131 ovs_be16 version
; /* IPFIX_VERSION. */
132 ovs_be16 length
; /* Length in bytes including this header. */
133 ovs_be32 export_time
; /* Seconds since the epoch. */
134 ovs_be32 seq_number
; /* Message sequence number. */
135 ovs_be32 obs_domain_id
; /* Observation Domain ID. */
137 BUILD_ASSERT_DECL(sizeof(struct ipfix_header
) == 16);
139 #define IPFIX_SET_ID_TEMPLATE 2
140 #define IPFIX_SET_ID_OPTION_TEMPLATE 3
142 /* Cf. IETF RFC 5101 Section 3.3.2. */
144 struct ipfix_set_header
{
145 ovs_be16 set_id
; /* IPFIX_SET_ID_* or valid template ID for Data Sets. */
146 ovs_be16 length
; /* Length of the set in bytes including header. */
148 BUILD_ASSERT_DECL(sizeof(struct ipfix_set_header
) == 4);
150 /* Alternatives for templates at each layer. A template is defined by
151 * a combination of one value for each layer. */
/* NOTE(review): several enumerators below were missing from the lossy
 * extraction and are restored from their references later in this file
 * (IPFIX_PROTO_L2_VLAN, IPFIX_PROTO_L3_IPV4/IPV6, IPFIX_PROTO_L4_ICMP, and
 * the NUM_* terminators used by ipfix_get_template_id()). */
enum ipfix_proto_l2 {
    IPFIX_PROTO_L2_ETH = 0,  /* No VLAN. */
    IPFIX_PROTO_L2_VLAN,
    NUM_IPFIX_PROTO_L2
};

enum ipfix_proto_l3 {
    IPFIX_PROTO_L3_UNKNOWN = 0,
    IPFIX_PROTO_L3_IPV4,
    IPFIX_PROTO_L3_IPV6,
    NUM_IPFIX_PROTO_L3
};

enum ipfix_proto_l4 {
    IPFIX_PROTO_L4_UNKNOWN = 0,
    IPFIX_PROTO_L4_TCP_UDP_SCTP,
    IPFIX_PROTO_L4_ICMP,
    NUM_IPFIX_PROTO_L4
};

enum ipfix_proto_tunnel {
    IPFIX_PROTO_NOT_TUNNELED = 0,
    IPFIX_PROTO_TUNNELED,  /* Support gre, lisp and vxlan. */
    NUM_IPFIX_PROTO_TUNNEL
};
175 /* Any Template ID > 255 is usable for Template Records. */
176 #define IPFIX_TEMPLATE_ID_MIN 256
178 /* Cf. IETF RFC 5101 Section 3.4.1. */
180 struct ipfix_template_record_header
{
181 ovs_be16 template_id
;
182 ovs_be16 field_count
;
184 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_record_header
) == 4);
186 enum ipfix_entity_id
{
187 /* standard IPFIX elements */
188 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ID_##ENUM = ID,
189 #include "ofproto/ipfix-entities.def"
190 /* non-standard IPFIX elements */
191 #define IPFIX_SET_ENTERPRISE(v) (((v) | 0x8000))
192 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
193 IPFIX_ENTITY_ID_##ENUM = IPFIX_SET_ENTERPRISE(ID),
194 #include "ofproto/ipfix-enterprise-entities.def"
197 enum ipfix_entity_size
{
198 /* standard IPFIX elements */
199 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_SIZE_##ENUM = SIZE,
200 #include "ofproto/ipfix-entities.def"
201 /* non-standard IPFIX elements */
202 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
203 IPFIX_ENTITY_SIZE_##ENUM = SIZE,
204 #include "ofproto/ipfix-enterprise-entities.def"
207 enum ipfix_entity_enterprise
{
208 /* standard IPFIX elements */
209 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ENTERPRISE_##ENUM = 0,
210 #include "ofproto/ipfix-entities.def"
211 /* non-standard IPFIX elements */
212 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
213 IPFIX_ENTITY_ENTERPRISE_##ENUM = ENTERPRISE,
214 #include "ofproto/ipfix-enterprise-entities.def"
218 struct ipfix_template_field_specifier
{
219 ovs_be16 element_id
; /* IPFIX_ENTITY_ID_*. */
220 ovs_be16 field_length
; /* Length of the field's value, in bytes.
221 * For Variable-Length element, it should be 65535.
223 ovs_be32 enterprise
; /* Enterprise number */
225 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_field_specifier
) == 8);
227 /* Cf. IETF RFC 5102 Section 5.11.6. */
228 enum ipfix_flow_direction
{
233 /* Part of data record flow key for common metadata and Ethernet entities. */
235 struct ipfix_data_record_flow_key_common
{
236 ovs_be32 observation_point_id
; /* OBSERVATION_POINT_ID */
237 uint8_t flow_direction
; /* FLOW_DIRECTION */
238 struct eth_addr source_mac_address
; /* SOURCE_MAC_ADDRESS */
239 struct eth_addr destination_mac_address
; /* DESTINATION_MAC_ADDRESS */
240 ovs_be16 ethernet_type
; /* ETHERNET_TYPE */
241 uint8_t ethernet_header_length
; /* ETHERNET_HEADER_LENGTH */
243 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_common
) == 20);
245 /* Part of data record flow key for VLAN entities. */
247 struct ipfix_data_record_flow_key_vlan
{
248 ovs_be16 vlan_id
; /* VLAN_ID */
249 ovs_be16 dot1q_vlan_id
; /* DOT1Q_VLAN_ID */
250 uint8_t dot1q_priority
; /* DOT1Q_PRIORITY */
252 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_vlan
) == 5);
254 /* Part of data record flow key for IP entities. */
255 /* XXX: Replace IP_TTL with MINIMUM_TTL and MAXIMUM_TTL? */
257 struct ipfix_data_record_flow_key_ip
{
258 uint8_t ip_version
; /* IP_VERSION */
259 uint8_t ip_ttl
; /* IP_TTL */
260 uint8_t protocol_identifier
; /* PROTOCOL_IDENTIFIER */
261 uint8_t ip_diff_serv_code_point
; /* IP_DIFF_SERV_CODE_POINT */
262 uint8_t ip_precedence
; /* IP_PRECEDENCE */
263 uint8_t ip_class_of_service
; /* IP_CLASS_OF_SERVICE */
265 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ip
) == 6);
267 /* Part of data record flow key for IPv4 entities. */
269 struct ipfix_data_record_flow_key_ipv4
{
270 ovs_be32 source_ipv4_address
; /* SOURCE_IPV4_ADDRESS */
271 ovs_be32 destination_ipv4_address
; /* DESTINATION_IPV4_ADDRESS */
273 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv4
) == 8);
275 /* Part of data record flow key for IPv6 entities. */
277 struct ipfix_data_record_flow_key_ipv6
{
278 uint8_t source_ipv6_address
[16]; /* SOURCE_IPV6_ADDRESS */
279 uint8_t destination_ipv6_address
[16]; /* DESTINATION_IPV6_ADDRESS */
280 ovs_be32 flow_label_ipv6
; /* FLOW_LABEL_IPV6 */
282 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv6
) == 36);
284 /* Part of data record flow key for TCP/UDP/SCTP entities. */
286 struct ipfix_data_record_flow_key_transport
{
287 ovs_be16 source_transport_port
; /* SOURCE_TRANSPORT_PORT */
288 ovs_be16 destination_transport_port
; /* DESTINATION_TRANSPORT_PORT */
290 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_transport
) == 4);
292 /* Part of data record flow key for ICMP entities. */
294 struct ipfix_data_record_flow_key_icmp
{
295 uint8_t icmp_type
; /* ICMP_TYPE_IPV4 / ICMP_TYPE_IPV6 */
296 uint8_t icmp_code
; /* ICMP_CODE_IPV4 / ICMP_CODE_IPV6 */
298 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_icmp
) == 2);
300 /* For the tunnel type that is on the top of IPSec, the protocol identifier
301 * of the upper tunnel type is used.
303 static uint8_t tunnel_protocol
[NUM_DPIF_IPFIX_TUNNEL
] = {
305 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_VXLAN */
306 IPPROTO_GRE
, /* DPIF_IPFIX_TUNNEL_GRE */
307 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_LISP*/
308 IPPROTO_TCP
, /* DPIF_IPFIX_TUNNEL_STT*/
309 IPPROTO_GRE
, /* DPIF_IPFIX_TUNNEL_IPSEC_GRE */
311 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_GENEVE*/
315 struct ipfix_data_record_flow_key_tunnel
{
316 ovs_be32 tunnel_source_ipv4_address
; /* TUNNEL_SOURCE_IPV4_ADDRESS */
317 ovs_be32 tunnel_destination_ipv4_address
; /* TUNNEL_DESTINATION_IPV4_ADDRESS */
318 uint8_t tunnel_protocol_identifier
; /* TUNNEL_PROTOCOL_IDENTIFIER */
319 ovs_be16 tunnel_source_transport_port
; /* TUNNEL_SOURCE_TRANSPORT_PORT */
320 ovs_be16 tunnel_destination_transport_port
; /* TUNNEL_DESTINATION_TRANSPORT_PORT */
321 uint8_t tunnel_type
; /* TUNNEL_TYPE */
322 uint8_t tunnel_key_length
; /* length of TUNNEL_KEY */
323 uint8_t tunnel_key
[]; /* data of TUNNEL_KEY */
325 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_tunnel
) == 15);
/* Cf. IETF RFC 5102 Section 5.11.3.
 * NOTE(review): IDLE_TIMEOUT (0x01) and FORCED_END (0x04) were missing from
 * the lossy extraction and are restored per the cited RFC section; "forced
 * end" is referenced by dpif_ipfix_exporter_clear() below. */
enum ipfix_flow_end_reason {
    IDLE_TIMEOUT = 0x01,
    ACTIVE_TIMEOUT = 0x02,
    END_OF_FLOW_DETECTED = 0x03,
    FORCED_END = 0x04,
    LACK_OF_RESOURCES = 0x05
};
336 /* Part of data record for common aggregated elements. */
338 struct ipfix_data_record_aggregated_common
{
339 ovs_be32 flow_start_delta_microseconds
; /* FLOW_START_DELTA_MICROSECONDS */
340 ovs_be32 flow_end_delta_microseconds
; /* FLOW_END_DELTA_MICROSECONDS */
341 ovs_be64 packet_delta_count
; /* PACKET_DELTA_COUNT */
342 ovs_be64 layer2_octet_delta_count
; /* LAYER2_OCTET_DELTA_COUNT */
343 uint8_t flow_end_reason
; /* FLOW_END_REASON */
345 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_common
) == 25);
347 /* Part of data record for IP aggregated elements. */
349 struct ipfix_data_record_aggregated_ip
{
350 ovs_be64 octet_delta_count
; /* OCTET_DELTA_COUNT */
351 ovs_be64 octet_delta_sum_of_squares
; /* OCTET_DELTA_SUM_OF_SQUARES */
352 ovs_be64 minimum_ip_total_length
; /* MINIMUM_IP_TOTAL_LENGTH */
353 ovs_be64 maximum_ip_total_length
; /* MAXIMUM_IP_TOTAL_LENGTH */
355 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_ip
) == 32);
/* Supported tunnel key sizes (cf. dpif_ipfix_add_tunnel_port()):
 *   GRE / IPSEC_GRE: 32-bit key (4 bytes)
 *   VxLAN / LISP / GENEVE: 24-bit ID (3 bytes)
 *   STT: 64-bit key (8 bytes)
 * so at most 8 bytes of tunnel key are stored per flow. */
#define MAX_TUNNEL_KEY_LEN 8

#define MAX_FLOW_KEY_LEN \
    (sizeof(struct ipfix_data_record_flow_key_common)           \
     + sizeof(struct ipfix_data_record_flow_key_vlan)           \
     + sizeof(struct ipfix_data_record_flow_key_ip)             \
     + MAX(sizeof(struct ipfix_data_record_flow_key_ipv4),      \
           sizeof(struct ipfix_data_record_flow_key_ipv6))      \
     + MAX(sizeof(struct ipfix_data_record_flow_key_icmp),      \
           sizeof(struct ipfix_data_record_flow_key_transport)) \
     + sizeof(struct ipfix_data_record_flow_key_tunnel)         \
     + MAX_TUNNEL_KEY_LEN)

/* Max length of one data record: flow key plus the aggregated counters.
 * NOTE(review): the first operand (MAX_FLOW_KEY_LEN) was lost in extraction
 * and is restored here — without it the macro body was incomplete. */
#define MAX_DATA_RECORD_LEN \
    (MAX_FLOW_KEY_LEN                                       \
     + sizeof(struct ipfix_data_record_aggregated_common)   \
     + sizeof(struct ipfix_data_record_aggregated_ip))

/* Max length of a data set. To simplify the implementation, each
 * data record is sent in a separate data set, so each data set
 * contains at most one data record. */
#define MAX_DATA_SET_LEN \
    (sizeof(struct ipfix_set_header) \
     + MAX_DATA_RECORD_LEN)

/* Max length of an IPFIX message. Arbitrarily set to accommodate low
 * MTU. */
#define MAX_MESSAGE_LEN 1024
393 /* Cache structures. */
396 struct ipfix_flow_key
{
397 uint32_t obs_domain_id
;
398 uint16_t template_id
;
399 size_t flow_key_msg_part_size
;
400 uint64_t flow_key_msg_part
[DIV_ROUND_UP(MAX_FLOW_KEY_LEN
, 8)];
403 /* Flow cache entry. */
404 struct ipfix_flow_cache_entry
{
405 struct hmap_node flow_key_map_node
;
406 struct ovs_list cache_flow_start_timestamp_list_node
;
407 struct ipfix_flow_key flow_key
;
408 /* Common aggregated elements. */
409 uint64_t flow_start_timestamp_usec
;
410 uint64_t flow_end_timestamp_usec
;
411 uint64_t packet_delta_count
;
412 uint64_t layer2_octet_delta_count
;
413 uint64_t octet_delta_count
;
414 uint64_t octet_delta_sum_of_squares
; /* 0 if not IP. */
415 uint16_t minimum_ip_total_length
; /* 0 if not IP. */
416 uint16_t maximum_ip_total_length
; /* 0 if not IP. */
419 static void dpif_ipfix_cache_expire(struct dpif_ipfix_exporter
*, bool,
420 const uint64_t, const uint32_t);
422 static void get_export_time_now(uint64_t *, uint32_t *);
424 static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter
*, bool);
427 ofproto_ipfix_bridge_exporter_options_equal(
428 const struct ofproto_ipfix_bridge_exporter_options
*a
,
429 const struct ofproto_ipfix_bridge_exporter_options
*b
)
431 return (a
->obs_domain_id
== b
->obs_domain_id
432 && a
->obs_point_id
== b
->obs_point_id
433 && a
->sampling_rate
== b
->sampling_rate
434 && a
->cache_active_timeout
== b
->cache_active_timeout
435 && a
->cache_max_flows
== b
->cache_max_flows
436 && a
->enable_tunnel_sampling
== b
->enable_tunnel_sampling
437 && a
->enable_input_sampling
== b
->enable_input_sampling
438 && a
->enable_output_sampling
== b
->enable_output_sampling
439 && sset_equals(&a
->targets
, &b
->targets
));
442 static struct ofproto_ipfix_bridge_exporter_options
*
443 ofproto_ipfix_bridge_exporter_options_clone(
444 const struct ofproto_ipfix_bridge_exporter_options
*old
)
446 struct ofproto_ipfix_bridge_exporter_options
*new =
447 xmemdup(old
, sizeof *old
);
448 sset_clone(&new->targets
, &old
->targets
);
453 ofproto_ipfix_bridge_exporter_options_destroy(
454 struct ofproto_ipfix_bridge_exporter_options
*options
)
457 sset_destroy(&options
->targets
);
463 ofproto_ipfix_flow_exporter_options_equal(
464 const struct ofproto_ipfix_flow_exporter_options
*a
,
465 const struct ofproto_ipfix_flow_exporter_options
*b
)
467 return (a
->collector_set_id
== b
->collector_set_id
468 && a
->cache_active_timeout
== b
->cache_active_timeout
469 && a
->cache_max_flows
== b
->cache_max_flows
470 && sset_equals(&a
->targets
, &b
->targets
));
473 static struct ofproto_ipfix_flow_exporter_options
*
474 ofproto_ipfix_flow_exporter_options_clone(
475 const struct ofproto_ipfix_flow_exporter_options
*old
)
477 struct ofproto_ipfix_flow_exporter_options
*new =
478 xmemdup(old
, sizeof *old
);
479 sset_clone(&new->targets
, &old
->targets
);
484 ofproto_ipfix_flow_exporter_options_destroy(
485 struct ofproto_ipfix_flow_exporter_options
*options
)
488 sset_destroy(&options
->targets
);
494 dpif_ipfix_exporter_init(struct dpif_ipfix_exporter
*exporter
)
496 exporter
->collectors
= NULL
;
497 exporter
->seq_number
= 1;
498 exporter
->last_template_set_time
= TIME_MIN
;
499 hmap_init(&exporter
->cache_flow_key_map
);
500 ovs_list_init(&exporter
->cache_flow_start_timestamp_list
);
501 exporter
->cache_active_timeout
= 0;
502 exporter
->cache_max_flows
= 0;
506 dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter
*exporter
)
508 /* Flush the cache with flow end reason "forced end." */
509 dpif_ipfix_cache_expire_now(exporter
, true);
511 collectors_destroy(exporter
->collectors
);
512 exporter
->collectors
= NULL
;
513 exporter
->seq_number
= 1;
514 exporter
->last_template_set_time
= TIME_MIN
;
515 exporter
->cache_active_timeout
= 0;
516 exporter
->cache_max_flows
= 0;
520 dpif_ipfix_exporter_destroy(struct dpif_ipfix_exporter
*exporter
)
522 dpif_ipfix_exporter_clear(exporter
);
523 hmap_destroy(&exporter
->cache_flow_key_map
);
527 dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter
*exporter
,
528 const struct sset
*targets
,
529 const uint32_t cache_active_timeout
,
530 const uint32_t cache_max_flows
)
532 collectors_destroy(exporter
->collectors
);
533 collectors_create(targets
, IPFIX_DEFAULT_COLLECTOR_PORT
,
534 &exporter
->collectors
);
535 if (exporter
->collectors
== NULL
) {
536 VLOG_WARN_RL(&rl
, "no collectors could be initialized, "
537 "IPFIX exporter disabled");
538 dpif_ipfix_exporter_clear(exporter
);
541 exporter
->cache_active_timeout
= cache_active_timeout
;
542 exporter
->cache_max_flows
= cache_max_flows
;
546 static struct dpif_ipfix_port
*
547 dpif_ipfix_find_port(const struct dpif_ipfix
*di
,
548 odp_port_t odp_port
) OVS_REQUIRES(mutex
)
550 struct dpif_ipfix_port
*dip
;
552 HMAP_FOR_EACH_IN_BUCKET (dip
, hmap_node
, hash_odp_port(odp_port
),
554 if (dip
->odp_port
== odp_port
) {
562 dpif_ipfix_del_port(struct dpif_ipfix
*di
,
563 struct dpif_ipfix_port
*dip
)
566 hmap_remove(&di
->tunnel_ports
, &dip
->hmap_node
);
571 dpif_ipfix_add_tunnel_port(struct dpif_ipfix
*di
, struct ofport
*ofport
,
572 odp_port_t odp_port
) OVS_EXCLUDED(mutex
)
574 struct dpif_ipfix_port
*dip
;
577 ovs_mutex_lock(&mutex
);
578 dip
= dpif_ipfix_find_port(di
, odp_port
);
580 dpif_ipfix_del_port(di
, dip
);
583 type
= netdev_get_type(ofport
->netdev
);
588 /* Add to table of tunnel ports. */
589 dip
= xmalloc(sizeof *dip
);
590 dip
->ofport
= ofport
;
591 dip
->odp_port
= odp_port
;
592 if (strcmp(type
, "gre") == 0) {
594 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_GRE
;
595 dip
->tunnel_key_length
= 4;
596 } else if (strcmp(type
, "ipsec_gre") == 0) {
597 /* 32-bit key ipsec_gre */
598 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_IPSEC_GRE
;
599 dip
->tunnel_key_length
= 4;
600 } else if (strcmp(type
, "vxlan") == 0) {
601 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_VXLAN
;
602 dip
->tunnel_key_length
= 3;
603 } else if (strcmp(type
, "lisp") == 0) {
604 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_LISP
;
605 dip
->tunnel_key_length
= 3;
606 } else if (strcmp(type
, "geneve") == 0) {
607 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_GENEVE
;
608 dip
->tunnel_key_length
= 3;
609 } else if (strcmp(type
, "stt") == 0) {
610 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_STT
;
611 dip
->tunnel_key_length
= 8;
616 hmap_insert(&di
->tunnel_ports
, &dip
->hmap_node
, hash_odp_port(odp_port
));
619 ovs_mutex_unlock(&mutex
);
623 dpif_ipfix_del_tunnel_port(struct dpif_ipfix
*di
, odp_port_t odp_port
)
626 struct dpif_ipfix_port
*dip
;
627 ovs_mutex_lock(&mutex
);
628 dip
= dpif_ipfix_find_port(di
, odp_port
);
630 dpif_ipfix_del_port(di
, dip
);
632 ovs_mutex_unlock(&mutex
);
636 dpif_ipfix_get_tunnel_port(const struct dpif_ipfix
*di
, odp_port_t odp_port
)
639 struct dpif_ipfix_port
*dip
;
640 ovs_mutex_lock(&mutex
);
641 dip
= dpif_ipfix_find_port(di
, odp_port
);
642 ovs_mutex_unlock(&mutex
);
647 dpif_ipfix_bridge_exporter_init(struct dpif_ipfix_bridge_exporter
*exporter
)
649 dpif_ipfix_exporter_init(&exporter
->exporter
);
650 exporter
->options
= NULL
;
651 exporter
->probability
= 0;
655 dpif_ipfix_bridge_exporter_clear(struct dpif_ipfix_bridge_exporter
*exporter
)
657 dpif_ipfix_exporter_clear(&exporter
->exporter
);
658 ofproto_ipfix_bridge_exporter_options_destroy(exporter
->options
);
659 exporter
->options
= NULL
;
660 exporter
->probability
= 0;
664 dpif_ipfix_bridge_exporter_destroy(struct dpif_ipfix_bridge_exporter
*exporter
)
666 dpif_ipfix_bridge_exporter_clear(exporter
);
667 dpif_ipfix_exporter_destroy(&exporter
->exporter
);
671 dpif_ipfix_bridge_exporter_set_options(
672 struct dpif_ipfix_bridge_exporter
*exporter
,
673 const struct ofproto_ipfix_bridge_exporter_options
*options
)
675 bool options_changed
;
677 if (!options
|| sset_is_empty(&options
->targets
)) {
678 /* No point in doing any work if there are no targets. */
679 dpif_ipfix_bridge_exporter_clear(exporter
);
685 || !ofproto_ipfix_bridge_exporter_options_equal(
686 options
, exporter
->options
));
688 /* Configure collectors if options have changed or if we're
689 * shortchanged in collectors (which indicates that opening one or
690 * more of the configured collectors failed, so that we should
693 || collectors_count(exporter
->exporter
.collectors
)
694 < sset_count(&options
->targets
)) {
695 if (!dpif_ipfix_exporter_set_options(
696 &exporter
->exporter
, &options
->targets
,
697 options
->cache_active_timeout
, options
->cache_max_flows
)) {
702 /* Avoid reconfiguring if options didn't change. */
703 if (!options_changed
) {
707 ofproto_ipfix_bridge_exporter_options_destroy(exporter
->options
);
708 exporter
->options
= ofproto_ipfix_bridge_exporter_options_clone(options
);
709 exporter
->probability
=
710 MAX(1, UINT32_MAX
/ exporter
->options
->sampling_rate
);
712 /* Run over the cache as some entries might have expired after
713 * changing the timeouts. */
714 dpif_ipfix_cache_expire_now(&exporter
->exporter
, false);
717 static struct dpif_ipfix_flow_exporter_map_node
*
718 dpif_ipfix_find_flow_exporter_map_node(
719 const struct dpif_ipfix
*di
, const uint32_t collector_set_id
)
722 struct dpif_ipfix_flow_exporter_map_node
*exporter_node
;
724 HMAP_FOR_EACH_WITH_HASH (exporter_node
, node
,
725 hash_int(collector_set_id
, 0),
726 &di
->flow_exporter_map
) {
727 if (exporter_node
->exporter
.options
->collector_set_id
728 == collector_set_id
) {
729 return exporter_node
;
737 dpif_ipfix_flow_exporter_init(struct dpif_ipfix_flow_exporter
*exporter
)
739 dpif_ipfix_exporter_init(&exporter
->exporter
);
740 exporter
->options
= NULL
;
744 dpif_ipfix_flow_exporter_clear(struct dpif_ipfix_flow_exporter
*exporter
)
746 dpif_ipfix_exporter_clear(&exporter
->exporter
);
747 ofproto_ipfix_flow_exporter_options_destroy(exporter
->options
);
748 exporter
->options
= NULL
;
752 dpif_ipfix_flow_exporter_destroy(struct dpif_ipfix_flow_exporter
*exporter
)
754 dpif_ipfix_flow_exporter_clear(exporter
);
755 dpif_ipfix_exporter_destroy(&exporter
->exporter
);
759 dpif_ipfix_flow_exporter_set_options(
760 struct dpif_ipfix_flow_exporter
*exporter
,
761 const struct ofproto_ipfix_flow_exporter_options
*options
)
763 bool options_changed
;
765 if (sset_is_empty(&options
->targets
)) {
766 /* No point in doing any work if there are no targets. */
767 dpif_ipfix_flow_exporter_clear(exporter
);
773 || !ofproto_ipfix_flow_exporter_options_equal(
774 options
, exporter
->options
));
776 /* Configure collectors if options have changed or if we're
777 * shortchanged in collectors (which indicates that opening one or
778 * more of the configured collectors failed, so that we should
781 || collectors_count(exporter
->exporter
.collectors
)
782 < sset_count(&options
->targets
)) {
783 if (!dpif_ipfix_exporter_set_options(
784 &exporter
->exporter
, &options
->targets
,
785 options
->cache_active_timeout
, options
->cache_max_flows
)) {
790 /* Avoid reconfiguring if options didn't change. */
791 if (!options_changed
) {
795 ofproto_ipfix_flow_exporter_options_destroy(exporter
->options
);
796 exporter
->options
= ofproto_ipfix_flow_exporter_options_clone(options
);
798 /* Run over the cache as some entries might have expired after
799 * changing the timeouts. */
800 dpif_ipfix_cache_expire_now(&exporter
->exporter
, false);
806 dpif_ipfix_set_options(
807 struct dpif_ipfix
*di
,
808 const struct ofproto_ipfix_bridge_exporter_options
*bridge_exporter_options
,
809 const struct ofproto_ipfix_flow_exporter_options
*flow_exporters_options
,
810 size_t n_flow_exporters_options
) OVS_EXCLUDED(mutex
)
813 struct ofproto_ipfix_flow_exporter_options
*options
;
814 struct dpif_ipfix_flow_exporter_map_node
*node
, *next
;
815 size_t n_broken_flow_exporters_options
= 0;
817 ovs_mutex_lock(&mutex
);
818 dpif_ipfix_bridge_exporter_set_options(&di
->bridge_exporter
,
819 bridge_exporter_options
);
821 /* Add new flow exporters and update current flow exporters. */
822 options
= (struct ofproto_ipfix_flow_exporter_options
*)
823 flow_exporters_options
;
824 for (i
= 0; i
< n_flow_exporters_options
; i
++) {
825 node
= dpif_ipfix_find_flow_exporter_map_node(
826 di
, options
->collector_set_id
);
828 node
= xzalloc(sizeof *node
);
829 dpif_ipfix_flow_exporter_init(&node
->exporter
);
830 hmap_insert(&di
->flow_exporter_map
, &node
->node
,
831 hash_int(options
->collector_set_id
, 0));
833 if (!dpif_ipfix_flow_exporter_set_options(&node
->exporter
, options
)) {
834 n_broken_flow_exporters_options
++;
839 ovs_assert(hmap_count(&di
->flow_exporter_map
) >=
840 (n_flow_exporters_options
- n_broken_flow_exporters_options
));
842 /* Remove dropped flow exporters, if any needs to be removed. */
843 if (hmap_count(&di
->flow_exporter_map
) > n_flow_exporters_options
) {
844 HMAP_FOR_EACH_SAFE (node
, next
, node
, &di
->flow_exporter_map
) {
845 /* This is slow but doesn't take any extra memory, and
846 * this table is not supposed to contain many rows anyway. */
847 options
= (struct ofproto_ipfix_flow_exporter_options
*)
848 flow_exporters_options
;
849 for (i
= 0; i
< n_flow_exporters_options
; i
++) {
850 if (node
->exporter
.options
->collector_set_id
851 == options
->collector_set_id
) {
856 if (i
== n_flow_exporters_options
) { // Not found.
857 hmap_remove(&di
->flow_exporter_map
, &node
->node
);
858 dpif_ipfix_flow_exporter_destroy(&node
->exporter
);
864 ovs_assert(hmap_count(&di
->flow_exporter_map
) ==
865 (n_flow_exporters_options
- n_broken_flow_exporters_options
));
866 ovs_mutex_unlock(&mutex
);
870 dpif_ipfix_create(void)
872 struct dpif_ipfix
*di
;
873 di
= xzalloc(sizeof *di
);
874 dpif_ipfix_bridge_exporter_init(&di
->bridge_exporter
);
875 hmap_init(&di
->flow_exporter_map
);
876 hmap_init(&di
->tunnel_ports
);
877 ovs_refcount_init(&di
->ref_cnt
);
882 dpif_ipfix_ref(const struct dpif_ipfix
*di_
)
884 struct dpif_ipfix
*di
= CONST_CAST(struct dpif_ipfix
*, di_
);
886 ovs_refcount_ref(&di
->ref_cnt
);
892 dpif_ipfix_get_bridge_exporter_probability(const struct dpif_ipfix
*di
)
896 ovs_mutex_lock(&mutex
);
897 ret
= di
->bridge_exporter
.probability
;
898 ovs_mutex_unlock(&mutex
);
903 dpif_ipfix_get_bridge_exporter_input_sampling(const struct dpif_ipfix
*di
)
907 ovs_mutex_lock(&mutex
);
908 if (di
->bridge_exporter
.options
) {
909 ret
= di
->bridge_exporter
.options
->enable_input_sampling
;
911 ovs_mutex_unlock(&mutex
);
916 dpif_ipfix_get_bridge_exporter_output_sampling(const struct dpif_ipfix
*di
)
920 ovs_mutex_lock(&mutex
);
921 if (di
->bridge_exporter
.options
) {
922 ret
= di
->bridge_exporter
.options
->enable_output_sampling
;
924 ovs_mutex_unlock(&mutex
);
929 dpif_ipfix_get_bridge_exporter_tunnel_sampling(const struct dpif_ipfix
*di
)
933 ovs_mutex_lock(&mutex
);
934 if (di
->bridge_exporter
.options
) {
935 ret
= di
->bridge_exporter
.options
->enable_tunnel_sampling
;
937 ovs_mutex_unlock(&mutex
);
942 dpif_ipfix_clear(struct dpif_ipfix
*di
) OVS_REQUIRES(mutex
)
944 struct dpif_ipfix_flow_exporter_map_node
*exp_node
;
945 struct dpif_ipfix_port
*dip
, *next
;
947 dpif_ipfix_bridge_exporter_clear(&di
->bridge_exporter
);
949 HMAP_FOR_EACH_POP (exp_node
, node
, &di
->flow_exporter_map
) {
950 dpif_ipfix_flow_exporter_destroy(&exp_node
->exporter
);
954 HMAP_FOR_EACH_SAFE (dip
, next
, hmap_node
, &di
->tunnel_ports
) {
955 dpif_ipfix_del_port(di
, dip
);
960 dpif_ipfix_unref(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
962 if (di
&& ovs_refcount_unref_relaxed(&di
->ref_cnt
) == 1) {
963 ovs_mutex_lock(&mutex
);
964 dpif_ipfix_clear(di
);
965 dpif_ipfix_bridge_exporter_destroy(&di
->bridge_exporter
);
966 hmap_destroy(&di
->flow_exporter_map
);
967 hmap_destroy(&di
->tunnel_ports
);
969 ovs_mutex_unlock(&mutex
);
974 ipfix_init_header(uint32_t export_time_sec
, uint32_t seq_number
,
975 uint32_t obs_domain_id
, struct dp_packet
*msg
)
977 struct ipfix_header
*hdr
;
979 hdr
= dp_packet_put_zeros(msg
, sizeof *hdr
);
980 hdr
->version
= htons(IPFIX_VERSION
);
981 hdr
->length
= htons(sizeof *hdr
); /* Updated in ipfix_send_msg. */
982 hdr
->export_time
= htonl(export_time_sec
);
983 hdr
->seq_number
= htonl(seq_number
);
984 hdr
->obs_domain_id
= htonl(obs_domain_id
);
988 ipfix_send_msg(const struct collectors
*collectors
, struct dp_packet
*msg
)
990 struct ipfix_header
*hdr
;
992 /* Adjust the length in the header. */
993 hdr
= dp_packet_data(msg
);
994 hdr
->length
= htons(dp_packet_size(msg
));
996 collectors_send(collectors
, dp_packet_data(msg
), dp_packet_size(msg
));
997 dp_packet_set_size(msg
, 0);
1001 ipfix_get_template_id(enum ipfix_proto_l2 l2
, enum ipfix_proto_l3 l3
,
1002 enum ipfix_proto_l4 l4
, enum ipfix_proto_tunnel tunnel
)
1004 uint16_t template_id
;
1006 template_id
= template_id
* NUM_IPFIX_PROTO_L3
+ l3
;
1007 template_id
= template_id
* NUM_IPFIX_PROTO_L4
+ l4
;
1008 template_id
= template_id
* NUM_IPFIX_PROTO_TUNNEL
+ tunnel
;
1009 return IPFIX_TEMPLATE_ID_MIN
+ template_id
;
1013 ipfix_define_template_entity(enum ipfix_entity_id id
,
1014 enum ipfix_entity_size size
,
1015 enum ipfix_entity_enterprise enterprise
,
1016 struct dp_packet
*msg
)
1018 struct ipfix_template_field_specifier
*field
;
1022 field_size
= sizeof *field
;
1024 /* No enterprise number */
1025 field_size
= sizeof *field
- sizeof(ovs_be32
);
1027 field
= dp_packet_put_zeros(msg
, field_size
);
1028 field
->element_id
= htons(id
);
1030 field
->field_length
= htons(size
);
1032 /* RFC 5101, Section 7. Variable-Length Information Element */
1033 field
->field_length
= OVS_BE16_MAX
;
1036 field
->enterprise
= htonl(enterprise
);
1042 ipfix_define_template_fields(enum ipfix_proto_l2 l2
, enum ipfix_proto_l3 l3
,
1043 enum ipfix_proto_l4 l4
, enum ipfix_proto_tunnel tunnel
,
1044 struct dp_packet
*msg
)
1050 ipfix_define_template_entity(IPFIX_ENTITY_ID_##ID, \
1051 IPFIX_ENTITY_SIZE_##ID, \
1052 IPFIX_ENTITY_ENTERPRISE_##ID, msg); \
1058 DEF(OBSERVATION_POINT_ID
);
1059 DEF(FLOW_DIRECTION
);
1061 /* Common Ethernet entities. */
1062 DEF(SOURCE_MAC_ADDRESS
);
1063 DEF(DESTINATION_MAC_ADDRESS
);
1065 DEF(ETHERNET_HEADER_LENGTH
);
1067 if (l2
== IPFIX_PROTO_L2_VLAN
) {
1070 DEF(DOT1Q_PRIORITY
);
1073 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1076 DEF(PROTOCOL_IDENTIFIER
);
1077 DEF(IP_DIFF_SERV_CODE_POINT
);
1079 DEF(IP_CLASS_OF_SERVICE
);
1081 if (l3
== IPFIX_PROTO_L3_IPV4
) {
1082 DEF(SOURCE_IPV4_ADDRESS
);
1083 DEF(DESTINATION_IPV4_ADDRESS
);
1084 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1085 DEF(SOURCE_TRANSPORT_PORT
);
1086 DEF(DESTINATION_TRANSPORT_PORT
);
1087 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1088 DEF(ICMP_TYPE_IPV4
);
1089 DEF(ICMP_CODE_IPV4
);
1091 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1092 DEF(SOURCE_IPV6_ADDRESS
);
1093 DEF(DESTINATION_IPV6_ADDRESS
);
1094 DEF(FLOW_LABEL_IPV6
);
1095 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1096 DEF(SOURCE_TRANSPORT_PORT
);
1097 DEF(DESTINATION_TRANSPORT_PORT
);
1098 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1099 DEF(ICMP_TYPE_IPV6
);
1100 DEF(ICMP_CODE_IPV6
);
1105 if (tunnel
!= IPFIX_PROTO_NOT_TUNNELED
) {
1106 DEF(TUNNEL_SOURCE_IPV4_ADDRESS
);
1107 DEF(TUNNEL_DESTINATION_IPV4_ADDRESS
);
1108 DEF(TUNNEL_PROTOCOL_IDENTIFIER
);
1109 DEF(TUNNEL_SOURCE_TRANSPORT_PORT
);
1110 DEF(TUNNEL_DESTINATION_TRANSPORT_PORT
);
1115 /* 2. Flow aggregated data. */
1117 DEF(FLOW_START_DELTA_MICROSECONDS
);
1118 DEF(FLOW_END_DELTA_MICROSECONDS
);
1119 DEF(PACKET_DELTA_COUNT
);
1120 DEF(LAYER2_OCTET_DELTA_COUNT
);
1121 DEF(FLOW_END_REASON
);
1123 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1124 DEF(OCTET_DELTA_COUNT
);
1125 DEF(OCTET_DELTA_SUM_OF_SQUARES
);
1126 DEF(MINIMUM_IP_TOTAL_LENGTH
);
1127 DEF(MAXIMUM_IP_TOTAL_LENGTH
);
1137 ipfix_init_template_msg(void *msg_stub
, uint32_t export_time_sec
,
1138 uint32_t seq_number
, uint32_t obs_domain_id
,
1139 struct dp_packet
*msg
, size_t *set_hdr_offset
)
1141 struct ipfix_set_header
*set_hdr
;
1143 dp_packet_use_stub(msg
, msg_stub
, sizeof msg_stub
);
1145 ipfix_init_header(export_time_sec
, seq_number
, obs_domain_id
, msg
);
1146 *set_hdr_offset
= dp_packet_size(msg
);
1148 /* Add a Template Set. */
1149 set_hdr
= dp_packet_put_zeros(msg
, sizeof *set_hdr
);
1150 set_hdr
->set_id
= htons(IPFIX_SET_ID_TEMPLATE
);
1154 ipfix_send_template_msg(const struct collectors
*collectors
,
1155 struct dp_packet
*msg
, size_t set_hdr_offset
)
1157 struct ipfix_set_header
*set_hdr
;
1159 /* Send template message. */
1160 set_hdr
= (struct ipfix_set_header
*)
1161 ((uint8_t*)dp_packet_data(msg
) + set_hdr_offset
);
1162 set_hdr
->length
= htons(dp_packet_size(msg
) - set_hdr_offset
);
1164 ipfix_send_msg(collectors
, msg
);
1166 dp_packet_uninit(msg
);
1170 ipfix_send_template_msgs(struct dpif_ipfix_exporter
*exporter
,
1171 uint32_t export_time_sec
, uint32_t obs_domain_id
)
1173 uint64_t msg_stub
[DIV_ROUND_UP(MAX_MESSAGE_LEN
, 8)];
1174 struct dp_packet msg
;
1175 size_t set_hdr_offset
, tmpl_hdr_offset
;
1176 struct ipfix_template_record_header
*tmpl_hdr
;
1177 uint16_t field_count
;
1178 enum ipfix_proto_l2 l2
;
1179 enum ipfix_proto_l3 l3
;
1180 enum ipfix_proto_l4 l4
;
1181 enum ipfix_proto_tunnel tunnel
;
1183 ipfix_init_template_msg(msg_stub
, export_time_sec
, exporter
->seq_number
,
1184 obs_domain_id
, &msg
, &set_hdr_offset
);
1185 /* Define one template for each possible combination of
1187 for (l2
= 0; l2
< NUM_IPFIX_PROTO_L2
; l2
++) {
1188 for (l3
= 0; l3
< NUM_IPFIX_PROTO_L3
; l3
++) {
1189 for (l4
= 0; l4
< NUM_IPFIX_PROTO_L4
; l4
++) {
1190 if (l3
== IPFIX_PROTO_L3_UNKNOWN
&&
1191 l4
!= IPFIX_PROTO_L4_UNKNOWN
) {
1194 for (tunnel
= 0; tunnel
< NUM_IPFIX_PROTO_TUNNEL
; tunnel
++) {
1195 /* When the size of the template packet reaches
1196 * MAX_MESSAGE_LEN(1024), send it out.
1197 * And then reinitialize the msg to construct a new
1198 * packet for the following templates.
1200 if (dp_packet_size(&msg
) >= MAX_MESSAGE_LEN
) {
1201 /* Send template message. */
1202 ipfix_send_template_msg(exporter
->collectors
,
1203 &msg
, set_hdr_offset
);
1205 /* Reinitialize the template msg. */
1206 ipfix_init_template_msg(msg_stub
, export_time_sec
,
1207 exporter
->seq_number
,
1208 obs_domain_id
, &msg
,
1212 tmpl_hdr_offset
= dp_packet_size(&msg
);
1213 tmpl_hdr
= dp_packet_put_zeros(&msg
, sizeof *tmpl_hdr
);
1214 tmpl_hdr
->template_id
= htons(
1215 ipfix_get_template_id(l2
, l3
, l4
, tunnel
));
1217 ipfix_define_template_fields(l2
, l3
, l4
, tunnel
, &msg
);
1218 tmpl_hdr
= (struct ipfix_template_record_header
*)
1219 ((uint8_t*)dp_packet_data(&msg
) + tmpl_hdr_offset
);
1220 tmpl_hdr
->field_count
= htons(field_count
);
1226 /* Send template message. */
1227 ipfix_send_template_msg(exporter
->collectors
, &msg
, set_hdr_offset
);
1229 /* XXX: Add Options Template Sets, at least to define a Flow Keys
1230 * Option Template. */
1234 static inline uint32_t
1235 ipfix_hash_flow_key(const struct ipfix_flow_key
*flow_key
, uint32_t basis
)
1238 hash
= hash_int(flow_key
->obs_domain_id
, basis
);
1239 hash
= hash_int(flow_key
->template_id
, hash
);
1240 hash
= hash_bytes(flow_key
->flow_key_msg_part
,
1241 flow_key
->flow_key_msg_part_size
, hash
);
1246 ipfix_flow_key_equal(const struct ipfix_flow_key
*a
,
1247 const struct ipfix_flow_key
*b
)
1249 /* The template ID determines the flow key size, so not need to
1251 return (a
->obs_domain_id
== b
->obs_domain_id
1252 && a
->template_id
== b
->template_id
1253 && memcmp(a
->flow_key_msg_part
, b
->flow_key_msg_part
,
1254 a
->flow_key_msg_part_size
) == 0);
1257 static struct ipfix_flow_cache_entry
*
1258 ipfix_cache_find_entry(const struct dpif_ipfix_exporter
*exporter
,
1259 const struct ipfix_flow_key
*flow_key
)
1261 struct ipfix_flow_cache_entry
*entry
;
1263 HMAP_FOR_EACH_WITH_HASH (entry
, flow_key_map_node
,
1264 ipfix_hash_flow_key(flow_key
, 0),
1265 &exporter
->cache_flow_key_map
) {
1266 if (ipfix_flow_key_equal(&entry
->flow_key
, flow_key
)) {
1275 ipfix_cache_next_timeout_msec(const struct dpif_ipfix_exporter
*exporter
,
1276 long long int *next_timeout_msec
)
1278 struct ipfix_flow_cache_entry
*entry
;
1280 LIST_FOR_EACH (entry
, cache_flow_start_timestamp_list_node
,
1281 &exporter
->cache_flow_start_timestamp_list
) {
1282 *next_timeout_msec
= entry
->flow_start_timestamp_usec
/ 1000LL
1283 + 1000LL * exporter
->cache_active_timeout
;
1291 ipfix_cache_aggregate_entries(struct ipfix_flow_cache_entry
*from_entry
,
1292 struct ipfix_flow_cache_entry
*to_entry
)
1294 uint64_t *to_start
, *to_end
, *from_start
, *from_end
;
1295 uint16_t *to_min_len
, *to_max_len
, *from_min_len
, *from_max_len
;
1297 to_start
= &to_entry
->flow_start_timestamp_usec
;
1298 to_end
= &to_entry
->flow_end_timestamp_usec
;
1299 from_start
= &from_entry
->flow_start_timestamp_usec
;
1300 from_end
= &from_entry
->flow_end_timestamp_usec
;
1302 if (*to_start
> *from_start
) {
1303 *to_start
= *from_start
;
1305 if (*to_end
< *from_end
) {
1306 *to_end
= *from_end
;
1309 to_entry
->packet_delta_count
+= from_entry
->packet_delta_count
;
1310 to_entry
->layer2_octet_delta_count
+= from_entry
->layer2_octet_delta_count
;
1312 to_entry
->octet_delta_count
+= from_entry
->octet_delta_count
;
1313 to_entry
->octet_delta_sum_of_squares
+=
1314 from_entry
->octet_delta_sum_of_squares
;
1316 to_min_len
= &to_entry
->minimum_ip_total_length
;
1317 to_max_len
= &to_entry
->maximum_ip_total_length
;
1318 from_min_len
= &from_entry
->minimum_ip_total_length
;
1319 from_max_len
= &from_entry
->maximum_ip_total_length
;
1321 if (!*to_min_len
|| (*from_min_len
&& *to_min_len
> *from_min_len
)) {
1322 *to_min_len
= *from_min_len
;
1324 if (*to_max_len
< *from_max_len
) {
1325 *to_max_len
= *from_max_len
;
1329 /* Add an entry into a flow cache. The entry is either aggregated into
1330 * an existing entry with the same flow key and free()d, or it is
1331 * inserted into the cache. */
1333 ipfix_cache_update(struct dpif_ipfix_exporter
*exporter
,
1334 struct ipfix_flow_cache_entry
*entry
)
1336 struct ipfix_flow_cache_entry
*old_entry
;
1338 old_entry
= ipfix_cache_find_entry(exporter
, &entry
->flow_key
);
1340 if (old_entry
== NULL
) {
1341 hmap_insert(&exporter
->cache_flow_key_map
, &entry
->flow_key_map_node
,
1342 ipfix_hash_flow_key(&entry
->flow_key
, 0));
1344 /* As the latest entry added into the cache, it should
1345 * logically have the highest flow_start_timestamp_usec, so
1346 * append it at the tail. */
1347 ovs_list_push_back(&exporter
->cache_flow_start_timestamp_list
,
1348 &entry
->cache_flow_start_timestamp_list_node
);
1350 /* Enforce exporter->cache_max_flows limit. */
1351 if (hmap_count(&exporter
->cache_flow_key_map
)
1352 > exporter
->cache_max_flows
) {
1353 dpif_ipfix_cache_expire_now(exporter
, false);
1356 ipfix_cache_aggregate_entries(entry
, old_entry
);
1362 ipfix_cache_entry_init(struct ipfix_flow_cache_entry
*entry
,
1363 const struct dp_packet
*packet
, const struct flow
*flow
,
1364 uint64_t packet_delta_count
, uint32_t obs_domain_id
,
1365 uint32_t obs_point_id
, odp_port_t output_odp_port
,
1366 const struct dpif_ipfix_port
*tunnel_port
,
1367 const struct flow_tnl
*tunnel_key
)
1369 struct ipfix_flow_key
*flow_key
;
1370 struct dp_packet msg
;
1371 enum ipfix_proto_l2 l2
;
1372 enum ipfix_proto_l3 l3
;
1373 enum ipfix_proto_l4 l4
;
1374 enum ipfix_proto_tunnel tunnel
= IPFIX_PROTO_NOT_TUNNELED
;
1375 uint8_t ethernet_header_length
;
1376 uint16_t ethernet_total_length
;
1378 flow_key
= &entry
->flow_key
;
1379 dp_packet_use_stub(&msg
, flow_key
->flow_key_msg_part
,
1380 sizeof flow_key
->flow_key_msg_part
);
1382 /* Choose the right template ID matching the protocols in the
1383 * sampled packet. */
1384 l2
= (flow
->vlan_tci
== 0) ? IPFIX_PROTO_L2_ETH
: IPFIX_PROTO_L2_VLAN
;
1386 switch(ntohs(flow
->dl_type
)) {
1388 l3
= IPFIX_PROTO_L3_IPV4
;
1389 switch(flow
->nw_proto
) {
1393 l4
= IPFIX_PROTO_L4_TCP_UDP_SCTP
;
1396 l4
= IPFIX_PROTO_L4_ICMP
;
1399 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1403 l3
= IPFIX_PROTO_L3_IPV6
;
1404 switch(flow
->nw_proto
) {
1408 l4
= IPFIX_PROTO_L4_TCP_UDP_SCTP
;
1410 case IPPROTO_ICMPV6
:
1411 l4
= IPFIX_PROTO_L4_ICMP
;
1414 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1418 l3
= IPFIX_PROTO_L3_UNKNOWN
;
1419 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1422 if (tunnel_port
&& tunnel_key
) {
1423 tunnel
= IPFIX_PROTO_TUNNELED
;
1426 flow_key
->obs_domain_id
= obs_domain_id
;
1427 flow_key
->template_id
= ipfix_get_template_id(l2
, l3
, l4
, tunnel
);
1429 /* The fields defined in the ipfix_data_record_* structs and sent
1430 * below must match exactly the templates defined in
1431 * ipfix_define_template_fields. */
1433 ethernet_header_length
= (l2
== IPFIX_PROTO_L2_VLAN
)
1434 ? VLAN_ETH_HEADER_LEN
: ETH_HEADER_LEN
;
1435 ethernet_total_length
= dp_packet_size(packet
);
1437 /* Common Ethernet entities. */
1439 struct ipfix_data_record_flow_key_common
*data_common
;
1441 data_common
= dp_packet_put_zeros(&msg
, sizeof *data_common
);
1442 data_common
->observation_point_id
= htonl(obs_point_id
);
1443 data_common
->flow_direction
=
1444 (output_odp_port
== ODPP_NONE
) ? INGRESS_FLOW
: EGRESS_FLOW
;
1445 data_common
->source_mac_address
= flow
->dl_src
;
1446 data_common
->destination_mac_address
= flow
->dl_dst
;
1447 data_common
->ethernet_type
= flow
->dl_type
;
1448 data_common
->ethernet_header_length
= ethernet_header_length
;
1451 if (l2
== IPFIX_PROTO_L2_VLAN
) {
1452 struct ipfix_data_record_flow_key_vlan
*data_vlan
;
1453 uint16_t vlan_id
= vlan_tci_to_vid(flow
->vlan_tci
);
1454 uint8_t priority
= vlan_tci_to_pcp(flow
->vlan_tci
);
1456 data_vlan
= dp_packet_put_zeros(&msg
, sizeof *data_vlan
);
1457 data_vlan
->vlan_id
= htons(vlan_id
);
1458 data_vlan
->dot1q_vlan_id
= htons(vlan_id
);
1459 data_vlan
->dot1q_priority
= priority
;
1462 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1463 struct ipfix_data_record_flow_key_ip
*data_ip
;
1465 data_ip
= dp_packet_put_zeros(&msg
, sizeof *data_ip
);
1466 data_ip
->ip_version
= (l3
== IPFIX_PROTO_L3_IPV4
) ? 4 : 6;
1467 data_ip
->ip_ttl
= flow
->nw_ttl
;
1468 data_ip
->protocol_identifier
= flow
->nw_proto
;
1469 data_ip
->ip_diff_serv_code_point
= flow
->nw_tos
>> 2;
1470 data_ip
->ip_precedence
= flow
->nw_tos
>> 5;
1471 data_ip
->ip_class_of_service
= flow
->nw_tos
;
1473 if (l3
== IPFIX_PROTO_L3_IPV4
) {
1474 struct ipfix_data_record_flow_key_ipv4
*data_ipv4
;
1476 data_ipv4
= dp_packet_put_zeros(&msg
, sizeof *data_ipv4
);
1477 data_ipv4
->source_ipv4_address
= flow
->nw_src
;
1478 data_ipv4
->destination_ipv4_address
= flow
->nw_dst
;
1479 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1480 struct ipfix_data_record_flow_key_ipv6
*data_ipv6
;
1482 data_ipv6
= dp_packet_put_zeros(&msg
, sizeof *data_ipv6
);
1483 memcpy(data_ipv6
->source_ipv6_address
, &flow
->ipv6_src
,
1484 sizeof flow
->ipv6_src
);
1485 memcpy(data_ipv6
->destination_ipv6_address
, &flow
->ipv6_dst
,
1486 sizeof flow
->ipv6_dst
);
1487 data_ipv6
->flow_label_ipv6
= flow
->ipv6_label
;
1491 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1492 struct ipfix_data_record_flow_key_transport
*data_transport
;
1494 data_transport
= dp_packet_put_zeros(&msg
, sizeof *data_transport
);
1495 data_transport
->source_transport_port
= flow
->tp_src
;
1496 data_transport
->destination_transport_port
= flow
->tp_dst
;
1497 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1498 struct ipfix_data_record_flow_key_icmp
*data_icmp
;
1500 data_icmp
= dp_packet_put_zeros(&msg
, sizeof *data_icmp
);
1501 data_icmp
->icmp_type
= ntohs(flow
->tp_src
) & 0xff;
1502 data_icmp
->icmp_code
= ntohs(flow
->tp_dst
) & 0xff;
1505 if (tunnel
== IPFIX_PROTO_TUNNELED
) {
1506 struct ipfix_data_record_flow_key_tunnel
*data_tunnel
;
1507 const uint8_t *tun_id
;
1509 data_tunnel
= dp_packet_put_zeros(&msg
, sizeof *data_tunnel
+
1510 tunnel_port
->tunnel_key_length
);
1511 data_tunnel
->tunnel_source_ipv4_address
= tunnel_key
->ip_src
;
1512 data_tunnel
->tunnel_destination_ipv4_address
= tunnel_key
->ip_dst
;
1513 /* The tunnel_protocol_identifier is from tunnel_proto array, which
1514 * contains protocol_identifiers of each tunnel type.
1515 * For the tunnel type on the top of IPSec, which uses the protocol
1516 * identifier of the upper tunnel type is used, the tcp_src and tcp_dst
1517 * are decided based on the protocol identifiers.
1519 * The protocol identifier of DPIF_IPFIX_TUNNEL_IPSEC_GRE is IPPROTO_GRE,
1520 * and both tp_src and tp_dst are zero.
1522 data_tunnel
->tunnel_protocol_identifier
=
1523 tunnel_protocol
[tunnel_port
->tunnel_type
];
1524 data_tunnel
->tunnel_source_transport_port
= tunnel_key
->tp_src
;
1525 data_tunnel
->tunnel_destination_transport_port
= tunnel_key
->tp_dst
;
1526 data_tunnel
->tunnel_type
= tunnel_port
->tunnel_type
;
1527 data_tunnel
->tunnel_key_length
= tunnel_port
->tunnel_key_length
;
1528 /* tun_id is in network order, and tunnel key is in low bits. */
1529 tun_id
= (const uint8_t *) &tunnel_key
->tun_id
;
1530 memcpy(data_tunnel
->tunnel_key
,
1531 &tun_id
[8 - tunnel_port
->tunnel_key_length
],
1532 tunnel_port
->tunnel_key_length
);
1535 flow_key
->flow_key_msg_part_size
= dp_packet_size(&msg
);
1539 uint64_t layer2_octet_delta_count
;
1541 /* Calculate the total matched octet count by considering as
1542 * an approximation that all matched packets have the same
1544 layer2_octet_delta_count
= packet_delta_count
* ethernet_total_length
;
1546 xgettimeofday(&now
);
1547 entry
->flow_end_timestamp_usec
= now
.tv_usec
+ 1000000LL * now
.tv_sec
;
1548 entry
->flow_start_timestamp_usec
= entry
->flow_end_timestamp_usec
;
1549 entry
->packet_delta_count
= packet_delta_count
;
1550 entry
->layer2_octet_delta_count
= layer2_octet_delta_count
;
1553 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1554 uint16_t ip_total_length
=
1555 ethernet_total_length
- ethernet_header_length
;
1556 uint64_t octet_delta_count
;
1558 /* Calculate the total matched octet count by considering as
1559 * an approximation that all matched packets have the same
1561 octet_delta_count
= packet_delta_count
* ip_total_length
;
1563 entry
->octet_delta_count
= octet_delta_count
;
1564 entry
->octet_delta_sum_of_squares
= octet_delta_count
* ip_total_length
;
1565 entry
->minimum_ip_total_length
= ip_total_length
;
1566 entry
->maximum_ip_total_length
= ip_total_length
;
1568 entry
->octet_delta_sum_of_squares
= 0;
1569 entry
->minimum_ip_total_length
= 0;
1570 entry
->maximum_ip_total_length
= 0;
1574 /* Send each single data record in its own data set, to simplify the
1575 * implementation by avoiding having to group record by template ID
1576 * before sending. */
1578 ipfix_put_data_set(uint32_t export_time_sec
,
1579 struct ipfix_flow_cache_entry
*entry
,
1580 enum ipfix_flow_end_reason flow_end_reason
,
1581 struct dp_packet
*msg
)
1583 size_t set_hdr_offset
;
1584 struct ipfix_set_header
*set_hdr
;
1586 set_hdr_offset
= dp_packet_size(msg
);
1588 /* Put a Data Set. */
1589 set_hdr
= dp_packet_put_zeros(msg
, sizeof *set_hdr
);
1590 set_hdr
->set_id
= htons(entry
->flow_key
.template_id
);
1592 /* Copy the flow key part of the data record. */
1594 dp_packet_put(msg
, entry
->flow_key
.flow_key_msg_part
,
1595 entry
->flow_key
.flow_key_msg_part_size
);
1597 /* Put the non-key part of the data record. */
1600 struct ipfix_data_record_aggregated_common
*data_aggregated_common
;
1601 uint64_t export_time_usec
, flow_start_delta_usec
, flow_end_delta_usec
;
1603 /* Calculate the negative deltas relative to the export time
1604 * in seconds sent in the header, not the exact export
1606 export_time_usec
= 1000000LL * export_time_sec
;
1607 flow_start_delta_usec
= export_time_usec
1608 - entry
->flow_start_timestamp_usec
;
1609 flow_end_delta_usec
= export_time_usec
1610 - entry
->flow_end_timestamp_usec
;
1612 data_aggregated_common
= dp_packet_put_zeros(
1613 msg
, sizeof *data_aggregated_common
);
1614 data_aggregated_common
->flow_start_delta_microseconds
= htonl(
1615 flow_start_delta_usec
);
1616 data_aggregated_common
->flow_end_delta_microseconds
= htonl(
1617 flow_end_delta_usec
);
1618 data_aggregated_common
->packet_delta_count
= htonll(
1619 entry
->packet_delta_count
);
1620 data_aggregated_common
->layer2_octet_delta_count
= htonll(
1621 entry
->layer2_octet_delta_count
);
1622 data_aggregated_common
->flow_end_reason
= flow_end_reason
;
1625 if (entry
->octet_delta_sum_of_squares
) { /* IP packet. */
1626 struct ipfix_data_record_aggregated_ip
*data_aggregated_ip
;
1628 data_aggregated_ip
= dp_packet_put_zeros(
1629 msg
, sizeof *data_aggregated_ip
);
1630 data_aggregated_ip
->octet_delta_count
= htonll(
1631 entry
->octet_delta_count
);
1632 data_aggregated_ip
->octet_delta_sum_of_squares
= htonll(
1633 entry
->octet_delta_sum_of_squares
);
1634 data_aggregated_ip
->minimum_ip_total_length
= htonll(
1635 entry
->minimum_ip_total_length
);
1636 data_aggregated_ip
->maximum_ip_total_length
= htonll(
1637 entry
->maximum_ip_total_length
);
1640 set_hdr
= (struct ipfix_set_header
*)((uint8_t*)dp_packet_data(msg
) + set_hdr_offset
);
1641 set_hdr
->length
= htons(dp_packet_size(msg
) - set_hdr_offset
);
1644 /* Send an IPFIX message with a single data record. */
1646 ipfix_send_data_msg(struct dpif_ipfix_exporter
*exporter
,
1647 uint32_t export_time_sec
,
1648 struct ipfix_flow_cache_entry
*entry
,
1649 enum ipfix_flow_end_reason flow_end_reason
)
1651 uint64_t msg_stub
[DIV_ROUND_UP(MAX_MESSAGE_LEN
, 8)];
1652 struct dp_packet msg
;
1653 dp_packet_use_stub(&msg
, msg_stub
, sizeof msg_stub
);
1655 ipfix_init_header(export_time_sec
, exporter
->seq_number
++,
1656 entry
->flow_key
.obs_domain_id
, &msg
);
1657 ipfix_put_data_set(export_time_sec
, entry
, flow_end_reason
, &msg
);
1658 ipfix_send_msg(exporter
->collectors
, &msg
);
1660 dp_packet_uninit(&msg
);
1664 dpif_ipfix_sample(struct dpif_ipfix_exporter
*exporter
,
1665 const struct dp_packet
*packet
, const struct flow
*flow
,
1666 uint64_t packet_delta_count
, uint32_t obs_domain_id
,
1667 uint32_t obs_point_id
, odp_port_t output_odp_port
,
1668 const struct dpif_ipfix_port
*tunnel_port
,
1669 const struct flow_tnl
*tunnel_key
)
1671 struct ipfix_flow_cache_entry
*entry
;
1673 /* Create a flow cache entry from the sample. */
1674 entry
= xmalloc(sizeof *entry
);
1675 ipfix_cache_entry_init(entry
, packet
, flow
, packet_delta_count
,
1676 obs_domain_id
, obs_point_id
,
1677 output_odp_port
, tunnel_port
, tunnel_key
);
1678 ipfix_cache_update(exporter
, entry
);
1682 bridge_exporter_enabled(struct dpif_ipfix
*di
)
1684 return di
->bridge_exporter
.probability
> 0;
1688 dpif_ipfix_bridge_sample(struct dpif_ipfix
*di
, const struct dp_packet
*packet
,
1689 const struct flow
*flow
,
1690 odp_port_t input_odp_port
, odp_port_t output_odp_port
,
1691 const struct flow_tnl
*output_tunnel_key
)
1694 uint64_t packet_delta_count
;
1695 const struct flow_tnl
*tunnel_key
= NULL
;
1696 struct dpif_ipfix_port
* tunnel_port
= NULL
;
1698 ovs_mutex_lock(&mutex
);
1699 if (!bridge_exporter_enabled(di
)) {
1700 ovs_mutex_unlock(&mutex
);
1704 /* Skip BFD packets:
1705 * Bidirectional Forwarding Detection(BFD) packets are for monitoring
1706 * the tunnel link status and consumed by ovs itself. No need to
1708 * CF IETF RFC 5881, BFD control packet is the UDP packet with
1709 * destination port 3784, and BFD echo packet is the UDP packet with
1710 * destination port 3785.
1712 if (is_ip_any(flow
) &&
1713 flow
->nw_proto
== IPPROTO_UDP
&&
1714 (flow
->tp_dst
== htons(BFD_CONTROL_DEST_PORT
) ||
1715 flow
->tp_dst
== htons(BFD_ECHO_DEST_PORT
))) {
1716 ovs_mutex_unlock(&mutex
);
1720 /* Use the sampling probability as an approximation of the number
1721 * of matched packets. */
1722 packet_delta_count
= UINT32_MAX
/ di
->bridge_exporter
.probability
;
1723 if (di
->bridge_exporter
.options
->enable_tunnel_sampling
) {
1724 if (output_odp_port
== ODPP_NONE
&& flow
->tunnel
.ip_dst
) {
1726 tunnel_key
= &flow
->tunnel
;
1727 tunnel_port
= dpif_ipfix_find_port(di
, input_odp_port
);
1729 if (output_odp_port
!= ODPP_NONE
&& output_tunnel_key
) {
1730 /* Output tunnel, output_tunnel_key must be valid. */
1731 tunnel_key
= output_tunnel_key
;
1732 tunnel_port
= dpif_ipfix_find_port(di
, output_odp_port
);
1736 dpif_ipfix_sample(&di
->bridge_exporter
.exporter
, packet
, flow
,
1738 di
->bridge_exporter
.options
->obs_domain_id
,
1739 di
->bridge_exporter
.options
->obs_point_id
,
1740 output_odp_port
, tunnel_port
, tunnel_key
);
1741 ovs_mutex_unlock(&mutex
);
1745 dpif_ipfix_flow_sample(struct dpif_ipfix
*di
, const struct dp_packet
*packet
,
1746 const struct flow
*flow
, uint32_t collector_set_id
,
1747 uint16_t probability
, uint32_t obs_domain_id
,
1748 uint32_t obs_point_id
) OVS_EXCLUDED(mutex
)
1750 struct dpif_ipfix_flow_exporter_map_node
*node
;
1751 /* Use the sampling probability as an approximation of the number
1752 * of matched packets. */
1753 uint64_t packet_delta_count
= USHRT_MAX
/ probability
;
1755 ovs_mutex_lock(&mutex
);
1756 node
= dpif_ipfix_find_flow_exporter_map_node(di
, collector_set_id
);
1758 dpif_ipfix_sample(&node
->exporter
.exporter
, packet
, flow
,
1759 packet_delta_count
, obs_domain_id
, obs_point_id
,
1760 ODPP_NONE
, NULL
, NULL
);
1762 ovs_mutex_unlock(&mutex
);
1766 dpif_ipfix_cache_expire(struct dpif_ipfix_exporter
*exporter
,
1767 bool forced_end
, const uint64_t export_time_usec
,
1768 const uint32_t export_time_sec
)
1770 struct ipfix_flow_cache_entry
*entry
, *next_entry
;
1771 uint64_t max_flow_start_timestamp_usec
;
1772 bool template_msg_sent
= false;
1773 enum ipfix_flow_end_reason flow_end_reason
;
1775 if (ovs_list_is_empty(&exporter
->cache_flow_start_timestamp_list
)) {
1779 max_flow_start_timestamp_usec
= export_time_usec
-
1780 1000000LL * exporter
->cache_active_timeout
;
1782 LIST_FOR_EACH_SAFE (entry
, next_entry
, cache_flow_start_timestamp_list_node
,
1783 &exporter
->cache_flow_start_timestamp_list
) {
1785 flow_end_reason
= FORCED_END
;
1786 } else if (entry
->flow_start_timestamp_usec
1787 <= max_flow_start_timestamp_usec
) {
1788 flow_end_reason
= ACTIVE_TIMEOUT
;
1789 } else if (hmap_count(&exporter
->cache_flow_key_map
)
1790 > exporter
->cache_max_flows
) {
1791 /* Enforce exporter->cache_max_flows. */
1792 flow_end_reason
= LACK_OF_RESOURCES
;
1794 /* Remaining flows haven't expired yet. */
1798 ovs_list_remove(&entry
->cache_flow_start_timestamp_list_node
);
1799 hmap_remove(&exporter
->cache_flow_key_map
,
1800 &entry
->flow_key_map_node
);
1802 if (!template_msg_sent
1803 && (exporter
->last_template_set_time
+ IPFIX_TEMPLATE_INTERVAL
)
1804 <= export_time_sec
) {
1805 ipfix_send_template_msgs(exporter
, export_time_sec
,
1806 entry
->flow_key
.obs_domain_id
);
1807 exporter
->last_template_set_time
= export_time_sec
;
1808 template_msg_sent
= true;
1811 /* XXX: Group multiple data records for the same obs domain id
1812 * into the same message. */
1813 ipfix_send_data_msg(exporter
, export_time_sec
, entry
, flow_end_reason
);
1819 get_export_time_now(uint64_t *export_time_usec
, uint32_t *export_time_sec
)
1821 struct timeval export_time
;
1822 xgettimeofday(&export_time
);
1824 *export_time_usec
= export_time
.tv_usec
+ 1000000LL * export_time
.tv_sec
;
1826 /* The IPFIX start and end deltas are negative deltas relative to
1827 * the export time, so set the export time 1 second off to
1828 * calculate those deltas. */
1829 if (export_time
.tv_usec
== 0) {
1830 *export_time_sec
= export_time
.tv_sec
;
1832 *export_time_sec
= export_time
.tv_sec
+ 1;
1837 dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter
*exporter
,
1840 uint64_t export_time_usec
;
1841 uint32_t export_time_sec
;
1843 get_export_time_now(&export_time_usec
, &export_time_sec
);
1844 dpif_ipfix_cache_expire(exporter
, forced_end
, export_time_usec
,
1849 dpif_ipfix_run(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
1851 uint64_t export_time_usec
;
1852 uint32_t export_time_sec
;
1853 struct dpif_ipfix_flow_exporter_map_node
*flow_exporter_node
;
1855 ovs_mutex_lock(&mutex
);
1856 get_export_time_now(&export_time_usec
, &export_time_sec
);
1857 if (bridge_exporter_enabled(di
)) {
1858 dpif_ipfix_cache_expire(
1859 &di
->bridge_exporter
.exporter
, false, export_time_usec
,
1862 HMAP_FOR_EACH (flow_exporter_node
, node
, &di
->flow_exporter_map
) {
1863 dpif_ipfix_cache_expire(
1864 &flow_exporter_node
->exporter
.exporter
, false, export_time_usec
,
1867 ovs_mutex_unlock(&mutex
);
1871 dpif_ipfix_wait(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
1873 long long int next_timeout_msec
= LLONG_MAX
;
1874 struct dpif_ipfix_flow_exporter_map_node
*flow_exporter_node
;
1876 ovs_mutex_lock(&mutex
);
1877 if (bridge_exporter_enabled(di
)) {
1878 if (ipfix_cache_next_timeout_msec(
1879 &di
->bridge_exporter
.exporter
, &next_timeout_msec
)) {
1880 poll_timer_wait_until(next_timeout_msec
);
1883 HMAP_FOR_EACH (flow_exporter_node
, node
, &di
->flow_exporter_map
) {
1884 if (ipfix_cache_next_timeout_msec(
1885 &flow_exporter_node
->exporter
.exporter
, &next_timeout_msec
)) {
1886 poll_timer_wait_until(next_timeout_msec
);
1889 ovs_mutex_unlock(&mutex
);