2 * Copyright (c) 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "ofproto-dpif-ipfix.h"
20 #include "byte-order.h"
21 #include "collectors.h"
24 #include "openvswitch/hmap.h"
26 #include "openvswitch/list.h"
27 #include "openvswitch/ofpbuf.h"
29 #include "ofproto-dpif.h"
30 #include "dp-packet.h"
32 #include "poll-loop.h"
36 #include "openvswitch/vlog.h"
38 VLOG_DEFINE_THIS_MODULE(ipfix
);
40 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
41 static struct ovs_mutex mutex
= OVS_MUTEX_INITIALIZER
;
43 /* Cf. IETF RFC 5101 Section 10.3.4. */
44 #define IPFIX_DEFAULT_COLLECTOR_PORT 4739
46 /* Cf. IETF RFC 5881 Section 8. */
47 #define BFD_CONTROL_DEST_PORT 3784
48 #define BFD_ECHO_DEST_PORT 3785
/* Classification of a sampled packet, exported as an enterprise element. */
enum ipfix_sampled_packet_type {
    IPFIX_SAMPLED_PKT_UNKNOWN = 0x00,
    IPFIX_SAMPLED_PKT_IPV4_OK = 0x01,
    IPFIX_SAMPLED_PKT_IPV6_OK = 0x02,
    IPFIX_SAMPLED_PKT_IPV4_ERROR = 0x03,
    IPFIX_SAMPLED_PKT_IPV6_ERROR = 0x04,
    IPFIX_SAMPLED_PKT_OTHERS = 0x05
};
59 /* The standard layer2SegmentId (ID 351) element is included in vDS to send
60 * the VxLAN tunnel's VNI. It is 64-bit long, the most significant byte is
61 * used to indicate the type of tunnel (0x01 = VxLAN, 0x02 = GRE) and the three
62 * least significant bytes hold the value of the layer 2 overlay network
63 * segment identifier: a 24-bit VxLAN tunnel's VNI or a 24-bit GRE tunnel's
64 * TNI. This is not compatible with STT, as implemented in OVS, as
65 * its tunnel IDs are 64-bit.
67 * Two new enterprise information elements are defined which are similar to
68 * layer2SegmentId but support 64-bit IDs:
69 * tunnelType (ID 891) and tunnelKey (ID 892).
71 * The enum dpif_ipfix_tunnel_type is to declare the types supported in the
73 * The number of ipfix tunnel types includes two reserved types: 0x04 and 0x06.
/* Tunnel types exported in the enterprise tunnelType element (ID 891).
 * 0x06 is reserved; NUM_DPIF_IPFIX_TUNNEL sizes tunnel_protocol[] below. */
enum dpif_ipfix_tunnel_type {
    DPIF_IPFIX_TUNNEL_UNKNOWN = 0x00,
    DPIF_IPFIX_TUNNEL_VXLAN = 0x01,
    DPIF_IPFIX_TUNNEL_GRE = 0x02,
    DPIF_IPFIX_TUNNEL_LISP = 0x03,
    DPIF_IPFIX_TUNNEL_STT = 0x04,
    DPIF_IPFIX_TUNNEL_IPSEC_GRE = 0x05,
    DPIF_IPFIX_TUNNEL_GENEVE = 0x07,
    NUM_DPIF_IPFIX_TUNNEL
};
/* Alias: the ofputil stats struct doubles as this module's exporter stats. */
typedef struct ofputil_ipfix_stats ofproto_ipfix_stats;
88 struct dpif_ipfix_port
{
89 struct hmap_node hmap_node
; /* In struct dpif_ipfix's "tunnel_ports" hmap. */
90 struct ofport
*ofport
; /* To retrieve port stats. */
92 enum dpif_ipfix_tunnel_type tunnel_type
;
93 uint8_t tunnel_key_length
;
96 struct dpif_ipfix_exporter
{
97 struct collectors
*collectors
;
99 time_t last_template_set_time
;
100 struct hmap cache_flow_key_map
; /* ipfix_flow_cache_entry. */
101 struct ovs_list cache_flow_start_timestamp_list
; /* ipfix_flow_cache_entry. */
102 uint32_t cache_active_timeout
; /* In seconds. */
103 uint32_t cache_max_flows
;
104 char *virtual_obs_id
;
105 uint8_t virtual_obs_len
;
107 ofproto_ipfix_stats stats
;
110 struct dpif_ipfix_bridge_exporter
{
111 struct dpif_ipfix_exporter exporter
;
112 struct ofproto_ipfix_bridge_exporter_options
*options
;
113 uint32_t probability
;
116 struct dpif_ipfix_flow_exporter
{
117 struct dpif_ipfix_exporter exporter
;
118 struct ofproto_ipfix_flow_exporter_options
*options
;
121 struct dpif_ipfix_flow_exporter_map_node
{
122 struct hmap_node node
;
123 struct dpif_ipfix_flow_exporter exporter
;
127 struct dpif_ipfix_bridge_exporter bridge_exporter
;
128 struct hmap flow_exporter_map
; /* dpif_ipfix_flow_exporter_map_node. */
129 struct hmap tunnel_ports
; /* Contains "struct dpif_ipfix_port"s.
130 * It makes tunnel port lookups faster in
131 * sampling upcalls. */
132 struct ovs_refcount ref_cnt
;
135 #define IPFIX_VERSION 0x000a
137 /* When using UDP, IPFIX Template Records must be re-sent regularly.
138 * The standard default interval is 10 minutes (600 seconds).
139 * Cf. IETF RFC 5101 Section 10.3.6. */
140 #define IPFIX_TEMPLATE_INTERVAL 600
142 /* Cf. IETF RFC 5101 Section 3.1. */
144 struct ipfix_header
{
145 ovs_be16 version
; /* IPFIX_VERSION. */
146 ovs_be16 length
; /* Length in bytes including this header. */
147 ovs_be32 export_time
; /* Seconds since the epoch. */
148 ovs_be32 seq_number
; /* Message sequence number. */
149 ovs_be32 obs_domain_id
; /* Observation Domain ID. */
151 BUILD_ASSERT_DECL(sizeof(struct ipfix_header
) == 16);
153 #define IPFIX_SET_ID_TEMPLATE 2
154 #define IPFIX_SET_ID_OPTION_TEMPLATE 3
156 /* Cf. IETF RFC 5101 Section 3.3.2. */
158 struct ipfix_set_header
{
159 ovs_be16 set_id
; /* IPFIX_SET_ID_* or valid template ID for Data Sets. */
160 ovs_be16 length
; /* Length of the set in bytes including header. */
162 BUILD_ASSERT_DECL(sizeof(struct ipfix_set_header
) == 4);
164 /* Alternatives for templates at each layer. A template is defined by
165 * a combination of one value for each layer. */
/* Per-layer protocol alternatives; a template is one value per layer.
 * The NUM_* sentinels are used by ipfix_get_template_id(). */
enum ipfix_proto_l2 {
    IPFIX_PROTO_L2_ETH = 0,  /* No VLAN. */
    IPFIX_PROTO_L2_VLAN,
    NUM_IPFIX_PROTO_L2
};
enum ipfix_proto_l3 {
    IPFIX_PROTO_L3_UNKNOWN = 0,
    IPFIX_PROTO_L3_IPV4,
    IPFIX_PROTO_L3_IPV6,
    NUM_IPFIX_PROTO_L3
};
enum ipfix_proto_l4 {
    IPFIX_PROTO_L4_UNKNOWN = 0,
    IPFIX_PROTO_L4_TCP_UDP_SCTP,
    IPFIX_PROTO_L4_ICMP,
    NUM_IPFIX_PROTO_L4
};
enum ipfix_proto_tunnel {
    IPFIX_PROTO_NOT_TUNNELED = 0,
    IPFIX_PROTO_TUNNELED,  /* Support gre, lisp and vxlan. */
    NUM_IPFIX_PROTO_TUNNEL
};
189 /* Any Template ID > 255 is usable for Template Records. */
190 #define IPFIX_TEMPLATE_ID_MIN 256
192 /* Cf. IETF RFC 5101 Section 3.4.1. */
194 struct ipfix_template_record_header
{
195 ovs_be16 template_id
;
196 ovs_be16 field_count
;
198 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_record_header
) == 4);
200 enum ipfix_entity_id
{
201 /* standard IPFIX elements */
202 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ID_##ENUM = ID,
203 #include "ofproto/ipfix-entities.def"
204 /* non-standard IPFIX elements */
205 #define IPFIX_SET_ENTERPRISE(v) (((v) | 0x8000))
206 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
207 IPFIX_ENTITY_ID_##ENUM = IPFIX_SET_ENTERPRISE(ID),
208 #include "ofproto/ipfix-enterprise-entities.def"
211 enum ipfix_entity_size
{
212 /* standard IPFIX elements */
213 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_SIZE_##ENUM = SIZE,
214 #include "ofproto/ipfix-entities.def"
215 /* non-standard IPFIX elements */
216 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
217 IPFIX_ENTITY_SIZE_##ENUM = SIZE,
218 #include "ofproto/ipfix-enterprise-entities.def"
221 enum ipfix_entity_enterprise
{
222 /* standard IPFIX elements */
223 #define IPFIX_ENTITY(ENUM, ID, SIZE, NAME) IPFIX_ENTITY_ENTERPRISE_##ENUM = 0,
224 #include "ofproto/ipfix-entities.def"
225 /* non-standard IPFIX elements */
226 #define IPFIX_ENTERPRISE_ENTITY(ENUM, ID, SIZE, NAME, ENTERPRISE) \
227 IPFIX_ENTITY_ENTERPRISE_##ENUM = ENTERPRISE,
228 #include "ofproto/ipfix-enterprise-entities.def"
232 struct ipfix_template_field_specifier
{
233 ovs_be16 element_id
; /* IPFIX_ENTITY_ID_*. */
234 ovs_be16 field_length
; /* Length of the field's value, in bytes.
235 * For Variable-Length element, it should be 65535.
237 ovs_be32 enterprise
; /* Enterprise number */
239 BUILD_ASSERT_DECL(sizeof(struct ipfix_template_field_specifier
) == 8);
241 /* Cf. IETF RFC 5102 Section 5.11.6. */
/* Cf. IETF RFC 5102 Section 5.11.6 (flowDirection). */
enum ipfix_flow_direction {
    INGRESS_FLOW = 0x00,
    EGRESS_FLOW = 0x01
};
247 /* Part of data record flow key for common metadata and Ethernet entities. */
249 struct ipfix_data_record_flow_key_common
{
250 ovs_be32 observation_point_id
; /* OBSERVATION_POINT_ID */
251 uint8_t flow_direction
; /* FLOW_DIRECTION */
252 struct eth_addr source_mac_address
; /* SOURCE_MAC_ADDRESS */
253 struct eth_addr destination_mac_address
; /* DESTINATION_MAC_ADDRESS */
254 ovs_be16 ethernet_type
; /* ETHERNET_TYPE */
255 uint8_t ethernet_header_length
; /* ETHERNET_HEADER_LENGTH */
257 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_common
) == 20);
259 /* Part of data record flow key for VLAN entities. */
261 struct ipfix_data_record_flow_key_vlan
{
262 ovs_be16 vlan_id
; /* VLAN_ID */
263 ovs_be16 dot1q_vlan_id
; /* DOT1Q_VLAN_ID */
264 uint8_t dot1q_priority
; /* DOT1Q_PRIORITY */
266 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_vlan
) == 5);
268 /* Part of data record flow key for IP entities. */
269 /* XXX: Replace IP_TTL with MINIMUM_TTL and MAXIMUM_TTL? */
271 struct ipfix_data_record_flow_key_ip
{
272 uint8_t ip_version
; /* IP_VERSION */
273 uint8_t ip_ttl
; /* IP_TTL */
274 uint8_t protocol_identifier
; /* PROTOCOL_IDENTIFIER */
275 uint8_t ip_diff_serv_code_point
; /* IP_DIFF_SERV_CODE_POINT */
276 uint8_t ip_precedence
; /* IP_PRECEDENCE */
277 uint8_t ip_class_of_service
; /* IP_CLASS_OF_SERVICE */
279 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ip
) == 6);
281 /* Part of data record flow key for IPv4 entities. */
283 struct ipfix_data_record_flow_key_ipv4
{
284 ovs_be32 source_ipv4_address
; /* SOURCE_IPV4_ADDRESS */
285 ovs_be32 destination_ipv4_address
; /* DESTINATION_IPV4_ADDRESS */
287 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv4
) == 8);
289 /* Part of data record flow key for IPv6 entities. */
291 struct ipfix_data_record_flow_key_ipv6
{
292 uint8_t source_ipv6_address
[16]; /* SOURCE_IPV6_ADDRESS */
293 uint8_t destination_ipv6_address
[16]; /* DESTINATION_IPV6_ADDRESS */
294 ovs_be32 flow_label_ipv6
; /* FLOW_LABEL_IPV6 */
296 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_ipv6
) == 36);
298 /* Part of data record flow key for TCP/UDP/SCTP entities. */
300 struct ipfix_data_record_flow_key_transport
{
301 ovs_be16 source_transport_port
; /* SOURCE_TRANSPORT_PORT */
302 ovs_be16 destination_transport_port
; /* DESTINATION_TRANSPORT_PORT */
304 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_transport
) == 4);
306 /* Part of data record flow key for ICMP entities. */
308 struct ipfix_data_record_flow_key_icmp
{
309 uint8_t icmp_type
; /* ICMP_TYPE_IPV4 / ICMP_TYPE_IPV6 */
310 uint8_t icmp_code
; /* ICMP_CODE_IPV4 / ICMP_CODE_IPV6 */
312 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_icmp
) == 2);
314 /* For the tunnel type that is on the top of IPSec, the protocol identifier
315 * of the upper tunnel type is used.
317 static uint8_t tunnel_protocol
[NUM_DPIF_IPFIX_TUNNEL
] = {
319 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_VXLAN */
320 IPPROTO_GRE
, /* DPIF_IPFIX_TUNNEL_GRE */
321 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_LISP*/
322 IPPROTO_TCP
, /* DPIF_IPFIX_TUNNEL_STT*/
323 IPPROTO_GRE
, /* DPIF_IPFIX_TUNNEL_IPSEC_GRE */
325 IPPROTO_UDP
, /* DPIF_IPFIX_TUNNEL_GENEVE*/
329 struct ipfix_data_record_flow_key_tunnel
{
330 ovs_be32 tunnel_source_ipv4_address
; /* TUNNEL_SOURCE_IPV4_ADDRESS */
331 ovs_be32 tunnel_destination_ipv4_address
; /* TUNNEL_DESTINATION_IPV4_ADDRESS */
332 uint8_t tunnel_protocol_identifier
; /* TUNNEL_PROTOCOL_IDENTIFIER */
333 ovs_be16 tunnel_source_transport_port
; /* TUNNEL_SOURCE_TRANSPORT_PORT */
334 ovs_be16 tunnel_destination_transport_port
; /* TUNNEL_DESTINATION_TRANSPORT_PORT */
335 uint8_t tunnel_type
; /* TUNNEL_TYPE */
336 uint8_t tunnel_key_length
; /* length of TUNNEL_KEY */
337 uint8_t tunnel_key
[]; /* data of TUNNEL_KEY */
339 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_flow_key_tunnel
) == 15);
341 /* Cf. IETF RFC 5102 Section 5.11.3. */
/* Cf. IETF RFC 5102 Section 5.11.3 (flowEndReason). */
enum ipfix_flow_end_reason {
    IDLE_TIMEOUT = 0x01,
    ACTIVE_TIMEOUT = 0x02,
    END_OF_FLOW_DETECTED = 0x03,
    FORCED_END = 0x04,
    LACK_OF_RESOURCES = 0x05
};
350 /* Part of data record for common aggregated elements. */
352 struct ipfix_data_record_aggregated_common
{
353 ovs_be32 flow_start_delta_microseconds
; /* FLOW_START_DELTA_MICROSECONDS */
354 ovs_be32 flow_end_delta_microseconds
; /* FLOW_END_DELTA_MICROSECONDS */
355 ovs_be64 packet_delta_count
; /* PACKET_DELTA_COUNT */
356 ovs_be64 layer2_octet_delta_count
; /* LAYER2_OCTET_DELTA_COUNT */
357 uint8_t flow_end_reason
; /* FLOW_END_REASON */
359 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_common
) == 25);
361 /* Part of data record for IP aggregated elements. */
363 struct ipfix_data_record_aggregated_ip
{
364 ovs_be64 octet_delta_count
; /* OCTET_DELTA_COUNT */
365 ovs_be64 octet_delta_sum_of_squares
; /* OCTET_DELTA_SUM_OF_SQUARES */
366 ovs_be64 minimum_ip_total_length
; /* MINIMUM_IP_TOTAL_LENGTH */
367 ovs_be64 maximum_ip_total_length
; /* MAXIMUM_IP_TOTAL_LENGTH */
369 BUILD_ASSERT_DECL(sizeof(struct ipfix_data_record_aggregated_ip
) == 32);
372 * Refer to RFC 7011, the length of Variable length element is 0~65535:
373 * In most cases, it should be less than 255 octets:
375 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
376 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
377 * | Length (< 255)| Information Element |
378 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
379 * | ... continuing as needed |
380 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
382 * When it is greater than or equal to 255 octets:
384 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
385 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
386 * | 255 | Length (0 to 65535) | IE |
387 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
388 * | ... continuing as needed |
389 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
392 * Now, only the virtual_obs_id whose length < 255 is implemented.
395 #define IPFIX_VIRTUAL_OBS_MAX_LEN 254
398 * support tunnel key for:
401 * LISP: 24-bit instance ID
404 #define MAX_TUNNEL_KEY_LEN 8
406 #define MAX_FLOW_KEY_LEN \
407 (sizeof(struct ipfix_data_record_flow_key_common) \
408 + sizeof(struct ipfix_data_record_flow_key_vlan) \
409 + sizeof(struct ipfix_data_record_flow_key_ip) \
410 + MAX(sizeof(struct ipfix_data_record_flow_key_ipv4), \
411 sizeof(struct ipfix_data_record_flow_key_ipv6)) \
412 + MAX(sizeof(struct ipfix_data_record_flow_key_icmp), \
413 sizeof(struct ipfix_data_record_flow_key_transport)) \
414 + sizeof(struct ipfix_data_record_flow_key_tunnel) \
415 + MAX_TUNNEL_KEY_LEN)
417 #define MAX_DATA_RECORD_LEN \
419 + sizeof(struct ipfix_data_record_aggregated_common) \
420 + sizeof(struct ipfix_data_record_aggregated_ip))
422 /* Max length of a data set. To simplify the implementation, each
423 * data record is sent in a separate data set, so each data set
424 * contains at most one data record. */
425 #define MAX_DATA_SET_LEN \
426 (sizeof(struct ipfix_set_header) \
427 + MAX_DATA_RECORD_LEN)
429 /* Max length of an IPFIX message. Arbitrarily set to accommodate low
431 #define MAX_MESSAGE_LEN 1024
433 /* Cache structures. */
436 struct ipfix_flow_key
{
437 uint32_t obs_domain_id
;
438 uint16_t template_id
;
439 size_t flow_key_msg_part_size
;
440 uint64_t flow_key_msg_part
[DIV_ROUND_UP(MAX_FLOW_KEY_LEN
, 8)];
443 /* Flow cache entry. */
444 struct ipfix_flow_cache_entry
{
445 struct hmap_node flow_key_map_node
;
446 struct ovs_list cache_flow_start_timestamp_list_node
;
447 struct ipfix_flow_key flow_key
;
448 /* Common aggregated elements. */
449 uint64_t flow_start_timestamp_usec
;
450 uint64_t flow_end_timestamp_usec
;
451 uint64_t packet_delta_count
;
452 uint64_t layer2_octet_delta_count
;
453 uint64_t octet_delta_count
;
454 uint64_t octet_delta_sum_of_squares
; /* 0 if not IP. */
455 uint16_t minimum_ip_total_length
; /* 0 if not IP. */
456 uint16_t maximum_ip_total_length
; /* 0 if not IP. */
459 static void dpif_ipfix_cache_expire(struct dpif_ipfix_exporter
*, bool,
460 const uint64_t, const uint32_t);
462 static void get_export_time_now(uint64_t *, uint32_t *);
464 static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter
*, bool);
467 ofproto_ipfix_bridge_exporter_options_equal(
468 const struct ofproto_ipfix_bridge_exporter_options
*a
,
469 const struct ofproto_ipfix_bridge_exporter_options
*b
)
471 return (a
->obs_domain_id
== b
->obs_domain_id
472 && a
->obs_point_id
== b
->obs_point_id
473 && a
->sampling_rate
== b
->sampling_rate
474 && a
->cache_active_timeout
== b
->cache_active_timeout
475 && a
->cache_max_flows
== b
->cache_max_flows
476 && a
->enable_tunnel_sampling
== b
->enable_tunnel_sampling
477 && a
->enable_input_sampling
== b
->enable_input_sampling
478 && a
->enable_output_sampling
== b
->enable_output_sampling
479 && sset_equals(&a
->targets
, &b
->targets
)
480 && nullable_string_is_equal(a
->virtual_obs_id
, b
->virtual_obs_id
));
483 static struct ofproto_ipfix_bridge_exporter_options
*
484 ofproto_ipfix_bridge_exporter_options_clone(
485 const struct ofproto_ipfix_bridge_exporter_options
*old
)
487 struct ofproto_ipfix_bridge_exporter_options
*new =
488 xmemdup(old
, sizeof *old
);
489 sset_clone(&new->targets
, &old
->targets
);
490 new->virtual_obs_id
= nullable_xstrdup(old
->virtual_obs_id
);
495 ofproto_ipfix_bridge_exporter_options_destroy(
496 struct ofproto_ipfix_bridge_exporter_options
*options
)
499 sset_destroy(&options
->targets
);
500 free(options
->virtual_obs_id
);
506 ofproto_ipfix_flow_exporter_options_equal(
507 const struct ofproto_ipfix_flow_exporter_options
*a
,
508 const struct ofproto_ipfix_flow_exporter_options
*b
)
510 return (a
->collector_set_id
== b
->collector_set_id
511 && a
->cache_active_timeout
== b
->cache_active_timeout
512 && a
->cache_max_flows
== b
->cache_max_flows
513 && a
->enable_tunnel_sampling
== b
->enable_tunnel_sampling
514 && sset_equals(&a
->targets
, &b
->targets
)
515 && nullable_string_is_equal(a
->virtual_obs_id
, b
->virtual_obs_id
));
518 static struct ofproto_ipfix_flow_exporter_options
*
519 ofproto_ipfix_flow_exporter_options_clone(
520 const struct ofproto_ipfix_flow_exporter_options
*old
)
522 struct ofproto_ipfix_flow_exporter_options
*new =
523 xmemdup(old
, sizeof *old
);
524 sset_clone(&new->targets
, &old
->targets
);
525 new->virtual_obs_id
= nullable_xstrdup(old
->virtual_obs_id
);
530 ofproto_ipfix_flow_exporter_options_destroy(
531 struct ofproto_ipfix_flow_exporter_options
*options
)
534 sset_destroy(&options
->targets
);
535 free(options
->virtual_obs_id
);
541 dpif_ipfix_exporter_init(struct dpif_ipfix_exporter
*exporter
)
543 exporter
->collectors
= NULL
;
544 exporter
->seq_number
= 1;
545 exporter
->last_template_set_time
= 0;
546 hmap_init(&exporter
->cache_flow_key_map
);
547 ovs_list_init(&exporter
->cache_flow_start_timestamp_list
);
548 exporter
->cache_active_timeout
= 0;
549 exporter
->cache_max_flows
= 0;
550 exporter
->virtual_obs_id
= NULL
;
551 exporter
->virtual_obs_len
= 0;
555 dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter
*exporter
)
557 /* Flush the cache with flow end reason "forced end." */
558 dpif_ipfix_cache_expire_now(exporter
, true);
560 collectors_destroy(exporter
->collectors
);
561 exporter
->collectors
= NULL
;
562 exporter
->seq_number
= 1;
563 exporter
->last_template_set_time
= 0;
564 exporter
->cache_active_timeout
= 0;
565 exporter
->cache_max_flows
= 0;
566 free(exporter
->virtual_obs_id
);
567 exporter
->virtual_obs_id
= NULL
;
568 exporter
->virtual_obs_len
= 0;
572 dpif_ipfix_exporter_destroy(struct dpif_ipfix_exporter
*exporter
)
574 dpif_ipfix_exporter_clear(exporter
);
575 hmap_destroy(&exporter
->cache_flow_key_map
);
579 dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter
*exporter
,
580 const struct sset
*targets
,
581 const uint32_t cache_active_timeout
,
582 const uint32_t cache_max_flows
,
583 const char *virtual_obs_id
)
585 size_t virtual_obs_len
;
586 collectors_destroy(exporter
->collectors
);
587 collectors_create(targets
, IPFIX_DEFAULT_COLLECTOR_PORT
,
588 &exporter
->collectors
);
589 if (exporter
->collectors
== NULL
) {
590 VLOG_WARN_RL(&rl
, "no collectors could be initialized, "
591 "IPFIX exporter disabled");
592 dpif_ipfix_exporter_clear(exporter
);
595 exporter
->cache_active_timeout
= cache_active_timeout
;
596 exporter
->cache_max_flows
= cache_max_flows
;
597 virtual_obs_len
= virtual_obs_id
? strlen(virtual_obs_id
) : 0;
598 if (virtual_obs_len
> IPFIX_VIRTUAL_OBS_MAX_LEN
) {
599 VLOG_WARN_RL(&rl
, "Virtual obsevation ID too long (%d bytes), "
600 "should not be longer than %d bytes.",
601 exporter
->virtual_obs_len
, IPFIX_VIRTUAL_OBS_MAX_LEN
);
602 dpif_ipfix_exporter_clear(exporter
);
605 exporter
->virtual_obs_len
= virtual_obs_len
;
606 exporter
->virtual_obs_id
= nullable_xstrdup(virtual_obs_id
);
610 static struct dpif_ipfix_port
*
611 dpif_ipfix_find_port(const struct dpif_ipfix
*di
,
612 odp_port_t odp_port
) OVS_REQUIRES(mutex
)
614 struct dpif_ipfix_port
*dip
;
616 HMAP_FOR_EACH_IN_BUCKET (dip
, hmap_node
, hash_odp_port(odp_port
),
618 if (dip
->odp_port
== odp_port
) {
626 dpif_ipfix_del_port(struct dpif_ipfix
*di
,
627 struct dpif_ipfix_port
*dip
)
630 hmap_remove(&di
->tunnel_ports
, &dip
->hmap_node
);
635 dpif_ipfix_add_tunnel_port(struct dpif_ipfix
*di
, struct ofport
*ofport
,
636 odp_port_t odp_port
) OVS_EXCLUDED(mutex
)
638 struct dpif_ipfix_port
*dip
;
641 ovs_mutex_lock(&mutex
);
642 dip
= dpif_ipfix_find_port(di
, odp_port
);
644 dpif_ipfix_del_port(di
, dip
);
647 type
= netdev_get_type(ofport
->netdev
);
652 /* Add to table of tunnel ports. */
653 dip
= xmalloc(sizeof *dip
);
654 dip
->ofport
= ofport
;
655 dip
->odp_port
= odp_port
;
656 if (strcmp(type
, "gre") == 0) {
658 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_GRE
;
659 dip
->tunnel_key_length
= 4;
660 } else if (strcmp(type
, "ipsec_gre") == 0) {
661 /* 32-bit key ipsec_gre */
662 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_IPSEC_GRE
;
663 dip
->tunnel_key_length
= 4;
664 } else if (strcmp(type
, "vxlan") == 0) {
665 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_VXLAN
;
666 dip
->tunnel_key_length
= 3;
667 } else if (strcmp(type
, "lisp") == 0) {
668 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_LISP
;
669 dip
->tunnel_key_length
= 3;
670 } else if (strcmp(type
, "geneve") == 0) {
671 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_GENEVE
;
672 dip
->tunnel_key_length
= 3;
673 } else if (strcmp(type
, "stt") == 0) {
674 dip
->tunnel_type
= DPIF_IPFIX_TUNNEL_STT
;
675 dip
->tunnel_key_length
= 8;
680 hmap_insert(&di
->tunnel_ports
, &dip
->hmap_node
, hash_odp_port(odp_port
));
683 ovs_mutex_unlock(&mutex
);
687 dpif_ipfix_del_tunnel_port(struct dpif_ipfix
*di
, odp_port_t odp_port
)
690 struct dpif_ipfix_port
*dip
;
691 ovs_mutex_lock(&mutex
);
692 dip
= dpif_ipfix_find_port(di
, odp_port
);
694 dpif_ipfix_del_port(di
, dip
);
696 ovs_mutex_unlock(&mutex
);
700 dpif_ipfix_get_tunnel_port(const struct dpif_ipfix
*di
, odp_port_t odp_port
)
703 struct dpif_ipfix_port
*dip
;
704 ovs_mutex_lock(&mutex
);
705 dip
= dpif_ipfix_find_port(di
, odp_port
);
706 ovs_mutex_unlock(&mutex
);
711 dpif_ipfix_bridge_exporter_init(struct dpif_ipfix_bridge_exporter
*exporter
)
713 dpif_ipfix_exporter_init(&exporter
->exporter
);
714 exporter
->options
= NULL
;
715 exporter
->probability
= 0;
719 dpif_ipfix_bridge_exporter_clear(struct dpif_ipfix_bridge_exporter
*exporter
)
721 dpif_ipfix_exporter_clear(&exporter
->exporter
);
722 ofproto_ipfix_bridge_exporter_options_destroy(exporter
->options
);
723 exporter
->options
= NULL
;
724 exporter
->probability
= 0;
728 dpif_ipfix_bridge_exporter_destroy(struct dpif_ipfix_bridge_exporter
*exporter
)
730 dpif_ipfix_bridge_exporter_clear(exporter
);
731 dpif_ipfix_exporter_destroy(&exporter
->exporter
);
735 dpif_ipfix_bridge_exporter_set_options(
736 struct dpif_ipfix_bridge_exporter
*exporter
,
737 const struct ofproto_ipfix_bridge_exporter_options
*options
)
739 bool options_changed
;
741 if (!options
|| sset_is_empty(&options
->targets
)) {
742 /* No point in doing any work if there are no targets. */
743 dpif_ipfix_bridge_exporter_clear(exporter
);
749 || !ofproto_ipfix_bridge_exporter_options_equal(
750 options
, exporter
->options
));
752 /* Configure collectors if options have changed or if we're
753 * shortchanged in collectors (which indicates that opening one or
754 * more of the configured collectors failed, so that we should
757 || collectors_count(exporter
->exporter
.collectors
)
758 < sset_count(&options
->targets
)) {
759 if (!dpif_ipfix_exporter_set_options(
760 &exporter
->exporter
, &options
->targets
,
761 options
->cache_active_timeout
, options
->cache_max_flows
,
762 options
->virtual_obs_id
)) {
767 /* Avoid reconfiguring if options didn't change. */
768 if (!options_changed
) {
772 ofproto_ipfix_bridge_exporter_options_destroy(exporter
->options
);
773 exporter
->options
= ofproto_ipfix_bridge_exporter_options_clone(options
);
774 exporter
->probability
=
775 MAX(1, UINT32_MAX
/ exporter
->options
->sampling_rate
);
777 /* Run over the cache as some entries might have expired after
778 * changing the timeouts. */
779 dpif_ipfix_cache_expire_now(&exporter
->exporter
, false);
782 static struct dpif_ipfix_flow_exporter_map_node
*
783 dpif_ipfix_find_flow_exporter_map_node(
784 const struct dpif_ipfix
*di
, const uint32_t collector_set_id
)
787 struct dpif_ipfix_flow_exporter_map_node
*exporter_node
;
789 HMAP_FOR_EACH_WITH_HASH (exporter_node
, node
,
790 hash_int(collector_set_id
, 0),
791 &di
->flow_exporter_map
) {
792 if (exporter_node
->exporter
.options
->collector_set_id
793 == collector_set_id
) {
794 return exporter_node
;
802 dpif_ipfix_flow_exporter_init(struct dpif_ipfix_flow_exporter
*exporter
)
804 dpif_ipfix_exporter_init(&exporter
->exporter
);
805 exporter
->options
= NULL
;
809 dpif_ipfix_flow_exporter_clear(struct dpif_ipfix_flow_exporter
*exporter
)
811 dpif_ipfix_exporter_clear(&exporter
->exporter
);
812 ofproto_ipfix_flow_exporter_options_destroy(exporter
->options
);
813 exporter
->options
= NULL
;
817 dpif_ipfix_flow_exporter_destroy(struct dpif_ipfix_flow_exporter
*exporter
)
819 dpif_ipfix_flow_exporter_clear(exporter
);
820 dpif_ipfix_exporter_destroy(&exporter
->exporter
);
824 dpif_ipfix_flow_exporter_set_options(
825 struct dpif_ipfix_flow_exporter
*exporter
,
826 const struct ofproto_ipfix_flow_exporter_options
*options
)
828 bool options_changed
;
830 if (sset_is_empty(&options
->targets
)) {
831 /* No point in doing any work if there are no targets. */
832 dpif_ipfix_flow_exporter_clear(exporter
);
838 || !ofproto_ipfix_flow_exporter_options_equal(
839 options
, exporter
->options
));
841 /* Configure collectors if options have changed or if we're
842 * shortchanged in collectors (which indicates that opening one or
843 * more of the configured collectors failed, so that we should
846 || collectors_count(exporter
->exporter
.collectors
)
847 < sset_count(&options
->targets
)) {
848 if (!dpif_ipfix_exporter_set_options(
849 &exporter
->exporter
, &options
->targets
,
850 options
->cache_active_timeout
, options
->cache_max_flows
,
851 options
->virtual_obs_id
)) {
856 /* Avoid reconfiguring if options didn't change. */
857 if (!options_changed
) {
861 ofproto_ipfix_flow_exporter_options_destroy(exporter
->options
);
862 exporter
->options
= ofproto_ipfix_flow_exporter_options_clone(options
);
864 /* Run over the cache as some entries might have expired after
865 * changing the timeouts. */
866 dpif_ipfix_cache_expire_now(&exporter
->exporter
, false);
872 dpif_ipfix_set_options(
873 struct dpif_ipfix
*di
,
874 const struct ofproto_ipfix_bridge_exporter_options
*bridge_exporter_options
,
875 const struct ofproto_ipfix_flow_exporter_options
*flow_exporters_options
,
876 size_t n_flow_exporters_options
) OVS_EXCLUDED(mutex
)
879 struct ofproto_ipfix_flow_exporter_options
*options
;
880 struct dpif_ipfix_flow_exporter_map_node
*node
, *next
;
881 size_t n_broken_flow_exporters_options
= 0;
883 ovs_mutex_lock(&mutex
);
884 dpif_ipfix_bridge_exporter_set_options(&di
->bridge_exporter
,
885 bridge_exporter_options
);
887 /* Add new flow exporters and update current flow exporters. */
888 options
= (struct ofproto_ipfix_flow_exporter_options
*)
889 flow_exporters_options
;
890 for (i
= 0; i
< n_flow_exporters_options
; i
++) {
891 node
= dpif_ipfix_find_flow_exporter_map_node(
892 di
, options
->collector_set_id
);
894 node
= xzalloc(sizeof *node
);
895 dpif_ipfix_flow_exporter_init(&node
->exporter
);
896 hmap_insert(&di
->flow_exporter_map
, &node
->node
,
897 hash_int(options
->collector_set_id
, 0));
899 if (!dpif_ipfix_flow_exporter_set_options(&node
->exporter
, options
)) {
900 n_broken_flow_exporters_options
++;
905 ovs_assert(hmap_count(&di
->flow_exporter_map
) >=
906 (n_flow_exporters_options
- n_broken_flow_exporters_options
));
908 /* Remove dropped flow exporters, if any needs to be removed. */
909 if (hmap_count(&di
->flow_exporter_map
) > n_flow_exporters_options
) {
910 HMAP_FOR_EACH_SAFE (node
, next
, node
, &di
->flow_exporter_map
) {
911 /* This is slow but doesn't take any extra memory, and
912 * this table is not supposed to contain many rows anyway. */
913 options
= (struct ofproto_ipfix_flow_exporter_options
*)
914 flow_exporters_options
;
915 for (i
= 0; i
< n_flow_exporters_options
; i
++) {
916 if (node
->exporter
.options
->collector_set_id
917 == options
->collector_set_id
) {
922 if (i
== n_flow_exporters_options
) { // Not found.
923 hmap_remove(&di
->flow_exporter_map
, &node
->node
);
924 dpif_ipfix_flow_exporter_destroy(&node
->exporter
);
930 ovs_assert(hmap_count(&di
->flow_exporter_map
) ==
931 (n_flow_exporters_options
- n_broken_flow_exporters_options
));
932 ovs_mutex_unlock(&mutex
);
936 dpif_ipfix_create(void)
938 struct dpif_ipfix
*di
;
939 di
= xzalloc(sizeof *di
);
940 dpif_ipfix_bridge_exporter_init(&di
->bridge_exporter
);
941 hmap_init(&di
->flow_exporter_map
);
942 hmap_init(&di
->tunnel_ports
);
943 ovs_refcount_init(&di
->ref_cnt
);
948 dpif_ipfix_ref(const struct dpif_ipfix
*di_
)
950 struct dpif_ipfix
*di
= CONST_CAST(struct dpif_ipfix
*, di_
);
952 ovs_refcount_ref(&di
->ref_cnt
);
958 dpif_ipfix_get_bridge_exporter_probability(const struct dpif_ipfix
*di
)
962 ovs_mutex_lock(&mutex
);
963 ret
= di
->bridge_exporter
.probability
;
964 ovs_mutex_unlock(&mutex
);
969 dpif_ipfix_get_bridge_exporter_input_sampling(const struct dpif_ipfix
*di
)
973 ovs_mutex_lock(&mutex
);
974 if (di
->bridge_exporter
.options
) {
975 ret
= di
->bridge_exporter
.options
->enable_input_sampling
;
977 ovs_mutex_unlock(&mutex
);
982 dpif_ipfix_get_bridge_exporter_output_sampling(const struct dpif_ipfix
*di
)
986 ovs_mutex_lock(&mutex
);
987 if (di
->bridge_exporter
.options
) {
988 ret
= di
->bridge_exporter
.options
->enable_output_sampling
;
990 ovs_mutex_unlock(&mutex
);
995 dpif_ipfix_get_bridge_exporter_tunnel_sampling(const struct dpif_ipfix
*di
)
999 ovs_mutex_lock(&mutex
);
1000 if (di
->bridge_exporter
.options
) {
1001 ret
= di
->bridge_exporter
.options
->enable_tunnel_sampling
;
1003 ovs_mutex_unlock(&mutex
);
1008 dpif_ipfix_get_flow_exporter_tunnel_sampling(const struct dpif_ipfix
*di
,
1009 const uint32_t collector_set_id
)
1012 ovs_mutex_lock(&mutex
);
1013 struct dpif_ipfix_flow_exporter_map_node
*node
1014 = dpif_ipfix_find_flow_exporter_map_node(di
, collector_set_id
);
1016 && node
->exporter
.options
1017 && node
->exporter
.options
->enable_tunnel_sampling
);
1018 ovs_mutex_unlock(&mutex
);
1024 dpif_ipfix_clear(struct dpif_ipfix
*di
) OVS_REQUIRES(mutex
)
1026 struct dpif_ipfix_flow_exporter_map_node
*exp_node
;
1027 struct dpif_ipfix_port
*dip
, *next
;
1029 dpif_ipfix_bridge_exporter_clear(&di
->bridge_exporter
);
1031 HMAP_FOR_EACH_POP (exp_node
, node
, &di
->flow_exporter_map
) {
1032 dpif_ipfix_flow_exporter_destroy(&exp_node
->exporter
);
1036 HMAP_FOR_EACH_SAFE (dip
, next
, hmap_node
, &di
->tunnel_ports
) {
1037 dpif_ipfix_del_port(di
, dip
);
1042 dpif_ipfix_unref(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
1044 if (di
&& ovs_refcount_unref_relaxed(&di
->ref_cnt
) == 1) {
1045 ovs_mutex_lock(&mutex
);
1046 dpif_ipfix_clear(di
);
1047 dpif_ipfix_bridge_exporter_destroy(&di
->bridge_exporter
);
1048 hmap_destroy(&di
->flow_exporter_map
);
1049 hmap_destroy(&di
->tunnel_ports
);
1051 ovs_mutex_unlock(&mutex
);
1056 ipfix_init_header(uint32_t export_time_sec
, uint32_t seq_number
,
1057 uint32_t obs_domain_id
, struct dp_packet
*msg
)
1059 struct ipfix_header
*hdr
;
1061 hdr
= dp_packet_put_zeros(msg
, sizeof *hdr
);
1062 hdr
->version
= htons(IPFIX_VERSION
);
1063 hdr
->length
= htons(sizeof *hdr
); /* Updated in ipfix_send_msg. */
1064 hdr
->export_time
= htonl(export_time_sec
);
1065 hdr
->seq_number
= htonl(seq_number
);
1066 hdr
->obs_domain_id
= htonl(obs_domain_id
);
1070 ipfix_send_msg(const struct collectors
*collectors
, struct dp_packet
*msg
)
1072 struct ipfix_header
*hdr
;
1075 /* Adjust the length in the header. */
1076 hdr
= dp_packet_data(msg
);
1077 hdr
->length
= htons(dp_packet_size(msg
));
1079 tx_errors
= collectors_send(collectors
,
1080 dp_packet_data(msg
), dp_packet_size(msg
));
1081 dp_packet_set_size(msg
, 0);
1087 ipfix_get_template_id(enum ipfix_proto_l2 l2
, enum ipfix_proto_l3 l3
,
1088 enum ipfix_proto_l4 l4
, enum ipfix_proto_tunnel tunnel
)
1090 uint16_t template_id
;
1092 template_id
= template_id
* NUM_IPFIX_PROTO_L3
+ l3
;
1093 template_id
= template_id
* NUM_IPFIX_PROTO_L4
+ l4
;
1094 template_id
= template_id
* NUM_IPFIX_PROTO_TUNNEL
+ tunnel
;
1095 return IPFIX_TEMPLATE_ID_MIN
+ template_id
;
1099 ipfix_define_template_entity(enum ipfix_entity_id id
,
1100 enum ipfix_entity_size size
,
1101 enum ipfix_entity_enterprise enterprise
,
1102 struct dp_packet
*msg
)
1104 struct ipfix_template_field_specifier
*field
;
1108 field_size
= sizeof *field
;
1110 /* No enterprise number */
1111 field_size
= sizeof *field
- sizeof(ovs_be32
);
1113 field
= dp_packet_put_zeros(msg
, field_size
);
1114 field
->element_id
= htons(id
);
1116 field
->field_length
= htons(size
);
1118 /* RFC 5101, Section 7. Variable-Length Information Element */
1119 field
->field_length
= OVS_BE16_MAX
;
1122 field
->enterprise
= htonl(enterprise
);
1128 ipfix_define_template_fields(enum ipfix_proto_l2 l2
, enum ipfix_proto_l3 l3
,
1129 enum ipfix_proto_l4 l4
, enum ipfix_proto_tunnel tunnel
,
1130 bool virtual_obs_id_set
,
1131 struct dp_packet
*msg
)
1137 ipfix_define_template_entity(IPFIX_ENTITY_ID_##ID, \
1138 IPFIX_ENTITY_SIZE_##ID, \
1139 IPFIX_ENTITY_ENTERPRISE_##ID, msg); \
1145 DEF(OBSERVATION_POINT_ID
);
1146 DEF(FLOW_DIRECTION
);
1148 /* Common Ethernet entities. */
1149 DEF(SOURCE_MAC_ADDRESS
);
1150 DEF(DESTINATION_MAC_ADDRESS
);
1152 DEF(ETHERNET_HEADER_LENGTH
);
1154 if (l2
== IPFIX_PROTO_L2_VLAN
) {
1157 DEF(DOT1Q_PRIORITY
);
1160 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1163 DEF(PROTOCOL_IDENTIFIER
);
1164 DEF(IP_DIFF_SERV_CODE_POINT
);
1166 DEF(IP_CLASS_OF_SERVICE
);
1168 if (l3
== IPFIX_PROTO_L3_IPV4
) {
1169 DEF(SOURCE_IPV4_ADDRESS
);
1170 DEF(DESTINATION_IPV4_ADDRESS
);
1171 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1172 DEF(SOURCE_TRANSPORT_PORT
);
1173 DEF(DESTINATION_TRANSPORT_PORT
);
1174 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1175 DEF(ICMP_TYPE_IPV4
);
1176 DEF(ICMP_CODE_IPV4
);
1178 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1179 DEF(SOURCE_IPV6_ADDRESS
);
1180 DEF(DESTINATION_IPV6_ADDRESS
);
1181 DEF(FLOW_LABEL_IPV6
);
1182 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1183 DEF(SOURCE_TRANSPORT_PORT
);
1184 DEF(DESTINATION_TRANSPORT_PORT
);
1185 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1186 DEF(ICMP_TYPE_IPV6
);
1187 DEF(ICMP_CODE_IPV6
);
1192 if (tunnel
!= IPFIX_PROTO_NOT_TUNNELED
) {
1193 DEF(TUNNEL_SOURCE_IPV4_ADDRESS
);
1194 DEF(TUNNEL_DESTINATION_IPV4_ADDRESS
);
1195 DEF(TUNNEL_PROTOCOL_IDENTIFIER
);
1196 DEF(TUNNEL_SOURCE_TRANSPORT_PORT
);
1197 DEF(TUNNEL_DESTINATION_TRANSPORT_PORT
);
1202 /* 2. Virtual observation ID, which is not a part of flow key. */
1203 if (virtual_obs_id_set
) {
1204 DEF(VIRTUAL_OBS_ID
);
1207 /* 3. Flow aggregated data. */
1209 DEF(FLOW_START_DELTA_MICROSECONDS
);
1210 DEF(FLOW_END_DELTA_MICROSECONDS
);
1211 DEF(PACKET_DELTA_COUNT
);
1212 DEF(LAYER2_OCTET_DELTA_COUNT
);
1213 DEF(FLOW_END_REASON
);
1215 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1216 DEF(OCTET_DELTA_COUNT
);
1217 DEF(OCTET_DELTA_SUM_OF_SQUARES
);
1218 DEF(MINIMUM_IP_TOTAL_LENGTH
);
1219 DEF(MAXIMUM_IP_TOTAL_LENGTH
);
1227 ipfix_init_template_msg(void *msg_stub
, uint32_t export_time_sec
,
1228 uint32_t seq_number
, uint32_t obs_domain_id
,
1229 struct dp_packet
*msg
, size_t *set_hdr_offset
)
1231 struct ipfix_set_header
*set_hdr
;
1233 dp_packet_use_stub(msg
, msg_stub
, sizeof msg_stub
);
1235 ipfix_init_header(export_time_sec
, seq_number
, obs_domain_id
, msg
);
1236 *set_hdr_offset
= dp_packet_size(msg
);
1238 /* Add a Template Set. */
1239 set_hdr
= dp_packet_put_zeros(msg
, sizeof *set_hdr
);
1240 set_hdr
->set_id
= htons(IPFIX_SET_ID_TEMPLATE
);
1244 ipfix_send_template_msg(const struct collectors
*collectors
,
1245 struct dp_packet
*msg
, size_t set_hdr_offset
)
1247 struct ipfix_set_header
*set_hdr
;
1250 /* Send template message. */
1251 set_hdr
= (struct ipfix_set_header
*)
1252 ((uint8_t*)dp_packet_data(msg
) + set_hdr_offset
);
1253 set_hdr
->length
= htons(dp_packet_size(msg
) - set_hdr_offset
);
1255 tx_errors
= ipfix_send_msg(collectors
, msg
);
1257 dp_packet_uninit(msg
);
1263 ipfix_send_template_msgs(struct dpif_ipfix_exporter
*exporter
,
1264 uint32_t export_time_sec
, uint32_t obs_domain_id
)
1266 uint64_t msg_stub
[DIV_ROUND_UP(MAX_MESSAGE_LEN
, 8)];
1267 struct dp_packet msg
;
1268 size_t set_hdr_offset
, tmpl_hdr_offset
, error_pkts
;
1269 struct ipfix_template_record_header
*tmpl_hdr
;
1270 uint16_t field_count
;
1271 size_t tx_packets
= 0;
1272 size_t tx_errors
= 0;
1273 enum ipfix_proto_l2 l2
;
1274 enum ipfix_proto_l3 l3
;
1275 enum ipfix_proto_l4 l4
;
1276 enum ipfix_proto_tunnel tunnel
;
1278 ipfix_init_template_msg(msg_stub
, export_time_sec
, exporter
->seq_number
,
1279 obs_domain_id
, &msg
, &set_hdr_offset
);
1280 /* Define one template for each possible combination of
1282 for (l2
= 0; l2
< NUM_IPFIX_PROTO_L2
; l2
++) {
1283 for (l3
= 0; l3
< NUM_IPFIX_PROTO_L3
; l3
++) {
1284 for (l4
= 0; l4
< NUM_IPFIX_PROTO_L4
; l4
++) {
1285 if (l3
== IPFIX_PROTO_L3_UNKNOWN
&&
1286 l4
!= IPFIX_PROTO_L4_UNKNOWN
) {
1289 for (tunnel
= 0; tunnel
< NUM_IPFIX_PROTO_TUNNEL
; tunnel
++) {
1290 /* When the size of the template packet reaches
1291 * MAX_MESSAGE_LEN(1024), send it out.
1292 * And then reinitialize the msg to construct a new
1293 * packet for the following templates.
1295 if (dp_packet_size(&msg
) >= MAX_MESSAGE_LEN
) {
1296 /* Send template message. */
1297 error_pkts
= ipfix_send_template_msg(exporter
->collectors
,
1298 &msg
, set_hdr_offset
);
1299 tx_errors
+= error_pkts
;
1300 tx_packets
+= collectors_count(exporter
->collectors
) - error_pkts
;
1302 /* Reinitialize the template msg. */
1303 ipfix_init_template_msg(msg_stub
, export_time_sec
,
1304 exporter
->seq_number
,
1305 obs_domain_id
, &msg
,
1309 tmpl_hdr_offset
= dp_packet_size(&msg
);
1310 tmpl_hdr
= dp_packet_put_zeros(&msg
, sizeof *tmpl_hdr
);
1311 tmpl_hdr
->template_id
= htons(
1312 ipfix_get_template_id(l2
, l3
, l4
, tunnel
));
1313 field_count
= ipfix_define_template_fields(
1314 l2
, l3
, l4
, tunnel
, exporter
->virtual_obs_id
!= NULL
,
1316 tmpl_hdr
= (struct ipfix_template_record_header
*)
1317 ((uint8_t*)dp_packet_data(&msg
) + tmpl_hdr_offset
);
1318 tmpl_hdr
->field_count
= htons(field_count
);
1324 /* Send template message. */
1325 error_pkts
= ipfix_send_template_msg(exporter
->collectors
, &msg
, set_hdr_offset
);
1326 tx_errors
+= error_pkts
;
1327 tx_packets
+= collectors_count(exporter
->collectors
) - error_pkts
;
1329 exporter
->stats
.tx_pkts
+= tx_packets
;
1330 exporter
->stats
.tx_errors
+= tx_errors
;
1332 /* XXX: Add Options Template Sets, at least to define a Flow Keys
1333 * Option Template. */
1337 static inline uint32_t
1338 ipfix_hash_flow_key(const struct ipfix_flow_key
*flow_key
, uint32_t basis
)
1341 hash
= hash_int(flow_key
->obs_domain_id
, basis
);
1342 hash
= hash_int(flow_key
->template_id
, hash
);
1343 hash
= hash_bytes(flow_key
->flow_key_msg_part
,
1344 flow_key
->flow_key_msg_part_size
, hash
);
1349 ipfix_flow_key_equal(const struct ipfix_flow_key
*a
,
1350 const struct ipfix_flow_key
*b
)
1352 /* The template ID determines the flow key size, so not need to
1354 return (a
->obs_domain_id
== b
->obs_domain_id
1355 && a
->template_id
== b
->template_id
1356 && memcmp(a
->flow_key_msg_part
, b
->flow_key_msg_part
,
1357 a
->flow_key_msg_part_size
) == 0);
1360 static struct ipfix_flow_cache_entry
*
1361 ipfix_cache_find_entry(const struct dpif_ipfix_exporter
*exporter
,
1362 const struct ipfix_flow_key
*flow_key
)
1364 struct ipfix_flow_cache_entry
*entry
;
1366 HMAP_FOR_EACH_WITH_HASH (entry
, flow_key_map_node
,
1367 ipfix_hash_flow_key(flow_key
, 0),
1368 &exporter
->cache_flow_key_map
) {
1369 if (ipfix_flow_key_equal(&entry
->flow_key
, flow_key
)) {
1378 ipfix_cache_next_timeout_msec(const struct dpif_ipfix_exporter
*exporter
,
1379 long long int *next_timeout_msec
)
1381 struct ipfix_flow_cache_entry
*entry
;
1383 LIST_FOR_EACH (entry
, cache_flow_start_timestamp_list_node
,
1384 &exporter
->cache_flow_start_timestamp_list
) {
1385 *next_timeout_msec
= entry
->flow_start_timestamp_usec
/ 1000LL
1386 + 1000LL * exporter
->cache_active_timeout
;
1394 ipfix_cache_aggregate_entries(struct ipfix_flow_cache_entry
*from_entry
,
1395 struct ipfix_flow_cache_entry
*to_entry
)
1397 uint64_t *to_start
, *to_end
, *from_start
, *from_end
;
1398 uint16_t *to_min_len
, *to_max_len
, *from_min_len
, *from_max_len
;
1400 to_start
= &to_entry
->flow_start_timestamp_usec
;
1401 to_end
= &to_entry
->flow_end_timestamp_usec
;
1402 from_start
= &from_entry
->flow_start_timestamp_usec
;
1403 from_end
= &from_entry
->flow_end_timestamp_usec
;
1405 if (*to_start
> *from_start
) {
1406 *to_start
= *from_start
;
1408 if (*to_end
< *from_end
) {
1409 *to_end
= *from_end
;
1412 to_entry
->packet_delta_count
+= from_entry
->packet_delta_count
;
1413 to_entry
->layer2_octet_delta_count
+= from_entry
->layer2_octet_delta_count
;
1415 to_entry
->octet_delta_count
+= from_entry
->octet_delta_count
;
1416 to_entry
->octet_delta_sum_of_squares
+=
1417 from_entry
->octet_delta_sum_of_squares
;
1419 to_min_len
= &to_entry
->minimum_ip_total_length
;
1420 to_max_len
= &to_entry
->maximum_ip_total_length
;
1421 from_min_len
= &from_entry
->minimum_ip_total_length
;
1422 from_max_len
= &from_entry
->maximum_ip_total_length
;
1424 if (!*to_min_len
|| (*from_min_len
&& *to_min_len
> *from_min_len
)) {
1425 *to_min_len
= *from_min_len
;
1427 if (*to_max_len
< *from_max_len
) {
1428 *to_max_len
= *from_max_len
;
1432 /* Get statistics */
1434 ipfix_get_stats__(const struct dpif_ipfix_exporter
*exporter
,
1435 ofproto_ipfix_stats
*stats
)
1437 memset(stats
, 0xff, sizeof *stats
);
1443 *stats
= exporter
->stats
;
1447 ipfix_get_bridge_stats(const struct dpif_ipfix_bridge_exporter
*exporter
,
1448 ofproto_ipfix_stats
*stats
)
1450 ipfix_get_stats__(&exporter
->exporter
, stats
);
1454 ipfix_get_flow_stats(const struct dpif_ipfix_flow_exporter
*exporter
,
1455 ofproto_ipfix_stats
*stats
)
1457 ipfix_get_stats__(&exporter
->exporter
, stats
);
1458 stats
->collector_set_id
= exporter
->options
->collector_set_id
;
1462 dpif_ipfix_get_stats(const struct dpif_ipfix
*di
,
1464 struct ovs_list
*replies
)
1467 struct dpif_ipfix_flow_exporter_map_node
*flow_exporter_node
;
1468 struct ofputil_ipfix_stats ois
;
1470 ovs_mutex_lock(&mutex
);
1472 if (!di
->bridge_exporter
.options
) {
1473 ovs_mutex_unlock(&mutex
);
1474 return OFPERR_NXST_NOT_CONFIGURED
;
1477 ipfix_get_bridge_stats(&di
->bridge_exporter
, &ois
);
1478 ofputil_append_ipfix_stat(replies
, &ois
);
1480 if (hmap_count(&di
->flow_exporter_map
) == 0) {
1481 ovs_mutex_unlock(&mutex
);
1482 return OFPERR_NXST_NOT_CONFIGURED
;
1485 HMAP_FOR_EACH (flow_exporter_node
, node
,
1486 &di
->flow_exporter_map
) {
1487 ipfix_get_flow_stats(&flow_exporter_node
->exporter
, &ois
);
1488 ofputil_append_ipfix_stat(replies
, &ois
);
1491 ovs_mutex_unlock(&mutex
);
1496 /* Update partial ipfix stats */
1498 ipfix_update_stats(struct dpif_ipfix_exporter
*exporter
,
1500 size_t current_flows
,
1501 enum ipfix_sampled_packet_type sampled_pkt_type
)
1504 exporter
->stats
.total_flows
++;
1505 exporter
->stats
.current_flows
= current_flows
;
1507 exporter
->stats
.pkts
++;
1509 switch (sampled_pkt_type
) {
1510 case IPFIX_SAMPLED_PKT_IPV4_OK
:
1511 exporter
->stats
.ipv4_pkts
++;
1513 case IPFIX_SAMPLED_PKT_IPV6_OK
:
1514 exporter
->stats
.ipv6_pkts
++;
1516 case IPFIX_SAMPLED_PKT_IPV4_ERROR
:
1517 exporter
->stats
.ipv4_error_pkts
++;
1518 exporter
->stats
.error_pkts
++;
1520 case IPFIX_SAMPLED_PKT_IPV6_ERROR
:
1521 exporter
->stats
.ipv6_error_pkts
++;
1522 exporter
->stats
.error_pkts
++;
1524 case IPFIX_SAMPLED_PKT_UNKNOWN
:
1525 exporter
->stats
.error_pkts
++;
1527 case IPFIX_SAMPLED_PKT_OTHERS
:
1533 /* Add an entry into a flow cache. The entry is either aggregated into
1534 * an existing entry with the same flow key and free()d, or it is
1535 * inserted into the cache. And IPFIX stats will be updated */
1537 ipfix_cache_update(struct dpif_ipfix_exporter
*exporter
,
1538 struct ipfix_flow_cache_entry
*entry
,
1539 enum ipfix_sampled_packet_type sampled_pkt_type
)
1541 struct ipfix_flow_cache_entry
*old_entry
;
1542 size_t current_flows
= 0;
1544 old_entry
= ipfix_cache_find_entry(exporter
, &entry
->flow_key
);
1546 if (old_entry
== NULL
) {
1547 hmap_insert(&exporter
->cache_flow_key_map
, &entry
->flow_key_map_node
,
1548 ipfix_hash_flow_key(&entry
->flow_key
, 0));
1550 /* As the latest entry added into the cache, it should
1551 * logically have the highest flow_start_timestamp_usec, so
1552 * append it at the tail. */
1553 ovs_list_push_back(&exporter
->cache_flow_start_timestamp_list
,
1554 &entry
->cache_flow_start_timestamp_list_node
);
1556 /* Enforce exporter->cache_max_flows limit. */
1557 current_flows
= hmap_count(&exporter
->cache_flow_key_map
);
1558 ipfix_update_stats(exporter
, true, current_flows
, sampled_pkt_type
);
1559 if (current_flows
> exporter
->cache_max_flows
) {
1560 dpif_ipfix_cache_expire_now(exporter
, false);
1563 ipfix_cache_aggregate_entries(entry
, old_entry
);
1565 ipfix_update_stats(exporter
, false, current_flows
, sampled_pkt_type
);
1569 static enum ipfix_sampled_packet_type
1570 ipfix_cache_entry_init(struct ipfix_flow_cache_entry
*entry
,
1571 const struct dp_packet
*packet
, const struct flow
*flow
,
1572 uint64_t packet_delta_count
, uint32_t obs_domain_id
,
1573 uint32_t obs_point_id
, odp_port_t output_odp_port
,
1574 const struct dpif_ipfix_port
*tunnel_port
,
1575 const struct flow_tnl
*tunnel_key
)
1577 struct ipfix_flow_key
*flow_key
;
1578 struct dp_packet msg
;
1579 enum ipfix_proto_l2 l2
;
1580 enum ipfix_proto_l3 l3
;
1581 enum ipfix_proto_l4 l4
;
1582 enum ipfix_proto_tunnel tunnel
= IPFIX_PROTO_NOT_TUNNELED
;
1583 enum ipfix_sampled_packet_type sampled_pkt_type
= IPFIX_SAMPLED_PKT_UNKNOWN
;
1584 uint8_t ethernet_header_length
;
1585 uint16_t ethernet_total_length
;
1587 flow_key
= &entry
->flow_key
;
1588 dp_packet_use_stub(&msg
, flow_key
->flow_key_msg_part
,
1589 sizeof flow_key
->flow_key_msg_part
);
1591 /* Choose the right template ID matching the protocols in the
1592 * sampled packet. */
1593 l2
= (flow
->vlan_tci
== 0) ? IPFIX_PROTO_L2_ETH
: IPFIX_PROTO_L2_VLAN
;
1595 switch(ntohs(flow
->dl_type
)) {
1597 l3
= IPFIX_PROTO_L3_IPV4
;
1598 switch(flow
->nw_proto
) {
1602 l4
= IPFIX_PROTO_L4_TCP_UDP_SCTP
;
1603 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV4_OK
;
1606 l4
= IPFIX_PROTO_L4_ICMP
;
1607 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV4_OK
;
1610 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1611 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV4_ERROR
;
1615 l3
= IPFIX_PROTO_L3_IPV6
;
1616 switch(flow
->nw_proto
) {
1620 l4
= IPFIX_PROTO_L4_TCP_UDP_SCTP
;
1621 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV6_OK
;
1623 case IPPROTO_ICMPV6
:
1624 l4
= IPFIX_PROTO_L4_ICMP
;
1625 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV6_OK
;
1628 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1629 sampled_pkt_type
= IPFIX_SAMPLED_PKT_IPV6_ERROR
;
1633 l3
= IPFIX_PROTO_L3_UNKNOWN
;
1634 l4
= IPFIX_PROTO_L4_UNKNOWN
;
1635 sampled_pkt_type
= IPFIX_SAMPLED_PKT_OTHERS
;
1638 if (tunnel_port
&& tunnel_key
) {
1639 tunnel
= IPFIX_PROTO_TUNNELED
;
1642 flow_key
->obs_domain_id
= obs_domain_id
;
1643 flow_key
->template_id
= ipfix_get_template_id(l2
, l3
, l4
, tunnel
);
1645 /* The fields defined in the ipfix_data_record_* structs and sent
1646 * below must match exactly the templates defined in
1647 * ipfix_define_template_fields. */
1649 ethernet_header_length
= (l2
== IPFIX_PROTO_L2_VLAN
)
1650 ? VLAN_ETH_HEADER_LEN
: ETH_HEADER_LEN
;
1651 ethernet_total_length
= dp_packet_size(packet
);
1653 /* Common Ethernet entities. */
1655 struct ipfix_data_record_flow_key_common
*data_common
;
1657 data_common
= dp_packet_put_zeros(&msg
, sizeof *data_common
);
1658 data_common
->observation_point_id
= htonl(obs_point_id
);
1659 data_common
->flow_direction
=
1660 (output_odp_port
== ODPP_NONE
) ? INGRESS_FLOW
: EGRESS_FLOW
;
1661 data_common
->source_mac_address
= flow
->dl_src
;
1662 data_common
->destination_mac_address
= flow
->dl_dst
;
1663 data_common
->ethernet_type
= flow
->dl_type
;
1664 data_common
->ethernet_header_length
= ethernet_header_length
;
1667 if (l2
== IPFIX_PROTO_L2_VLAN
) {
1668 struct ipfix_data_record_flow_key_vlan
*data_vlan
;
1669 uint16_t vlan_id
= vlan_tci_to_vid(flow
->vlan_tci
);
1670 uint8_t priority
= vlan_tci_to_pcp(flow
->vlan_tci
);
1672 data_vlan
= dp_packet_put_zeros(&msg
, sizeof *data_vlan
);
1673 data_vlan
->vlan_id
= htons(vlan_id
);
1674 data_vlan
->dot1q_vlan_id
= htons(vlan_id
);
1675 data_vlan
->dot1q_priority
= priority
;
1678 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1679 struct ipfix_data_record_flow_key_ip
*data_ip
;
1681 data_ip
= dp_packet_put_zeros(&msg
, sizeof *data_ip
);
1682 data_ip
->ip_version
= (l3
== IPFIX_PROTO_L3_IPV4
) ? 4 : 6;
1683 data_ip
->ip_ttl
= flow
->nw_ttl
;
1684 data_ip
->protocol_identifier
= flow
->nw_proto
;
1685 data_ip
->ip_diff_serv_code_point
= flow
->nw_tos
>> 2;
1686 data_ip
->ip_precedence
= flow
->nw_tos
>> 5;
1687 data_ip
->ip_class_of_service
= flow
->nw_tos
;
1689 if (l3
== IPFIX_PROTO_L3_IPV4
) {
1690 struct ipfix_data_record_flow_key_ipv4
*data_ipv4
;
1692 data_ipv4
= dp_packet_put_zeros(&msg
, sizeof *data_ipv4
);
1693 data_ipv4
->source_ipv4_address
= flow
->nw_src
;
1694 data_ipv4
->destination_ipv4_address
= flow
->nw_dst
;
1695 } else { /* l3 == IPFIX_PROTO_L3_IPV6 */
1696 struct ipfix_data_record_flow_key_ipv6
*data_ipv6
;
1698 data_ipv6
= dp_packet_put_zeros(&msg
, sizeof *data_ipv6
);
1699 memcpy(data_ipv6
->source_ipv6_address
, &flow
->ipv6_src
,
1700 sizeof flow
->ipv6_src
);
1701 memcpy(data_ipv6
->destination_ipv6_address
, &flow
->ipv6_dst
,
1702 sizeof flow
->ipv6_dst
);
1703 data_ipv6
->flow_label_ipv6
= flow
->ipv6_label
;
1707 if (l4
== IPFIX_PROTO_L4_TCP_UDP_SCTP
) {
1708 struct ipfix_data_record_flow_key_transport
*data_transport
;
1710 data_transport
= dp_packet_put_zeros(&msg
, sizeof *data_transport
);
1711 data_transport
->source_transport_port
= flow
->tp_src
;
1712 data_transport
->destination_transport_port
= flow
->tp_dst
;
1713 } else if (l4
== IPFIX_PROTO_L4_ICMP
) {
1714 struct ipfix_data_record_flow_key_icmp
*data_icmp
;
1716 data_icmp
= dp_packet_put_zeros(&msg
, sizeof *data_icmp
);
1717 data_icmp
->icmp_type
= ntohs(flow
->tp_src
) & 0xff;
1718 data_icmp
->icmp_code
= ntohs(flow
->tp_dst
) & 0xff;
1721 if (tunnel
== IPFIX_PROTO_TUNNELED
) {
1722 struct ipfix_data_record_flow_key_tunnel
*data_tunnel
;
1723 const uint8_t *tun_id
;
1725 data_tunnel
= dp_packet_put_zeros(&msg
, sizeof *data_tunnel
+
1726 tunnel_port
->tunnel_key_length
);
1727 data_tunnel
->tunnel_source_ipv4_address
= tunnel_key
->ip_src
;
1728 data_tunnel
->tunnel_destination_ipv4_address
= tunnel_key
->ip_dst
;
1729 /* The tunnel_protocol_identifier is from tunnel_proto array, which
1730 * contains protocol_identifiers of each tunnel type.
1731 * For the tunnel type on the top of IPSec, which uses the protocol
1732 * identifier of the upper tunnel type is used, the tcp_src and tcp_dst
1733 * are decided based on the protocol identifiers.
1735 * The protocol identifier of DPIF_IPFIX_TUNNEL_IPSEC_GRE is IPPROTO_GRE,
1736 * and both tp_src and tp_dst are zero.
1738 data_tunnel
->tunnel_protocol_identifier
=
1739 tunnel_protocol
[tunnel_port
->tunnel_type
];
1740 data_tunnel
->tunnel_source_transport_port
= tunnel_key
->tp_src
;
1741 data_tunnel
->tunnel_destination_transport_port
= tunnel_key
->tp_dst
;
1742 data_tunnel
->tunnel_type
= tunnel_port
->tunnel_type
;
1743 data_tunnel
->tunnel_key_length
= tunnel_port
->tunnel_key_length
;
1744 /* tun_id is in network order, and tunnel key is in low bits. */
1745 tun_id
= (const uint8_t *) &tunnel_key
->tun_id
;
1746 memcpy(data_tunnel
->tunnel_key
,
1747 &tun_id
[8 - tunnel_port
->tunnel_key_length
],
1748 tunnel_port
->tunnel_key_length
);
1751 flow_key
->flow_key_msg_part_size
= dp_packet_size(&msg
);
1755 uint64_t layer2_octet_delta_count
;
1757 /* Calculate the total matched octet count by considering as
1758 * an approximation that all matched packets have the same
1760 layer2_octet_delta_count
= packet_delta_count
* ethernet_total_length
;
1762 xgettimeofday(&now
);
1763 entry
->flow_end_timestamp_usec
= now
.tv_usec
+ 1000000LL * now
.tv_sec
;
1764 entry
->flow_start_timestamp_usec
= entry
->flow_end_timestamp_usec
;
1765 entry
->packet_delta_count
= packet_delta_count
;
1766 entry
->layer2_octet_delta_count
= layer2_octet_delta_count
;
1769 if (l3
!= IPFIX_PROTO_L3_UNKNOWN
) {
1770 uint16_t ip_total_length
=
1771 ethernet_total_length
- ethernet_header_length
;
1772 uint64_t octet_delta_count
;
1774 /* Calculate the total matched octet count by considering as
1775 * an approximation that all matched packets have the same
1777 octet_delta_count
= packet_delta_count
* ip_total_length
;
1779 entry
->octet_delta_count
= octet_delta_count
;
1780 entry
->octet_delta_sum_of_squares
= octet_delta_count
* ip_total_length
;
1781 entry
->minimum_ip_total_length
= ip_total_length
;
1782 entry
->maximum_ip_total_length
= ip_total_length
;
1784 entry
->octet_delta_sum_of_squares
= 0;
1785 entry
->minimum_ip_total_length
= 0;
1786 entry
->maximum_ip_total_length
= 0;
1789 return sampled_pkt_type
;
1792 /* Send each single data record in its own data set, to simplify the
1793 * implementation by avoiding having to group record by template ID
1794 * before sending. */
1796 ipfix_put_data_set(uint32_t export_time_sec
,
1797 struct ipfix_flow_cache_entry
*entry
,
1798 enum ipfix_flow_end_reason flow_end_reason
,
1799 const char *virtual_obs_id
,
1800 uint8_t virtual_obs_len
,
1801 struct dp_packet
*msg
)
1803 size_t set_hdr_offset
;
1804 struct ipfix_set_header
*set_hdr
;
1806 set_hdr_offset
= dp_packet_size(msg
);
1808 /* Put a Data Set. */
1809 set_hdr
= dp_packet_put_zeros(msg
, sizeof *set_hdr
);
1810 set_hdr
->set_id
= htons(entry
->flow_key
.template_id
);
1812 /* Copy the flow key part of the data record. */
1814 dp_packet_put(msg
, entry
->flow_key
.flow_key_msg_part
,
1815 entry
->flow_key
.flow_key_msg_part_size
);
1817 /* Export virtual observation ID. */
1818 if (virtual_obs_id
) {
1819 dp_packet_put(msg
, &virtual_obs_len
, sizeof(virtual_obs_len
));
1820 dp_packet_put(msg
, virtual_obs_id
, virtual_obs_len
);
1823 /* Put the non-key part of the data record. */
1826 struct ipfix_data_record_aggregated_common
*data_aggregated_common
;
1827 uint64_t export_time_usec
, flow_start_delta_usec
, flow_end_delta_usec
;
1829 /* Calculate the negative deltas relative to the export time
1830 * in seconds sent in the header, not the exact export
1832 export_time_usec
= 1000000LL * export_time_sec
;
1833 flow_start_delta_usec
= export_time_usec
1834 - entry
->flow_start_timestamp_usec
;
1835 flow_end_delta_usec
= export_time_usec
1836 - entry
->flow_end_timestamp_usec
;
1838 data_aggregated_common
= dp_packet_put_zeros(
1839 msg
, sizeof *data_aggregated_common
);
1840 data_aggregated_common
->flow_start_delta_microseconds
= htonl(
1841 flow_start_delta_usec
);
1842 data_aggregated_common
->flow_end_delta_microseconds
= htonl(
1843 flow_end_delta_usec
);
1844 data_aggregated_common
->packet_delta_count
= htonll(
1845 entry
->packet_delta_count
);
1846 data_aggregated_common
->layer2_octet_delta_count
= htonll(
1847 entry
->layer2_octet_delta_count
);
1848 data_aggregated_common
->flow_end_reason
= flow_end_reason
;
1851 if (entry
->octet_delta_sum_of_squares
) { /* IP packet. */
1852 struct ipfix_data_record_aggregated_ip
*data_aggregated_ip
;
1854 data_aggregated_ip
= dp_packet_put_zeros(
1855 msg
, sizeof *data_aggregated_ip
);
1856 data_aggregated_ip
->octet_delta_count
= htonll(
1857 entry
->octet_delta_count
);
1858 data_aggregated_ip
->octet_delta_sum_of_squares
= htonll(
1859 entry
->octet_delta_sum_of_squares
);
1860 data_aggregated_ip
->minimum_ip_total_length
= htonll(
1861 entry
->minimum_ip_total_length
);
1862 data_aggregated_ip
->maximum_ip_total_length
= htonll(
1863 entry
->maximum_ip_total_length
);
1866 set_hdr
= (struct ipfix_set_header
*)((uint8_t*)dp_packet_data(msg
) + set_hdr_offset
);
1867 set_hdr
->length
= htons(dp_packet_size(msg
) - set_hdr_offset
);
1870 /* Send an IPFIX message with a single data record. */
1872 ipfix_send_data_msg(struct dpif_ipfix_exporter
*exporter
,
1873 uint32_t export_time_sec
,
1874 struct ipfix_flow_cache_entry
*entry
,
1875 enum ipfix_flow_end_reason flow_end_reason
)
1877 uint64_t msg_stub
[DIV_ROUND_UP(MAX_MESSAGE_LEN
, 8)];
1878 struct dp_packet msg
;
1881 dp_packet_use_stub(&msg
, msg_stub
, sizeof msg_stub
);
1883 ipfix_init_header(export_time_sec
, exporter
->seq_number
++,
1884 entry
->flow_key
.obs_domain_id
, &msg
);
1885 ipfix_put_data_set(export_time_sec
, entry
, flow_end_reason
,
1886 exporter
->virtual_obs_id
, exporter
->virtual_obs_len
,
1888 tx_errors
= ipfix_send_msg(exporter
->collectors
, &msg
);
1890 dp_packet_uninit(&msg
);
1892 exporter
->stats
.current_flows
--;
1893 exporter
->stats
.tx_pkts
+= collectors_count(exporter
->collectors
) - tx_errors
;
1894 exporter
->stats
.tx_errors
+= tx_errors
;
1898 dpif_ipfix_sample(struct dpif_ipfix_exporter
*exporter
,
1899 const struct dp_packet
*packet
, const struct flow
*flow
,
1900 uint64_t packet_delta_count
, uint32_t obs_domain_id
,
1901 uint32_t obs_point_id
, odp_port_t output_odp_port
,
1902 const struct dpif_ipfix_port
*tunnel_port
,
1903 const struct flow_tnl
*tunnel_key
)
1905 struct ipfix_flow_cache_entry
*entry
;
1906 enum ipfix_sampled_packet_type sampled_packet_type
;
1908 /* Create a flow cache entry from the sample. */
1909 entry
= xmalloc(sizeof *entry
);
1910 sampled_packet_type
= ipfix_cache_entry_init(entry
, packet
,
1911 flow
, packet_delta_count
,
1912 obs_domain_id
, obs_point_id
,
1913 output_odp_port
, tunnel_port
,
1915 ipfix_cache_update(exporter
, entry
, sampled_packet_type
);
1919 bridge_exporter_enabled(struct dpif_ipfix
*di
)
1921 return di
->bridge_exporter
.probability
> 0;
1925 dpif_ipfix_bridge_sample(struct dpif_ipfix
*di
, const struct dp_packet
*packet
,
1926 const struct flow
*flow
,
1927 odp_port_t input_odp_port
, odp_port_t output_odp_port
,
1928 const struct flow_tnl
*output_tunnel_key
)
1931 uint64_t packet_delta_count
;
1932 const struct flow_tnl
*tunnel_key
= NULL
;
1933 struct dpif_ipfix_port
* tunnel_port
= NULL
;
1935 ovs_mutex_lock(&mutex
);
1936 if (!bridge_exporter_enabled(di
)) {
1937 ovs_mutex_unlock(&mutex
);
1941 /* Skip BFD packets:
1942 * Bidirectional Forwarding Detection(BFD) packets are for monitoring
1943 * the tunnel link status and consumed by ovs itself. No need to
1945 * CF IETF RFC 5881, BFD control packet is the UDP packet with
1946 * destination port 3784, and BFD echo packet is the UDP packet with
1947 * destination port 3785.
1949 if (is_ip_any(flow
) &&
1950 flow
->nw_proto
== IPPROTO_UDP
&&
1951 (flow
->tp_dst
== htons(BFD_CONTROL_DEST_PORT
) ||
1952 flow
->tp_dst
== htons(BFD_ECHO_DEST_PORT
))) {
1953 ovs_mutex_unlock(&mutex
);
1957 /* Use the sampling probability as an approximation of the number
1958 * of matched packets. */
1959 packet_delta_count
= UINT32_MAX
/ di
->bridge_exporter
.probability
;
1960 if (di
->bridge_exporter
.options
->enable_tunnel_sampling
) {
1961 if (output_odp_port
== ODPP_NONE
&& flow
->tunnel
.ip_dst
) {
1963 tunnel_key
= &flow
->tunnel
;
1964 tunnel_port
= dpif_ipfix_find_port(di
, input_odp_port
);
1966 if (output_odp_port
!= ODPP_NONE
&& output_tunnel_key
) {
1967 /* Output tunnel, output_tunnel_key must be valid. */
1968 tunnel_key
= output_tunnel_key
;
1969 tunnel_port
= dpif_ipfix_find_port(di
, output_odp_port
);
1973 dpif_ipfix_sample(&di
->bridge_exporter
.exporter
, packet
, flow
,
1975 di
->bridge_exporter
.options
->obs_domain_id
,
1976 di
->bridge_exporter
.options
->obs_point_id
,
1977 output_odp_port
, tunnel_port
, tunnel_key
);
1978 ovs_mutex_unlock(&mutex
);
1982 dpif_ipfix_flow_sample(struct dpif_ipfix
*di
, const struct dp_packet
*packet
,
1983 const struct flow
*flow
,
1984 const union user_action_cookie
*cookie
,
1985 odp_port_t input_odp_port
,
1986 const struct flow_tnl
*output_tunnel_key
)
1989 struct dpif_ipfix_flow_exporter_map_node
*node
;
1990 const struct flow_tnl
*tunnel_key
= NULL
;
1991 struct dpif_ipfix_port
* tunnel_port
= NULL
;
1992 odp_port_t output_odp_port
= cookie
->flow_sample
.output_odp_port
;
1993 uint32_t collector_set_id
= cookie
->flow_sample
.collector_set_id
;
1994 uint16_t probability
= cookie
->flow_sample
.probability
;
1996 /* Use the sampling probability as an approximation of the number
1997 * of matched packets. */
1998 uint64_t packet_delta_count
= USHRT_MAX
/ probability
;
2000 ovs_mutex_lock(&mutex
);
2001 node
= dpif_ipfix_find_flow_exporter_map_node(di
, collector_set_id
);
2003 if (node
->exporter
.options
->enable_tunnel_sampling
) {
2004 if (output_odp_port
== ODPP_NONE
&& flow
->tunnel
.ip_dst
) {
2006 tunnel_key
= &flow
->tunnel
;
2007 tunnel_port
= dpif_ipfix_find_port(di
, input_odp_port
);
2009 if (output_odp_port
!= ODPP_NONE
&& output_tunnel_key
) {
2010 /* Output tunnel, output_tunnel_key must be valid. */
2011 tunnel_key
= output_tunnel_key
;
2012 tunnel_port
= dpif_ipfix_find_port(di
, output_odp_port
);
2016 dpif_ipfix_sample(&node
->exporter
.exporter
, packet
, flow
,
2018 cookie
->flow_sample
.obs_domain_id
,
2019 cookie
->flow_sample
.obs_point_id
,
2020 output_odp_port
, tunnel_port
, tunnel_key
);
2022 ovs_mutex_unlock(&mutex
);
2026 dpif_ipfix_cache_expire(struct dpif_ipfix_exporter
*exporter
,
2027 bool forced_end
, const uint64_t export_time_usec
,
2028 const uint32_t export_time_sec
)
2030 struct ipfix_flow_cache_entry
*entry
, *next_entry
;
2031 uint64_t max_flow_start_timestamp_usec
;
2032 bool template_msg_sent
= false;
2033 enum ipfix_flow_end_reason flow_end_reason
;
2035 if (ovs_list_is_empty(&exporter
->cache_flow_start_timestamp_list
)) {
2039 max_flow_start_timestamp_usec
= export_time_usec
-
2040 1000000LL * exporter
->cache_active_timeout
;
2042 LIST_FOR_EACH_SAFE (entry
, next_entry
, cache_flow_start_timestamp_list_node
,
2043 &exporter
->cache_flow_start_timestamp_list
) {
2045 flow_end_reason
= FORCED_END
;
2046 } else if (entry
->flow_start_timestamp_usec
2047 <= max_flow_start_timestamp_usec
) {
2048 flow_end_reason
= ACTIVE_TIMEOUT
;
2049 } else if (hmap_count(&exporter
->cache_flow_key_map
)
2050 > exporter
->cache_max_flows
) {
2051 /* Enforce exporter->cache_max_flows. */
2052 flow_end_reason
= LACK_OF_RESOURCES
;
2054 /* Remaining flows haven't expired yet. */
2058 ovs_list_remove(&entry
->cache_flow_start_timestamp_list_node
);
2059 hmap_remove(&exporter
->cache_flow_key_map
,
2060 &entry
->flow_key_map_node
);
2062 if (!template_msg_sent
2063 && (exporter
->last_template_set_time
+ IPFIX_TEMPLATE_INTERVAL
)
2064 <= export_time_sec
) {
2065 ipfix_send_template_msgs(exporter
, export_time_sec
,
2066 entry
->flow_key
.obs_domain_id
);
2067 exporter
->last_template_set_time
= export_time_sec
;
2068 template_msg_sent
= true;
2071 /* XXX: Group multiple data records for the same obs domain id
2072 * into the same message. */
2073 ipfix_send_data_msg(exporter
, export_time_sec
, entry
, flow_end_reason
);
/* Stores the current wall-clock time into '*export_time_usec' as
 * microseconds since the epoch, and into '*export_time_sec' as seconds
 * rounded up to the next whole second. */
static void
get_export_time_now(uint64_t *export_time_usec, uint32_t *export_time_sec)
{
    struct timeval now;

    xgettimeofday(&now);

    *export_time_usec = now.tv_usec + 1000000LL * now.tv_sec;

    /* The IPFIX start and end deltas are negative deltas relative to
     * the export time, so set the export time 1 second off to
     * calculate those deltas. */
    if (now.tv_usec == 0) {
        *export_time_sec = now.tv_sec;
    } else {
        *export_time_sec = now.tv_sec + 1;
    }
}
/* Expires 'exporter''s flow cache entries using the current wall-clock time
 * as the export time.  With 'forced_end' set, every cached flow is
 * expired regardless of its age. */
static void
dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *exporter,
                            bool forced_end)
{
    uint64_t now_usec;
    uint32_t now_sec;

    get_export_time_now(&now_usec, &now_sec);
    dpif_ipfix_cache_expire(exporter, forced_end, now_usec, now_sec);
}
2109 dpif_ipfix_run(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
2111 uint64_t export_time_usec
;
2112 uint32_t export_time_sec
;
2113 struct dpif_ipfix_flow_exporter_map_node
*flow_exporter_node
;
2115 ovs_mutex_lock(&mutex
);
2116 get_export_time_now(&export_time_usec
, &export_time_sec
);
2117 if (bridge_exporter_enabled(di
)) {
2118 dpif_ipfix_cache_expire(
2119 &di
->bridge_exporter
.exporter
, false, export_time_usec
,
2122 HMAP_FOR_EACH (flow_exporter_node
, node
, &di
->flow_exporter_map
) {
2123 dpif_ipfix_cache_expire(
2124 &flow_exporter_node
->exporter
.exporter
, false, export_time_usec
,
2127 ovs_mutex_unlock(&mutex
);
2131 dpif_ipfix_wait(struct dpif_ipfix
*di
) OVS_EXCLUDED(mutex
)
2133 long long int next_timeout_msec
= LLONG_MAX
;
2134 struct dpif_ipfix_flow_exporter_map_node
*flow_exporter_node
;
2136 ovs_mutex_lock(&mutex
);
2137 if (bridge_exporter_enabled(di
)) {
2138 if (ipfix_cache_next_timeout_msec(
2139 &di
->bridge_exporter
.exporter
, &next_timeout_msec
)) {
2140 poll_timer_wait_until(next_timeout_msec
);
2143 HMAP_FOR_EACH (flow_exporter_node
, node
, &di
->flow_exporter_map
) {
2144 if (ipfix_cache_next_timeout_msec(
2145 &flow_exporter_node
->exporter
.exporter
, &next_timeout_msec
)) {
2146 poll_timer_wait_until(next_timeout_msec
);
2149 ovs_mutex_unlock(&mutex
);