ofproto/ofproto-dpif-sflow.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 * Copyright (c) 2009 InMon Corp.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <config.h>
19 #include "ofproto-dpif-sflow.h"
20 #include <inttypes.h>
21 #include <sys/resource.h>
22 #include <sys/socket.h>
23 #include <net/if.h>
24 #include <stdlib.h>
25 #include "collectors.h"
26 #include "compiler.h"
27 #include "dpif.h"
28 #include "hash.h"
29 #include "openvswitch/hmap.h"
30 #include "netdev.h"
31 #include "netlink.h"
32 #include "openvswitch/ofpbuf.h"
33 #include "ofproto.h"
34 #include "packets.h"
35 #include "poll-loop.h"
36 #include "ovs-router.h"
37 #include "route-table.h"
38 #include "sflow_api.h"
39 #include "socket-util.h"
40 #include "timeval.h"
41 #include "openvswitch/vlog.h"
42 #include "lib/odp-util.h"
43 #include "lib/unaligned.h"
44 #include "ofproto-provider.h"
45 #include "lacp.h"
46
47 VLOG_DEFINE_THIS_MODULE(sflow);
48
49 static struct ovs_mutex mutex;
50
51 /* This global var is used to determine which sFlow
52 sub-agent should send the datapath counters. */
53 #define SFLOW_GC_SUBID_UNCLAIMED (uint32_t)-1
54 static uint32_t sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
55
56 /*
57  * enum dpif_sflow_tunnel_type declares the supported tunnel types.
58 */
59 enum dpif_sflow_tunnel_type {
60 DPIF_SFLOW_TUNNEL_UNKNOWN = 0,
61 DPIF_SFLOW_TUNNEL_VXLAN,
62 DPIF_SFLOW_TUNNEL_GRE,
63 DPIF_SFLOW_TUNNEL_LISP,
64 DPIF_SFLOW_TUNNEL_IPSEC_GRE,
65 DPIF_SFLOW_TUNNEL_GENEVE
66 };
67
68 struct dpif_sflow_port {
69 struct hmap_node hmap_node; /* In struct dpif_sflow's "ports" hmap. */
70 SFLDataSource_instance dsi; /* sFlow library's notion of port number. */
71     struct ofport *ofport;      /* To retrieve port stats. */
72 odp_port_t odp_port;
73 enum dpif_sflow_tunnel_type tunnel_type;
74 };
75
76 struct dpif_sflow {
77 struct collectors *collectors;
78 SFLAgent *sflow_agent;
79 struct ofproto_sflow_options *options;
80 time_t next_tick;
81 size_t n_flood, n_all;
82 struct hmap ports; /* Contains "struct dpif_sflow_port"s. */
83 uint32_t probability;
84 struct ovs_refcount ref_cnt;
85 };
86
87 static void dpif_sflow_del_port__(struct dpif_sflow *,
88 struct dpif_sflow_port *);
89
90 #define RECEIVER_INDEX 1
91
92 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
93
94 static bool
95 ofproto_sflow_options_equal(const struct ofproto_sflow_options *a,
96 const struct ofproto_sflow_options *b)
97 {
98 return (sset_equals(&a->targets, &b->targets)
99 && a->sampling_rate == b->sampling_rate
100 && a->polling_interval == b->polling_interval
101 && a->header_len == b->header_len
102 && a->sub_id == b->sub_id
103 && nullable_string_is_equal(a->agent_device, b->agent_device)
104 && nullable_string_is_equal(a->control_ip, b->control_ip));
105 }
106
107 static struct ofproto_sflow_options *
108 ofproto_sflow_options_clone(const struct ofproto_sflow_options *old)
109 {
110 struct ofproto_sflow_options *new = xmemdup(old, sizeof *old);
111 sset_clone(&new->targets, &old->targets);
112 new->agent_device = nullable_xstrdup(old->agent_device);
113 new->control_ip = nullable_xstrdup(old->control_ip);
114 return new;
115 }
116
117 static void
118 ofproto_sflow_options_destroy(struct ofproto_sflow_options *options)
119 {
120 if (options) {
121 sset_destroy(&options->targets);
122 free(options->agent_device);
123 free(options->control_ip);
124 free(options);
125 }
126 }
127
128 /* sFlow library callback to allocate memory. */
129 static void *
130 sflow_agent_alloc_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
131 size_t bytes)
132 {
133 return xzalloc(bytes);
134 }
135
136 /* sFlow library callback to free memory. */
137 static int
138 sflow_agent_free_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
139 void *obj)
140 {
141 free(obj);
142 return 0;
143 }
144
145 /* sFlow library callback to report error. */
146 static void
147 sflow_agent_error_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
148 char *msg)
149 {
150 VLOG_WARN("sFlow agent error: %s", msg);
151 }
152
153 /* sFlow library callback to send datagram. */
154 static void
155 sflow_agent_send_packet_cb(void *ds_, SFLAgent *agent OVS_UNUSED,
156 SFLReceiver *receiver OVS_UNUSED, u_char *pkt,
157 uint32_t pktLen)
158 {
159 struct dpif_sflow *ds = ds_;
160 collectors_send(ds->collectors, pkt, pktLen);
161 }
162
163 static struct dpif_sflow_port *
164 dpif_sflow_find_port(const struct dpif_sflow *ds, odp_port_t odp_port)
165 OVS_REQUIRES(mutex)
166 {
167 struct dpif_sflow_port *dsp;
168
169 HMAP_FOR_EACH_IN_BUCKET (dsp, hmap_node, hash_odp_port(odp_port),
170 &ds->ports) {
171 if (dsp->odp_port == odp_port) {
172 return dsp;
173 }
174 }
175 return NULL;
176 }
177
178 /* Called to get the datapath stats. Modeled after the dpctl utility.
179  *
180  * It would be more efficient for this module to be given a handle it could
181  * use to fetch these stats directly, but this is only going to be called
182  * once every 20-30 seconds. Returns the number of datapaths found (normally
183  * expect 1). */
184 static int
185 sflow_get_dp_stats(struct dpif_sflow *ds OVS_UNUSED,
186 struct dpif_dp_stats *dp_totals)
187 {
188 struct sset types;
189 const char *type;
190 int count = 0;
191
192 memset(dp_totals, 0, sizeof *dp_totals);
193 sset_init(&types);
194 dp_enumerate_types(&types);
195 SSET_FOR_EACH (type, &types) {
196 struct sset names;
197 const char *name;
198 sset_init(&names);
199 if (dp_enumerate_names(type, &names) == 0) {
200 SSET_FOR_EACH (name, &names) {
201 struct dpif *dpif;
202 if (dpif_open(name, type, &dpif) == 0) {
203 struct dpif_dp_stats dp_stats;
204 if (dpif_get_dp_stats(dpif, &dp_stats) == 0) {
205 count++;
206 dp_totals->n_hit += dp_stats.n_hit;
207 dp_totals->n_missed += dp_stats.n_missed;
208 dp_totals->n_lost += dp_stats.n_lost;
209 dp_totals->n_flows += dp_stats.n_flows;
210 dp_totals->n_mask_hit += dp_stats.n_mask_hit;
211 dp_totals->n_masks += dp_stats.n_masks;
212 }
213 dpif_close(dpif);
214 }
215 }
216 sset_destroy(&names);
217 }
218 }
219 sset_destroy(&types);
220 return count;
221 }
222
223 /* If there are multiple bridges defined then we need some
224    minimal arbitration to decide which one should send the
225    global counters. This function allows each sub-agent to
226    ask whether it should do so. */
227 static bool
228 sflow_global_counters_subid_test(uint32_t subid)
229 OVS_REQUIRES(mutex)
230 {
231 if (sflow_global_counters_subid == SFLOW_GC_SUBID_UNCLAIMED) {
232 /* The role is up for grabs. */
233 sflow_global_counters_subid = subid;
234 }
235 return (sflow_global_counters_subid == subid);
236 }
237
238 static void
239 sflow_global_counters_subid_clear(uint32_t subid)
240 OVS_REQUIRES(mutex)
241 {
242 if (sflow_global_counters_subid == subid) {
243 /* The sub-agent that was sending global counters
244 is going away, so reset to allow another
245 to take over. */
246 sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
247 }
248 }
249
250 static void
251 sflow_agent_get_global_counters(void *ds_, SFLPoller *poller,
252 SFL_COUNTERS_SAMPLE_TYPE *cs)
253 OVS_REQUIRES(mutex)
254 {
255 struct dpif_sflow *ds = ds_;
256 SFLCounters_sample_element dp_elem, res_elem;
257 struct dpif_dp_stats dp_totals;
258 struct rusage usage;
259
260 if (!sflow_global_counters_subid_test(poller->agent->subId)) {
261 /* Another sub-agent is currently responsible for this. */
262 return;
263 }
264
265 /* datapath stats */
266 if (sflow_get_dp_stats(ds, &dp_totals)) {
267 dp_elem.tag = SFLCOUNTERS_OVSDP;
268 dp_elem.counterBlock.ovsdp.n_hit = dp_totals.n_hit;
269 dp_elem.counterBlock.ovsdp.n_missed = dp_totals.n_missed;
270 dp_elem.counterBlock.ovsdp.n_lost = dp_totals.n_lost;
271 dp_elem.counterBlock.ovsdp.n_mask_hit = dp_totals.n_mask_hit;
272 dp_elem.counterBlock.ovsdp.n_flows = dp_totals.n_flows;
273 dp_elem.counterBlock.ovsdp.n_masks = dp_totals.n_masks;
274 SFLADD_ELEMENT(cs, &dp_elem);
275 }
276
277 /* resource usage */
278 getrusage(RUSAGE_SELF, &usage);
279 res_elem.tag = SFLCOUNTERS_APP_RESOURCES;
280 res_elem.counterBlock.appResources.user_time
281 = timeval_to_msec(&usage.ru_utime);
282 res_elem.counterBlock.appResources.system_time
283 = timeval_to_msec(&usage.ru_stime);
284 res_elem.counterBlock.appResources.mem_used = (usage.ru_maxrss * 1024);
285 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.mem_max);
286 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_open);
287 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_max);
288 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_open);
289 SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_max);
290
291 SFLADD_ELEMENT(cs, &res_elem);
292 sfl_poller_writeCountersSample(poller, cs);
293 }
294
295 static void
296 sflow_agent_get_counters(void *ds_, SFLPoller *poller,
297 SFL_COUNTERS_SAMPLE_TYPE *cs)
298 OVS_REQUIRES(mutex)
299 {
300 struct dpif_sflow *ds = ds_;
301 SFLCounters_sample_element elem, lacp_elem, of_elem, name_elem;
302 enum netdev_features current;
303 struct dpif_sflow_port *dsp;
304 SFLIf_counters *counters;
305 struct netdev_stats stats;
306 enum netdev_flags flags;
307 struct lacp_slave_stats lacp_stats;
308 const char *ifName;
309
310 dsp = dpif_sflow_find_port(ds, u32_to_odp(poller->bridgePort));
311 if (!dsp) {
312 return;
313 }
314
315 elem.tag = SFLCOUNTERS_GENERIC;
316 counters = &elem.counterBlock.generic;
317 counters->ifIndex = SFL_DS_INDEX(poller->dsi);
318 counters->ifType = 6;
319 if (!netdev_get_features(dsp->ofport->netdev, &current, NULL, NULL, NULL)) {
320 /* The values of ifDirection come from MAU MIB (RFC 2668): 0 = unknown,
321            1 = full-duplex, 2 = half-duplex, 3 = in, 4 = out. */
322 counters->ifSpeed = netdev_features_to_bps(current, 0);
323 counters->ifDirection = (netdev_features_is_full_duplex(current)
324 ? 1 : 2);
325 } else {
326 counters->ifSpeed = 100000000;
327 counters->ifDirection = 0;
328 }
329 if (!netdev_get_flags(dsp->ofport->netdev, &flags) && flags & NETDEV_UP) {
330 counters->ifStatus = 1; /* ifAdminStatus up. */
331 if (netdev_get_carrier(dsp->ofport->netdev)) {
332             counters->ifStatus |= 2; /* ifOperStatus up. */
333 }
334 } else {
335 counters->ifStatus = 0; /* Down. */
336 }
337
338 /* XXX
339 1. Is the multicast counter filled in?
340 2. Does the multicast counter include broadcasts?
341 3. Does the rx_packets counter include multicasts/broadcasts?
342 */
343 ofproto_port_get_stats(dsp->ofport, &stats);
344 counters->ifInOctets = stats.rx_bytes;
345 counters->ifInUcastPkts = stats.rx_packets;
346 counters->ifInMulticastPkts = stats.multicast;
347 counters->ifInBroadcastPkts = -1;
348 counters->ifInDiscards = stats.rx_dropped;
349 counters->ifInErrors = stats.rx_errors;
350 counters->ifInUnknownProtos = -1;
351 counters->ifOutOctets = stats.tx_bytes;
352 counters->ifOutUcastPkts = stats.tx_packets;
353 counters->ifOutMulticastPkts = -1;
354 counters->ifOutBroadcastPkts = -1;
355 counters->ifOutDiscards = stats.tx_dropped;
356 counters->ifOutErrors = stats.tx_errors;
357 counters->ifPromiscuousMode = 0;
358
359 SFLADD_ELEMENT(cs, &elem);
360
361 /* Include LACP counters and identifiers if this port is part of a LAG. */
362 if (ofproto_port_get_lacp_stats(dsp->ofport, &lacp_stats) == 0) {
363 memset(&lacp_elem, 0, sizeof lacp_elem);
364 lacp_elem.tag = SFLCOUNTERS_LACP;
365 lacp_elem.counterBlock.lacp.actorSystemID =
366 lacp_stats.dot3adAggPortActorSystemID;
367 lacp_elem.counterBlock.lacp.partnerSystemID =
368 lacp_stats.dot3adAggPortPartnerOperSystemID;
369 lacp_elem.counterBlock.lacp.attachedAggID =
370 lacp_stats.dot3adAggPortAttachedAggID;
371 lacp_elem.counterBlock.lacp.portState.v.actorAdmin =
372 lacp_stats.dot3adAggPortActorAdminState;
373 lacp_elem.counterBlock.lacp.portState.v.actorOper =
374 lacp_stats.dot3adAggPortActorOperState;
375 lacp_elem.counterBlock.lacp.portState.v.partnerAdmin =
376 lacp_stats.dot3adAggPortPartnerAdminState;
377 lacp_elem.counterBlock.lacp.portState.v.partnerOper =
378 lacp_stats.dot3adAggPortPartnerOperState;
379 lacp_elem.counterBlock.lacp.LACPDUsRx =
380 lacp_stats.dot3adAggPortStatsLACPDUsRx;
381 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsRx);
382 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsRx);
383 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.unknownRx);
384 lacp_elem.counterBlock.lacp.illegalRx =
385 lacp_stats.dot3adAggPortStatsIllegalRx;
386 lacp_elem.counterBlock.lacp.LACPDUsTx =
387 lacp_stats.dot3adAggPortStatsLACPDUsTx;
388 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsTx);
389 SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsTx);
390 SFLADD_ELEMENT(cs, &lacp_elem);
391 }
392
393 /* Include Port name. */
394 if ((ifName = netdev_get_name(dsp->ofport->netdev)) != NULL) {
395 memset(&name_elem, 0, sizeof name_elem);
396 name_elem.tag = SFLCOUNTERS_PORTNAME;
397 name_elem.counterBlock.portName.portName.str = (char *)ifName;
398 name_elem.counterBlock.portName.portName.len = strlen(ifName);
399 SFLADD_ELEMENT(cs, &name_elem);
400 }
401
402 /* Include OpenFlow DPID and openflow port number. */
403 memset(&of_elem, 0, sizeof of_elem);
404 of_elem.tag = SFLCOUNTERS_OPENFLOWPORT;
405 of_elem.counterBlock.ofPort.datapath_id =
406 ofproto_get_datapath_id(dsp->ofport->ofproto);
407 of_elem.counterBlock.ofPort.port_no =
408 (OVS_FORCE uint32_t)dsp->ofport->ofp_port;
409 SFLADD_ELEMENT(cs, &of_elem);
410
411 sfl_poller_writeCountersSample(poller, cs);
412 }
413
414 /* Obtains an address to use for the local sFlow agent and stores it into
415 * '*agent_addr'. Returns true if successful, false on failure.
416 *
417 * The sFlow agent address should be a local IP address that is persistent and
418 * reachable over the network, if possible. The IP address associated with
419  * 'agent_device' is used if it has one.  Otherwise the routing table is
420  * consulted for a local source address on the route toward 'targets', and
421  * failing that, 'control_ip', the IP address used to talk to the
422  * controller, is used. */
423 static bool
424 sflow_choose_agent_address(const char *agent_device,
425 const struct sset *targets,
426 const char *control_ip,
427 SFLAddress *agent_addr)
428 {
429 const char *target;
430 struct in_addr in4;
431
432 memset(agent_addr, 0, sizeof *agent_addr);
433 agent_addr->type = SFLADDRESSTYPE_IP_V4;
434
435 if (agent_device) {
436 if (!netdev_get_in4_by_name(agent_device, &in4)) {
437 goto success;
438 }
439 }
440
441 SSET_FOR_EACH (target, targets) {
442 union {
443 struct sockaddr_storage ss;
444 struct sockaddr_in sin;
445 } sa;
446 char name[IFNAMSIZ];
447
448 if (inet_parse_active(target, SFL_DEFAULT_COLLECTOR_PORT, &sa.ss)
449 && sa.ss.ss_family == AF_INET) {
450 struct in6_addr addr6, src, gw;
451
452 in6_addr_set_mapped_ipv4(&addr6, sa.sin.sin_addr.s_addr);
453 if (ovs_router_lookup(&addr6, name, &src, &gw)) {
454
455 in4.s_addr = in6_addr_get_mapped_ipv4(&src);
456 goto success;
457 }
458 }
459 }
460
461 if (control_ip && !lookup_ip(control_ip, &in4)) {
462 goto success;
463 }
464
465 VLOG_ERR("could not determine IP address for sFlow agent");
466 return false;
467
468 success:
469 agent_addr->address.ip_v4.addr = (OVS_FORCE uint32_t) in4.s_addr;
470 return true;
471 }
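
/* Worked example of the selection order above (hypothetical addresses):
 * with 'agent_device' unset, 'targets' containing "10.0.0.20" and
 * 'control_ip' set to "192.168.1.10", the route lookup toward 10.0.0.20
 * supplies the agent address; only if that lookup fails is the
 * 192.168.1.10 controller address used instead. */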
472
473 static void
474 dpif_sflow_clear__(struct dpif_sflow *ds) OVS_REQUIRES(mutex)
475 {
476 if (ds->sflow_agent) {
477 sflow_global_counters_subid_clear(ds->sflow_agent->subId);
478 sfl_agent_release(ds->sflow_agent);
479 free(ds->sflow_agent);
480 ds->sflow_agent = NULL;
481 }
482 collectors_destroy(ds->collectors);
483 ds->collectors = NULL;
484 ofproto_sflow_options_destroy(ds->options);
485 ds->options = NULL;
486
487 /* Turn off sampling to save CPU cycles. */
488 ds->probability = 0;
489 }
490
491 void
492 dpif_sflow_clear(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
493 {
494 ovs_mutex_lock(&mutex);
495 dpif_sflow_clear__(ds);
496 ovs_mutex_unlock(&mutex);
497 }
498
499 bool
500 dpif_sflow_is_enabled(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
501 {
502 bool enabled;
503
504 ovs_mutex_lock(&mutex);
505 enabled = ds->collectors != NULL;
506 ovs_mutex_unlock(&mutex);
507 return enabled;
508 }
509
510 struct dpif_sflow *
511 dpif_sflow_create(void)
512 {
513 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
514 struct dpif_sflow *ds;
515
516 if (ovsthread_once_start(&once)) {
517 ovs_mutex_init_recursive(&mutex);
518 ovsthread_once_done(&once);
519 }
520
521 ds = xcalloc(1, sizeof *ds);
522 ds->next_tick = time_now() + 1;
523 hmap_init(&ds->ports);
524 ds->probability = 0;
525 ovs_refcount_init(&ds->ref_cnt);
526
527 return ds;
528 }
529
530 struct dpif_sflow *
531 dpif_sflow_ref(const struct dpif_sflow *ds_)
532 {
533 struct dpif_sflow *ds = CONST_CAST(struct dpif_sflow *, ds_);
534 if (ds) {
535 ovs_refcount_ref(&ds->ref_cnt);
536 }
537 return ds;
538 }
539
540 /* 32-bit fraction of packets to sample with. A value of 0 samples no packets,
541 * a value of %UINT32_MAX samples all packets and intermediate values sample
542 * intermediate fractions of packets. */
543 uint32_t
544 dpif_sflow_get_probability(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
545 {
546 uint32_t probability;
547 ovs_mutex_lock(&mutex);
548 probability = ds->probability;
549 ovs_mutex_unlock(&mutex);
550 return probability;
551 }
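
/* Worked example (illustrative numbers): with options->sampling_rate == 400,
 * dpif_sflow_set_options() below computes
 *     ds->probability = MAX(1, UINT32_MAX / 400)    (roughly 10.7 million)
 * so the datapath samples a packet whenever its 32-bit random value falls
 * below that threshold, i.e. about 1 packet in 400 on average. */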
552
553 void
554 dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
555 {
556 if (ds && ovs_refcount_unref_relaxed(&ds->ref_cnt) == 1) {
557 struct dpif_sflow_port *dsp, *next;
558
559 dpif_sflow_clear(ds);
560 HMAP_FOR_EACH_SAFE (dsp, next, hmap_node, &ds->ports) {
561 dpif_sflow_del_port__(ds, dsp);
562 }
563 hmap_destroy(&ds->ports);
564 free(ds);
565 }
566 }
567
568 static void
569 dpif_sflow_add_poller(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
570 OVS_REQUIRES(mutex)
571 {
572 SFLPoller *poller = sfl_agent_addPoller(ds->sflow_agent, &dsp->dsi, ds,
573 sflow_agent_get_counters);
574 sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
575 sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);
576 sfl_poller_set_bridgePort(poller, odp_to_u32(dsp->odp_port));
577 }
578
579 static enum dpif_sflow_tunnel_type
580 dpif_sflow_tunnel_type(struct ofport *ofport) {
581 const char *type = netdev_get_type(ofport->netdev);
582 if (type) {
583 if (strcmp(type, "gre") == 0) {
584 return DPIF_SFLOW_TUNNEL_GRE;
585 } else if (strcmp(type, "ipsec_gre") == 0) {
586 return DPIF_SFLOW_TUNNEL_IPSEC_GRE;
587 } else if (strcmp(type, "vxlan") == 0) {
588 return DPIF_SFLOW_TUNNEL_VXLAN;
589 } else if (strcmp(type, "lisp") == 0) {
590 return DPIF_SFLOW_TUNNEL_LISP;
591 } else if (strcmp(type, "geneve") == 0) {
592 return DPIF_SFLOW_TUNNEL_GENEVE;
593 }
594 }
595 return DPIF_SFLOW_TUNNEL_UNKNOWN;
596 }
597
598 static uint8_t
599 dpif_sflow_tunnel_proto(enum dpif_sflow_tunnel_type tunnel_type)
600 {
601 /* Default to 0 (IPPROTO_IP), meaning "unknown". */
602 uint8_t ipproto = 0;
603 switch(tunnel_type) {
604
605 case DPIF_SFLOW_TUNNEL_GRE:
606 ipproto = IPPROTO_GRE;
607 break;
608
609 case DPIF_SFLOW_TUNNEL_IPSEC_GRE:
610 ipproto = IPPROTO_ESP;
611 break;
612
613 case DPIF_SFLOW_TUNNEL_VXLAN:
614 case DPIF_SFLOW_TUNNEL_LISP:
615 case DPIF_SFLOW_TUNNEL_GENEVE:
616 ipproto = IPPROTO_UDP;
617
618 case DPIF_SFLOW_TUNNEL_UNKNOWN:
619 break;
620 }
621 return ipproto;
622 }
623
624 void
625 dpif_sflow_add_port(struct dpif_sflow *ds, struct ofport *ofport,
626 odp_port_t odp_port) OVS_EXCLUDED(mutex)
627 {
628 struct dpif_sflow_port *dsp;
629 int ifindex;
630 enum dpif_sflow_tunnel_type tunnel_type;
631
632 ovs_mutex_lock(&mutex);
633 dpif_sflow_del_port(ds, odp_port);
634
635 tunnel_type = dpif_sflow_tunnel_type(ofport);
636 ifindex = netdev_get_ifindex(ofport->netdev);
637
638 if (ifindex <= 0
639 && tunnel_type == DPIF_SFLOW_TUNNEL_UNKNOWN) {
640         /* Not an ifindex port, and not a tunnel port either,
641          * so do not add a cross-reference to it here.
642 */
643 goto out;
644 }
645
646 /* Add to table of ports. */
647 dsp = xmalloc(sizeof *dsp);
648 dsp->ofport = ofport;
649 dsp->odp_port = odp_port;
650 dsp->tunnel_type = tunnel_type;
651 hmap_insert(&ds->ports, &dsp->hmap_node, hash_odp_port(odp_port));
652
653 if (ifindex > 0) {
654 /* Add poller for ports that have ifindex. */
655 SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, ifindex, 0);
656 if (ds->sflow_agent) {
657 dpif_sflow_add_poller(ds, dsp);
658 }
659 } else {
660 /* Record "ifindex unknown" for the others */
661 SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, 0, 0);
662 }
663
664 out:
665 ovs_mutex_unlock(&mutex);
666 }
667
668 static void
669 dpif_sflow_del_port__(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
670 OVS_REQUIRES(mutex)
671 {
672 if (ds->sflow_agent
673 && SFL_DS_INDEX(dsp->dsi)) {
674 sfl_agent_removePoller(ds->sflow_agent, &dsp->dsi);
675 sfl_agent_removeSampler(ds->sflow_agent, &dsp->dsi);
676 }
677 hmap_remove(&ds->ports, &dsp->hmap_node);
678 free(dsp);
679 }
680
681 void
682 dpif_sflow_del_port(struct dpif_sflow *ds, odp_port_t odp_port)
683 OVS_EXCLUDED(mutex)
684 {
685 struct dpif_sflow_port *dsp;
686
687 ovs_mutex_lock(&mutex);
688 dsp = dpif_sflow_find_port(ds, odp_port);
689 if (dsp) {
690 dpif_sflow_del_port__(ds, dsp);
691 }
692 ovs_mutex_unlock(&mutex);
693 }
694
695 void
696 dpif_sflow_set_options(struct dpif_sflow *ds,
697 const struct ofproto_sflow_options *options)
698 OVS_EXCLUDED(mutex)
699 {
700 struct dpif_sflow_port *dsp;
701 bool options_changed;
702 SFLReceiver *receiver;
703 SFLAddress agentIP;
704 time_t now;
705 SFLDataSource_instance dsi;
706 uint32_t dsIndex;
707 SFLSampler *sampler;
708 SFLPoller *poller;
709
710 ovs_mutex_lock(&mutex);
711 if (sset_is_empty(&options->targets) || !options->sampling_rate) {
712 /* No point in doing any work if there are no targets or nothing to
713 * sample. */
714 dpif_sflow_clear__(ds);
715 goto out;
716 }
717
718 options_changed = (!ds->options
719 || !ofproto_sflow_options_equal(options, ds->options));
720
721 /* Configure collectors if options have changed or if we're shortchanged in
722 * collectors (which indicates that opening one or more of the configured
723 * collectors failed, so that we should retry). */
724 if (options_changed
725 || collectors_count(ds->collectors) < sset_count(&options->targets)) {
726 collectors_destroy(ds->collectors);
727 collectors_create(&options->targets, SFL_DEFAULT_COLLECTOR_PORT,
728 &ds->collectors);
729 if (ds->collectors == NULL) {
730 VLOG_WARN_RL(&rl, "no collectors could be initialized, "
731 "sFlow disabled");
732 dpif_sflow_clear__(ds);
733 goto out;
734 }
735 }
736
737     /* Choose agent IP address and agent device (if not yet set up). */
738 if (!sflow_choose_agent_address(options->agent_device,
739 &options->targets,
740 options->control_ip, &agentIP)) {
741 dpif_sflow_clear__(ds);
742 goto out;
743 }
744
745 /* Avoid reconfiguring if options didn't change. */
746 if (!options_changed) {
747 goto out;
748 }
749 ofproto_sflow_options_destroy(ds->options);
750 ds->options = ofproto_sflow_options_clone(options);
751
752 /* Create agent. */
753 VLOG_INFO("creating sFlow agent %d", options->sub_id);
754 if (ds->sflow_agent) {
755 sflow_global_counters_subid_clear(ds->sflow_agent->subId);
756 sfl_agent_release(ds->sflow_agent);
757 }
758 ds->sflow_agent = xcalloc(1, sizeof *ds->sflow_agent);
759 now = time_wall();
760 sfl_agent_init(ds->sflow_agent,
761 &agentIP,
762 options->sub_id,
763 now, /* Boot time. */
764 now, /* Current time. */
765 ds, /* Pointer supplied to callbacks. */
766 sflow_agent_alloc_cb,
767 sflow_agent_free_cb,
768 sflow_agent_error_cb,
769 sflow_agent_send_packet_cb);
770
771 receiver = sfl_agent_addReceiver(ds->sflow_agent);
772 sfl_receiver_set_sFlowRcvrOwner(receiver, "Open vSwitch sFlow");
773 sfl_receiver_set_sFlowRcvrTimeout(receiver, 0xffffffff);
774
775 /* Set the sampling_rate down in the datapath. */
776 ds->probability = MAX(1, UINT32_MAX / ds->options->sampling_rate);
777
778 /* Add a single sampler for the bridge. This appears as a PHYSICAL_ENTITY
779 because it is associated with the hypervisor, and interacts with the server
780 hardware directly. The sub_id is used to distinguish this sampler from
781 others on other bridges within the same agent. */
782 dsIndex = 1000 + options->sub_id;
783 SFL_DS_SET(dsi, SFL_DSCLASS_PHYSICAL_ENTITY, dsIndex, 0);
784 sampler = sfl_agent_addSampler(ds->sflow_agent, &dsi);
785 sfl_sampler_set_sFlowFsPacketSamplingRate(sampler, ds->options->sampling_rate);
786 sfl_sampler_set_sFlowFsMaximumHeaderSize(sampler, ds->options->header_len);
787 sfl_sampler_set_sFlowFsReceiver(sampler, RECEIVER_INDEX);
788
789 /* Add a counter poller for the bridge so we can use it to send
790 global counters such as datapath cache hit/miss stats. */
791 poller = sfl_agent_addPoller(ds->sflow_agent, &dsi, ds,
792 sflow_agent_get_global_counters);
793 sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
794 sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);
795
796 /* Add pollers for the currently known ifindex-ports */
797 HMAP_FOR_EACH (dsp, hmap_node, &ds->ports) {
798 if (SFL_DS_INDEX(dsp->dsi)) {
799 dpif_sflow_add_poller(ds, dsp);
800 }
801 }
802
803
804 out:
805 ovs_mutex_unlock(&mutex);
806 }
807
808 int
809 dpif_sflow_odp_port_to_ifindex(const struct dpif_sflow *ds,
810 odp_port_t odp_port) OVS_EXCLUDED(mutex)
811 {
812 struct dpif_sflow_port *dsp;
813 int ret;
814
815 ovs_mutex_lock(&mutex);
816 dsp = dpif_sflow_find_port(ds, odp_port);
817 ret = dsp ? SFL_DS_INDEX(dsp->dsi) : 0;
818 ovs_mutex_unlock(&mutex);
819 return ret;
820 }
821
822 static void
823 dpif_sflow_tunnel_v4(uint8_t tunnel_ipproto,
824 const struct flow_tnl *tunnel,
825 SFLSampled_ipv4 *ipv4)
826
827 {
828 ipv4->protocol = tunnel_ipproto;
829 ipv4->tos = tunnel->ip_tos;
830 ipv4->src_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_src;
831 ipv4->dst_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_dst;
832 ipv4->src_port = (OVS_FORCE uint16_t) tunnel->tp_src;
833 ipv4->dst_port = (OVS_FORCE uint16_t) tunnel->tp_dst;
834 }
835
836 static void
837 dpif_sflow_push_mpls_lse(struct dpif_sflow_actions *sflow_actions,
838 ovs_be32 lse)
839 {
840 if (sflow_actions->mpls_stack_depth >= FLOW_MAX_MPLS_LABELS) {
841 sflow_actions->mpls_err = true;
842 return;
843 }
844
845 /* Record the new lse in host-byte-order. */
846 /* BOS flag will be fixed later when we send stack to sFlow library. */
847 sflow_actions->mpls_lse[sflow_actions->mpls_stack_depth++] = ntohl(lse);
848 }
849
850 static void
851 dpif_sflow_pop_mpls_lse(struct dpif_sflow_actions *sflow_actions)
852 {
853 if (sflow_actions->mpls_stack_depth == 0) {
854 sflow_actions->mpls_err = true;
855 return;
856 }
857 sflow_actions->mpls_stack_depth--;
858 }
859
860 static void
861 dpif_sflow_set_mpls(struct dpif_sflow_actions *sflow_actions,
862 const struct ovs_key_mpls *mpls_key, int n)
863 {
864 int ii;
865 if (n > FLOW_MAX_MPLS_LABELS) {
866 sflow_actions->mpls_err = true;
867 return;
868 }
869
870 for (ii = 0; ii < n; ii++) {
871 /* Reverse stack order, and use host-byte-order for each lse. */
872 sflow_actions->mpls_lse[n - ii - 1] = ntohl(mpls_key[ii].mpls_lse);
873 }
874 sflow_actions->mpls_stack_depth = n;
875 }
876
877 static void
878 sflow_read_tnl_push_action(const struct nlattr *attr,
879 struct dpif_sflow_actions *sflow_actions)
880 {
881 /* Modeled on lib/odp-util.c: format_odp_tnl_push_header */
882 const struct ovs_action_push_tnl *data = nl_attr_get(attr);
883 const struct eth_header *eth = (const struct eth_header *) data->header;
884 const struct ip_header *ip
885 = ALIGNED_CAST(const struct ip_header *, eth + 1);
886
887 sflow_actions->out_port = u32_to_odp(data->out_port);
888
889 /* Ethernet. */
890     /* TODO: sFlow does not currently define a MAC-in-MAC
891 * encapsulation structure. We could use an extension
892 * structure to report this.
893 */
894
895 /* IPv4 */
896     /* Cannot assume 32-bit alignment, so use the 16-bit-aligned accessors. */
897 sflow_actions->tunnel.ip_src = get_16aligned_be32(&ip->ip_src);
898 sflow_actions->tunnel.ip_dst = get_16aligned_be32(&ip->ip_dst);
899 sflow_actions->tunnel.ip_tos = ip->ip_tos;
900 sflow_actions->tunnel.ip_ttl = ip->ip_ttl;
901 /* The tnl_push action can supply the ip_protocol too. */
902 sflow_actions->tunnel_ipproto = ip->ip_proto;
903
904 /* Layer 4 */
905 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN
906 || data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
907 const struct udp_header *udp = (const struct udp_header *) (ip + 1);
908 sflow_actions->tunnel.tp_src = udp->udp_src;
909 sflow_actions->tunnel.tp_dst = udp->udp_dst;
910
911 if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
912 const struct vxlanhdr *vxh = (const struct vxlanhdr *) (udp + 1);
913 uint64_t tun_id = ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8;
914 sflow_actions->tunnel.tun_id = htonll(tun_id);
915 } else {
916 const struct genevehdr *gnh = (const struct genevehdr *) (udp + 1);
917 uint64_t tun_id = ntohl(get_16aligned_be32(&gnh->vni)) >> 8;
918 sflow_actions->tunnel.tun_id = htonll(tun_id);
919 }
920 } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
921 const void *l4 = ip + 1;
922 const struct gre_base_hdr *greh = (const struct gre_base_hdr *) l4;
923 ovs_16aligned_be32 *options = (ovs_16aligned_be32 *)(greh + 1);
924 if (greh->flags & htons(GRE_CSUM)) {
925 options++;
926 }
927 if (greh->flags & htons(GRE_KEY)) {
928 uint64_t tun_id = ntohl(get_16aligned_be32(options));
929 sflow_actions->tunnel.tun_id = htonll(tun_id);
930 }
931 }
932 }
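
/* Sketch of the tnl_push header layout walked above, assuming an IPv4
 * VXLAN header (other tunnel types differ after the IP header):
 *
 *     Ethernet | IPv4 | UDP | VXLAN (8 bytes, VNI in the top 24 bits of
 *                                    the second 32-bit word)
 *
 * so the VNI is recovered as (ntohl(vx_vni) >> 8).  For GRE, the key (if
 * present) follows the base header and the optional checksum word. */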
933
934 static void
935 sflow_read_set_action(const struct nlattr *attr,
936 struct dpif_sflow_actions *sflow_actions)
937 {
938 enum ovs_key_attr type = nl_attr_type(attr);
939 switch (type) {
940 case OVS_KEY_ATTR_ENCAP:
941 if (++sflow_actions->encap_depth > 1) {
942 /* Do not handle multi-encap for now. */
943 sflow_actions->tunnel_err = true;
944 } else {
945 dpif_sflow_read_actions(NULL,
946 nl_attr_get(attr), nl_attr_get_size(attr),
947 sflow_actions);
948 }
949 break;
950 case OVS_KEY_ATTR_PRIORITY:
951 case OVS_KEY_ATTR_SKB_MARK:
952 case OVS_KEY_ATTR_DP_HASH:
953 case OVS_KEY_ATTR_RECIRC_ID:
954 break;
955
956 case OVS_KEY_ATTR_TUNNEL: {
957 if (++sflow_actions->encap_depth > 1) {
958 /* Do not handle multi-encap for now. */
959 sflow_actions->tunnel_err = true;
960 } else {
961 if (odp_tun_key_from_attr(attr, &sflow_actions->tunnel)
962 == ODP_FIT_ERROR) {
963 /* Tunnel parsing error. */
964 sflow_actions->tunnel_err = true;
965 }
966 }
967 break;
968 }
969
970 case OVS_KEY_ATTR_IN_PORT:
971 case OVS_KEY_ATTR_ETHERNET:
972 case OVS_KEY_ATTR_VLAN:
973 break;
974
975 case OVS_KEY_ATTR_MPLS: {
976 const struct ovs_key_mpls *mpls_key = nl_attr_get(attr);
977 size_t size = nl_attr_get_size(attr);
978 dpif_sflow_set_mpls(sflow_actions, mpls_key, size / sizeof *mpls_key);
979 break;
980 }
981
982 case OVS_KEY_ATTR_ETHERTYPE:
983 case OVS_KEY_ATTR_IPV4:
984 if (sflow_actions->encap_depth == 1) {
985 const struct ovs_key_ipv4 *key = nl_attr_get(attr);
986 if (key->ipv4_src) {
987 sflow_actions->tunnel.ip_src = key->ipv4_src;
988 }
989 if (key->ipv4_dst) {
990 sflow_actions->tunnel.ip_dst = key->ipv4_dst;
991 }
992 if (key->ipv4_proto) {
993 sflow_actions->tunnel_ipproto = key->ipv4_proto;
994 }
995 if (key->ipv4_tos) {
996 sflow_actions->tunnel.ip_tos = key->ipv4_tos;
997 }
998 if (key->ipv4_ttl) {
999                 sflow_actions->tunnel.ip_ttl = key->ipv4_ttl;
1000 }
1001 }
1002 break;
1003
1004 case OVS_KEY_ATTR_IPV6:
1005 /* TODO: parse IPv6 encap. */
1006 break;
1007
1008 /* These have the same structure and format. */
1009 case OVS_KEY_ATTR_TCP:
1010 case OVS_KEY_ATTR_UDP:
1011 case OVS_KEY_ATTR_SCTP:
1012 if (sflow_actions->encap_depth == 1) {
1013 const struct ovs_key_tcp *key = nl_attr_get(attr);
1014 if (key->tcp_src) {
1015 sflow_actions->tunnel.tp_src = key->tcp_src;
1016 }
1017 if (key->tcp_dst) {
1018 sflow_actions->tunnel.tp_dst = key->tcp_dst;
1019 }
1020 }
1021 break;
1022
1023 case OVS_KEY_ATTR_TCP_FLAGS:
1024 case OVS_KEY_ATTR_ICMP:
1025 case OVS_KEY_ATTR_ICMPV6:
1026 case OVS_KEY_ATTR_ARP:
1027 case OVS_KEY_ATTR_ND:
1028 case OVS_KEY_ATTR_CT_STATE:
1029 case OVS_KEY_ATTR_CT_ZONE:
1030 case OVS_KEY_ATTR_CT_MARK:
1031 case OVS_KEY_ATTR_CT_LABELS:
1032 case OVS_KEY_ATTR_UNSPEC:
1033 case __OVS_KEY_ATTR_MAX:
1034 default:
1035 break;
1036 }
1037 }
1038
1039 static void
1040 dpif_sflow_capture_input_mpls(const struct flow *flow,
1041 struct dpif_sflow_actions *sflow_actions)
1042 {
1043 if (eth_type_mpls(flow->dl_type)) {
1044 int depth = 0;
1045 int ii;
1046 ovs_be32 lse;
1047 /* Calculate depth by detecting BOS. */
1048 for (ii = 0; ii < FLOW_MAX_MPLS_LABELS; ii++) {
1049 lse = flow->mpls_lse[ii];
1050 depth++;
1051 if (lse & htonl(MPLS_BOS_MASK)) {
1052 break;
1053 }
1054 }
1055 /* Capture stack, reversing stack order, and
1056 * using host-byte-order for each lse. BOS flag
1057 * is ignored for now. It is set later when
1058 * the output stack is encoded.
1059 */
1060 for (ii = 0; ii < depth; ii++) {
1061 lse = flow->mpls_lse[ii];
1062 sflow_actions->mpls_lse[depth - ii - 1] = ntohl(lse);
1063 }
1064 sflow_actions->mpls_stack_depth = depth;
1065 }
1066 }
1067
1068 void
1069 dpif_sflow_read_actions(const struct flow *flow,
1070 const struct nlattr *actions, size_t actions_len,
1071 struct dpif_sflow_actions *sflow_actions)
1072 {
1073 const struct nlattr *a;
1074 unsigned int left;
1075
1076 if (actions_len == 0) {
1077         /* Packet dropped. */
1078 return;
1079 }
1080
1081 if (flow != NULL) {
1082 /* Make sure the MPLS output stack
1083 * is seeded with the input stack.
1084 */
1085 dpif_sflow_capture_input_mpls(flow, sflow_actions);
1086
1087 /* XXX when 802.1AD(QinQ) is supported then
1088 * we can do the same with VLAN stacks here
1089 */
1090 }
1091
1092 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1093 enum ovs_action_attr type = nl_attr_type(a);
1094 switch (type) {
1095 case OVS_ACTION_ATTR_OUTPUT:
1096 /* Capture the output port in case we need it
1097 * to get the output tunnel type.
1098 */
1099 sflow_actions->out_port = u32_to_odp(nl_attr_get_u32(a));
1100 break;
1101
1102 case OVS_ACTION_ATTR_TUNNEL_POP:
1103 /* XXX: Do not handle this for now. It's not clear
1104 * if we should start with encap_depth == 1 when we
1105 * see an input tunnel, or if we should assume
1106 * that the input tunnel was always "popped" if it
1107 * was presented to us decoded in flow->tunnel?
1108 *
1109 * If we do handle this it might look like this,
1110 * as we clear the captured tunnel info and decrement
1111 * the encap_depth:
1112 *
1113          * memset(&sflow_actions->tunnel, 0, sizeof sflow_actions->tunnel);
1114 * sflow_actions->tunnel_ipproto = 0;
1115 * --sflow_actions->encap_depth;
1116 *
1117 * but for now just disable the tunnel annotation:
1118 */
1119 sflow_actions->tunnel_err = true;
1120 break;
1121
1122 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1123         /* XXX: This action appears to come with its own
1124 * OUTPUT action, so should it be regarded as having
1125 * an implicit "pop" following it too? Put another
1126 * way, would two tnl_push() actions in succession
1127 * result in a packet with two layers of encap?
1128 */
1129 if (++sflow_actions->encap_depth > 1) {
1130 /* Do not handle multi-encap for now. */
1131 sflow_actions->tunnel_err = true;
1132 } else {
1133 sflow_read_tnl_push_action(a, sflow_actions);
1134 }
1135 break;
1136
1137 case OVS_ACTION_ATTR_TRUNC:
1138 case OVS_ACTION_ATTR_USERSPACE:
1139 case OVS_ACTION_ATTR_RECIRC:
1140 case OVS_ACTION_ATTR_HASH:
1141 case OVS_ACTION_ATTR_CT:
1142 break;
1143
1144 case OVS_ACTION_ATTR_SET_MASKED:
1145 /* TODO: apply mask. XXX: Are we likely to see this? */
1146 break;
1147
1148 case OVS_ACTION_ATTR_SET:
1149 sflow_read_set_action(nl_attr_get(a), sflow_actions);
1150 break;
1151
1152 case OVS_ACTION_ATTR_PUSH_VLAN:
1153 case OVS_ACTION_ATTR_POP_VLAN:
1154 /* TODO: 802.1AD(QinQ) is not supported by OVS (yet), so do not
1155 * construct a VLAN-stack. The sFlow user-action cookie already
1156 * captures the egress VLAN ID so there is nothing more to do here.
1157 */
1158 break;
1159
1160 case OVS_ACTION_ATTR_PUSH_MPLS: {
1161 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1162 if (mpls) {
1163 dpif_sflow_push_mpls_lse(sflow_actions, mpls->mpls_lse);
1164 }
1165 break;
1166 }
1167 case OVS_ACTION_ATTR_POP_MPLS: {
1168 dpif_sflow_pop_mpls_lse(sflow_actions);
1169 break;
1170 }
1171 case OVS_ACTION_ATTR_SAMPLE:
1172 case OVS_ACTION_ATTR_UNSPEC:
1173 case __OVS_ACTION_ATTR_MAX:
1174 default:
1175 break;
1176 }
1177 }
1178 }
1179
1180 static void
1181 dpif_sflow_encode_mpls_stack(SFLLabelStack *stack,
1182 uint32_t *mpls_lse_buf,
1183 const struct dpif_sflow_actions *sflow_actions)
1184 {
1185 /* Put the MPLS stack back into "packet header" order,
1186 * and make sure the BOS flag is set correctly on the last
1187 * one. Each lse is still in host-byte-order.
1188 */
1189 int ii;
1190 uint32_t lse;
1191 stack->depth = sflow_actions->mpls_stack_depth;
1192 stack->stack = mpls_lse_buf;
1193 for (ii = 0; ii < stack->depth; ii++) {
1194 lse = sflow_actions->mpls_lse[stack->depth - ii - 1];
1195 stack->stack[ii] = (lse & ~MPLS_BOS_MASK);
1196 }
1197 stack->stack[stack->depth - 1] |= MPLS_BOS_MASK;
1198 }
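
/* Worked example (illustrative labels): if the actions pushed label A and
 * then label B, dpif_sflow_push_mpls_lse() stored them as
 *     mpls_lse[0] = A, mpls_lse[1] = B     (B being the outermost label)
 * and this function emits them in packet-header order as
 *     stack[0] = B, stack[1] = A | MPLS_BOS_MASK. */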
1199
1200 /* Extract the output port count from the user action cookie.
1201 * See http://sflow.org/sflow_version_5.txt "Input/Output port information"
1202 */
1203 static uint32_t
1204 dpif_sflow_cookie_num_outputs(const union user_action_cookie *cookie)
1205 {
1206 uint32_t format = cookie->sflow.output & 0xC0000000;
1207 uint32_t port_n = cookie->sflow.output & 0x3FFFFFFF;
1208 if (format == 0) {
1209 return port_n ? 1 : 0;
1210 }
1211 else if (format == 0x80000000) {
1212 return port_n;
1213 }
1214 return 0;
1215 }
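
/* Illustrative cookie->sflow.output encodings handled above (the top two
 * bits select the format, per "Input/Output port information" in sFlow
 * version 5):
 *     0x00000007  format 0, single output port 7        -> 1 output
 *     0x00000000  format 0, no output port              -> 0 outputs
 *     0x80000003  format 2, multiple ports, count 3     -> 3 outputs */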
1216
1217 void
1218 dpif_sflow_received(struct dpif_sflow *ds, const struct dp_packet *packet,
1219 const struct flow *flow, odp_port_t odp_in_port,
1220 const union user_action_cookie *cookie,
1221 const struct dpif_sflow_actions *sflow_actions)
1222 OVS_EXCLUDED(mutex)
1223 {
1224 SFL_FLOW_SAMPLE_TYPE fs;
1225 SFLFlow_sample_element hdrElem;
1226 SFLSampled_header *header;
1227 SFLFlow_sample_element switchElem;
1228 uint8_t tnlInProto, tnlOutProto;
1229 SFLFlow_sample_element tnlInElem, tnlOutElem;
1230 SFLFlow_sample_element vniInElem, vniOutElem;
1231 SFLFlow_sample_element mplsElem;
1232 uint32_t mpls_lse_buf[FLOW_MAX_MPLS_LABELS];
1233 SFLSampler *sampler;
1234 struct dpif_sflow_port *in_dsp;
1235 struct dpif_sflow_port *out_dsp;
1236 ovs_be16 vlan_tci;
1237
1238 ovs_mutex_lock(&mutex);
1239 sampler = ds->sflow_agent->samplers;
1240 if (!sampler) {
1241 goto out;
1242 }
1243
1244 /* Build a flow sample. */
1245 memset(&fs, 0, sizeof fs);
1246
1247 /* Look up the input ifIndex if this port has one. Otherwise just
1248 * leave it as 0 (meaning 'unknown') and continue. */
1249 in_dsp = dpif_sflow_find_port(ds, odp_in_port);
1250 if (in_dsp) {
1251 fs.input = SFL_DS_INDEX(in_dsp->dsi);
1252 }
1253
1254 /* Make the assumption that the random number generator in the datapath converges
1255 * to the configured mean, and just increment the samplePool by the configured
1256 * sampling rate every time. */
1257 sampler->samplePool += sfl_sampler_get_sFlowFsPacketSamplingRate(sampler);
1258
1259 /* Sampled header. */
1260 memset(&hdrElem, 0, sizeof hdrElem);
1261 hdrElem.tag = SFLFLOW_HEADER;
1262 header = &hdrElem.flowType.header;
1263 header->header_protocol = SFLHEADER_ETHERNET_ISO8023;
1264 /* The frame_length should include the Ethernet FCS (4 bytes),
1265 * but it has already been stripped, so we need to add 4 here. */
1266 header->frame_length = dp_packet_size(packet) + 4;
1267 /* Ethernet FCS stripped off. */
1268 header->stripped = 4;
1269 header->header_length = MIN(dp_packet_size(packet),
1270 sampler->sFlowFsMaximumHeaderSize);
1271 header->header_bytes = dp_packet_data(packet);
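
    /* Worked example (illustrative sizes): for a 1514-byte frame with
     * sampler->sFlowFsMaximumHeaderSize == 128, the sample reports
     * frame_length = 1518 (the stripped 4-byte FCS added back),
     * stripped = 4, and header_length = 128. */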
1272
1273 /* Add extended switch element. */
1274 memset(&switchElem, 0, sizeof(switchElem));
1275 switchElem.tag = SFLFLOW_EX_SWITCH;
1276 switchElem.flowType.sw.src_vlan = vlan_tci_to_vid(flow->vlan_tci);
1277 switchElem.flowType.sw.src_priority = vlan_tci_to_pcp(flow->vlan_tci);
1278
1279 /* Retrieve data from user_action_cookie. */
1280 vlan_tci = cookie->sflow.vlan_tci;
1281 switchElem.flowType.sw.dst_vlan = vlan_tci_to_vid(vlan_tci);
1282 switchElem.flowType.sw.dst_priority = vlan_tci_to_pcp(vlan_tci);
1283
1284 fs.output = cookie->sflow.output;
1285
1286 /* Input tunnel. */
1287 if (flow->tunnel.ip_dst) {
1288 memset(&tnlInElem, 0, sizeof(tnlInElem));
1289 tnlInElem.tag = SFLFLOW_EX_IPV4_TUNNEL_INGRESS;
1290 tnlInProto = in_dsp ? dpif_sflow_tunnel_proto(in_dsp->tunnel_type) : 0;
1291 dpif_sflow_tunnel_v4(tnlInProto,
1292 &flow->tunnel,
1293 &tnlInElem.flowType.ipv4);
1294 SFLADD_ELEMENT(&fs, &tnlInElem);
1295 if (flow->tunnel.tun_id) {
1296 memset(&vniInElem, 0, sizeof(vniInElem));
1297 vniInElem.tag = SFLFLOW_EX_VNI_INGRESS;
1298 vniInElem.flowType.tunnel_vni.vni
1299 = ntohll(flow->tunnel.tun_id);
1300 SFLADD_ELEMENT(&fs, &vniInElem);
1301 }
1302 }
1303
1304 /* Output tunnel. */
1305 if (sflow_actions
1306 && sflow_actions->encap_depth == 1
1307 && !sflow_actions->tunnel_err
1308 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1309 tnlOutProto = sflow_actions->tunnel_ipproto;
1310 if (tnlOutProto == 0) {
1311 /* Try to infer the ip-protocol from the output port. */
1312 if (sflow_actions->out_port != ODPP_NONE) {
1313 out_dsp = dpif_sflow_find_port(ds, sflow_actions->out_port);
1314 if (out_dsp) {
1315 tnlOutProto = dpif_sflow_tunnel_proto(out_dsp->tunnel_type);
1316 }
1317 }
1318 }
1319 memset(&tnlOutElem, 0, sizeof(tnlOutElem));
1320 tnlOutElem.tag = SFLFLOW_EX_IPV4_TUNNEL_EGRESS;
1321 dpif_sflow_tunnel_v4(tnlOutProto,
1322 &sflow_actions->tunnel,
1323 &tnlOutElem.flowType.ipv4);
1324 SFLADD_ELEMENT(&fs, &tnlOutElem);
1325 if (sflow_actions->tunnel.tun_id) {
1326 memset(&vniOutElem, 0, sizeof(vniOutElem));
1327 vniOutElem.tag = SFLFLOW_EX_VNI_EGRESS;
1328 vniOutElem.flowType.tunnel_vni.vni
1329 = ntohll(sflow_actions->tunnel.tun_id);
1330 SFLADD_ELEMENT(&fs, &vniOutElem);
1331 }
1332 }
1333
1334 /* MPLS output label stack. */
1335 if (sflow_actions
1336 && sflow_actions->mpls_stack_depth > 0
1337 && !sflow_actions->mpls_err
1338 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1339 memset(&mplsElem, 0, sizeof(mplsElem));
1340 mplsElem.tag = SFLFLOW_EX_MPLS;
1341 dpif_sflow_encode_mpls_stack(&mplsElem.flowType.mpls.out_stack,
1342 mpls_lse_buf,
1343 sflow_actions);
1344 SFLADD_ELEMENT(&fs, &mplsElem);
1345 }
1346
1347 /* Submit the flow sample to be encoded into the next datagram. */
1348 SFLADD_ELEMENT(&fs, &hdrElem);
1349 SFLADD_ELEMENT(&fs, &switchElem);
1350 sfl_sampler_writeFlowSample(sampler, &fs);
1351
1352 out:
1353 ovs_mutex_unlock(&mutex);
1354 }
1355
1356 void
1357 dpif_sflow_run(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1358 {
1359 ovs_mutex_lock(&mutex);
1360 if (ds->collectors != NULL) {
1361 time_t now = time_now();
1362 route_table_run();
1363 if (now >= ds->next_tick) {
1364 sfl_agent_tick(ds->sflow_agent, time_wall());
1365 ds->next_tick = now + 1;
1366 }
1367 }
1368 ovs_mutex_unlock(&mutex);
1369 }
1370
1371 void
1372 dpif_sflow_wait(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1373 {
1374 ovs_mutex_lock(&mutex);
1375 if (ds->collectors != NULL) {
1376 poll_timer_wait_until(ds->next_tick * 1000LL);
1377 }
1378 ovs_mutex_unlock(&mutex);
1379 }