]> git.proxmox.com Git - mirror_ovs.git/blob - ofproto/ofproto-dpif-sflow.c
Add connection tracking mark support.
[mirror_ovs.git] / ofproto / ofproto-dpif-sflow.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 * Copyright (c) 2009 InMon Corp.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <config.h>
19 #include "ofproto-dpif-sflow.h"
20 #include <inttypes.h>
21 #include <sys/resource.h>
22 #include <sys/socket.h>
23 #include <net/if.h>
24 #include <stdlib.h>
25 #include "collectors.h"
26 #include "compiler.h"
27 #include "dpif.h"
28 #include "hash.h"
29 #include "hmap.h"
30 #include "netdev.h"
31 #include "netlink.h"
32 #include "ofpbuf.h"
33 #include "ofproto.h"
34 #include "packets.h"
35 #include "poll-loop.h"
36 #include "ovs-router.h"
37 #include "route-table.h"
38 #include "sflow_api.h"
39 #include "socket-util.h"
40 #include "timeval.h"
41 #include "openvswitch/vlog.h"
42 #include "lib/odp-util.h"
43 #include "lib/unaligned.h"
44 #include "ofproto-provider.h"
45 #include "lacp.h"
46
47 VLOG_DEFINE_THIS_MODULE(sflow);
48
/* Serializes access to all sFlow state in this module.  Initialized as a
 * recursive mutex in dpif_sflow_create(). */
static struct ovs_mutex mutex;

/* This global var is used to determine which sFlow
   sub-agent should send the datapath counters. */
#define SFLOW_GC_SUBID_UNCLAIMED (uint32_t)-1
static uint32_t sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
55
/*
 * Tunnel port types that sFlow export recognizes.  Used to choose the IP
 * protocol number of the encapsulation (see dpif_sflow_tunnel_proto()).
 */
enum dpif_sflow_tunnel_type {
    DPIF_SFLOW_TUNNEL_UNKNOWN = 0,  /* Not a recognized tunnel netdev type. */
    DPIF_SFLOW_TUNNEL_VXLAN,
    DPIF_SFLOW_TUNNEL_GRE,
    DPIF_SFLOW_TUNNEL_LISP,
    DPIF_SFLOW_TUNNEL_IPSEC_GRE,
    DPIF_SFLOW_TUNNEL_GENEVE
};
67
/* Per-port sFlow state: ties a datapath port to its ofport and to the sFlow
 * library's data source for it. */
struct dpif_sflow_port {
    struct hmap_node hmap_node; /* In struct dpif_sflow's "ports" hmap. */
    SFLDataSource_instance dsi; /* sFlow library's notion of port number. */
    struct ofport *ofport;      /* To retrieve port stats. */
    odp_port_t odp_port;        /* Datapath port number (hmap key). */
    enum dpif_sflow_tunnel_type tunnel_type; /* Tunnel type, or UNKNOWN. */
};
75
/* sFlow state for one bridge.  Reference counted (see dpif_sflow_ref() and
 * dpif_sflow_unref()); all fields protected by the module 'mutex'. */
struct dpif_sflow {
    struct collectors *collectors;  /* Open sockets to targets; NULL when
                                     * sFlow is disabled. */
    SFLAgent *sflow_agent;          /* sFlow library agent, or NULL. */
    struct ofproto_sflow_options *options; /* Active options, or NULL. */
    time_t next_tick;               /* Next periodic tick time (set in
                                     * dpif_sflow_create()). */
    size_t n_flood, n_all;
    struct hmap ports;              /* Contains "struct dpif_sflow_port"s. */
    uint32_t probability;           /* Sampling probability; 0 = sampling
                                     * off (see dpif_sflow_get_probability()). */
    struct ovs_refcount ref_cnt;
};
86
static void dpif_sflow_del_port__(struct dpif_sflow *,
                                  struct dpif_sflow_port *);

/* Index of the single receiver registered with the sFlow agent. */
#define RECEIVER_INDEX 1

/* Rate limit for configuration warnings (1 per second, burst of 5). */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
93
/* Returns true if 'a' and 'b' are equal, treating two NULL pointers as
 * equal and a NULL pointer as unequal to any string. */
static bool
nullable_string_is_equal(const char *a, const char *b)
{
    if (!a || !b) {
        return a == b;
    }
    return !strcmp(a, b);
}
99
100 static bool
101 ofproto_sflow_options_equal(const struct ofproto_sflow_options *a,
102 const struct ofproto_sflow_options *b)
103 {
104 return (sset_equals(&a->targets, &b->targets)
105 && a->sampling_rate == b->sampling_rate
106 && a->polling_interval == b->polling_interval
107 && a->header_len == b->header_len
108 && a->sub_id == b->sub_id
109 && nullable_string_is_equal(a->agent_device, b->agent_device)
110 && nullable_string_is_equal(a->control_ip, b->control_ip));
111 }
112
113 static struct ofproto_sflow_options *
114 ofproto_sflow_options_clone(const struct ofproto_sflow_options *old)
115 {
116 struct ofproto_sflow_options *new = xmemdup(old, sizeof *old);
117 sset_clone(&new->targets, &old->targets);
118 new->agent_device = old->agent_device ? xstrdup(old->agent_device) : NULL;
119 new->control_ip = old->control_ip ? xstrdup(old->control_ip) : NULL;
120 return new;
121 }
122
123 static void
124 ofproto_sflow_options_destroy(struct ofproto_sflow_options *options)
125 {
126 if (options) {
127 sset_destroy(&options->targets);
128 free(options->agent_device);
129 free(options->control_ip);
130 free(options);
131 }
132 }
133
/* sFlow library callback to allocate memory.  Returns zeroed storage;
 * xzalloc() aborts on failure, so this never returns NULL. */
static void *
sflow_agent_alloc_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
                     size_t bytes)
{
    return xzalloc(bytes);
}
141
/* sFlow library callback to free memory obtained via sflow_agent_alloc_cb().
 * Always returns 0. */
static int
sflow_agent_free_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
                    void *obj)
{
    free(obj);
    return 0;
}
150
/* sFlow library callback to report an error: forwards the library's message
 * 'msg' to the OVS log at warning level. */
static void
sflow_agent_error_cb(void *magic OVS_UNUSED, SFLAgent *agent OVS_UNUSED,
                     char *msg)
{
    VLOG_WARN("sFlow agent error: %s", msg);
}
158
159 /* sFlow library callback to send datagram. */
160 static void
161 sflow_agent_send_packet_cb(void *ds_, SFLAgent *agent OVS_UNUSED,
162 SFLReceiver *receiver OVS_UNUSED, u_char *pkt,
163 uint32_t pktLen)
164 {
165 struct dpif_sflow *ds = ds_;
166 collectors_send(ds->collectors, pkt, pktLen);
167 }
168
/* Returns the tracked port with datapath port number 'odp_port' in 'ds', or
 * NULL if that port is not tracked.  Searches only the hash bucket that
 * 'odp_port' maps to. */
static struct dpif_sflow_port *
dpif_sflow_find_port(const struct dpif_sflow *ds, odp_port_t odp_port)
    OVS_REQUIRES(mutex)
{
    struct dpif_sflow_port *dsp;

    HMAP_FOR_EACH_IN_BUCKET (dsp, hmap_node, hash_odp_port(odp_port),
                             &ds->ports) {
        if (dsp->odp_port == odp_port) {
            return dsp;
        }
    }
    return NULL;
}
183
/* Call to get the datapath stats. Modeled after the dpctl utility.
 *
 * It might be more efficient for this module to be given a handle it can use
 * to get these stats more efficiently, but this is only going to be called
 * once every 20-30 seconds. Return number of datapaths found (normally expect
 * 1).
 *
 * Sums the per-datapath counters of every datapath of every type into
 * '*dp_totals', which is zeroed first.  Datapaths that cannot be opened or
 * queried are simply skipped (and not counted). */
static int
sflow_get_dp_stats(struct dpif_sflow *ds OVS_UNUSED,
                   struct dpif_dp_stats *dp_totals)
{
    struct sset types;
    const char *type;
    int count = 0;

    memset(dp_totals, 0, sizeof *dp_totals);
    sset_init(&types);
    dp_enumerate_types(&types);
    SSET_FOR_EACH (type, &types) {
        struct sset names;
        const char *name;
        sset_init(&names);
        if (dp_enumerate_names(type, &names) == 0) {
            SSET_FOR_EACH (name, &names) {
                struct dpif *dpif;
                if (dpif_open(name, type, &dpif) == 0) {
                    struct dpif_dp_stats dp_stats;
                    if (dpif_get_dp_stats(dpif, &dp_stats) == 0) {
                        count++;
                        dp_totals->n_hit += dp_stats.n_hit;
                        dp_totals->n_missed += dp_stats.n_missed;
                        dp_totals->n_lost += dp_stats.n_lost;
                        dp_totals->n_flows += dp_stats.n_flows;
                        dp_totals->n_mask_hit += dp_stats.n_mask_hit;
                        dp_totals->n_masks += dp_stats.n_masks;
                    }
                    dpif_close(dpif);
                }
            }
            sset_destroy(&names);
        }
    }
    sset_destroy(&types);
    return count;
}
228
229 /* If there are multiple bridges defined then we need some
230 minimal artibration to decide which one should send the
231 global counters. This function allows each sub-agent to
232 ask if he should do it or not. */
233 static bool
234 sflow_global_counters_subid_test(uint32_t subid)
235 OVS_REQUIRES(mutex)
236 {
237 if (sflow_global_counters_subid == SFLOW_GC_SUBID_UNCLAIMED) {
238 /* The role is up for grabs. */
239 sflow_global_counters_subid = subid;
240 }
241 return (sflow_global_counters_subid == subid);
242 }
243
244 static void
245 sflow_global_counters_subid_clear(uint32_t subid)
246 OVS_REQUIRES(mutex)
247 {
248 if (sflow_global_counters_subid == subid) {
249 /* The sub-agent that was sending global counters
250 is going away, so reset to allow another
251 to take over. */
252 sflow_global_counters_subid = SFLOW_GC_SUBID_UNCLAIMED;
253 }
254 }
255
/* sFlow library callback: fills counter sample 'cs' with bridge-wide
 * counters (datapath cache statistics and process resource usage) and hands
 * it to the library.  Only the sub-agent that holds the global-counters role
 * reports; others return immediately. */
static void
sflow_agent_get_global_counters(void *ds_, SFLPoller *poller,
                                SFL_COUNTERS_SAMPLE_TYPE *cs)
    OVS_REQUIRES(mutex)
{
    struct dpif_sflow *ds = ds_;
    SFLCounters_sample_element dp_elem, res_elem;
    struct dpif_dp_stats dp_totals;
    struct rusage usage;

    if (!sflow_global_counters_subid_test(poller->agent->subId)) {
        /* Another sub-agent is currently responsible for this. */
        return;
    }

    /* datapath stats: only added if at least one datapath was found. */
    if (sflow_get_dp_stats(ds, &dp_totals)) {
        dp_elem.tag = SFLCOUNTERS_OVSDP;
        dp_elem.counterBlock.ovsdp.n_hit = dp_totals.n_hit;
        dp_elem.counterBlock.ovsdp.n_missed = dp_totals.n_missed;
        dp_elem.counterBlock.ovsdp.n_lost = dp_totals.n_lost;
        dp_elem.counterBlock.ovsdp.n_mask_hit = dp_totals.n_mask_hit;
        dp_elem.counterBlock.ovsdp.n_flows = dp_totals.n_flows;
        dp_elem.counterBlock.ovsdp.n_masks = dp_totals.n_masks;
        SFLADD_ELEMENT(cs, &dp_elem);
    }

    /* resource usage of this process (ru_maxrss is in kilobytes) */
    getrusage(RUSAGE_SELF, &usage);
    res_elem.tag = SFLCOUNTERS_APP_RESOURCES;
    res_elem.counterBlock.appResources.user_time
        = timeval_to_msec(&usage.ru_utime);
    res_elem.counterBlock.appResources.system_time
        = timeval_to_msec(&usage.ru_stime);
    res_elem.counterBlock.appResources.mem_used = (usage.ru_maxrss * 1024);
    /* Gauges we cannot measure are explicitly marked "undefined". */
    SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.mem_max);
    SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_open);
    SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.fd_max);
    SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_open);
    SFL_UNDEF_GAUGE(res_elem.counterBlock.appResources.conn_max);

    SFLADD_ELEMENT(cs, &res_elem);
    sfl_poller_writeCountersSample(poller, cs);
}
300
/* sFlow library callback: fills counter sample 'cs' for the bridge port that
 * 'poller' covers (generic interface counters, plus LACP, port-name, and
 * OpenFlow-port elements) and hands it to the library for encoding.  Does
 * nothing if the port is no longer tracked by 'ds'. */
static void
sflow_agent_get_counters(void *ds_, SFLPoller *poller,
                         SFL_COUNTERS_SAMPLE_TYPE *cs)
    OVS_REQUIRES(mutex)
{
    struct dpif_sflow *ds = ds_;
    SFLCounters_sample_element elem, lacp_elem, of_elem, name_elem;
    enum netdev_features current;
    struct dpif_sflow_port *dsp;
    SFLIf_counters *counters;
    struct netdev_stats stats;
    enum netdev_flags flags;
    struct lacp_slave_stats lacp_stats;
    const char *ifName;

    /* The poller carries the datapath port number (set in
     * dpif_sflow_add_poller()); map it back to the tracked port. */
    dsp = dpif_sflow_find_port(ds, u32_to_odp(poller->bridgePort));
    if (!dsp) {
        return;
    }

    elem.tag = SFLCOUNTERS_GENERIC;
    counters = &elem.counterBlock.generic;
    counters->ifIndex = SFL_DS_INDEX(poller->dsi);
    counters->ifType = 6;               /* ethernetCsmacd (IANA ifType). */
    if (!netdev_get_features(dsp->ofport->netdev, &current, NULL, NULL, NULL)) {
        /* The values of ifDirection come from MAU MIB (RFC 2668): 0 = unknown,
           1 = full-duplex, 2 = half-duplex, 3 = in, 4=out */
        counters->ifSpeed = netdev_features_to_bps(current, 0);
        counters->ifDirection = (netdev_features_is_full_duplex(current)
                                 ? 1 : 2);
    } else {
        /* Features unavailable: fall back to 100 Mbps, direction unknown. */
        counters->ifSpeed = 100000000;
        counters->ifDirection = 0;
    }
    if (!netdev_get_flags(dsp->ofport->netdev, &flags) && flags & NETDEV_UP) {
        counters->ifStatus = 1; /* ifAdminStatus up. */
        if (netdev_get_carrier(dsp->ofport->netdev)) {
            counters->ifStatus |= 2; /* ifOperStatus us. */
        }
    } else {
        counters->ifStatus = 0; /* Down. */
    }

    /* XXX
       1. Is the multicast counter filled in?
       2. Does the multicast counter include broadcasts?
       3. Does the rx_packets counter include multicasts/broadcasts?
    */
    ofproto_port_get_stats(dsp->ofport, &stats);
    counters->ifInOctets = stats.rx_bytes;
    counters->ifInUcastPkts = stats.rx_packets;
    counters->ifInMulticastPkts = stats.multicast;
    counters->ifInBroadcastPkts = -1;   /* -1 means "not supported" here. */
    counters->ifInDiscards = stats.rx_dropped;
    counters->ifInErrors = stats.rx_errors;
    counters->ifInUnknownProtos = -1;
    counters->ifOutOctets = stats.tx_bytes;
    counters->ifOutUcastPkts = stats.tx_packets;
    counters->ifOutMulticastPkts = -1;
    counters->ifOutBroadcastPkts = -1;
    counters->ifOutDiscards = stats.tx_dropped;
    counters->ifOutErrors = stats.tx_errors;
    counters->ifPromiscuousMode = 0;

    SFLADD_ELEMENT(cs, &elem);

    /* Include LACP counters and identifiers if this port is part of a LAG. */
    if (ofproto_port_get_lacp_stats(dsp->ofport, &lacp_stats) == 0) {
        memset(&lacp_elem, 0, sizeof lacp_elem);
        lacp_elem.tag = SFLCOUNTERS_LACP;
        lacp_elem.counterBlock.lacp.actorSystemID =
            lacp_stats.dot3adAggPortActorSystemID;
        lacp_elem.counterBlock.lacp.partnerSystemID =
            lacp_stats.dot3adAggPortPartnerOperSystemID;
        lacp_elem.counterBlock.lacp.attachedAggID =
            lacp_stats.dot3adAggPortAttachedAggID;
        lacp_elem.counterBlock.lacp.portState.v.actorAdmin =
            lacp_stats.dot3adAggPortActorAdminState;
        lacp_elem.counterBlock.lacp.portState.v.actorOper =
            lacp_stats.dot3adAggPortActorOperState;
        lacp_elem.counterBlock.lacp.portState.v.partnerAdmin =
            lacp_stats.dot3adAggPortPartnerAdminState;
        lacp_elem.counterBlock.lacp.portState.v.partnerOper =
            lacp_stats.dot3adAggPortPartnerOperState;
        lacp_elem.counterBlock.lacp.LACPDUsRx =
            lacp_stats.dot3adAggPortStatsLACPDUsRx;
        /* Counters OVS does not track are marked "undefined". */
        SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsRx);
        SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsRx);
        SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.unknownRx);
        lacp_elem.counterBlock.lacp.illegalRx =
            lacp_stats.dot3adAggPortStatsIllegalRx;
        lacp_elem.counterBlock.lacp.LACPDUsTx =
            lacp_stats.dot3adAggPortStatsLACPDUsTx;
        SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerPDUsTx);
        SFL_UNDEF_COUNTER(lacp_elem.counterBlock.lacp.markerResponsePDUsTx);
        SFLADD_ELEMENT(cs, &lacp_elem);
    }

    /* Include Port name. */
    if ((ifName = netdev_get_name(dsp->ofport->netdev)) != NULL) {
        memset(&name_elem, 0, sizeof name_elem);
        name_elem.tag = SFLCOUNTERS_PORTNAME;
        name_elem.counterBlock.portName.portName.str = (char *)ifName;
        name_elem.counterBlock.portName.portName.len = strlen(ifName);
        SFLADD_ELEMENT(cs, &name_elem);
    }

    /* Include OpenFlow DPID and openflow port number. */
    memset(&of_elem, 0, sizeof of_elem);
    of_elem.tag = SFLCOUNTERS_OPENFLOWPORT;
    of_elem.counterBlock.ofPort.datapath_id =
        ofproto_get_datapath_id(dsp->ofport->ofproto);
    of_elem.counterBlock.ofPort.port_no =
        (OVS_FORCE uint32_t)dsp->ofport->ofp_port;
    SFLADD_ELEMENT(cs, &of_elem);

    sfl_poller_writeCountersSample(poller, cs);
}
419
/* Obtains an address to use for the local sFlow agent and stores it into
 * '*agent_addr'. Returns true if successful, false on failure.
 *
 * The sFlow agent address should be a local IP address that is persistent and
 * reachable over the network, if possible. The IP address associated with
 * 'agent_device' is used if it has one, and otherwise 'control_ip', the IP
 * address used to talk to the controller. If the agent device is not
 * specified then it is figured out by taking a look at the routing table based
 * on 'targets'. */
static bool
sflow_choose_agent_address(const char *agent_device,
                           const struct sset *targets,
                           const char *control_ip,
                           SFLAddress *agent_addr)
{
    const char *target;
    struct in_addr in4;

    memset(agent_addr, 0, sizeof *agent_addr);
    agent_addr->type = SFLADDRESSTYPE_IP_V4;

    /* First choice: the explicitly configured agent device. */
    if (agent_device) {
        if (!netdev_get_in4_by_name(agent_device, &in4)) {
            goto success;
        }
    }

    /* Second choice: the device that routes to an IPv4 collector target. */
    SSET_FOR_EACH (target, targets) {
        union {
            struct sockaddr_storage ss;
            struct sockaddr_in sin;
        } sa;
        char name[IFNAMSIZ];

        if (inet_parse_active(target, SFL_DEFAULT_COLLECTOR_PORT, &sa.ss)
            && sa.ss.ss_family == AF_INET) {
            ovs_be32 gw;

            if (ovs_router_lookup4(sa.sin.sin_addr.s_addr, name, &gw)
                && !netdev_get_in4_by_name(name, &in4)) {
                goto success;
            }
        }
    }

    /* Last resort: the IP address used to reach the controller. */
    if (control_ip && !lookup_ip(control_ip, &in4)) {
        goto success;
    }

    VLOG_ERR("could not determine IP address for sFlow agent");
    return false;

success:
    agent_addr->address.ip_v4.addr = (OVS_FORCE uint32_t) in4.s_addr;
    return true;
}
476
/* Discards all sFlow configuration for 'ds': releases the agent (giving up
 * the global-counters role if this sub-agent held it), closes the
 * collectors, frees the saved options, and turns sampling off.  The tracked
 * ports themselves are retained. */
static void
dpif_sflow_clear__(struct dpif_sflow *ds) OVS_REQUIRES(mutex)
{
    if (ds->sflow_agent) {
        sflow_global_counters_subid_clear(ds->sflow_agent->subId);
        sfl_agent_release(ds->sflow_agent);
        free(ds->sflow_agent);
        ds->sflow_agent = NULL;
    }
    collectors_destroy(ds->collectors);
    ds->collectors = NULL;
    ofproto_sflow_options_destroy(ds->options);
    ds->options = NULL;

    /* Turn off sampling to save CPU cycles. */
    ds->probability = 0;
}
494
/* Public wrapper for dpif_sflow_clear__() that takes the module mutex. */
void
dpif_sflow_clear(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
{
    ovs_mutex_lock(&mutex);
    dpif_sflow_clear__(ds);
    ovs_mutex_unlock(&mutex);
}
502
503 bool
504 dpif_sflow_is_enabled(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
505 {
506 bool enabled;
507
508 ovs_mutex_lock(&mutex);
509 enabled = ds->collectors != NULL;
510 ovs_mutex_unlock(&mutex);
511 return enabled;
512 }
513
514 struct dpif_sflow *
515 dpif_sflow_create(void)
516 {
517 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
518 struct dpif_sflow *ds;
519
520 if (ovsthread_once_start(&once)) {
521 ovs_mutex_init_recursive(&mutex);
522 ovsthread_once_done(&once);
523 }
524
525 ds = xcalloc(1, sizeof *ds);
526 ds->next_tick = time_now() + 1;
527 hmap_init(&ds->ports);
528 ds->probability = 0;
529 ovs_refcount_init(&ds->ref_cnt);
530
531 return ds;
532 }
533
534 struct dpif_sflow *
535 dpif_sflow_ref(const struct dpif_sflow *ds_)
536 {
537 struct dpif_sflow *ds = CONST_CAST(struct dpif_sflow *, ds_);
538 if (ds) {
539 ovs_refcount_ref(&ds->ref_cnt);
540 }
541 return ds;
542 }
543
544 /* 32-bit fraction of packets to sample with. A value of 0 samples no packets,
545 * a value of %UINT32_MAX samples all packets and intermediate values sample
546 * intermediate fractions of packets. */
547 uint32_t
548 dpif_sflow_get_probability(const struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
549 {
550 uint32_t probability;
551 ovs_mutex_lock(&mutex);
552 probability = ds->probability;
553 ovs_mutex_unlock(&mutex);
554 return probability;
555 }
556
/* Releases a reference to 'ds'.  On the last reference, tears down all sFlow
 * configuration, frees every tracked port, and frees 'ds' itself. */
void
dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
{
    if (ds && ovs_refcount_unref_relaxed(&ds->ref_cnt) == 1) {
        struct dpif_sflow_port *dsp, *next;

        dpif_sflow_clear(ds);
        /* SAFE variant: dpif_sflow_del_port__() removes nodes as we go. */
        HMAP_FOR_EACH_SAFE (dsp, next, hmap_node, &ds->ports) {
            dpif_sflow_del_port__(ds, dsp);
        }
        hmap_destroy(&ds->ports);
        free(ds);
    }
}
571
/* Registers a counter poller with the sFlow agent for port 'dsp', using the
 * configured polling interval, and records the datapath port number on the
 * poller so sflow_agent_get_counters() can map it back to the port. */
static void
dpif_sflow_add_poller(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
    OVS_REQUIRES(mutex)
{
    SFLPoller *poller = sfl_agent_addPoller(ds->sflow_agent, &dsp->dsi, ds,
                                            sflow_agent_get_counters);
    sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
    sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);
    sfl_poller_set_bridgePort(poller, odp_to_u32(dsp->odp_port));
}
582
583 static enum dpif_sflow_tunnel_type
584 dpif_sflow_tunnel_type(struct ofport *ofport) {
585 const char *type = netdev_get_type(ofport->netdev);
586 if (type) {
587 if (strcmp(type, "gre") == 0) {
588 return DPIF_SFLOW_TUNNEL_GRE;
589 } else if (strcmp(type, "ipsec_gre") == 0) {
590 return DPIF_SFLOW_TUNNEL_IPSEC_GRE;
591 } else if (strcmp(type, "vxlan") == 0) {
592 return DPIF_SFLOW_TUNNEL_VXLAN;
593 } else if (strcmp(type, "lisp") == 0) {
594 return DPIF_SFLOW_TUNNEL_LISP;
595 } else if (strcmp(type, "geneve") == 0) {
596 return DPIF_SFLOW_TUNNEL_GENEVE;
597 }
598 }
599 return DPIF_SFLOW_TUNNEL_UNKNOWN;
600 }
601
602 static uint8_t
603 dpif_sflow_tunnel_proto(enum dpif_sflow_tunnel_type tunnel_type)
604 {
605 /* Default to 0 (IPPROTO_IP), meaning "unknown". */
606 uint8_t ipproto = 0;
607 switch(tunnel_type) {
608
609 case DPIF_SFLOW_TUNNEL_GRE:
610 ipproto = IPPROTO_GRE;
611 break;
612
613 case DPIF_SFLOW_TUNNEL_IPSEC_GRE:
614 ipproto = IPPROTO_ESP;
615 break;
616
617 case DPIF_SFLOW_TUNNEL_VXLAN:
618 case DPIF_SFLOW_TUNNEL_LISP:
619 case DPIF_SFLOW_TUNNEL_GENEVE:
620 ipproto = IPPROTO_UDP;
621
622 case DPIF_SFLOW_TUNNEL_UNKNOWN:
623 break;
624 }
625 return ipproto;
626 }
627
/* Starts tracking 'ofport' (datapath port number 'odp_port') in 'ds',
 * replacing any existing entry for the same datapath port.  Ports with
 * neither an ifindex nor a recognized tunnel type are not tracked at all.
 * A counter poller is added only for ifindex ports, and only if the sFlow
 * agent already exists (otherwise dpif_sflow_set_options() adds it later). */
void
dpif_sflow_add_port(struct dpif_sflow *ds, struct ofport *ofport,
                    odp_port_t odp_port) OVS_EXCLUDED(mutex)
{
    struct dpif_sflow_port *dsp;
    int ifindex;
    enum dpif_sflow_tunnel_type tunnel_type;

    ovs_mutex_lock(&mutex);
    /* Remove any stale entry for this datapath port first. */
    dpif_sflow_del_port(ds, odp_port);

    tunnel_type = dpif_sflow_tunnel_type(ofport);
    ifindex = netdev_get_ifindex(ofport->netdev);

    if (ifindex <= 0
        && tunnel_type == DPIF_SFLOW_TUNNEL_UNKNOWN) {
        /* Not an ifindex port, and not a tunnel port either
         * so do not add a cross-reference to it here.
         */
        goto out;
    }

    /* Add to table of ports. */
    dsp = xmalloc(sizeof *dsp);
    dsp->ofport = ofport;
    dsp->odp_port = odp_port;
    dsp->tunnel_type = tunnel_type;
    hmap_insert(&ds->ports, &dsp->hmap_node, hash_odp_port(odp_port));

    if (ifindex > 0) {
        /* Add poller for ports that have ifindex. */
        SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, ifindex, 0);
        if (ds->sflow_agent) {
            dpif_sflow_add_poller(ds, dsp);
        }
    } else {
        /* Record "ifindex unknown" for the others */
        SFL_DS_SET(dsp->dsi, SFL_DSCLASS_IFINDEX, 0, 0);
    }

out:
    ovs_mutex_unlock(&mutex);
}
671
/* Removes 'dsp' from 'ds' and frees it.  If the sFlow agent exists and the
 * port has a real (nonzero) ifindex-based data source, its poller and
 * sampler are removed from the agent first. */
static void
dpif_sflow_del_port__(struct dpif_sflow *ds, struct dpif_sflow_port *dsp)
    OVS_REQUIRES(mutex)
{
    if (ds->sflow_agent
        && SFL_DS_INDEX(dsp->dsi)) {
        sfl_agent_removePoller(ds->sflow_agent, &dsp->dsi);
        sfl_agent_removeSampler(ds->sflow_agent, &dsp->dsi);
    }
    hmap_remove(&ds->ports, &dsp->hmap_node);
    free(dsp);
}
684
685 void
686 dpif_sflow_del_port(struct dpif_sflow *ds, odp_port_t odp_port)
687 OVS_EXCLUDED(mutex)
688 {
689 struct dpif_sflow_port *dsp;
690
691 ovs_mutex_lock(&mutex);
692 dsp = dpif_sflow_find_port(ds, odp_port);
693 if (dsp) {
694 dpif_sflow_del_port__(ds, dsp);
695 }
696 ovs_mutex_unlock(&mutex);
697 }
698
/* Applies 'options' to 'ds', (re)creating collectors, the sFlow agent, the
 * bridge-level sampler and pollers as needed.  Disables sFlow entirely (via
 * dpif_sflow_clear__()) when there are no targets, no sampling rate, no
 * usable collector, or no agent address can be chosen. */
void
dpif_sflow_set_options(struct dpif_sflow *ds,
                       const struct ofproto_sflow_options *options)
    OVS_EXCLUDED(mutex)
{
    struct dpif_sflow_port *dsp;
    bool options_changed;
    SFLReceiver *receiver;
    SFLAddress agentIP;
    time_t now;
    SFLDataSource_instance dsi;
    uint32_t dsIndex;
    SFLSampler *sampler;
    SFLPoller *poller;

    ovs_mutex_lock(&mutex);
    if (sset_is_empty(&options->targets) || !options->sampling_rate) {
        /* No point in doing any work if there are no targets or nothing to
         * sample. */
        dpif_sflow_clear__(ds);
        goto out;
    }

    options_changed = (!ds->options
                       || !ofproto_sflow_options_equal(options, ds->options));

    /* Configure collectors if options have changed or if we're shortchanged in
     * collectors (which indicates that opening one or more of the configured
     * collectors failed, so that we should retry). */
    if (options_changed
        || collectors_count(ds->collectors) < sset_count(&options->targets)) {
        collectors_destroy(ds->collectors);
        collectors_create(&options->targets, SFL_DEFAULT_COLLECTOR_PORT,
                          &ds->collectors);
        if (ds->collectors == NULL) {
            VLOG_WARN_RL(&rl, "no collectors could be initialized, "
                         "sFlow disabled");
            dpif_sflow_clear__(ds);
            goto out;
        }
    }

    /* Choose agent IP address and agent device (if not yet setup) */
    if (!sflow_choose_agent_address(options->agent_device,
                                    &options->targets,
                                    options->control_ip, &agentIP)) {
        dpif_sflow_clear__(ds);
        goto out;
    }

    /* Avoid reconfiguring if options didn't change. */
    if (!options_changed) {
        goto out;
    }
    ofproto_sflow_options_destroy(ds->options);
    ds->options = ofproto_sflow_options_clone(options);

    /* Create agent.  Any previous agent is released first, giving up the
     * global-counters role if this sub-agent held it. */
    VLOG_INFO("creating sFlow agent %d", options->sub_id);
    if (ds->sflow_agent) {
        sflow_global_counters_subid_clear(ds->sflow_agent->subId);
        sfl_agent_release(ds->sflow_agent);
    }
    ds->sflow_agent = xcalloc(1, sizeof *ds->sflow_agent);
    now = time_wall();
    sfl_agent_init(ds->sflow_agent,
                   &agentIP,
                   options->sub_id,
                   now,         /* Boot time. */
                   now,         /* Current time. */
                   ds,          /* Pointer supplied to callbacks. */
                   sflow_agent_alloc_cb,
                   sflow_agent_free_cb,
                   sflow_agent_error_cb,
                   sflow_agent_send_packet_cb);

    receiver = sfl_agent_addReceiver(ds->sflow_agent);
    sfl_receiver_set_sFlowRcvrOwner(receiver, "Open vSwitch sFlow");
    sfl_receiver_set_sFlowRcvrTimeout(receiver, 0xffffffff);

    /* Set the sampling_rate down in the datapath. */
    ds->probability = MAX(1, UINT32_MAX / ds->options->sampling_rate);

    /* Add a single sampler for the bridge. This appears as a PHYSICAL_ENTITY
       because it is associated with the hypervisor, and interacts with the server
       hardware directly. The sub_id is used to distinguish this sampler from
       others on other bridges within the same agent. */
    dsIndex = 1000 + options->sub_id;
    SFL_DS_SET(dsi, SFL_DSCLASS_PHYSICAL_ENTITY, dsIndex, 0);
    sampler = sfl_agent_addSampler(ds->sflow_agent, &dsi);
    sfl_sampler_set_sFlowFsPacketSamplingRate(sampler, ds->options->sampling_rate);
    sfl_sampler_set_sFlowFsMaximumHeaderSize(sampler, ds->options->header_len);
    sfl_sampler_set_sFlowFsReceiver(sampler, RECEIVER_INDEX);

    /* Add a counter poller for the bridge so we can use it to send
       global counters such as datapath cache hit/miss stats. */
    poller = sfl_agent_addPoller(ds->sflow_agent, &dsi, ds,
                                 sflow_agent_get_global_counters);
    sfl_poller_set_sFlowCpInterval(poller, ds->options->polling_interval);
    sfl_poller_set_sFlowCpReceiver(poller, RECEIVER_INDEX);

    /* Add pollers for the currently known ifindex-ports */
    HMAP_FOR_EACH (dsp, hmap_node, &ds->ports) {
        if (SFL_DS_INDEX(dsp->dsi)) {
            dpif_sflow_add_poller(ds, dsp);
        }
    }


out:
    ovs_mutex_unlock(&mutex);
}
811
812 int
813 dpif_sflow_odp_port_to_ifindex(const struct dpif_sflow *ds,
814 odp_port_t odp_port) OVS_EXCLUDED(mutex)
815 {
816 struct dpif_sflow_port *dsp;
817 int ret;
818
819 ovs_mutex_lock(&mutex);
820 dsp = dpif_sflow_find_port(ds, odp_port);
821 ret = dsp ? SFL_DS_INDEX(dsp->dsi) : 0;
822 ovs_mutex_unlock(&mutex);
823 return ret;
824 }
825
/* Copies the outer-tunnel addressing fields from 'tunnel' into the sFlow
 * IPv4 flow record 'ipv4', using 'tunnel_ipproto' as the IP protocol.  The
 * OVS_FORCE casts only strip the ovs_be type annotations; no byte-order
 * conversion is performed here. */
static void
dpif_sflow_tunnel_v4(uint8_t tunnel_ipproto,
                     const struct flow_tnl *tunnel,
                     SFLSampled_ipv4 *ipv4)

{
    ipv4->protocol = tunnel_ipproto;
    ipv4->tos = tunnel->ip_tos;
    ipv4->src_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_src;
    ipv4->dst_ip.addr = (OVS_FORCE uint32_t) tunnel->ip_dst;
    ipv4->src_port = (OVS_FORCE uint16_t) tunnel->tp_src;
    ipv4->dst_port = (OVS_FORCE uint16_t) tunnel->tp_dst;
}
839
840 static void
841 dpif_sflow_push_mpls_lse(struct dpif_sflow_actions *sflow_actions,
842 ovs_be32 lse)
843 {
844 if (sflow_actions->mpls_stack_depth >= FLOW_MAX_MPLS_LABELS) {
845 sflow_actions->mpls_err = true;
846 return;
847 }
848
849 /* Record the new lse in host-byte-order. */
850 /* BOS flag will be fixed later when we send stack to sFlow library. */
851 sflow_actions->mpls_lse[sflow_actions->mpls_stack_depth++] = ntohl(lse);
852 }
853
854 static void
855 dpif_sflow_pop_mpls_lse(struct dpif_sflow_actions *sflow_actions)
856 {
857 if (sflow_actions->mpls_stack_depth == 0) {
858 sflow_actions->mpls_err = true;
859 return;
860 }
861 sflow_actions->mpls_stack_depth--;
862 }
863
864 static void
865 dpif_sflow_set_mpls(struct dpif_sflow_actions *sflow_actions,
866 const struct ovs_key_mpls *mpls_key, int n)
867 {
868 int ii;
869 if (n > FLOW_MAX_MPLS_LABELS) {
870 sflow_actions->mpls_err = true;
871 return;
872 }
873
874 for (ii = 0; ii < n; ii++) {
875 /* Reverse stack order, and use host-byte-order for each lse. */
876 sflow_actions->mpls_lse[n - ii - 1] = ntohl(mpls_key[ii].mpls_lse);
877 }
878 sflow_actions->mpls_stack_depth = n;
879 }
880
/* Parses the prebuilt header of an OVS tunnel-push action to recover the
 * outer encapsulation fields (output port, IPv4 addressing, L4 ports, and
 * tunnel ID) into 'sflow_actions'. */
static void
sflow_read_tnl_push_action(const struct nlattr *attr,
                           struct dpif_sflow_actions *sflow_actions)
{
    /* Modeled on lib/odp-util.c: format_odp_tnl_push_header */
    const struct ovs_action_push_tnl *data = nl_attr_get(attr);
    const struct eth_header *eth = (const struct eth_header *) data->header;
    const struct ip_header *ip
        = ALIGNED_CAST(const struct ip_header *, eth + 1);

    sflow_actions->out_port = u32_to_odp(data->out_port);

    /* Ethernet. */
    /* TODO: SFlow does not currently define a MAC-in-MAC
     * encapsulation structure. We could use an extension
     * structure to report this.
     */

    /* IPv4 */
    /* Cannot assume alignment, so use the 16-bit-aligned accessors. */
    sflow_actions->tunnel.ip_src = get_16aligned_be32(&ip->ip_src);
    sflow_actions->tunnel.ip_dst = get_16aligned_be32(&ip->ip_dst);
    sflow_actions->tunnel.ip_tos = ip->ip_tos;
    sflow_actions->tunnel.ip_ttl = ip->ip_ttl;
    /* The tnl_push action can supply the ip_protocol too. */
    sflow_actions->tunnel_ipproto = ip->ip_proto;

    /* Layer 4 */
    if (data->tnl_type == OVS_VPORT_TYPE_VXLAN
        || data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
        const struct udp_header *udp = (const struct udp_header *) (ip + 1);
        sflow_actions->tunnel.tp_src = udp->udp_src;
        sflow_actions->tunnel.tp_dst = udp->udp_dst;

        /* The 24-bit VNI sits in the top bytes of a 32-bit field. */
        if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
            const struct vxlanhdr *vxh = (const struct vxlanhdr *) (udp + 1);
            uint64_t tun_id = ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8;
            sflow_actions->tunnel.tun_id = htonll(tun_id);
        } else {
            const struct genevehdr *gnh = (const struct genevehdr *) (udp + 1);
            uint64_t tun_id = ntohl(get_16aligned_be32(&gnh->vni)) >> 8;
            sflow_actions->tunnel.tun_id = htonll(tun_id);
        }
    } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
        const void *l4 = ip + 1;
        const struct gre_base_hdr *greh = (const struct gre_base_hdr *) l4;
        ovs_16aligned_be32 *options = (ovs_16aligned_be32 *)(greh + 1);
        /* The GRE key, when present, follows the optional checksum field. */
        if (greh->flags & htons(GRE_CSUM)) {
            options++;
        }
        if (greh->flags & htons(GRE_KEY)) {
            uint64_t tun_id = ntohl(get_16aligned_be32(options));
            sflow_actions->tunnel.tun_id = htonll(tun_id);
        }
    }
}
937
938 static void
939 sflow_read_set_action(const struct nlattr *attr,
940 struct dpif_sflow_actions *sflow_actions)
941 {
942 enum ovs_key_attr type = nl_attr_type(attr);
943 switch (type) {
944 case OVS_KEY_ATTR_ENCAP:
945 if (++sflow_actions->encap_depth > 1) {
946 /* Do not handle multi-encap for now. */
947 sflow_actions->tunnel_err = true;
948 } else {
949 dpif_sflow_read_actions(NULL,
950 nl_attr_get(attr), nl_attr_get_size(attr),
951 sflow_actions);
952 }
953 break;
954 case OVS_KEY_ATTR_PRIORITY:
955 case OVS_KEY_ATTR_SKB_MARK:
956 case OVS_KEY_ATTR_DP_HASH:
957 case OVS_KEY_ATTR_RECIRC_ID:
958 break;
959
960 case OVS_KEY_ATTR_TUNNEL: {
961 if (++sflow_actions->encap_depth > 1) {
962 /* Do not handle multi-encap for now. */
963 sflow_actions->tunnel_err = true;
964 } else {
965 if (odp_tun_key_from_attr(attr, false, &sflow_actions->tunnel)
966 == ODP_FIT_ERROR) {
967 /* Tunnel parsing error. */
968 sflow_actions->tunnel_err = true;
969 }
970 }
971 break;
972 }
973
974 case OVS_KEY_ATTR_IN_PORT:
975 case OVS_KEY_ATTR_ETHERNET:
976 case OVS_KEY_ATTR_VLAN:
977 break;
978
979 case OVS_KEY_ATTR_MPLS: {
980 const struct ovs_key_mpls *mpls_key = nl_attr_get(attr);
981 size_t size = nl_attr_get_size(attr);
982 dpif_sflow_set_mpls(sflow_actions, mpls_key, size / sizeof *mpls_key);
983 break;
984 }
985
986 case OVS_KEY_ATTR_ETHERTYPE:
987 case OVS_KEY_ATTR_IPV4:
988 if (sflow_actions->encap_depth == 1) {
989 const struct ovs_key_ipv4 *key = nl_attr_get(attr);
990 if (key->ipv4_src) {
991 sflow_actions->tunnel.ip_src = key->ipv4_src;
992 }
993 if (key->ipv4_dst) {
994 sflow_actions->tunnel.ip_dst = key->ipv4_dst;
995 }
996 if (key->ipv4_proto) {
997 sflow_actions->tunnel_ipproto = key->ipv4_proto;
998 }
999 if (key->ipv4_tos) {
1000 sflow_actions->tunnel.ip_tos = key->ipv4_tos;
1001 }
1002 if (key->ipv4_ttl) {
1003 sflow_actions->tunnel.ip_tos = key->ipv4_ttl;
1004 }
1005 }
1006 break;
1007
1008 case OVS_KEY_ATTR_IPV6:
1009 /* TODO: parse IPv6 encap. */
1010 break;
1011
1012 /* These have the same structure and format. */
1013 case OVS_KEY_ATTR_TCP:
1014 case OVS_KEY_ATTR_UDP:
1015 case OVS_KEY_ATTR_SCTP:
1016 if (sflow_actions->encap_depth == 1) {
1017 const struct ovs_key_tcp *key = nl_attr_get(attr);
1018 if (key->tcp_src) {
1019 sflow_actions->tunnel.tp_src = key->tcp_src;
1020 }
1021 if (key->tcp_dst) {
1022 sflow_actions->tunnel.tp_dst = key->tcp_dst;
1023 }
1024 }
1025 break;
1026
1027 case OVS_KEY_ATTR_TCP_FLAGS:
1028 case OVS_KEY_ATTR_ICMP:
1029 case OVS_KEY_ATTR_ICMPV6:
1030 case OVS_KEY_ATTR_ARP:
1031 case OVS_KEY_ATTR_ND:
1032 case OVS_KEY_ATTR_CT_STATE:
1033 case OVS_KEY_ATTR_CT_ZONE:
1034 case OVS_KEY_ATTR_CT_MARK:
1035 case OVS_KEY_ATTR_UNSPEC:
1036 case __OVS_KEY_ATTR_MAX:
1037 default:
1038 break;
1039 }
1040 }
1041
1042 static void
1043 dpif_sflow_capture_input_mpls(const struct flow *flow,
1044 struct dpif_sflow_actions *sflow_actions)
1045 {
1046 if (eth_type_mpls(flow->dl_type)) {
1047 int depth = 0;
1048 int ii;
1049 ovs_be32 lse;
1050 /* Calculate depth by detecting BOS. */
1051 for (ii = 0; ii < FLOW_MAX_MPLS_LABELS; ii++) {
1052 lse = flow->mpls_lse[ii];
1053 depth++;
1054 if (lse & htonl(MPLS_BOS_MASK)) {
1055 break;
1056 }
1057 }
1058 /* Capture stack, reversing stack order, and
1059 * using host-byte-order for each lse. BOS flag
1060 * is ignored for now. It is set later when
1061 * the output stack is encoded.
1062 */
1063 for (ii = 0; ii < depth; ii++) {
1064 lse = flow->mpls_lse[ii];
1065 sflow_actions->mpls_lse[depth - ii - 1] = ntohl(lse);
1066 }
1067 sflow_actions->mpls_stack_depth = depth;
1068 }
1069 }
1070
1071 void
1072 dpif_sflow_read_actions(const struct flow *flow,
1073 const struct nlattr *actions, size_t actions_len,
1074 struct dpif_sflow_actions *sflow_actions)
1075 {
1076 const struct nlattr *a;
1077 unsigned int left;
1078
1079 if (actions_len == 0) {
1080 /* Packet dropped.*/
1081 return;
1082 }
1083
1084 if (flow != NULL) {
1085 /* Make sure the MPLS output stack
1086 * is seeded with the input stack.
1087 */
1088 dpif_sflow_capture_input_mpls(flow, sflow_actions);
1089
1090 /* XXX when 802.1AD(QinQ) is supported then
1091 * we can do the same with VLAN stacks here
1092 */
1093 }
1094
1095 NL_ATTR_FOR_EACH (a, left, actions, actions_len) {
1096 enum ovs_action_attr type = nl_attr_type(a);
1097 switch (type) {
1098 case OVS_ACTION_ATTR_OUTPUT:
1099 /* Capture the output port in case we need it
1100 * to get the output tunnel type.
1101 */
1102 sflow_actions->out_port = u32_to_odp(nl_attr_get_u32(a));
1103 break;
1104
1105 case OVS_ACTION_ATTR_TUNNEL_POP:
1106 /* XXX: Do not handle this for now. It's not clear
1107 * if we should start with encap_depth == 1 when we
1108 * see an input tunnel, or if we should assume
1109 * that the input tunnel was always "popped" if it
1110 * was presented to us decoded in flow->tunnel?
1111 *
1112 * If we do handle this it might look like this,
1113 * as we clear the captured tunnel info and decrement
1114 * the encap_depth:
1115 *
1116 * memset(&sflow_actions->tunnel, 0, sizeof struct flow_tnl);
1117 * sflow_actions->tunnel_ipproto = 0;
1118 * --sflow_actions->encap_depth;
1119 *
1120 * but for now just disable the tunnel annotation:
1121 */
1122 sflow_actions->tunnel_err = true;
1123 break;
1124
1125 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1126 /* XXX: This actions appears to come with it's own
1127 * OUTPUT action, so should it be regarded as having
1128 * an implicit "pop" following it too? Put another
1129 * way, would two tnl_push() actions in succession
1130 * result in a packet with two layers of encap?
1131 */
1132 if (++sflow_actions->encap_depth > 1) {
1133 /* Do not handle multi-encap for now. */
1134 sflow_actions->tunnel_err = true;
1135 } else {
1136 sflow_read_tnl_push_action(a, sflow_actions);
1137 }
1138 break;
1139
1140 case OVS_ACTION_ATTR_USERSPACE:
1141 case OVS_ACTION_ATTR_RECIRC:
1142 case OVS_ACTION_ATTR_HASH:
1143 case OVS_ACTION_ATTR_CT:
1144 break;
1145
1146 case OVS_ACTION_ATTR_SET_MASKED:
1147 /* TODO: apply mask. XXX: Are we likely to see this? */
1148 break;
1149
1150 case OVS_ACTION_ATTR_SET:
1151 sflow_read_set_action(nl_attr_get(a), sflow_actions);
1152 break;
1153
1154 case OVS_ACTION_ATTR_PUSH_VLAN:
1155 case OVS_ACTION_ATTR_POP_VLAN:
1156 /* TODO: 802.1AD(QinQ) is not supported by OVS (yet), so do not
1157 * construct a VLAN-stack. The sFlow user-action cookie already
1158 * captures the egress VLAN ID so there is nothing more to do here.
1159 */
1160 break;
1161
1162 case OVS_ACTION_ATTR_PUSH_MPLS: {
1163 const struct ovs_action_push_mpls *mpls = nl_attr_get(a);
1164 if (mpls) {
1165 dpif_sflow_push_mpls_lse(sflow_actions, mpls->mpls_lse);
1166 }
1167 break;
1168 }
1169 case OVS_ACTION_ATTR_POP_MPLS: {
1170 dpif_sflow_pop_mpls_lse(sflow_actions);
1171 break;
1172 }
1173 case OVS_ACTION_ATTR_SAMPLE:
1174 case OVS_ACTION_ATTR_UNSPEC:
1175 case __OVS_ACTION_ATTR_MAX:
1176 default:
1177 break;
1178 }
1179 }
1180 }
1181
1182 static void
1183 dpif_sflow_encode_mpls_stack(SFLLabelStack *stack,
1184 uint32_t *mpls_lse_buf,
1185 const struct dpif_sflow_actions *sflow_actions)
1186 {
1187 /* Put the MPLS stack back into "packet header" order,
1188 * and make sure the BOS flag is set correctly on the last
1189 * one. Each lse is still in host-byte-order.
1190 */
1191 int ii;
1192 uint32_t lse;
1193 stack->depth = sflow_actions->mpls_stack_depth;
1194 stack->stack = mpls_lse_buf;
1195 for (ii = 0; ii < stack->depth; ii++) {
1196 lse = sflow_actions->mpls_lse[stack->depth - ii - 1];
1197 stack->stack[ii] = (lse & ~MPLS_BOS_MASK);
1198 }
1199 stack->stack[stack->depth - 1] |= MPLS_BOS_MASK;
1200 }
1201
1202 /* Extract the output port count from the user action cookie.
1203 * See http://sflow.org/sflow_version_5.txt "Input/Output port information"
1204 */
1205 static uint32_t
1206 dpif_sflow_cookie_num_outputs(const union user_action_cookie *cookie)
1207 {
1208 uint32_t format = cookie->sflow.output & 0xC0000000;
1209 uint32_t port_n = cookie->sflow.output & 0x3FFFFFFF;
1210 if (format == 0) {
1211 return port_n ? 1 : 0;
1212 }
1213 else if (format == 0x80000000) {
1214 return port_n;
1215 }
1216 return 0;
1217 }
1218
1219 void
1220 dpif_sflow_received(struct dpif_sflow *ds, const struct dp_packet *packet,
1221 const struct flow *flow, odp_port_t odp_in_port,
1222 const union user_action_cookie *cookie,
1223 const struct dpif_sflow_actions *sflow_actions)
1224 OVS_EXCLUDED(mutex)
1225 {
1226 SFL_FLOW_SAMPLE_TYPE fs;
1227 SFLFlow_sample_element hdrElem;
1228 SFLSampled_header *header;
1229 SFLFlow_sample_element switchElem;
1230 uint8_t tnlInProto, tnlOutProto;
1231 SFLFlow_sample_element tnlInElem, tnlOutElem;
1232 SFLFlow_sample_element vniInElem, vniOutElem;
1233 SFLFlow_sample_element mplsElem;
1234 uint32_t mpls_lse_buf[FLOW_MAX_MPLS_LABELS];
1235 SFLSampler *sampler;
1236 struct dpif_sflow_port *in_dsp;
1237 struct dpif_sflow_port *out_dsp;
1238 ovs_be16 vlan_tci;
1239
1240 ovs_mutex_lock(&mutex);
1241 sampler = ds->sflow_agent->samplers;
1242 if (!sampler) {
1243 goto out;
1244 }
1245
1246 /* Build a flow sample. */
1247 memset(&fs, 0, sizeof fs);
1248
1249 /* Look up the input ifIndex if this port has one. Otherwise just
1250 * leave it as 0 (meaning 'unknown') and continue. */
1251 in_dsp = dpif_sflow_find_port(ds, odp_in_port);
1252 if (in_dsp) {
1253 fs.input = SFL_DS_INDEX(in_dsp->dsi);
1254 }
1255
1256 /* Make the assumption that the random number generator in the datapath converges
1257 * to the configured mean, and just increment the samplePool by the configured
1258 * sampling rate every time. */
1259 sampler->samplePool += sfl_sampler_get_sFlowFsPacketSamplingRate(sampler);
1260
1261 /* Sampled header. */
1262 memset(&hdrElem, 0, sizeof hdrElem);
1263 hdrElem.tag = SFLFLOW_HEADER;
1264 header = &hdrElem.flowType.header;
1265 header->header_protocol = SFLHEADER_ETHERNET_ISO8023;
1266 /* The frame_length should include the Ethernet FCS (4 bytes),
1267 * but it has already been stripped, so we need to add 4 here. */
1268 header->frame_length = dp_packet_size(packet) + 4;
1269 /* Ethernet FCS stripped off. */
1270 header->stripped = 4;
1271 header->header_length = MIN(dp_packet_size(packet),
1272 sampler->sFlowFsMaximumHeaderSize);
1273 header->header_bytes = dp_packet_data(packet);
1274
1275 /* Add extended switch element. */
1276 memset(&switchElem, 0, sizeof(switchElem));
1277 switchElem.tag = SFLFLOW_EX_SWITCH;
1278 switchElem.flowType.sw.src_vlan = vlan_tci_to_vid(flow->vlan_tci);
1279 switchElem.flowType.sw.src_priority = vlan_tci_to_pcp(flow->vlan_tci);
1280
1281 /* Retrieve data from user_action_cookie. */
1282 vlan_tci = cookie->sflow.vlan_tci;
1283 switchElem.flowType.sw.dst_vlan = vlan_tci_to_vid(vlan_tci);
1284 switchElem.flowType.sw.dst_priority = vlan_tci_to_pcp(vlan_tci);
1285
1286 fs.output = cookie->sflow.output;
1287
1288 /* Input tunnel. */
1289 if (flow->tunnel.ip_dst) {
1290 memset(&tnlInElem, 0, sizeof(tnlInElem));
1291 tnlInElem.tag = SFLFLOW_EX_IPV4_TUNNEL_INGRESS;
1292 tnlInProto = dpif_sflow_tunnel_proto(in_dsp->tunnel_type);
1293 dpif_sflow_tunnel_v4(tnlInProto,
1294 &flow->tunnel,
1295 &tnlInElem.flowType.ipv4);
1296 SFLADD_ELEMENT(&fs, &tnlInElem);
1297 if (flow->tunnel.tun_id) {
1298 memset(&vniInElem, 0, sizeof(vniInElem));
1299 vniInElem.tag = SFLFLOW_EX_VNI_INGRESS;
1300 vniInElem.flowType.tunnel_vni.vni
1301 = ntohll(flow->tunnel.tun_id);
1302 SFLADD_ELEMENT(&fs, &vniInElem);
1303 }
1304 }
1305
1306 /* Output tunnel. */
1307 if (sflow_actions
1308 && sflow_actions->encap_depth == 1
1309 && !sflow_actions->tunnel_err
1310 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1311 tnlOutProto = sflow_actions->tunnel_ipproto;
1312 if (tnlOutProto == 0) {
1313 /* Try to infer the ip-protocol from the output port. */
1314 if (sflow_actions->out_port != ODPP_NONE) {
1315 out_dsp = dpif_sflow_find_port(ds, sflow_actions->out_port);
1316 if (out_dsp) {
1317 tnlOutProto = dpif_sflow_tunnel_proto(out_dsp->tunnel_type);
1318 }
1319 }
1320 }
1321 memset(&tnlOutElem, 0, sizeof(tnlOutElem));
1322 tnlOutElem.tag = SFLFLOW_EX_IPV4_TUNNEL_EGRESS;
1323 dpif_sflow_tunnel_v4(tnlOutProto,
1324 &sflow_actions->tunnel,
1325 &tnlOutElem.flowType.ipv4);
1326 SFLADD_ELEMENT(&fs, &tnlOutElem);
1327 if (sflow_actions->tunnel.tun_id) {
1328 memset(&vniOutElem, 0, sizeof(vniOutElem));
1329 vniOutElem.tag = SFLFLOW_EX_VNI_EGRESS;
1330 vniOutElem.flowType.tunnel_vni.vni
1331 = ntohll(sflow_actions->tunnel.tun_id);
1332 SFLADD_ELEMENT(&fs, &vniOutElem);
1333 }
1334 }
1335
1336 /* MPLS output label stack. */
1337 if (sflow_actions
1338 && sflow_actions->mpls_stack_depth > 0
1339 && !sflow_actions->mpls_err
1340 && dpif_sflow_cookie_num_outputs(cookie) == 1) {
1341 memset(&mplsElem, 0, sizeof(mplsElem));
1342 mplsElem.tag = SFLFLOW_EX_MPLS;
1343 dpif_sflow_encode_mpls_stack(&mplsElem.flowType.mpls.out_stack,
1344 mpls_lse_buf,
1345 sflow_actions);
1346 SFLADD_ELEMENT(&fs, &mplsElem);
1347 }
1348
1349 /* Submit the flow sample to be encoded into the next datagram. */
1350 SFLADD_ELEMENT(&fs, &hdrElem);
1351 SFLADD_ELEMENT(&fs, &switchElem);
1352 sfl_sampler_writeFlowSample(sampler, &fs);
1353
1354 out:
1355 ovs_mutex_unlock(&mutex);
1356 }
1357
1358 void
1359 dpif_sflow_run(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1360 {
1361 ovs_mutex_lock(&mutex);
1362 if (ds->collectors != NULL) {
1363 time_t now = time_now();
1364 route_table_run();
1365 if (now >= ds->next_tick) {
1366 sfl_agent_tick(ds->sflow_agent, time_wall());
1367 ds->next_tick = now + 1;
1368 }
1369 }
1370 ovs_mutex_unlock(&mutex);
1371 }
1372
1373 void
1374 dpif_sflow_wait(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
1375 {
1376 ovs_mutex_lock(&mutex);
1377 if (ds->collectors != NULL) {
1378 poll_timer_wait_until(ds->next_tick * 1000LL);
1379 }
1380 ovs_mutex_unlock(&mutex);
1381 }