1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "ofproto/ofproto-dpif.h"
20 #include "ofproto/ofproto-provider.h"
21
22 #include <errno.h>
23
24 #include "bfd.h"
25 #include "bond.h"
26 #include "bundle.h"
27 #include "byte-order.h"
28 #include "connectivity.h"
29 #include "connmgr.h"
30 #include "coverage.h"
31 #include "cfm.h"
32 #include "ovs-lldp.h"
33 #include "dpif.h"
34 #include "dynamic-string.h"
35 #include "fail-open.h"
36 #include "guarded-list.h"
37 #include "hmapx.h"
38 #include "lacp.h"
39 #include "learn.h"
40 #include "mac-learning.h"
41 #include "mcast-snooping.h"
42 #include "meta-flow.h"
43 #include "multipath.h"
44 #include "netdev-vport.h"
45 #include "netdev.h"
46 #include "netlink.h"
47 #include "nx-match.h"
48 #include "odp-util.h"
49 #include "odp-execute.h"
50 #include "ofp-util.h"
51 #include "ofpbuf.h"
52 #include "ofp-actions.h"
53 #include "ofp-parse.h"
54 #include "ofp-print.h"
55 #include "ofproto-dpif-ipfix.h"
56 #include "ofproto-dpif-mirror.h"
57 #include "ofproto-dpif-monitor.h"
58 #include "ofproto-dpif-rid.h"
59 #include "ofproto-dpif-sflow.h"
60 #include "ofproto-dpif-upcall.h"
61 #include "ofproto-dpif-xlate.h"
62 #include "poll-loop.h"
63 #include "ovs-rcu.h"
64 #include "ovs-router.h"
65 #include "seq.h"
66 #include "simap.h"
67 #include "smap.h"
68 #include "timer.h"
69 #include "tunnel.h"
70 #include "unaligned.h"
71 #include "unixctl.h"
72 #include "vlan-bitmap.h"
73 #include "openvswitch/vlog.h"
74
75 VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
76
77 COVERAGE_DEFINE(ofproto_dpif_expired);
78 COVERAGE_DEFINE(packet_in_overflow);
79
80 struct flow_miss;
81
82 struct rule_dpif {
83 struct rule up;
84
85 /* These statistics:
86 *
87 * - Do include packets and bytes from datapath flows which have not
88 * recently been processed by a revalidator. */
89 struct ovs_mutex stats_mutex;
90 struct dpif_flow_stats stats OVS_GUARDED;
91
92 /* If non-NULL, will point to a new rule (for which a reference is held) to
93 * which all the stats updates should be forwarded. This exists only
94 * transitionally when flows are replaced.
95 *
96 * Protected by stats_mutex. If both 'rule->stats_mutex' and
97 * 'rule->new_rule->stats_mutex' must be held together, acquire them in that
98 * order. */
99 struct rule_dpif *new_rule OVS_GUARDED;
100
101 /* If non-zero, the recirculation id that has
102 * been allocated for use with this rule.
103 * The recirculation id and associated internal flow should
104 * be freed when the rule is freed. */
105 uint32_t recirc_id;
106 };
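/* Illustrative sketch, not part of the original file: under the
 * lock-ordering rule documented for 'new_rule' above, forwarding a stats
 * update would look roughly like this ('apply_stats_update' is a
 * hypothetical helper used only for this example):
 *
 *     ovs_mutex_lock(&rule->stats_mutex);
 *     if (rule->new_rule) {
 *         ovs_mutex_lock(&rule->new_rule->stats_mutex);
 *         apply_stats_update(rule->new_rule);
 *         ovs_mutex_unlock(&rule->new_rule->stats_mutex);
 *     } else {
 *         apply_stats_update(rule);
 *     }
 *     ovs_mutex_unlock(&rule->stats_mutex);
 */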
107
108 /* RULE_CAST() depends on this. */
109 BUILD_ASSERT_DECL(offsetof(struct rule_dpif, up) == 0);
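/* Because 'up' is the first member of struct rule_dpif, converting a
 * 'struct rule *' to a 'struct rule_dpif *' is a plain pointer cast.  A
 * minimal sketch of the idiom that the assertion above protects (the real
 * RULE_CAST() is defined later in the full file; this is assumed to be
 * equivalent):
 *
 *     #define RULE_CAST(RULE) ((struct rule_dpif *) RULE)
 */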
110
111 static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
112 long long int *used);
113 static struct rule_dpif *rule_dpif_cast(const struct rule *);
114 static void rule_expire(struct rule_dpif *);
115
116 struct group_dpif {
117 struct ofgroup up;
118
119 /* These statistics:
120 *
121 * - Do include packets and bytes from datapath flows which have not
122 * recently been processed by a revalidator. */
123 struct ovs_mutex stats_mutex;
124 uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
125 uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
126 };
127
128 struct ofbundle {
129 struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
130 struct ofproto_dpif *ofproto; /* Owning ofproto. */
131 void *aux; /* Key supplied by ofproto's client. */
132 char *name; /* Identifier for log messages. */
133
134 /* Configuration. */
135 struct ovs_list ports; /* Contains "struct ofport"s. */
136 enum port_vlan_mode vlan_mode; /* VLAN mode */
137 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
138 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
139 * NULL if all VLANs are trunked. */
140 struct lacp *lacp; /* LACP if LACP is enabled, otherwise NULL. */
141 struct bond *bond; /* Nonnull iff more than one port. */
142 bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
143
144 /* Status. */
145 bool floodable; /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
146 };
147
148 static void bundle_remove(struct ofport *);
149 static void bundle_update(struct ofbundle *);
150 static void bundle_destroy(struct ofbundle *);
151 static void bundle_del_port(struct ofport_dpif *);
152 static void bundle_run(struct ofbundle *);
153 static void bundle_wait(struct ofbundle *);
154 static void bundle_flush_macs(struct ofbundle *, bool);
155 static void bundle_move(struct ofbundle *, struct ofbundle *);
156
157 static void stp_run(struct ofproto_dpif *ofproto);
158 static void stp_wait(struct ofproto_dpif *ofproto);
159 static int set_stp_port(struct ofport *,
160 const struct ofproto_port_stp_settings *);
161
162 static void rstp_run(struct ofproto_dpif *ofproto);
163 static void set_rstp_port(struct ofport *,
164 const struct ofproto_port_rstp_settings *);
165
166 struct ofport_dpif {
167 struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
168 struct ofport up;
169
170 odp_port_t odp_port;
171 struct ofbundle *bundle; /* Bundle that contains this port, if any. */
172 struct ovs_list bundle_node;/* In struct ofbundle's "ports" list. */
173 struct cfm *cfm; /* Connectivity Fault Management, if any. */
174 struct bfd *bfd; /* BFD, if any. */
175 struct lldp *lldp; /* lldp, if any. */
176 bool may_enable; /* May be enabled in bonds. */
177 bool is_tunnel; /* This port is a tunnel. */
178 bool is_layer3; /* This is a layer 3 port. */
179 long long int carrier_seq; /* Carrier status changes. */
180 struct ofport_dpif *peer; /* Peer if patch port. */
181
182 /* Spanning tree. */
183 struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
184 enum stp_state stp_state; /* Always STP_DISABLED if STP not in use. */
185 long long int stp_state_entered;
186
187 /* Rapid Spanning Tree. */
188 struct rstp_port *rstp_port; /* Rapid Spanning Tree Protocol, if any. */
189 enum rstp_state rstp_state; /* Always RSTP_DISABLED if RSTP not in use. */
190
191 /* Queue to DSCP mapping. */
192 struct ofproto_port_queue *qdscp;
193 size_t n_qdscp;
194
195 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
196 *
197 * This is deprecated. It is only for compatibility with broken device
198 * drivers in old versions of Linux that do not properly support VLANs when
199 * VLAN devices are not used. When broken device drivers are no longer in
200 * widespread use, we will delete these interfaces. */
201 ofp_port_t realdev_ofp_port;
202 int vlandev_vid;
203 };
204
205 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
206 *
207 * This is deprecated. It is only for compatibility with broken device drivers
208 * in old versions of Linux that do not properly support VLANs when VLAN
209 * devices are not used. When broken device drivers are no longer in
210 * widespread use, we will delete these interfaces. */
211 struct vlan_splinter {
212 struct hmap_node realdev_vid_node;
213 struct hmap_node vlandev_node;
214 ofp_port_t realdev_ofp_port;
215 ofp_port_t vlandev_ofp_port;
216 int vid;
217 };
218
219 static void vsp_remove(struct ofport_dpif *);
220 static void vsp_add(struct ofport_dpif *, ofp_port_t realdev_ofp_port, int vid);
221
222 static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
223 ofp_port_t);
224
225 static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,
226 odp_port_t);
227
228 static struct ofport_dpif *
229 ofport_dpif_cast(const struct ofport *ofport)
230 {
231 return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
232 }
233
234 static void port_run(struct ofport_dpif *);
235 static int set_bfd(struct ofport *, const struct smap *);
236 static int set_cfm(struct ofport *, const struct cfm_settings *);
237 static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
238 static void ofport_update_peer(struct ofport_dpif *);
239
240 /* Reasons that we might need to revalidate every datapath flow, and
241 * corresponding coverage counters.
242 *
243 * A value of 0 means that there is no need to revalidate.
244 *
245 * It would be nice to have some cleaner way to integrate with coverage
246 * counters, but with only a few reasons I guess this is good enough for
247 * now. */
248 enum revalidate_reason {
249 REV_RECONFIGURE = 1, /* Switch configuration changed. */
250 REV_STP, /* Spanning tree protocol port status change. */
251 REV_RSTP, /* RSTP port status change. */
252 REV_BOND, /* Bonding changed. */
253 REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
254 REV_FLOW_TABLE, /* Flow table changed. */
255 REV_MAC_LEARNING, /* Mac learning changed. */
256 REV_MCAST_SNOOPING, /* Multicast snooping changed. */
257 };
258 COVERAGE_DEFINE(rev_reconfigure);
259 COVERAGE_DEFINE(rev_stp);
260 COVERAGE_DEFINE(rev_rstp);
261 COVERAGE_DEFINE(rev_bond);
262 COVERAGE_DEFINE(rev_port_toggled);
263 COVERAGE_DEFINE(rev_flow_table);
264 COVERAGE_DEFINE(rev_mac_learning);
265 COVERAGE_DEFINE(rev_mcast_snooping);
266
267 /* All datapaths of a given type share a single dpif backer instance. */
268 struct dpif_backer {
269 char *type;
270 int refcount;
271 struct dpif *dpif;
272 struct udpif *udpif;
273
274 struct ovs_rwlock odp_to_ofport_lock;
275 struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */
276
277 struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
278
279 enum revalidate_reason need_revalidate; /* Revalidate all flows. */
280
281 bool recv_set_enable; /* Enables or disables receiving packets. */
282
283 /* Version string of the datapath stored in OVSDB. */
284 char *dp_version_string;
285
286 /* Datapath feature support. */
287 struct dpif_backer_support support;
288 struct atomic_count tnl_count;
289 };
290
291 /* All existing dpif_backer instances, indexed by ofproto->up.type. */
292 static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
293
294 struct ofproto_dpif {
295 struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
296 struct ofproto up;
297 struct dpif_backer *backer;
298
299 ATOMIC(cls_version_t) tables_version; /* For classifier lookups. */
300
301 uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
302
303 /* Special OpenFlow rules. */
304 struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
305 struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
306 struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
307
308 /* Bridging. */
309 struct netflow *netflow;
310 struct dpif_sflow *sflow;
311 struct dpif_ipfix *ipfix;
312 struct hmap bundles; /* Contains "struct ofbundle"s. */
313 struct mac_learning *ml;
314 struct mcast_snooping *ms;
315 bool has_bonded_bundles;
316 bool lacp_enabled;
317 struct mbridge *mbridge;
318
319 struct ovs_mutex stats_mutex;
320 struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
321 * consumed in userspace. */
322
323 /* Spanning tree. */
324 struct stp *stp;
325 long long int stp_last_tick;
326
327 /* Rapid Spanning Tree. */
328 struct rstp *rstp;
329 long long int rstp_last_tick;
330
331 /* VLAN splinters. */
332 struct ovs_mutex vsp_mutex;
333 struct hmap realdev_vid_map OVS_GUARDED; /* (realdev,vid) -> vlandev. */
334 struct hmap vlandev_map OVS_GUARDED; /* vlandev -> (realdev,vid). */
335
336 /* Ports. */
337 struct sset ports; /* Set of standard port names. */
338 struct sset ghost_ports; /* Ports with no datapath port. */
339 struct sset port_poll_set; /* Queued names for port_poll() reply. */
340 int port_poll_errno; /* Last errno for port_poll() reply. */
341 uint64_t change_seq; /* Connectivity status changes. */
342
343 /* Work queues. */
344 struct guarded_list pins; /* Contains "struct ofputil_packet_in"s. */
345 struct seq *pins_seq; /* For notifying 'pins' reception. */
346 uint64_t pins_seqno;
347 };
348
349 /* All existing ofproto_dpif instances, indexed by ->up.name. */
350 static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
351
352 static bool ofproto_use_tnl_push_pop = true;
353 static void ofproto_unixctl_init(void);
354
355 static inline struct ofproto_dpif *
356 ofproto_dpif_cast(const struct ofproto *ofproto)
357 {
358 ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
359 return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
360 }
361
362 bool
363 ofproto_dpif_get_enable_ufid(const struct dpif_backer *backer)
364 {
365 return backer->support.ufid;
366 }
367
368 struct dpif_backer_support *
369 ofproto_dpif_get_support(const struct ofproto_dpif *ofproto)
370 {
371 return &ofproto->backer->support;
372 }
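/* Illustrative usage, a sketch rather than code from this file:
 * translation code consults individual capability flags before emitting
 * datapath actions, along the lines of:
 *
 *     if (ofproto_dpif_get_support(ofproto)->masked_set_action) {
 *         ... safe to emit OVS_ACTION_ATTR_SET_MASKED ...
 *     }
 */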
373
374 static void ofproto_trace(struct ofproto_dpif *, struct flow *,
375 const struct dp_packet *packet,
376 const struct ofpact[], size_t ofpacts_len,
377 struct ds *);
378
379 /* Global variables. */
380 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
381
382 /* Initial mappings of ports to bridges. */
383 static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
384
385 /* Executes 'fm'. The caller retains ownership of 'fm' and everything in
386 * it. */
387 void
388 ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
389 const struct ofputil_flow_mod *fm)
390 {
391 struct ofproto_flow_mod ofm;
392
393 /* Multiple threads may do this for the same 'fm' at the same time.
394 * Allocate ofproto_flow_mod with execution context from stack.
395 *
396 * Note: This copy could be avoided by making ofproto_flow_mod more
397 * complex, but that may not be desirable, and a learn action is not that
398 * fast to begin with. */
399 ofm.fm = *fm;
400 ofproto_flow_mod(&ofproto->up, &ofm);
401 }
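/* Illustrative caller sketch (assumed, not from this file): because the
 * function copies 'fm' into a stack-allocated execution context, the
 * caller may pass a temporary and keeps ownership afterward:
 *
 *     struct ofputil_flow_mod fm;
 *
 *     memset(&fm, 0, sizeof fm);
 *     ... fill in fm.match, fm.priority, fm.ofpacts, etc. ...
 *     ofproto_dpif_flow_mod(ofproto, &fm);
 *     ... 'fm' and everything in it still belong to the caller ...
 */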
402
403 /* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
404 * Takes ownership of 'pin' and pin->packet. */
405 void
406 ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
407 struct ofproto_packet_in *pin)
408 {
409 if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
410 COVERAGE_INC(packet_in_overflow);
411 free(CONST_CAST(void *, pin->up.packet));
412 free(pin);
413 }
414
415 /* Wakes up main thread for packet-in I/O. */
416 seq_change(ofproto->pins_seq);
417 }
418
419 /* The default "table-miss" behaviour for OpenFlow 1.3+ is to drop the
420 * packet rather than to send the packet to the controller.
421 *
422 * This function returns true to indicate that a packet_in message
423 * for a "table-miss" should be sent to at least one controller;
424 * false otherwise. */
425 bool
426 ofproto_dpif_wants_packet_in_on_miss(struct ofproto_dpif *ofproto)
427 {
428 return connmgr_wants_packet_in_on_miss(ofproto->up.connmgr);
429 }
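/* Illustrative use (a sketch): callers deciding the fate of a table-miss
 * packet can consult this before queueing a packet-in, e.g.:
 *
 *     if (!ofproto_dpif_wants_packet_in_on_miss(ofproto)) {
 *         ... drop the packet instead of calling
 *         ofproto_dpif_send_packet_in() ...
 *     }
 */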
430 \f
431 /* Factory functions. */
432
433 static void
434 init(const struct shash *iface_hints)
435 {
436 struct shash_node *node;
437
438 /* Make a local copy, since we don't own 'iface_hints' elements. */
439 SHASH_FOR_EACH(node, iface_hints) {
440 const struct iface_hint *orig_hint = node->data;
441 struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
442
443 new_hint->br_name = xstrdup(orig_hint->br_name);
444 new_hint->br_type = xstrdup(orig_hint->br_type);
445 new_hint->ofp_port = orig_hint->ofp_port;
446
447 shash_add(&init_ofp_ports, node->name, new_hint);
448 }
449
450 ofproto_unixctl_init();
451 udpif_init();
452 }
453
454 static void
455 enumerate_types(struct sset *types)
456 {
457 dp_enumerate_types(types);
458 }
459
460 static int
461 enumerate_names(const char *type, struct sset *names)
462 {
463 struct ofproto_dpif *ofproto;
464
465 sset_clear(names);
466 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
467 if (strcmp(type, ofproto->up.type)) {
468 continue;
469 }
470 sset_add(names, ofproto->up.name);
471 }
472
473 return 0;
474 }
475
476 static int
477 del(const char *type, const char *name)
478 {
479 struct dpif *dpif;
480 int error;
481
482 error = dpif_open(name, type, &dpif);
483 if (!error) {
484 error = dpif_delete(dpif);
485 dpif_close(dpif);
486 }
487 return error;
488 }
489 \f
490 static const char *
491 port_open_type(const char *datapath_type, const char *port_type)
492 {
493 return dpif_port_open_type(datapath_type, port_type);
494 }
495
496 /* Type functions. */
497
498 static void process_dpif_port_changes(struct dpif_backer *);
499 static void process_dpif_all_ports_changed(struct dpif_backer *);
500 static void process_dpif_port_change(struct dpif_backer *,
501 const char *devname);
502 static void process_dpif_port_error(struct dpif_backer *, int error);
503
504 static struct ofproto_dpif *
505 lookup_ofproto_dpif_by_port_name(const char *name)
506 {
507 struct ofproto_dpif *ofproto;
508
509 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
510 if (sset_contains(&ofproto->ports, name)) {
511 return ofproto;
512 }
513 }
514
515 return NULL;
516 }
517
518 bool
519 ofproto_dpif_backer_enabled(struct dpif_backer* backer)
520 {
521 return backer->recv_set_enable;
522 }
523
524 static int
525 type_run(const char *type)
526 {
527 struct dpif_backer *backer;
528
529 backer = shash_find_data(&all_dpif_backers, type);
530 if (!backer) {
531 /* This is not necessarily a problem, since backers are only
532 * created on demand. */
533 return 0;
534 }
535
536
537 if (dpif_run(backer->dpif)) {
538 backer->need_revalidate = REV_RECONFIGURE;
539 }
540
541 udpif_run(backer->udpif);
542
543 /* If vswitchd started with other_config:flow_restore_wait set as "true",
544 * and the configuration has now changed to "false", enable receiving
545 * packets from the datapath. */
546 if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
547 int error;
548
549 backer->recv_set_enable = true;
550
551 error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
552 if (error) {
553 VLOG_ERR("Failed to enable receiving packets in dpif.");
554 return error;
555 }
556 dpif_flow_flush(backer->dpif);
557 backer->need_revalidate = REV_RECONFIGURE;
558 }
559
560 if (backer->recv_set_enable) {
561 udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
562 }
563
564 dpif_poll_threads_set(backer->dpif, n_dpdk_rxqs, pmd_cpu_mask);
565
566 if (backer->need_revalidate) {
567 struct ofproto_dpif *ofproto;
568 struct simap_node *node;
569 struct simap tmp_backers;
570
571 /* Handle tunnel garbage collection. */
572 simap_init(&tmp_backers);
573 simap_swap(&backer->tnl_backers, &tmp_backers);
574
575 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
576 struct ofport_dpif *iter;
577
578 if (backer != ofproto->backer) {
579 continue;
580 }
581
582 HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
583 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
584 const char *dp_port;
585
586 if (!iter->is_tunnel) {
587 continue;
588 }
589
590 dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
591 namebuf, sizeof namebuf);
592 node = simap_find(&tmp_backers, dp_port);
593 if (node) {
594 simap_put(&backer->tnl_backers, dp_port, node->data);
595 simap_delete(&tmp_backers, node);
596 node = simap_find(&backer->tnl_backers, dp_port);
597 } else {
598 node = simap_find(&backer->tnl_backers, dp_port);
599 if (!node) {
600 odp_port_t odp_port = ODPP_NONE;
601
602 if (!dpif_port_add(backer->dpif, iter->up.netdev,
603 &odp_port)) {
604 simap_put(&backer->tnl_backers, dp_port,
605 odp_to_u32(odp_port));
606 node = simap_find(&backer->tnl_backers, dp_port);
607 }
608 }
609 }
610
611 iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
612 if (tnl_port_reconfigure(iter, iter->up.netdev,
613 iter->odp_port,
614 ovs_native_tunneling_is_on(ofproto), dp_port)) {
615 backer->need_revalidate = REV_RECONFIGURE;
616 }
617 }
618 }
619
620 SIMAP_FOR_EACH (node, &tmp_backers) {
621 dpif_port_del(backer->dpif, u32_to_odp(node->data));
622 }
623 simap_destroy(&tmp_backers);
624
625 switch (backer->need_revalidate) {
626 case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
627 case REV_STP: COVERAGE_INC(rev_stp); break;
628 case REV_RSTP: COVERAGE_INC(rev_rstp); break;
629 case REV_BOND: COVERAGE_INC(rev_bond); break;
630 case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
631 case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
632 case REV_MAC_LEARNING: COVERAGE_INC(rev_mac_learning); break;
633 case REV_MCAST_SNOOPING: COVERAGE_INC(rev_mcast_snooping); break;
634 }
635 backer->need_revalidate = 0;
636
637 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
638 struct ofport_dpif *ofport;
639 struct ofbundle *bundle;
640
641 if (ofproto->backer != backer) {
642 continue;
643 }
644
645 xlate_txn_start();
646 xlate_ofproto_set(ofproto, ofproto->up.name,
647 ofproto->backer->dpif, ofproto->ml,
648 ofproto->stp, ofproto->rstp, ofproto->ms,
649 ofproto->mbridge, ofproto->sflow, ofproto->ipfix,
650 ofproto->netflow,
651 ofproto->up.forward_bpdu,
652 connmgr_has_in_band(ofproto->up.connmgr),
653 &ofproto->backer->support);
654
655 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
656 xlate_bundle_set(ofproto, bundle, bundle->name,
657 bundle->vlan_mode, bundle->vlan,
658 bundle->trunks, bundle->use_priority_tags,
659 bundle->bond, bundle->lacp,
660 bundle->floodable);
661 }
662
663 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
664 int stp_port = ofport->stp_port
665 ? stp_port_no(ofport->stp_port)
666 : -1;
667 xlate_ofport_set(ofproto, ofport->bundle, ofport,
668 ofport->up.ofp_port, ofport->odp_port,
669 ofport->up.netdev, ofport->cfm, ofport->bfd,
670 ofport->lldp, ofport->peer, stp_port,
671 ofport->rstp_port, ofport->qdscp,
672 ofport->n_qdscp, ofport->up.pp.config,
673 ofport->up.pp.state, ofport->is_tunnel,
674 ofport->may_enable);
675 }
676 xlate_txn_commit();
677 }
678
679 udpif_revalidate(backer->udpif);
680 }
681
682 process_dpif_port_changes(backer);
683
684 return 0;
685 }
686
687 /* Check for and handle port changes in 'backer''s dpif. */
688 static void
689 process_dpif_port_changes(struct dpif_backer *backer)
690 {
691 for (;;) {
692 char *devname;
693 int error;
694
695 error = dpif_port_poll(backer->dpif, &devname);
696 switch (error) {
697 case EAGAIN:
698 return;
699
700 case ENOBUFS:
701 process_dpif_all_ports_changed(backer);
702 break;
703
704 case 0:
705 process_dpif_port_change(backer, devname);
706 free(devname);
707 break;
708
709 default:
710 process_dpif_port_error(backer, error);
711 break;
712 }
713 }
714 }
715
716 static void
717 process_dpif_all_ports_changed(struct dpif_backer *backer)
718 {
719 struct ofproto_dpif *ofproto;
720 struct dpif_port dpif_port;
721 struct dpif_port_dump dump;
722 struct sset devnames;
723 const char *devname;
724
725 sset_init(&devnames);
726 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
727 if (ofproto->backer == backer) {
728 struct ofport *ofport;
729
730 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
731 sset_add(&devnames, netdev_get_name(ofport->netdev));
732 }
733 }
734 }
735 DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
736 sset_add(&devnames, dpif_port.name);
737 }
738
739 SSET_FOR_EACH (devname, &devnames) {
740 process_dpif_port_change(backer, devname);
741 }
742 sset_destroy(&devnames);
743 }
744
745 static void
746 process_dpif_port_change(struct dpif_backer *backer, const char *devname)
747 {
748 struct ofproto_dpif *ofproto;
749 struct dpif_port port;
750
751 /* Don't report on the datapath's device. */
752 if (!strcmp(devname, dpif_base_name(backer->dpif))) {
753 return;
754 }
755
756 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
757 &all_ofproto_dpifs) {
758 if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
759 return;
760 }
761 }
762
763 ofproto = lookup_ofproto_dpif_by_port_name(devname);
764 if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
765 /* The port was removed. If we know the datapath,
766 * report it through poll_set(). If we don't, it may be
767 * notifying us of a removal we initiated, so ignore it.
768 * If there's a pending ENOBUFS, let it stand, since
769 * everything will be reevaluated. */
770 if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
771 sset_add(&ofproto->port_poll_set, devname);
772 ofproto->port_poll_errno = 0;
773 }
774 } else if (!ofproto) {
775 /* The port was added, but we don't know with which
776 * ofproto we should associate it. Delete it. */
777 dpif_port_del(backer->dpif, port.port_no);
778 } else {
779 struct ofport_dpif *ofport;
780
781 ofport = ofport_dpif_cast(shash_find_data(
782 &ofproto->up.port_by_name, devname));
783 if (ofport
784 && ofport->odp_port != port.port_no
785 && !odp_port_to_ofport(backer, port.port_no))
786 {
787 /* 'ofport''s datapath port number has changed from
788 * 'ofport->odp_port' to 'port.port_no'. Update our internal data
789 * structures to match. */
790 ovs_rwlock_wrlock(&backer->odp_to_ofport_lock);
791 hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
792 ofport->odp_port = port.port_no;
793 hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
794 hash_odp_port(port.port_no));
795 ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
796 backer->need_revalidate = REV_RECONFIGURE;
797 }
798 }
799 dpif_port_destroy(&port);
800 }
801
802 /* Propagate 'error' to all ofprotos based on 'backer'. */
803 static void
804 process_dpif_port_error(struct dpif_backer *backer, int error)
805 {
806 struct ofproto_dpif *ofproto;
807
808 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
809 if (ofproto->backer == backer) {
810 sset_clear(&ofproto->port_poll_set);
811 ofproto->port_poll_errno = error;
812 }
813 }
814 }
815
816 static void
817 type_wait(const char *type)
818 {
819 struct dpif_backer *backer;
820
821 backer = shash_find_data(&all_dpif_backers, type);
822 if (!backer) {
823 /* This is not necessarily a problem, since backers are only
824 * created on demand. */
825 return;
826 }
827
828 dpif_wait(backer->dpif);
829 }
830 \f
831 /* Basic life-cycle. */
832
833 static int add_internal_flows(struct ofproto_dpif *);
834
835 static struct ofproto *
836 alloc(void)
837 {
838 struct ofproto_dpif *ofproto = xzalloc(sizeof *ofproto);
839 return &ofproto->up;
840 }
841
842 static void
843 dealloc(struct ofproto *ofproto_)
844 {
845 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
846 free(ofproto);
847 }
848
849 static void
850 close_dpif_backer(struct dpif_backer *backer)
851 {
852 ovs_assert(backer->refcount > 0);
853
854 if (--backer->refcount) {
855 return;
856 }
857
858 udpif_destroy(backer->udpif);
859
860 simap_destroy(&backer->tnl_backers);
861 ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
862 hmap_destroy(&backer->odp_to_ofport_map);
863 shash_find_and_delete(&all_dpif_backers, backer->type);
864 free(backer->type);
865 free(backer->dp_version_string);
866 dpif_close(backer->dpif);
867 free(backer);
868 }
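/* Illustrative pairing (a sketch based on construct() and destruct() later
 * in this file): each ofproto takes a backer reference on creation and
 * drops it on destruction, so the underlying dpif goes away only with the
 * last user:
 *
 *     error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
 *     ...
 *     close_dpif_backer(ofproto->backer);
 */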
869
870 /* Datapath port slated for removal from datapath. */
871 struct odp_garbage {
872 struct ovs_list list_node;
873 odp_port_t odp_port;
874 };
875
876 static bool check_variable_length_userdata(struct dpif_backer *backer);
877 static void check_support(struct dpif_backer *backer);
878
879 static int
880 open_dpif_backer(const char *type, struct dpif_backer **backerp)
881 {
882 struct dpif_backer *backer;
883 struct dpif_port_dump port_dump;
884 struct dpif_port port;
885 struct shash_node *node;
886 struct ovs_list garbage_list;
887 struct odp_garbage *garbage;
888
889 struct sset names;
890 char *backer_name;
891 const char *name;
892 int error;
893
894 recirc_init();
895
896 backer = shash_find_data(&all_dpif_backers, type);
897 if (backer) {
898 backer->refcount++;
899 *backerp = backer;
900 return 0;
901 }
902
903 backer_name = xasprintf("ovs-%s", type);
904
905 /* Remove any existing datapaths, since we assume we're the only
906 * userspace controlling the datapath. */
907 sset_init(&names);
908 dp_enumerate_names(type, &names);
909 SSET_FOR_EACH(name, &names) {
910 struct dpif *old_dpif;
911
912 /* Don't remove our backer if it exists. */
913 if (!strcmp(name, backer_name)) {
914 continue;
915 }
916
917 if (dpif_open(name, type, &old_dpif)) {
918 VLOG_WARN("couldn't open old datapath %s to remove it", name);
919 } else {
920 dpif_delete(old_dpif);
921 dpif_close(old_dpif);
922 }
923 }
924 sset_destroy(&names);
925
926 backer = xmalloc(sizeof *backer);
927
928 error = dpif_create_and_open(backer_name, type, &backer->dpif);
929 free(backer_name);
930 if (error) {
931 VLOG_ERR("failed to open datapath of type %s: %s", type,
932 ovs_strerror(error));
933 free(backer);
934 return error;
935 }
936 backer->udpif = udpif_create(backer, backer->dpif);
937
938 backer->type = xstrdup(type);
939 backer->refcount = 1;
940 hmap_init(&backer->odp_to_ofport_map);
941 ovs_rwlock_init(&backer->odp_to_ofport_lock);
942 backer->need_revalidate = 0;
943 simap_init(&backer->tnl_backers);
944 backer->recv_set_enable = !ofproto_get_flow_restore_wait();
945 *backerp = backer;
946
947 if (backer->recv_set_enable) {
948 dpif_flow_flush(backer->dpif);
949 }
950
951 /* Loop through the ports already on the datapath and remove any
952 * that we don't need anymore. */
953 list_init(&garbage_list);
954 dpif_port_dump_start(&port_dump, backer->dpif);
955 while (dpif_port_dump_next(&port_dump, &port)) {
956 node = shash_find(&init_ofp_ports, port.name);
957 if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
958 garbage = xmalloc(sizeof *garbage);
959 garbage->odp_port = port.port_no;
960 list_push_front(&garbage_list, &garbage->list_node);
961 }
962 }
963 dpif_port_dump_done(&port_dump);
964
965 LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
966 dpif_port_del(backer->dpif, garbage->odp_port);
967 free(garbage);
968 }
969
970 shash_add(&all_dpif_backers, type, backer);
971
972 check_support(backer);
973 atomic_count_init(&backer->tnl_count, 0);
974
975 error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
976 if (error) {
977 VLOG_ERR("failed to listen on datapath of type %s: %s",
978 type, ovs_strerror(error));
979 close_dpif_backer(backer);
980 return error;
981 }
982
983 if (backer->recv_set_enable) {
984 udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
985 }
986
987 /* This check fails if performed before udpif threads have been set,
988 * as the kernel module checks that the 'pid' in userspace action
989 * is non-zero. */
990 backer->support.variable_length_userdata
991 = check_variable_length_userdata(backer);
992 backer->dp_version_string = dpif_get_dp_version(backer->dpif);
993
994 return error;
995 }
996
997 bool
998 ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
999 {
1000 return ofproto_use_tnl_push_pop && ofproto->backer->support.tnl_push_pop &&
1001 atomic_count_get(&ofproto->backer->tnl_count);
1002 }
1003
1004 /* Tests whether 'backer''s datapath supports recirculation. Only newer
1005 * datapaths support OVS_KEY_ATTR_RECIRC_ID in keys. We need to disable some
1006 * features on older datapaths that don't support this feature.
1007 *
1008 * Returns false if 'backer' definitely does not support recirculation, true if
1009 * it seems to support recirculation or if at least the error we get is
1010 * ambiguous. */
1011 static bool
1012 check_recirc(struct dpif_backer *backer)
1013 {
1014 struct flow flow;
1015 struct odputil_keybuf keybuf;
1016 struct ofpbuf key;
1017 bool enable_recirc;
1018 struct odp_flow_key_parms odp_parms = {
1019 .flow = &flow,
1020 .support = {
1021 .recirc = true,
1022 },
1023 };
1024
1025 memset(&flow, 0, sizeof flow);
1026 flow.recirc_id = 1;
1027 flow.dp_hash = 1;
1028
1029 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
1030 odp_flow_key_from_flow(&odp_parms, &key);
1031 enable_recirc = dpif_probe_feature(backer->dpif, "recirculation", &key,
1032 NULL);
1033
1034 if (enable_recirc) {
1035 VLOG_INFO("%s: Datapath supports recirculation",
1036 dpif_name(backer->dpif));
1037 } else {
1038 VLOG_INFO("%s: Datapath does not support recirculation",
1039 dpif_name(backer->dpif));
1040 }
1041
1042 return enable_recirc;
1043 }
1044
1045 /* Tests whether 'dpif' supports unique flow ids. We can skip serializing
1046 * some flow attributes for datapaths that support this feature.
1047 *
1048 * Returns true if 'dpif' supports UFID for flow operations.
1049 * Returns false if 'dpif' does not support UFID. */
1050 static bool
1051 check_ufid(struct dpif_backer *backer)
1052 {
1053 struct flow flow;
1054 struct odputil_keybuf keybuf;
1055 struct ofpbuf key;
1056 ovs_u128 ufid;
1057 bool enable_ufid;
1058 struct odp_flow_key_parms odp_parms = {
1059 .flow = &flow,
1060 };
1061
1062 memset(&flow, 0, sizeof flow);
1063 flow.dl_type = htons(0x1234);
1064
1065 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
1066 odp_flow_key_from_flow(&odp_parms, &key);
1067 dpif_flow_hash(backer->dpif, key.data, key.size, &ufid);
1068
1069 enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);
1070
1071 if (enable_ufid) {
1072 VLOG_INFO("%s: Datapath supports unique flow ids",
1073 dpif_name(backer->dpif));
1074 } else {
1075 VLOG_INFO("%s: Datapath does not support unique flow ids",
1076 dpif_name(backer->dpif));
1077 }
1078 return enable_ufid;
1079 }
1080
1081 /* Tests whether 'backer''s datapath supports variable-length
1082 * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions. We need
1083 * to disable some features on older datapaths that don't support this
1084 * feature.
1085 *
1086 * Returns false if 'backer' definitely does not support variable-length
1087 * userdata, true if it seems to support them or if at least the error we get
1088 * is ambiguous. */
1089 static bool
1090 check_variable_length_userdata(struct dpif_backer *backer)
1091 {
1092 struct eth_header *eth;
1093 struct ofpbuf actions;
1094 struct dpif_execute execute;
1095 struct dp_packet packet;
1096 size_t start;
1097 int error;
1098
1099 /* Compose a userspace action that will cause an ERANGE error on older
1100 * datapaths that don't support variable-length userdata.
1101 *
1102 * We really test for using userdata longer than 8 bytes, but older
1103 * datapaths accepted these, silently truncating the userdata to 8 bytes.
1104 * The same older datapaths rejected userdata shorter than 8 bytes, so we
1105 * test for that instead as a proxy for longer userdata support. */
1106 ofpbuf_init(&actions, 64);
1107 start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_USERSPACE);
1108 nl_msg_put_u32(&actions, OVS_USERSPACE_ATTR_PID,
1109 dpif_port_get_pid(backer->dpif, ODPP_NONE, 0));
1110 nl_msg_put_unspec_zero(&actions, OVS_USERSPACE_ATTR_USERDATA, 4);
1111 nl_msg_end_nested(&actions, start);
1112
1113 /* Compose a dummy ethernet packet. */
1114 dp_packet_init(&packet, ETH_HEADER_LEN);
1115 eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
1116 eth->eth_type = htons(0x1234);
1117
1118 /* Execute the actions. On older datapaths this fails with ERANGE, on
1119 * newer datapaths it succeeds. */
1120 execute.actions = actions.data;
1121 execute.actions_len = actions.size;
1122 execute.packet = &packet;
1123 execute.needs_help = false;
1124 execute.probe = true;
1125 execute.mtu = 0;
1126
1127 error = dpif_execute(backer->dpif, &execute);
1128
1129 dp_packet_uninit(&packet);
1130 ofpbuf_uninit(&actions);
1131
1132 switch (error) {
1133 case 0:
1134 return true;
1135
1136 case ERANGE:
1137 /* Variable-length userdata is not supported. */
1138 VLOG_WARN("%s: datapath does not support variable-length userdata "
1139 "feature (needs Linux 3.10+ or kernel module from OVS "
1140 "1..11+). The NXAST_SAMPLE action will be ignored.",
1141 dpif_name(backer->dpif));
1142 return false;
1143
1144 default:
1145 /* Something odd happened. We're not sure whether variable-length
1146 * userdata is supported. Default to "yes". */
1147 VLOG_WARN("%s: variable-length userdata feature probe failed (%s)",
1148 dpif_name(backer->dpif), ovs_strerror(error));
1149 return true;
1150 }
1151 }
1152
1153 /* Tests the MPLS label stack depth supported by 'backer''s datapath.
1154 *
1155 * Returns the number of elements in a struct flow's mpls_lse field
1156 * if the datapath supports at least that many entries in an
1157 * MPLS label stack.
1158 * Otherwise returns the number of MPLS push actions supported by
1159 * the datapath. */
1160 static size_t
1161 check_max_mpls_depth(struct dpif_backer *backer)
1162 {
1163 struct flow flow;
1164 int n;
1165
1166 for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
1167 struct odputil_keybuf keybuf;
1168 struct ofpbuf key;
1169 struct odp_flow_key_parms odp_parms = {
1170 .flow = &flow,
1171 };
1172
1173 memset(&flow, 0, sizeof flow);
1174 flow.dl_type = htons(ETH_TYPE_MPLS);
1175 flow_set_mpls_bos(&flow, n, 1);
1176
1177 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
1178 odp_flow_key_from_flow(&odp_parms, &key);
1179 if (!dpif_probe_feature(backer->dpif, "MPLS", &key, NULL)) {
1180 break;
1181 }
1182 }
1183
1184 VLOG_INFO("%s: MPLS label stack length probed as %d",
1185 dpif_name(backer->dpif), n);
1186 return n;
1187 }
1188
1189 /* Tests whether 'backer''s datapath supports masked data in
1190 * OVS_ACTION_ATTR_SET actions. We need to disable some features on older
1191 * datapaths that don't support this feature. */
1192 static bool
1193 check_masked_set_action(struct dpif_backer *backer)
1194 {
1195 struct eth_header *eth;
1196 struct ofpbuf actions;
1197 struct dpif_execute execute;
1198 struct dp_packet packet;
1199 int error;
1200 struct ovs_key_ethernet key, mask;
1201
1202 /* Compose a set action that will cause an EINVAL error on older
1203 * datapaths that don't support masked set actions.
1204 * Avoid using a full mask, as it could be translated to a non-masked
1205 * set action instead. */
1206 ofpbuf_init(&actions, 64);
1207 memset(&key, 0x53, sizeof key);
1208 memset(&mask, 0x7f, sizeof mask);
1209 commit_masked_set_action(&actions, OVS_KEY_ATTR_ETHERNET, &key, &mask,
1210 sizeof key);
1211
1212 /* Compose a dummy ethernet packet. */
1213 dp_packet_init(&packet, ETH_HEADER_LEN);
1214 eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
1215 eth->eth_type = htons(0x1234);
1216
1217 /* Execute the actions. On older datapaths this fails with EINVAL, on
1218 * newer datapaths it succeeds. */
1219 execute.actions = actions.data;
1220 execute.actions_len = actions.size;
1221 execute.packet = &packet;
1222 execute.needs_help = false;
1223 execute.probe = true;
1224 execute.mtu = 0;
1225
1226 error = dpif_execute(backer->dpif, &execute);
1227
1228 dp_packet_uninit(&packet);
1229 ofpbuf_uninit(&actions);
1230
1231 if (error) {
1232 /* Masked set action is not supported. */
1233 VLOG_INFO("%s: datapath does not support masked set action feature.",
1234 dpif_name(backer->dpif));
1235 }
1236 return !error;
1237 }
1238
1239 #define CHECK_FEATURE__(NAME, FIELD) \
1240 static bool \
1241 check_##NAME(struct dpif_backer *backer) \
1242 { \
1243 struct flow flow; \
1244 struct odputil_keybuf keybuf; \
1245 struct ofpbuf key; \
1246 bool enable; \
1247 struct odp_flow_key_parms odp_parms = { \
1248 .flow = &flow, \
1249 .support = { \
1250 .NAME = true, \
1251 }, \
1252 }; \
1253 \
1254 memset(&flow, 0, sizeof flow); \
1255 flow.FIELD = 1; \
1256 \
1257 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); \
1258 odp_flow_key_from_flow(&odp_parms, &key); \
1259 enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL); \
1260 \
1261 if (enable) { \
1262 VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif)); \
1263 } else { \
1264 VLOG_INFO("%s: Datapath does not support "#NAME, \
1265 dpif_name(backer->dpif)); \
1266 } \
1267 \
1268 return enable; \
1269 }
1270 #define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD)
1271
1272 CHECK_FEATURE(ct_state)
1273 CHECK_FEATURE(ct_zone)
1274 CHECK_FEATURE(ct_mark)
1275 CHECK_FEATURE__(ct_label, ct_label.u64.lo)
1276
1277 #undef CHECK_FEATURE
1278 #undef CHECK_FEATURE__
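/* For reference, CHECK_FEATURE(ct_state) above expands to a probe function
 * equivalent to the hand-written checks earlier in this file (sketch of
 * the expansion, abbreviated to omit the VLOG calls):
 *
 *     static bool
 *     check_ct_state(struct dpif_backer *backer)
 *     {
 *         struct flow flow;
 *         struct odputil_keybuf keybuf;
 *         struct ofpbuf key;
 *         struct odp_flow_key_parms odp_parms = {
 *             .flow = &flow,
 *             .support = { .ct_state = true },
 *         };
 *
 *         memset(&flow, 0, sizeof flow);
 *         flow.ct_state = 1;
 *
 *         ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
 *         odp_flow_key_from_flow(&odp_parms, &key);
 *         return dpif_probe_feature(backer->dpif, "ct_state", &key, NULL);
 *     }
 */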
1279
1280 static void
1281 check_support(struct dpif_backer *backer)
1282 {
1283 /* This feature needs to be tested after udpif threads are set. */
1284 backer->support.variable_length_userdata = false;
1285
1286 backer->support.odp.recirc = check_recirc(backer);
1287 backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
1288 backer->support.masked_set_action = check_masked_set_action(backer);
1289 backer->support.ufid = check_ufid(backer);
1290 backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
1291
1292 backer->support.odp.ct_state = check_ct_state(backer);
1293 backer->support.odp.ct_zone = check_ct_zone(backer);
1294 backer->support.odp.ct_mark = check_ct_mark(backer);
1295 backer->support.odp.ct_label = check_ct_label(backer);
1296 }
1297
1298 static int
1299 construct(struct ofproto *ofproto_)
1300 {
1301 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1302 struct shash_node *node, *next;
1303 int error;
1304
1305 /* Tunnel module can get used right after the udpif threads are running. */
1306 ofproto_tunnel_init();
1307
1308 error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
1309 if (error) {
1310 return error;
1311 }
1312
1313 atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
1314 ofproto->netflow = NULL;
1315 ofproto->sflow = NULL;
1316 ofproto->ipfix = NULL;
1317 ofproto->stp = NULL;
1318 ofproto->rstp = NULL;
1319 ofproto->dump_seq = 0;
1320 hmap_init(&ofproto->bundles);
1321 ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
1322 ofproto->ms = NULL;
1323 ofproto->mbridge = mbridge_create();
1324 ofproto->has_bonded_bundles = false;
1325 ofproto->lacp_enabled = false;
1326 ovs_mutex_init_adaptive(&ofproto->stats_mutex);
1327 ovs_mutex_init(&ofproto->vsp_mutex);
1328
1329 guarded_list_init(&ofproto->pins);
1330
1331 hmap_init(&ofproto->vlandev_map);
1332 hmap_init(&ofproto->realdev_vid_map);
1333
1334 sset_init(&ofproto->ports);
1335 sset_init(&ofproto->ghost_ports);
1336 sset_init(&ofproto->port_poll_set);
1337 ofproto->port_poll_errno = 0;
1338 ofproto->change_seq = 0;
1339 ofproto->pins_seq = seq_create();
1340 ofproto->pins_seqno = seq_read(ofproto->pins_seq);
1341
1342
1343 SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
1344 struct iface_hint *iface_hint = node->data;
1345
1346 if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
1347 /* Check if the datapath already has this port. */
1348 if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
1349 sset_add(&ofproto->ports, node->name);
1350 }
1351
1352 free(iface_hint->br_name);
1353 free(iface_hint->br_type);
1354 free(iface_hint);
1355 shash_delete(&init_ofp_ports, node);
1356 }
1357 }
1358
1359 hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
1360 hash_string(ofproto->up.name, 0));
1361 memset(&ofproto->stats, 0, sizeof ofproto->stats);
1362
1363 ofproto_init_tables(ofproto_, N_TABLES);
1364 error = add_internal_flows(ofproto);
1365
1366 ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
1367
1368 return error;
1369 }
1370
1371 static int
1372 add_internal_miss_flow(struct ofproto_dpif *ofproto, int id,
1373 const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
1374 {
1375 struct match match;
1376 int error;
1377 struct rule *rule;
1378
1379 match_init_catchall(&match);
1380 match_set_reg(&match, 0, id);
1381
1382 error = ofproto_dpif_add_internal_flow(ofproto, &match, 0, 0, ofpacts,
1383 &rule);
1384 *rulep = error ? NULL : rule_dpif_cast(rule);
1385
1386 return error;
1387 }
1388
1389 static int
1390 add_internal_flows(struct ofproto_dpif *ofproto)
1391 {
1392 struct ofpact_controller *controller;
1393 uint64_t ofpacts_stub[128 / 8];
1394 struct ofpbuf ofpacts;
1395 struct rule *unused_rulep OVS_UNUSED;
1396 struct match match;
1397 int error;
1398 int id;
1399
1400 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
1401 id = 1;
1402
1403 controller = ofpact_put_CONTROLLER(&ofpacts);
1404 controller->max_len = UINT16_MAX;
1405 controller->controller_id = 0;
1406 controller->reason = OFPR_NO_MATCH;
1407 ofpact_pad(&ofpacts);
1408
1409 error = add_internal_miss_flow(ofproto, id++, &ofpacts,
1410 &ofproto->miss_rule);
1411 if (error) {
1412 return error;
1413 }
1414
1415 ofpbuf_clear(&ofpacts);
1416 error = add_internal_miss_flow(ofproto, id++, &ofpacts,
1417 &ofproto->no_packet_in_rule);
1418 if (error) {
1419 return error;
1420 }
1421
1422 error = add_internal_miss_flow(ofproto, id++, &ofpacts,
1423 &ofproto->drop_frags_rule);
1424 if (error) {
1425 return error;
1426 }
1427
1428 /* Drop any runaway non-recirc rule lookups. Recirc_id has to be
1429 * zero when reaching this rule.
1430 *
1431 * (priority=2), recirc_id=0, actions=drop
1432 */
1433 ofpbuf_clear(&ofpacts);
1434 match_init_catchall(&match);
1435 match_set_recirc_id(&match, 0);
1436 error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, 0, &ofpacts,
1437 &unused_rulep);
1438 return error;
1439 }
1440
1441 static void
1442 destruct(struct ofproto *ofproto_)
1443 {
1444 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1445 struct ofproto_packet_in *pin;
1446 struct rule_dpif *rule;
1447 struct oftable *table;
1448 struct ovs_list pins;
1449
1450 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1451 xlate_txn_start();
1452 xlate_remove_ofproto(ofproto);
1453 xlate_txn_commit();
1454
1455 /* Ensure that the upcall processing threads have no remaining references
1456 * to the ofproto or anything in it. */
1457 udpif_synchronize(ofproto->backer->udpif);
1458
1459 hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
1460
1461 OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
1462 CLS_FOR_EACH (rule, up.cr, &table->cls) {
1463 ofproto_rule_delete(&ofproto->up, &rule->up);
1464 }
1465 }
1466 ofproto_group_delete_all(&ofproto->up);
1467
1468 guarded_list_pop_all(&ofproto->pins, &pins);
1469 LIST_FOR_EACH_POP (pin, list_node, &pins) {
1470 free(CONST_CAST(void *, pin->up.packet));
1471 free(pin);
1472 }
1473 guarded_list_destroy(&ofproto->pins);
1474
1475 recirc_free_ofproto(ofproto, ofproto->up.name);
1476
1477 mbridge_unref(ofproto->mbridge);
1478
1479 netflow_unref(ofproto->netflow);
1480 dpif_sflow_unref(ofproto->sflow);
1481 dpif_ipfix_unref(ofproto->ipfix);
1482 hmap_destroy(&ofproto->bundles);
1483 mac_learning_unref(ofproto->ml);
1484 mcast_snooping_unref(ofproto->ms);
1485
1486 hmap_destroy(&ofproto->vlandev_map);
1487 hmap_destroy(&ofproto->realdev_vid_map);
1488
1489 sset_destroy(&ofproto->ports);
1490 sset_destroy(&ofproto->ghost_ports);
1491 sset_destroy(&ofproto->port_poll_set);
1492
1493 ovs_mutex_destroy(&ofproto->stats_mutex);
1494 ovs_mutex_destroy(&ofproto->vsp_mutex);
1495
1496 seq_destroy(ofproto->pins_seq);
1497
1498 close_dpif_backer(ofproto->backer);
1499 }
1500
1501 static int
1502 run(struct ofproto *ofproto_)
1503 {
1504 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1505 uint64_t new_seq, new_dump_seq;
1506
1507 if (mbridge_need_revalidate(ofproto->mbridge)) {
1508 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1509 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
1510 mac_learning_flush(ofproto->ml);
1511 ovs_rwlock_unlock(&ofproto->ml->rwlock);
1512 mcast_snooping_mdb_flush(ofproto->ms);
1513 }
1514
1515 /* Always updates the ofproto->pins_seqno to avoid frequent wakeup during
1516 * flow restore. Even though nothing is processed during flow restore,
1517 * all queued 'pins' will be handled immediately when flow restore
1518 * completes. */
1519 ofproto->pins_seqno = seq_read(ofproto->pins_seq);
1520
1521 /* Do not perform any periodic activity required by 'ofproto' while
1522 * waiting for flow restore to complete. */
1523 if (!ofproto_get_flow_restore_wait()) {
1524 struct ofproto_packet_in *pin;
1525 struct ovs_list pins;
1526
1527 guarded_list_pop_all(&ofproto->pins, &pins);
1528 LIST_FOR_EACH_POP (pin, list_node, &pins) {
1529 connmgr_send_packet_in(ofproto->up.connmgr, pin);
1530 free(CONST_CAST(void *, pin->up.packet));
1531 free(pin);
1532 }
1533 }
1534
1535 if (ofproto->netflow) {
1536 netflow_run(ofproto->netflow);
1537 }
1538 if (ofproto->sflow) {
1539 dpif_sflow_run(ofproto->sflow);
1540 }
1541 if (ofproto->ipfix) {
1542 dpif_ipfix_run(ofproto->ipfix);
1543 }
1544
1545 new_seq = seq_read(connectivity_seq_get());
1546 if (ofproto->change_seq != new_seq) {
1547 struct ofport_dpif *ofport;
1548
1549 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1550 port_run(ofport);
1551 }
1552
1553 ofproto->change_seq = new_seq;
1554 }
1555 if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
1556 struct ofbundle *bundle;
1557
1558 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1559 bundle_run(bundle);
1560 }
1561 }
1562
1563 stp_run(ofproto);
1564 rstp_run(ofproto);
1565 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
1566 if (mac_learning_run(ofproto->ml)) {
1567 ofproto->backer->need_revalidate = REV_MAC_LEARNING;
1568 }
1569 ovs_rwlock_unlock(&ofproto->ml->rwlock);
1570
1571 if (mcast_snooping_run(ofproto->ms)) {
1572 ofproto->backer->need_revalidate = REV_MCAST_SNOOPING;
1573 }
1574
1575 new_dump_seq = seq_read(udpif_dump_seq(ofproto->backer->udpif));
1576 if (ofproto->dump_seq != new_dump_seq) {
1577 struct rule *rule, *next_rule;
1578
1579 /* We know stats are relatively fresh, so now is a good time to do some
1580 * periodic work. */
1581 ofproto->dump_seq = new_dump_seq;
1582
1583 /* Expire OpenFlow flows whose idle_timeout or hard_timeout
1584 * has passed. */
1585 ovs_mutex_lock(&ofproto_mutex);
1586 LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
1587 &ofproto->up.expirable) {
1588 rule_expire(rule_dpif_cast(rule));
1589 }
1590 ovs_mutex_unlock(&ofproto_mutex);
1591
1592 /* All outstanding data in existing flows has been accounted, so it's a
1593 * good time to do bond rebalancing. */
1594 if (ofproto->has_bonded_bundles) {
1595 struct ofbundle *bundle;
1596
1597 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1598 if (bundle->bond) {
1599 bond_rebalance(bundle->bond);
1600 }
1601 }
1602 }
1603 }
1604 return 0;
1605 }
1606
1607 static void
1608 wait(struct ofproto *ofproto_)
1609 {
1610 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1611
1612 if (ofproto_get_flow_restore_wait()) {
1613 return;
1614 }
1615
1616 if (ofproto->sflow) {
1617 dpif_sflow_wait(ofproto->sflow);
1618 }
1619 if (ofproto->ipfix) {
1620 dpif_ipfix_wait(ofproto->ipfix);
1621 }
1622 if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
1623 struct ofbundle *bundle;
1624
1625 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1626 bundle_wait(bundle);
1627 }
1628 }
1629 if (ofproto->netflow) {
1630 netflow_wait(ofproto->netflow);
1631 }
1632 ovs_rwlock_rdlock(&ofproto->ml->rwlock);
1633 mac_learning_wait(ofproto->ml);
1634 ovs_rwlock_unlock(&ofproto->ml->rwlock);
1635 mcast_snooping_wait(ofproto->ms);
1636 stp_wait(ofproto);
1637 if (ofproto->backer->need_revalidate) {
1638 poll_immediate_wake();
1639 }
1640
1641 seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
1642 seq_wait(ofproto->pins_seq, ofproto->pins_seqno);
1643 }
1644
1645 static void
1646 type_get_memory_usage(const char *type, struct simap *usage)
1647 {
1648 struct dpif_backer *backer;
1649
1650 backer = shash_find_data(&all_dpif_backers, type);
1651 if (backer) {
1652 udpif_get_memory_usage(backer->udpif, usage);
1653 }
1654 }
1655
1656 static void
1657 flush(struct ofproto *ofproto_)
1658 {
1659 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1660 struct dpif_backer *backer = ofproto->backer;
1661
1662 if (backer) {
1663 udpif_flush(backer->udpif);
1664 }
1665 }
1666
1667 static void
1668 query_tables(struct ofproto *ofproto,
1669 struct ofputil_table_features *features,
1670 struct ofputil_table_stats *stats)
1671 {
1672 strcpy(features->name, "classifier");
1673
1674 if (stats) {
1675 int i;
1676
1677 for (i = 0; i < ofproto->n_tables; i++) {
1678 unsigned long missed, matched;
1679
1680 atomic_read_relaxed(&ofproto->tables[i].n_matched, &matched);
1681 atomic_read_relaxed(&ofproto->tables[i].n_missed, &missed);
1682
1683 stats[i].matched_count = matched;
1684 stats[i].lookup_count = matched + missed;
1685 }
1686 }
1687 }
1688
1689 static void
1690 set_tables_version(struct ofproto *ofproto_, cls_version_t version)
1691 {
1692 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1693
1694 atomic_store_relaxed(&ofproto->tables_version, version);
1695 }
1696
1697
1698 static struct ofport *
1699 port_alloc(void)
1700 {
1701 struct ofport_dpif *port = xzalloc(sizeof *port);
1702 return &port->up;
1703 }
1704
1705 static void
1706 port_dealloc(struct ofport *port_)
1707 {
1708 struct ofport_dpif *port = ofport_dpif_cast(port_);
1709 free(port);
1710 }
1711
1712 static int
1713 port_construct(struct ofport *port_)
1714 {
1715 struct ofport_dpif *port = ofport_dpif_cast(port_);
1716 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1717 const struct netdev *netdev = port->up.netdev;
1718 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1719 const char *dp_port_name;
1720 struct dpif_port dpif_port;
1721 int error;
1722
1723 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1724 port->bundle = NULL;
1725 port->cfm = NULL;
1726 port->bfd = NULL;
1727 port->lldp = NULL;
1728 port->may_enable = false;
1729 port->stp_port = NULL;
1730 port->stp_state = STP_DISABLED;
1731 port->rstp_port = NULL;
1732 port->rstp_state = RSTP_DISABLED;
1733 port->is_tunnel = false;
1734 port->peer = NULL;
1735 port->qdscp = NULL;
1736 port->n_qdscp = 0;
1737 port->realdev_ofp_port = 0;
1738 port->vlandev_vid = 0;
1739 port->carrier_seq = netdev_get_carrier_resets(netdev);
1740 port->is_layer3 = netdev_vport_is_layer3(netdev);
1741
1742 if (netdev_vport_is_patch(netdev)) {
1743 /* By bailing out here, we don't submit the port to the sFlow module
1744 * to be considered for counter polling export. This is correct
1745 * because the patch port represents an interface that sFlow considers
1746 * to be "internal" to the switch as a whole, and therefore not a
1747 * candidate for counter polling. */
1748 port->odp_port = ODPP_NONE;
1749 ofport_update_peer(port);
1750 return 0;
1751 }
1752
1753 dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
1754 error = dpif_port_query_by_name(ofproto->backer->dpif, dp_port_name,
1755 &dpif_port);
1756 if (error) {
1757 return error;
1758 }
1759
1760 port->odp_port = dpif_port.port_no;
1761
1762 if (netdev_get_tunnel_config(netdev)) {
1763 atomic_count_inc(&ofproto->backer->tnl_count);
1764 error = tnl_port_add(port, port->up.netdev, port->odp_port,
1765 ovs_native_tunneling_is_on(ofproto), dp_port_name);
1766 if (error) {
1767 atomic_count_dec(&ofproto->backer->tnl_count);
1768 dpif_port_destroy(&dpif_port);
1769 return error;
1770 }
1771
1772 port->is_tunnel = true;
1773 if (ofproto->ipfix) {
1774 dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
1775 }
1776 } else {
1777 /* Sanity-check that a mapping doesn't already exist. This
1778 * shouldn't happen for non-tunnel ports. */
1779 if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
1780 VLOG_ERR("port %s already has an OpenFlow port number",
1781 dpif_port.name);
1782 dpif_port_destroy(&dpif_port);
1783 return EBUSY;
1784 }
1785
1786 ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
1787 hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
1788 hash_odp_port(port->odp_port));
1789 ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
1790 }
1791 dpif_port_destroy(&dpif_port);
1792
1793 if (ofproto->sflow) {
1794 dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
1795 }
1796
1797 return 0;
1798 }
1799
1800 static void
1801 port_destruct(struct ofport *port_)
1802 {
1803 struct ofport_dpif *port = ofport_dpif_cast(port_);
1804 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1805 const char *devname = netdev_get_name(port->up.netdev);
1806 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1807 const char *dp_port_name;
1808
1809 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1810 xlate_txn_start();
1811 xlate_ofport_remove(port);
1812 xlate_txn_commit();
1813
1814 dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
1815 sizeof namebuf);
1816 if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
1817 /* The underlying device is still there, so delete it. This
1818 * happens when the ofproto is being destroyed, since the caller
1819 * assumes that removal of attached ports will happen as part of
1820 * destruction. */
1821 if (!port->is_tunnel) {
1822 dpif_port_del(ofproto->backer->dpif, port->odp_port);
1823 }
1824 }
1825
1826 if (port->peer) {
1827 port->peer->peer = NULL;
1828 port->peer = NULL;
1829 }
1830
1831 if (port->odp_port != ODPP_NONE && !port->is_tunnel) {
1832 ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
1833 hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
1834 ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
1835 }
1836
1837 if (port->is_tunnel) {
1838 atomic_count_dec(&ofproto->backer->tnl_count);
1839 }
1840
1841 if (port->is_tunnel && ofproto->ipfix) {
1842 dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
1843 }
1844
1845 tnl_port_del(port);
1846 sset_find_and_delete(&ofproto->ports, devname);
1847 sset_find_and_delete(&ofproto->ghost_ports, devname);
1848 bundle_remove(port_);
1849 set_cfm(port_, NULL);
1850 set_bfd(port_, NULL);
1851 set_lldp(port_, NULL);
1852 if (port->stp_port) {
1853 stp_port_disable(port->stp_port);
1854 }
1855 set_rstp_port(port_, NULL);
1856 if (ofproto->sflow) {
1857 dpif_sflow_del_port(ofproto->sflow, port->odp_port);
1858 }
1859
1860 free(port->qdscp);
1861 }
1862
1863 static void
1864 port_modified(struct ofport *port_)
1865 {
1866 struct ofport_dpif *port = ofport_dpif_cast(port_);
1867 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1868 const char *dp_port_name;
1869 struct netdev *netdev = port->up.netdev;
1870
1871 if (port->bundle && port->bundle->bond) {
1872 bond_slave_set_netdev(port->bundle->bond, port, netdev);
1873 }
1874
1875 if (port->cfm) {
1876 cfm_set_netdev(port->cfm, netdev);
1877 }
1878
1879 if (port->bfd) {
1880 bfd_set_netdev(port->bfd, netdev);
1881 }
1882
1883 ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
1884 port->lldp, &port->up.pp.hw_addr);
1885
1886 dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
1887
1888 if (port->is_tunnel) {
1889 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1890
1891 if (tnl_port_reconfigure(port, netdev, port->odp_port,
1892 ovs_native_tunneling_is_on(ofproto),
1893 dp_port_name)) {
1894 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1895 }
1896 }
1897
1898 ofport_update_peer(port);
1899 }
1900
1901 static void
1902 port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
1903 {
1904 struct ofport_dpif *port = ofport_dpif_cast(port_);
1905 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1906 enum ofputil_port_config changed = old_config ^ port->up.pp.config;
1907
1908 if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
1909 OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
1910 OFPUTIL_PC_NO_PACKET_IN)) {
1911 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1912
1913 if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
1914 bundle_update(port->bundle);
1915 }
1916 }
1917 }
1918
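/* Creates, reconfigures, or tears down the bridge's sFlow exporter so that it
 * matches 'sflow_options' (a null 'sflow_options' disables sFlow).  Note that
 * a change in the sampling probability forces flow revalidation below,
 * presumably because the probability is baked into the datapath flows'
 * sample actions. */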
1919 static int
1920 set_sflow(struct ofproto *ofproto_,
1921 const struct ofproto_sflow_options *sflow_options)
1922 {
1923 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1924 struct dpif_sflow *ds = ofproto->sflow;
1925
1926 if (sflow_options) {
1927 uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
1928 if (!ds) {
1929 struct ofport_dpif *ofport;
1930
1931 ds = ofproto->sflow = dpif_sflow_create();
1932 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1933 dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
1934 }
1935 }
1936 dpif_sflow_set_options(ds, sflow_options);
1937 if (dpif_sflow_get_probability(ds) != old_probability) {
1938 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1939 }
1940 } else {
1941 if (ds) {
1942 dpif_sflow_unref(ds);
1943 ofproto->backer->need_revalidate = REV_RECONFIGURE;
1944 ofproto->sflow = NULL;
1945 }
1946 }
1947 return 0;
1948 }
1949
1950 static int
1951 set_ipfix(
1952 struct ofproto *ofproto_,
1953 const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
1954 const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
1955 size_t n_flow_exporters_options)
1956 {
1957 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1958 struct dpif_ipfix *di = ofproto->ipfix;
1959 bool has_options = bridge_exporter_options || flow_exporters_options;
1960 bool new_di = false;
1961
1962 if (has_options && !di) {
1963 di = ofproto->ipfix = dpif_ipfix_create();
1964 new_di = true;
1965 }
1966
1967 if (di) {
1968 /* Call set_options in any case so that the flow caches of any
1969 * exporters that are about to be destroyed are cleanly flushed. */
1970 dpif_ipfix_set_options(
1971 di, bridge_exporter_options, flow_exporters_options,
1972 n_flow_exporters_options);
1973
1974 /* Add tunnel ports only when a new ipfix instance has been created. */
1975 if (new_di) {
1976 struct ofport_dpif *ofport;
1977 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1978 if (ofport->is_tunnel) {
1979 dpif_ipfix_add_tunnel_port(di, &ofport->up, ofport->odp_port);
1980 }
1981 }
1982 }
1983
1984 if (!has_options) {
1985 dpif_ipfix_unref(di);
1986 ofproto->ipfix = NULL;
1987 }
1988 }
1989
1990 return 0;
1991 }
1992
1993 static int
1994 set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
1995 {
1996 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1997 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1998 struct cfm *old = ofport->cfm;
1999 int error = 0;
2000
2001 if (s) {
2002 if (!ofport->cfm) {
2003 ofport->cfm = cfm_create(ofport->up.netdev);
2004 }
2005
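/* cfm_configure() returns true on success; on failure we fall through
 * and tear down the (possibly just-created) cfm below. */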
2006 if (cfm_configure(ofport->cfm, s)) {
2007 error = 0;
2008 goto out;
2009 }
2010
2011 error = EINVAL;
2012 }
2013 cfm_unref(ofport->cfm);
2014 ofport->cfm = NULL;
2015 out:
2016 if (ofport->cfm != old) {
2017 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2018 }
2019 ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
2020 ofport->lldp, &ofport->up.pp.hw_addr);
2021 return error;
2022 }
2023
2024 static bool
2025 cfm_status_changed(struct ofport *ofport_)
2026 {
2027 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2028
2029 return ofport->cfm ? cfm_check_status_change(ofport->cfm) : true;
2030 }
2031
2032 static int
2033 get_cfm_status(const struct ofport *ofport_,
2034 struct cfm_status *status)
2035 {
2036 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2037 int ret = 0;
2038
2039 if (ofport->cfm) {
2040 cfm_get_status(ofport->cfm, status);
2041 } else {
2042 ret = ENOENT;
2043 }
2044
2045 return ret;
2046 }
2047
2048 static int
2049 set_bfd(struct ofport *ofport_, const struct smap *cfg)
2050 {
2051 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
2052 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2053 struct bfd *old;
2054
2055 old = ofport->bfd;
2056 ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev),
2057 cfg, ofport->up.netdev);
2058 if (ofport->bfd != old) {
2059 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2060 }
2061 ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
2062 ofport->lldp, &ofport->up.pp.hw_addr);
2063 return 0;
2064 }
2065
2066 static bool
2067 bfd_status_changed(struct ofport *ofport_)
2068 {
2069 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2070
2071 return ofport->bfd ? bfd_check_status_change(ofport->bfd) : true;
2072 }
2073
2074 static int
2075 get_bfd_status(struct ofport *ofport_, struct smap *smap)
2076 {
2077 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2078 int ret = 0;
2079
2080 if (ofport->bfd) {
2081 bfd_get_status(ofport->bfd, smap);
2082 } else {
2083 ret = ENOENT;
2084 }
2085
2086 return ret;
2087 }
2088
2089 static int
2090 set_lldp(struct ofport *ofport_,
2091 const struct smap *cfg)
2092 {
2093 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2094 int error = 0;
2095
2096 if (cfg) {
2097 if (!ofport->lldp) {
2098 struct ofproto_dpif *ofproto;
2099
2100 ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2101 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2102 ofport->lldp = lldp_create(ofport->up.netdev, ofport_->mtu, cfg);
2103 }
2104
2105 if (!lldp_configure(ofport->lldp, cfg)) {
2106 error = EINVAL;
2107 }
2108 }
2109 if (error) {
2110 lldp_unref(ofport->lldp);
2111 ofport->lldp = NULL;
2112 }
2113
2114 ofproto_dpif_monitor_port_update(ofport,
2115 ofport->bfd,
2116 ofport->cfm,
2117 ofport->lldp,
2118 &ofport->up.pp.hw_addr);
2119 return error;
2120 }
2121
2122 static bool
2123 get_lldp_status(const struct ofport *ofport_,
2124 struct lldp_status *status OVS_UNUSED)
2125 {
2126 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2127
2128 return ofport->lldp != NULL;
2129 }
2130
2131 static int
2132 set_aa(struct ofproto *ofproto OVS_UNUSED,
2133 const struct aa_settings *s)
2134 {
2135 return aa_configure(s);
2136 }
2137
2138 static int
2139 aa_mapping_set(struct ofproto *ofproto_ OVS_UNUSED, void *aux,
2140 const struct aa_mapping_settings *s)
2141 {
2142 return aa_mapping_register(aux, s);
2143 }
2144
2145 static int
2146 aa_mapping_unset(struct ofproto *ofproto OVS_UNUSED, void *aux)
2147 {
2148 return aa_mapping_unregister(aux);
2149 }
2150
2151 static int
2152 aa_vlan_get_queued(struct ofproto *ofproto OVS_UNUSED, struct ovs_list *list)
2153 {
2154 return aa_get_vlan_queued(list);
2155 }
2156
2157 static unsigned int
2158 aa_vlan_get_queue_size(struct ofproto *ofproto OVS_UNUSED)
2159 {
2160 return aa_get_vlan_queue_size();
2161 }
2162
2163 \f
2164 /* Spanning Tree. */
2165
2166 /* Called while rstp_mutex is held. */
2167 static void
2168 rstp_send_bpdu_cb(struct dp_packet *pkt, void *ofport_, void *ofproto_)
2169 {
2170 struct ofproto_dpif *ofproto = ofproto_;
2171 struct ofport_dpif *ofport = ofport_;
2172 struct eth_header *eth = dp_packet_l2(pkt);
2173
2174 netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
2175 if (eth_addr_is_zero(eth->eth_src)) {
2176 VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
2177 "does not have a configured source MAC address.",
2178 ofproto->up.name, ofp_to_u16(ofport->up.ofp_port));
2179 } else {
2180 ofproto_dpif_send_packet(ofport, pkt);
2181 }
2182 dp_packet_delete(pkt);
2183 }
2184
2185 static void
2186 send_bpdu_cb(struct dp_packet *pkt, int port_num, void *ofproto_)
2187 {
2188 struct ofproto_dpif *ofproto = ofproto_;
2189 struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
2190 struct ofport_dpif *ofport;
2191
2192 ofport = stp_port_get_aux(sp);
2193 if (!ofport) {
2194 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
2195 ofproto->up.name, port_num);
2196 } else {
2197 struct eth_header *eth = dp_packet_l2(pkt);
2198
2199 netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
2200 if (eth_addr_is_zero(eth->eth_src)) {
2201 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
2202 "with unknown MAC", ofproto->up.name, port_num);
2203 } else {
2204 ofproto_dpif_send_packet(ofport, pkt);
2205 }
2206 }
2207 dp_packet_delete(pkt);
2208 }
2209
2210 /* Configures RSTP on 'ofproto_' using the settings defined in 's'. */
2211 static void
2212 set_rstp(struct ofproto *ofproto_, const struct ofproto_rstp_settings *s)
2213 {
2214 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2215
2216 /* Only revalidate flows if the configuration changed. */
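/* ('!s != !ofproto->rstp' is true exactly when one of the two is null and
 * the other is not, i.e. when RSTP is being enabled or disabled.) */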
2217 if (!s != !ofproto->rstp) {
2218 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2219 }
2220
2221 if (s) {
2222 if (!ofproto->rstp) {
2223 ofproto->rstp = rstp_create(ofproto_->name, s->address,
2224 rstp_send_bpdu_cb, ofproto);
2225 ofproto->rstp_last_tick = time_msec();
2226 }
2227 rstp_set_bridge_address(ofproto->rstp, s->address);
2228 rstp_set_bridge_priority(ofproto->rstp, s->priority);
2229 rstp_set_bridge_ageing_time(ofproto->rstp, s->ageing_time);
2230 rstp_set_bridge_force_protocol_version(ofproto->rstp,
2231 s->force_protocol_version);
2232 rstp_set_bridge_max_age(ofproto->rstp, s->bridge_max_age);
2233 rstp_set_bridge_forward_delay(ofproto->rstp, s->bridge_forward_delay);
2234 rstp_set_bridge_transmit_hold_count(ofproto->rstp,
2235 s->transmit_hold_count);
2236 } else {
2237 struct ofport *ofport;
2238 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
2239 set_rstp_port(ofport, NULL);
2240 }
2241 rstp_unref(ofproto->rstp);
2242 ofproto->rstp = NULL;
2243 }
2244 }
2245
2246 static void
2247 get_rstp_status(struct ofproto *ofproto_, struct ofproto_rstp_status *s)
2248 {
2249 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2250
2251 if (ofproto->rstp) {
2252 s->enabled = true;
2253 s->root_id = rstp_get_root_id(ofproto->rstp);
2254 s->bridge_id = rstp_get_bridge_id(ofproto->rstp);
2255 s->designated_id = rstp_get_designated_id(ofproto->rstp);
2256 s->root_path_cost = rstp_get_root_path_cost(ofproto->rstp);
2257 s->designated_port_id = rstp_get_designated_port_id(ofproto->rstp);
2258 s->bridge_port_id = rstp_get_bridge_port_id(ofproto->rstp);
2259 } else {
2260 s->enabled = false;
2261 }
2262 }
2263
2264 static void
2265 update_rstp_port_state(struct ofport_dpif *ofport)
2266 {
2267 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2268 enum rstp_state state;
2269
2270 /* Figure out new state. */
2271 state = ofport->rstp_port ? rstp_port_get_state(ofport->rstp_port)
2272 : RSTP_DISABLED;
2273
2274 /* Update state. */
2275 if (ofport->rstp_state != state) {
2276 enum ofputil_port_state of_state;
2277 bool fwd_change;
2278
2279 VLOG_DBG("port %s: RSTP state changed from %s to %s",
2280 netdev_get_name(ofport->up.netdev),
2281 rstp_state_name(ofport->rstp_state),
2282 rstp_state_name(state));
2283
2284 if (rstp_learn_in_state(ofport->rstp_state)
2285 != rstp_learn_in_state(state)) {
2286 /* XXX: Learning action flows should also be flushed. */
2287 if (ofport->bundle) {
2288 if (!rstp_shift_root_learned_address(ofproto->rstp)
2289 || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
2290 bundle_flush_macs(ofport->bundle, false);
2291 }
2292 }
2293 }
2294 fwd_change = rstp_forward_in_state(ofport->rstp_state)
2295 != rstp_forward_in_state(state);
2296
2297 ofproto->backer->need_revalidate = REV_RSTP;
2298 ofport->rstp_state = state;
2299
2300 if (fwd_change && ofport->bundle) {
2301 bundle_update(ofport->bundle);
2302 }
2303
2304 /* Update the RSTP state bits in the OpenFlow port description. */
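/* OpenFlow has no RSTP-specific port state bits, so the closest STP
 * equivalents are used; in particular, RSTP_DISCARDING is reported as
 * OFPUTIL_PS_STP_LISTEN. */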
2305 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
2306 of_state |= (state == RSTP_LEARNING ? OFPUTIL_PS_STP_LEARN
2307 : state == RSTP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
2308 : state == RSTP_DISCARDING ? OFPUTIL_PS_STP_LISTEN
2309 : 0);
2310 ofproto_port_set_state(&ofport->up, of_state);
2311 }
2312 }
2313
2314 static void
2315 rstp_run(struct ofproto_dpif *ofproto)
2316 {
2317 if (ofproto->rstp) {
2318 long long int now = time_msec();
2319 long long int elapsed = now - ofproto->rstp_last_tick;
2320 struct rstp_port *rp;
2321 struct ofport_dpif *ofport;
2322
2323 /* Every second, decrease the values of the timers. */
2324 if (elapsed >= 1000) {
2325 rstp_tick_timers(ofproto->rstp);
2326 ofproto->rstp_last_tick = now;
2327 }
2328 rp = NULL;
2329 while ((ofport = rstp_get_next_changed_port_aux(ofproto->rstp, &rp))) {
2330 update_rstp_port_state(ofport);
2331 }
2332 rp = NULL;
2333 ofport = NULL;
2334 /* FIXME: This check should be done on-event (i.e., when setting
2335 * p->fdb_flush) and not periodically.
2336 */
2337 while ((ofport = rstp_check_and_reset_fdb_flush(ofproto->rstp, &rp))) {
2338 if (!rstp_shift_root_learned_address(ofproto->rstp)
2339 || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
2340 bundle_flush_macs(ofport->bundle, false);
2341 }
2342 }
2343
2344 if (rstp_shift_root_learned_address(ofproto->rstp)) {
2345 struct ofport_dpif *old_root_aux =
2346 (struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp);
2347 struct ofport_dpif *new_root_aux =
2348 (struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp);
2349 if (old_root_aux != NULL && new_root_aux != NULL) {
2350 bundle_move(old_root_aux->bundle, new_root_aux->bundle);
2351 rstp_reset_root_changed(ofproto->rstp);
2352 }
2353 }
2354 }
2355 }
2356
2357 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
2358 static int
2359 set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
2360 {
2361 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2362
2363 /* Only revalidate flows if the configuration changed. */
2364 if (!s != !ofproto->stp) {
2365 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2366 }
2367
2368 if (s) {
2369 if (!ofproto->stp) {
2370 ofproto->stp = stp_create(ofproto_->name, s->system_id,
2371 send_bpdu_cb, ofproto);
2372 ofproto->stp_last_tick = time_msec();
2373 }
2374
2375 stp_set_bridge_id(ofproto->stp, s->system_id);
2376 stp_set_bridge_priority(ofproto->stp, s->priority);
2377 stp_set_hello_time(ofproto->stp, s->hello_time);
2378 stp_set_max_age(ofproto->stp, s->max_age);
2379 stp_set_forward_delay(ofproto->stp, s->fwd_delay);
2380 } else {
2381 struct ofport *ofport;
2382
2383 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
2384 set_stp_port(ofport, NULL);
2385 }
2386
2387 stp_unref(ofproto->stp);
2388 ofproto->stp = NULL;
2389 }
2390
2391 return 0;
2392 }
2393
2394 static int
2395 get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
2396 {
2397 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2398
2399 if (ofproto->stp) {
2400 s->enabled = true;
2401 s->bridge_id = stp_get_bridge_id(ofproto->stp);
2402 s->designated_root = stp_get_designated_root(ofproto->stp);
2403 s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
2404 } else {
2405 s->enabled = false;
2406 }
2407
2408 return 0;
2409 }
2410
2411 static void
2412 update_stp_port_state(struct ofport_dpif *ofport)
2413 {
2414 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2415 enum stp_state state;
2416
2417 /* Figure out new state. */
2418 state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
2419 : STP_DISABLED;
2420
2421 /* Update state. */
2422 if (ofport->stp_state != state) {
2423 enum ofputil_port_state of_state;
2424 bool fwd_change;
2425
2426 VLOG_DBG("port %s: STP state changed from %s to %s",
2427 netdev_get_name(ofport->up.netdev),
2428 stp_state_name(ofport->stp_state),
2429 stp_state_name(state));
2430 if (stp_learn_in_state(ofport->stp_state)
2431 != stp_learn_in_state(state)) {
2432 /* XXX: Learning action flows should also be flushed. */
2433 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2434 mac_learning_flush(ofproto->ml);
2435 ovs_rwlock_unlock(&ofproto->ml->rwlock);
2436 mcast_snooping_mdb_flush(ofproto->ms);
2437 }
2438 fwd_change = stp_forward_in_state(ofport->stp_state)
2439 != stp_forward_in_state(state);
2440
2441 ofproto->backer->need_revalidate = REV_STP;
2442 ofport->stp_state = state;
2443 ofport->stp_state_entered = time_msec();
2444
2445 if (fwd_change && ofport->bundle) {
2446 bundle_update(ofport->bundle);
2447 }
2448
2449 /* Update the STP state bits in the OpenFlow port description. */
2450 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
2451 of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
2452 : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
2453 : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
2454 : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
2455 : 0);
2456 ofproto_port_set_state(&ofport->up, of_state);
2457 }
2458 }
2459
2460 /* Configures STP on 'ofport_' using the settings defined in 's'. The
2461 * caller is responsible for assigning STP port numbers and ensuring
2462 * there are no duplicates. */
2463 static int
2464 set_stp_port(struct ofport *ofport_,
2465 const struct ofproto_port_stp_settings *s)
2466 {
2467 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2468 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2469 struct stp_port *sp = ofport->stp_port;
2470
2471 if (!s || !s->enable) {
2472 if (sp) {
2473 ofport->stp_port = NULL;
2474 stp_port_disable(sp);
2475 update_stp_port_state(ofport);
2476 }
2477 return 0;
2478 } else if (sp && stp_port_no(sp) != s->port_num
2479 && ofport == stp_port_get_aux(sp)) {
2480 /* The port-id changed, so disable the old one if it's not
2481 * already in use by another port. */
2482 stp_port_disable(sp);
2483 }
2484
2485 sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
2486
2487 /* Set name before enabling the port so that debugging messages can print
2488 * the name. */
2489 stp_port_set_name(sp, netdev_get_name(ofport->up.netdev));
2490 stp_port_enable(sp);
2491
2492 stp_port_set_aux(sp, ofport);
2493 stp_port_set_priority(sp, s->priority);
2494 stp_port_set_path_cost(sp, s->path_cost);
2495
2496 update_stp_port_state(ofport);
2497
2498 return 0;
2499 }
2500
2501 static int
2502 get_stp_port_status(struct ofport *ofport_,
2503 struct ofproto_port_stp_status *s)
2504 {
2505 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2506 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2507 struct stp_port *sp = ofport->stp_port;
2508
2509 if (!ofproto->stp || !sp) {
2510 s->enabled = false;
2511 return 0;
2512 }
2513
2514 s->enabled = true;
2515 s->port_id = stp_port_get_id(sp);
2516 s->state = stp_port_get_state(sp);
2517 s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
2518 s->role = stp_port_get_role(sp);
2519
2520 return 0;
2521 }
2522
2523 static int
2524 get_stp_port_stats(struct ofport *ofport_,
2525 struct ofproto_port_stp_stats *s)
2526 {
2527 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2528 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2529 struct stp_port *sp = ofport->stp_port;
2530
2531 if (!ofproto->stp || !sp) {
2532 s->enabled = false;
2533 return 0;
2534 }
2535
2536 s->enabled = true;
2537 stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
2538
2539 return 0;
2540 }
2541
2542 static void
2543 stp_run(struct ofproto_dpif *ofproto)
2544 {
2545 if (ofproto->stp) {
2546 long long int now = time_msec();
2547 long long int elapsed = now - ofproto->stp_last_tick;
2548 struct stp_port *sp;
2549
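/* stp_tick() takes the elapsed time as an int number of milliseconds, so
 * clamp 'elapsed' in case a very long time has passed since the last tick. */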
2550 if (elapsed > 0) {
2551 stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
2552 ofproto->stp_last_tick = now;
2553 }
2554 while (stp_get_changed_port(ofproto->stp, &sp)) {
2555 struct ofport_dpif *ofport = stp_port_get_aux(sp);
2556
2557 if (ofport) {
2558 update_stp_port_state(ofport);
2559 }
2560 }
2561
2562 if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
2563 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2564 mac_learning_flush(ofproto->ml);
2565 ovs_rwlock_unlock(&ofproto->ml->rwlock);
2566 mcast_snooping_mdb_flush(ofproto->ms);
2567 }
2568 }
2569 }
2570
2571 static void
2572 stp_wait(struct ofproto_dpif *ofproto)
2573 {
2574 if (ofproto->stp) {
2575 poll_timer_wait(1000);
2576 }
2577 }
2578
2579 /* Configures RSTP on 'ofport_' using the settings defined in 's'. The
2580 * caller is responsible for assigning RSTP port numbers and ensuring
2581 * there are no duplicates. */
2582 static void
2583 set_rstp_port(struct ofport *ofport_,
2584 const struct ofproto_port_rstp_settings *s)
2585 {
2586 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2587 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2588 struct rstp_port *rp = ofport->rstp_port;
2589
2590 if (!s || !s->enable) {
2591 if (rp) {
2592 rstp_port_set_aux(rp, NULL);
2593 rstp_port_set_state(rp, RSTP_DISABLED);
2594 rstp_port_set_mac_operational(rp, false);
2595 ofport->rstp_port = NULL;
2596 rstp_port_unref(rp);
2597 update_rstp_port_state(ofport);
2598 }
2599 return;
2600 }
2601
2602 /* Check if we need to add a new port. */
2603 if (!rp) {
2604 rp = ofport->rstp_port = rstp_add_port(ofproto->rstp);
2605 }
2606
2607 rstp_port_set(rp, s->port_num, s->priority, s->path_cost,
2608 s->admin_edge_port, s->auto_edge,
2609 s->admin_p2p_mac_state, s->admin_port_state, s->mcheck,
2610 ofport);
2611 update_rstp_port_state(ofport);
2612 /* Synchronize operational status. */
2613 rstp_port_set_mac_operational(rp, ofport->may_enable);
2614 }
2615
2616 static void
2617 get_rstp_port_status(struct ofport *ofport_,
2618 struct ofproto_port_rstp_status *s)
2619 {
2620 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2621 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2622 struct rstp_port *rp = ofport->rstp_port;
2623
2624 if (!ofproto->rstp || !rp) {
2625 s->enabled = false;
2626 return;
2627 }
2628
2629 s->enabled = true;
2630 rstp_port_get_status(rp, &s->port_id, &s->state, &s->role,
2631 &s->designated_bridge_id, &s->designated_port_id,
2632 &s->designated_path_cost, &s->tx_count,
2633 &s->rx_count, &s->error_count, &s->uptime);
2634 }
2635
2636 \f
2637 static int
2638 set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp,
2639 size_t n_qdscp)
2640 {
2641 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2642 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2643
2644 if (ofport->n_qdscp != n_qdscp
2645 || (n_qdscp && memcmp(ofport->qdscp, qdscp,
2646 n_qdscp * sizeof *qdscp))) {
2647 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2648 free(ofport->qdscp);
2649 ofport->qdscp = n_qdscp
2650 ? xmemdup(qdscp, n_qdscp * sizeof *qdscp)
2651 : NULL;
2652 ofport->n_qdscp = n_qdscp;
2653 }
2654
2655 return 0;
2656 }
2657 \f
2658 /* Bundles. */
2659
2660 /* Expires all MAC learning entries associated with 'bundle' and forces its
2661 * ofproto to revalidate every flow.
2662 *
2663 * Normally MAC learning entries are removed only from the ofproto associated
2664 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
2665 * are removed from every ofproto. When patch ports and SLB bonds are in use
2666 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
2667 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
2668 * with the host from which it migrated. */
2669 static void
2670 bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
2671 {
2672 struct ofproto_dpif *ofproto = bundle->ofproto;
2673 struct mac_learning *ml = ofproto->ml;
2674 struct mac_entry *mac, *next_mac;
2675
2676 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2677 ovs_rwlock_wrlock(&ml->rwlock);
2678 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2679 if (mac_entry_get_port(ml, mac) == bundle) {
2680 if (all_ofprotos) {
2681 struct ofproto_dpif *o;
2682
2683 HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2684 if (o != ofproto) {
2685 struct mac_entry *e;
2686
2687 ovs_rwlock_wrlock(&o->ml->rwlock);
2688 e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
2689 if (e) {
2690 mac_learning_expire(o->ml, e);
2691 }
2692 ovs_rwlock_unlock(&o->ml->rwlock);
2693 }
2694 }
2695 }
2696
2697 mac_learning_expire(ml, mac);
2698 }
2699 }
2700 ovs_rwlock_unlock(&ml->rwlock);
2701 }
2702
2703 static void
2704 bundle_move(struct ofbundle *old, struct ofbundle *new)
2705 {
2706 struct ofproto_dpif *ofproto = old->ofproto;
2707 struct mac_learning *ml = ofproto->ml;
2708 struct mac_entry *mac, *next_mac;
2709
2710 ovs_assert(new->ofproto == old->ofproto);
2711
2712 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2713 ovs_rwlock_wrlock(&ml->rwlock);
2714 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2715 if (mac_entry_get_port(ml, mac) == old) {
2716 mac_entry_set_port(ml, mac, new);
2717 }
2718 }
2719 ovs_rwlock_unlock(&ml->rwlock);
2720 }
2721
2722 static struct ofbundle *
2723 bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
2724 {
2725 struct ofbundle *bundle;
2726
2727 HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
2728 &ofproto->bundles) {
2729 if (bundle->aux == aux) {
2730 return bundle;
2731 }
2732 }
2733 return NULL;
2734 }
2735
2736 static void
2737 bundle_update(struct ofbundle *bundle)
2738 {
2739 struct ofport_dpif *port;
2740
2741 bundle->floodable = true;
2742 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2743 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2744 || port->is_layer3
2745 || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
2746 || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
2747 bundle->floodable = false;
2748 break;
2749 }
2750 }
2751 }
2752
2753 static void
2754 bundle_del_port(struct ofport_dpif *port)
2755 {
2756 struct ofbundle *bundle = port->bundle;
2757
2758 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2759
2760 list_remove(&port->bundle_node);
2761 port->bundle = NULL;
2762
2763 if (bundle->lacp) {
2764 lacp_slave_unregister(bundle->lacp, port);
2765 }
2766 if (bundle->bond) {
2767 bond_slave_unregister(bundle->bond, port);
2768 }
2769
2770 bundle_update(bundle);
2771 }
2772
2773 static bool
2774 bundle_add_port(struct ofbundle *bundle, ofp_port_t ofp_port,
2775 struct lacp_slave_settings *lacp)
2776 {
2777 struct ofport_dpif *port;
2778
2779 port = ofp_port_to_ofport(bundle->ofproto, ofp_port);
2780 if (!port) {
2781 return false;
2782 }
2783
2784 if (port->bundle != bundle) {
2785 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2786 if (port->bundle) {
2787 bundle_remove(&port->up);
2788 }
2789
2790 port->bundle = bundle;
2791 list_push_back(&bundle->ports, &port->bundle_node);
2792 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2793 || port->is_layer3
2794 || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
2795 || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
2796 bundle->floodable = false;
2797 }
2798 }
2799 if (lacp) {
2800 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2801 lacp_slave_register(bundle->lacp, port, lacp);
2802 }
2803
2804 return true;
2805 }
2806
2807 static void
2808 bundle_destroy(struct ofbundle *bundle)
2809 {
2810 struct ofproto_dpif *ofproto;
2811 struct ofport_dpif *port, *next_port;
2812
2813 if (!bundle) {
2814 return;
2815 }
2816
2817 ofproto = bundle->ofproto;
2818 mbridge_unregister_bundle(ofproto->mbridge, bundle);
2819
2820 xlate_txn_start();
2821 xlate_bundle_remove(bundle);
2822 xlate_txn_commit();
2823
2824 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2825 bundle_del_port(port);
2826 }
2827
2828 bundle_flush_macs(bundle, true);
2829 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
2830 free(bundle->name);
2831 free(bundle->trunks);
2832 lacp_unref(bundle->lacp);
2833 bond_unref(bundle->bond);
2834 free(bundle);
2835 }
2836
2837 static int
2838 bundle_set(struct ofproto *ofproto_, void *aux,
2839 const struct ofproto_bundle_settings *s)
2840 {
2841 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2842 bool need_flush = false;
2843 struct ofport_dpif *port;
2844 struct ofbundle *bundle;
2845 unsigned long *trunks;
2846 int vlan;
2847 size_t i;
2848 bool ok;
2849
2850 if (!s) {
2851 bundle_destroy(bundle_lookup(ofproto, aux));
2852 return 0;
2853 }
2854
2855 ovs_assert(s->n_slaves == 1 || s->bond != NULL);
2856 ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
2857
2858 bundle = bundle_lookup(ofproto, aux);
2859 if (!bundle) {
2860 bundle = xmalloc(sizeof *bundle);
2861
2862 bundle->ofproto = ofproto;
2863 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
2864 hash_pointer(aux, 0));
2865 bundle->aux = aux;
2866 bundle->name = NULL;
2867
2868 list_init(&bundle->ports);
2869 bundle->vlan_mode = PORT_VLAN_TRUNK;
2870 bundle->vlan = -1;
2871 bundle->trunks = NULL;
2872 bundle->use_priority_tags = s->use_priority_tags;
2873 bundle->lacp = NULL;
2874 bundle->bond = NULL;
2875
2876 bundle->floodable = true;
2877 mbridge_register_bundle(ofproto->mbridge, bundle);
2878 }
2879
2880 if (!bundle->name || strcmp(s->name, bundle->name)) {
2881 free(bundle->name);
2882 bundle->name = xstrdup(s->name);
2883 }
2884
2885 /* LACP. */
2886 if (s->lacp) {
2887 ofproto->lacp_enabled = true;
2888 if (!bundle->lacp) {
2889 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2890 bundle->lacp = lacp_create();
2891 }
2892 lacp_configure(bundle->lacp, s->lacp);
2893 } else {
2894 lacp_unref(bundle->lacp);
2895 bundle->lacp = NULL;
2896 }
2897
2898 /* Update set of ports. */
2899 ok = true;
2900 for (i = 0; i < s->n_slaves; i++) {
2901 if (!bundle_add_port(bundle, s->slaves[i],
2902 s->lacp ? &s->lacp_slaves[i] : NULL)) {
2903 ok = false;
2904 }
2905 }
2906 if (!ok || list_size(&bundle->ports) != s->n_slaves) {
2907 struct ofport_dpif *next_port;
2908
2909 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2910 for (i = 0; i < s->n_slaves; i++) {
2911 if (s->slaves[i] == port->up.ofp_port) {
2912 goto found;
2913 }
2914 }
2915
2916 bundle_del_port(port);
2917 found: ;
2918 }
2919 }
2920 ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
2921
2922 if (list_is_empty(&bundle->ports)) {
2923 bundle_destroy(bundle);
2924 return EINVAL;
2925 }
2926
2927 /* Set VLAN tagging mode. */
2928 if (s->vlan_mode != bundle->vlan_mode
2929 || s->use_priority_tags != bundle->use_priority_tags) {
2930 bundle->vlan_mode = s->vlan_mode;
2931 bundle->use_priority_tags = s->use_priority_tags;
2932 need_flush = true;
2933 }
2934
2935 /* Set VLAN tag. */
2936 vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
2937 : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
2938 : 0);
2939 if (vlan != bundle->vlan) {
2940 bundle->vlan = vlan;
2941 need_flush = true;
2942 }
2943
2944 /* Get trunked VLANs. */
2945 switch (s->vlan_mode) {
2946 case PORT_VLAN_ACCESS:
2947 trunks = NULL;
2948 break;
2949
2950 case PORT_VLAN_TRUNK:
2951 trunks = CONST_CAST(unsigned long *, s->trunks);
2952 break;
2953
2954 case PORT_VLAN_NATIVE_UNTAGGED:
2955 case PORT_VLAN_NATIVE_TAGGED:
2956 if (vlan != 0 && (!s->trunks
2957 || !bitmap_is_set(s->trunks, vlan)
2958 || bitmap_is_set(s->trunks, 0))) {
2959 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2960 if (s->trunks) {
2961 trunks = bitmap_clone(s->trunks, 4096);
2962 } else {
2963 trunks = bitmap_allocate1(4096);
2964 }
2965 bitmap_set1(trunks, vlan);
2966 bitmap_set0(trunks, 0);
2967 } else {
2968 trunks = CONST_CAST(unsigned long *, s->trunks);
2969 }
2970 break;
2971
2972 default:
2973 OVS_NOT_REACHED();
2974 }
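/* Commit the new trunk set.  If 'trunks' still aliases the caller-owned
 * 's->trunks', it must be cloned; if it is the bitmap allocated above, take
 * ownership of it directly (clearing 'trunks' so it is not freed below). */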
2975 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
2976 free(bundle->trunks);
2977 if (trunks == s->trunks) {
2978 bundle->trunks = vlan_bitmap_clone(trunks);
2979 } else {
2980 bundle->trunks = trunks;
2981 trunks = NULL;
2982 }
2983 need_flush = true;
2984 }
2985 if (trunks != s->trunks) {
2986 free(trunks);
2987 }
2988
2989 /* Bonding. */
2990 if (!list_is_short(&bundle->ports)) {
2991 bundle->ofproto->has_bonded_bundles = true;
2992 if (bundle->bond) {
2993 if (bond_reconfigure(bundle->bond, s->bond)) {
2994 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2995 }
2996 } else {
2997 bundle->bond = bond_create(s->bond, ofproto);
2998 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2999 }
3000
3001 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
3002 bond_slave_register(bundle->bond, port,
3003 port->up.ofp_port, port->up.netdev);
3004 }
3005 } else {
3006 bond_unref(bundle->bond);
3007 bundle->bond = NULL;
3008 }
3009
3010 /* If we changed something that would affect MAC learning, un-learn
3011 * everything on this port and force flow revalidation. */
3012 if (need_flush) {
3013 bundle_flush_macs(bundle, false);
3014 }
3015
3016 return 0;
3017 }
3018
3019 static void
3020 bundle_remove(struct ofport *port_)
3021 {
3022 struct ofport_dpif *port = ofport_dpif_cast(port_);
3023 struct ofbundle *bundle = port->bundle;
3024
3025 if (bundle) {
3026 bundle_del_port(port);
3027 if (list_is_empty(&bundle->ports)) {
3028 bundle_destroy(bundle);
3029 } else if (list_is_short(&bundle->ports)) {
3030 bond_unref(bundle->bond);
3031 bundle->bond = NULL;
3032 }
3033 }
3034 }
3035
3036 static void
3037 send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
3038 {
3039 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
3040 struct ofport_dpif *port = port_;
3041 struct eth_addr ea;
3042 int error;
3043
3044 error = netdev_get_etheraddr(port->up.netdev, &ea);
3045 if (!error) {
3046 struct dp_packet packet;
3047 void *packet_pdu;
3048
3049 dp_packet_init(&packet, 0);
3050 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
3051 pdu_size);
3052 memcpy(packet_pdu, pdu, pdu_size);
3053
3054 ofproto_dpif_send_packet(port, &packet);
3055 dp_packet_uninit(&packet);
3056 } else {
3057 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
3058 "%s (%s)", port->bundle->name,
3059 netdev_get_name(port->up.netdev), ovs_strerror(error));
3060 }
3061 }
3062
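/* Composes a gratuitous learning packet for each MAC entry learned on a
 * bundle other than 'bundle', then sends them all.  The packets are built
 * while holding the MAC table's read lock but are transmitted only after
 * the lock has been released. */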
3063 static void
3064 bundle_send_learning_packets(struct ofbundle *bundle)
3065 {
3066 struct ofproto_dpif *ofproto = bundle->ofproto;
3067 int error, n_packets, n_errors;
3068 struct mac_entry *e;
3069 struct pkt_list {
3070 struct ovs_list list_node;
3071 struct ofport_dpif *port;
3072 struct dp_packet *pkt;
3073 } *pkt_node;
3074 struct ovs_list packets;
3075
3076 list_init(&packets);
3077 ovs_rwlock_rdlock(&ofproto->ml->rwlock);
3078 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
3079 if (mac_entry_get_port(ofproto->ml, e) != bundle) {
3080 pkt_node = xmalloc(sizeof *pkt_node);
3081 pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
3082 e->mac, e->vlan,
3083 (void **)&pkt_node->port);
3084 list_push_back(&packets, &pkt_node->list_node);
3085 }
3086 }
3087 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3088
3089 error = n_packets = n_errors = 0;
3090 LIST_FOR_EACH_POP (pkt_node, list_node, &packets) {
3091 int ret;
3092
3093 ret = ofproto_dpif_send_packet(pkt_node->port, pkt_node->pkt);
3094 dp_packet_delete(pkt_node->pkt);
3095 free(pkt_node);
3096 if (ret) {
3097 error = ret;
3098 n_errors++;
3099 }
3100 n_packets++;
3101 }
3102
3103 if (n_errors) {
3104 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3105 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
3106 "packets, last error was: %s",
3107 bundle->name, n_errors, n_packets, ovs_strerror(error));
3108 } else {
3109 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
3110 bundle->name, n_packets);
3111 }
3112 }
3113
3114 static void
3115 bundle_run(struct ofbundle *bundle)
3116 {
3117 if (bundle->lacp) {
3118 lacp_run(bundle->lacp, send_pdu_cb);
3119 }
3120 if (bundle->bond) {
3121 struct ofport_dpif *port;
3122
3123 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
3124 bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
3125 }
3126
3127 if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
3128 bundle->ofproto->backer->need_revalidate = REV_BOND;
3129 }
3130
3131 if (bond_should_send_learning_packets(bundle->bond)) {
3132 bundle_send_learning_packets(bundle);
3133 }
3134 }
3135 }
3136
3137 static void
3138 bundle_wait(struct ofbundle *bundle)
3139 {
3140 if (bundle->lacp) {
3141 lacp_wait(bundle->lacp);
3142 }
3143 if (bundle->bond) {
3144 bond_wait(bundle->bond);
3145 }
3146 }
3147 \f
3148 /* Mirrors. */
3149
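/* Creates, updates, or (when 's' is null) destroys the mirror identified by
 * the client handle 'aux': resolves the client's source, destination, and
 * output bundle handles to ofbundles and hands the result to the mbridge
 * layer. */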
3150 static int
3151 mirror_set__(struct ofproto *ofproto_, void *aux,
3152 const struct ofproto_mirror_settings *s)
3153 {
3154 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3155 struct ofbundle **srcs, **dsts;
3156 int error;
3157 size_t i;
3158
3159 if (!s) {
3160 mirror_destroy(ofproto->mbridge, aux);
3161 return 0;
3162 }
3163
3164 srcs = xmalloc(s->n_srcs * sizeof *srcs);
3165 dsts = xmalloc(s->n_dsts * sizeof *dsts);
3166
3167 for (i = 0; i < s->n_srcs; i++) {
3168 srcs[i] = bundle_lookup(ofproto, s->srcs[i]);
3169 }
3170
3171 for (i = 0; i < s->n_dsts; i++) {
3172 dsts[i] = bundle_lookup(ofproto, s->dsts[i]);
3173 }
3174
3175 error = mirror_set(ofproto->mbridge, aux, s->name, srcs, s->n_srcs, dsts,
3176 s->n_dsts, s->src_vlans,
3177 bundle_lookup(ofproto, s->out_bundle), s->out_vlan);
3178 free(srcs);
3179 free(dsts);
3180 return error;
3181 }
3182
3183 static int
3184 mirror_get_stats__(struct ofproto *ofproto, void *aux,
3185 uint64_t *packets, uint64_t *bytes)
3186 {
3187 return mirror_get_stats(ofproto_dpif_cast(ofproto)->mbridge, aux, packets,
3188 bytes);
3189 }
3190
3191 static int
3192 set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
3193 {
3194 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3195 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
3196 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
3197 mac_learning_flush(ofproto->ml);
3198 }
3199 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3200 return 0;
3201 }
3202
3203 static bool
3204 is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
3205 {
3206 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3207 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
3208 return bundle && mirror_bundle_out(ofproto->mbridge, bundle) != 0;
3209 }
3210
3211 static void
3212 forward_bpdu_changed(struct ofproto *ofproto_)
3213 {
3214 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3215 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3216 }
3217
3218 static void
3219 set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
3220 size_t max_entries)
3221 {
3222 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3223 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
3224 mac_learning_set_idle_time(ofproto->ml, idle_time);
3225 mac_learning_set_max_entries(ofproto->ml, max_entries);
3226 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3227 }
3228
3229 /* Configures multicast snooping on 'ofproto_' using the settings
3230 * defined in 's'. */
3231 static int
3232 set_mcast_snooping(struct ofproto *ofproto_,
3233 const struct ofproto_mcast_snooping_settings *s)
3234 {
3235 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3236
3237 /* Only revalidate flows if the configuration changed. */
3238 if (!s != !ofproto->ms) {
3239 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3240 }
3241
3242 if (s) {
3243 if (!ofproto->ms) {
3244 ofproto->ms = mcast_snooping_create();
3245 }
3246
3247 ovs_rwlock_wrlock(&ofproto->ms->rwlock);
3248 mcast_snooping_set_idle_time(ofproto->ms, s->idle_time);
3249 mcast_snooping_set_max_entries(ofproto->ms, s->max_entries);
3250 if (mcast_snooping_set_flood_unreg(ofproto->ms, s->flood_unreg)) {
3251 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3252 }
3253 ovs_rwlock_unlock(&ofproto->ms->rwlock);
3254 } else {
3255 mcast_snooping_unref(ofproto->ms);
3256 ofproto->ms = NULL;
3257 }
3258
3259 return 0;
3260 }
3261
3262 /* Configures a multicast snooping port's flood settings on 'ofproto_'. */
3263 static int
3264 set_mcast_snooping_port(struct ofproto *ofproto_, void *aux,
3265 const struct ofproto_mcast_snooping_port_settings *s)
3266 {
3267 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3268 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
3269
3270 if (ofproto->ms && s) {
3271 ovs_rwlock_wrlock(&ofproto->ms->rwlock);
3272 mcast_snooping_set_port_flood(ofproto->ms, bundle, s->flood);
3273 mcast_snooping_set_port_flood_reports(ofproto->ms, bundle,
3274 s->flood_reports);
3275 ovs_rwlock_unlock(&ofproto->ms->rwlock);
3276 }
3277 return 0;
3278 }
3279
3280 \f
3281 /* Ports. */
3282
3283 struct ofport_dpif *
3284 ofp_port_to_ofport(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
3285 {
3286 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
3287 return ofport ? ofport_dpif_cast(ofport) : NULL;
3288 }
3289
3290 static void
3291 ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
3292 struct ofproto_port *ofproto_port,
3293 struct dpif_port *dpif_port)
3294 {
3295 ofproto_port->name = dpif_port->name;
3296 ofproto_port->type = dpif_port->type;
3297 ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
3298 }
3299
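/* Resolves the patch-port peer of 'ofport', if any.  Searches every ofproto
 * on the same backer for a port whose name matches 'ofport''s configured
 * peer and whose own peer setting names 'ofport' in return; only such a
 * mutual pair is linked. */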
3300 static void
3301 ofport_update_peer(struct ofport_dpif *ofport)
3302 {
3303 const struct ofproto_dpif *ofproto;
3304 struct dpif_backer *backer;
3305 char *peer_name;
3306
3307 if (!netdev_vport_is_patch(ofport->up.netdev)) {
3308 return;
3309 }
3310
3311 backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
3312 backer->need_revalidate = REV_RECONFIGURE;
3313
3314 if (ofport->peer) {
3315 ofport->peer->peer = NULL;
3316 ofport->peer = NULL;
3317 }
3318
3319 peer_name = netdev_vport_patch_peer(ofport->up.netdev);
3320 if (!peer_name) {
3321 return;
3322 }
3323
3324 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
3325 struct ofport *peer_ofport;
3326 struct ofport_dpif *peer;
3327 char *peer_peer;
3328
3329 if (ofproto->backer != backer) {
3330 continue;
3331 }
3332
3333 peer_ofport = shash_find_data(&ofproto->up.port_by_name, peer_name);
3334 if (!peer_ofport) {
3335 continue;
3336 }
3337
3338 peer = ofport_dpif_cast(peer_ofport);
3339 peer_peer = netdev_vport_patch_peer(peer->up.netdev);
3340 if (peer_peer && !strcmp(netdev_get_name(ofport->up.netdev),
3341 peer_peer)) {
3342 ofport->peer = peer;
3343 ofport->peer->peer = ofport;
3344 }
3345 free(peer_peer);
3346
3347 break;
3348 }
3349 free(peer_name);
3350 }
3351
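/* Recomputes whether 'ofport' may be enabled: the netdev must have carrier,
 * CFM and/or BFD (if configured) must consider the port up, and LACP (if
 * the port is bundled) must agree.  A change in the result triggers flow
 * revalidation and is pushed into RSTP. */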
3352 static void
3353 port_run(struct ofport_dpif *ofport)
3354 {
3355 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
3356 bool carrier_changed = carrier_seq != ofport->carrier_seq;
3357 bool enable = netdev_get_carrier(ofport->up.netdev);
3358 bool cfm_enable = false;
3359 bool bfd_enable = false;
3360
3361 ofport->carrier_seq = carrier_seq;
3362
3363 if (ofport->cfm) {
3364 int cfm_opup = cfm_get_opup(ofport->cfm);
3365
3366 cfm_enable = !cfm_get_fault(ofport->cfm);
3367
3368 if (cfm_opup >= 0) {
3369 cfm_enable = cfm_enable && cfm_opup;
3370 }
3371 }
3372
3373 if (ofport->bfd) {
3374 bfd_enable = bfd_forwarding(ofport->bfd);
3375 }
3376
3377 if (ofport->bfd || ofport->cfm) {
3378 enable = enable && (cfm_enable || bfd_enable);
3379 }
3380
3381 if (ofport->bundle) {
3382 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
3383 if (carrier_changed) {
3384 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
3385 }
3386 }
3387
3388 if (ofport->may_enable != enable) {
3389 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3390
3391 ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
3392
3393 if (ofport->rstp_port) {
3394 rstp_port_set_mac_operational(ofport->rstp_port, enable);
3395 }
3396 }
3397
3398 ofport->may_enable = enable;
3399 }
3400
3401 static int
3402 port_query_by_name(const struct ofproto *ofproto_, const char *devname,
3403 struct ofproto_port *ofproto_port)
3404 {
3405 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3406 struct dpif_port dpif_port;
3407 int error;
3408
3409 if (sset_contains(&ofproto->ghost_ports, devname)) {
3410 const char *type = netdev_get_type_from_name(devname);
3411
3412 /* We may be called before ofproto->up.port_by_name is populated with
3413 * the appropriate ofport. For this reason, we must get the name and
3414 * type from the netdev layer directly. */
3415 if (type) {
3416 const struct ofport *ofport;
3417
3418 ofport = shash_find_data(&ofproto->up.port_by_name, devname);
3419 ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
3420 ofproto_port->name = xstrdup(devname);
3421 ofproto_port->type = xstrdup(type);
3422 return 0;
3423 }
3424 return ENODEV;
3425 }
3426
3427 if (!sset_contains(&ofproto->ports, devname)) {
3428 return ENODEV;
3429 }
3430 error = dpif_port_query_by_name(ofproto->backer->dpif,
3431 devname, &dpif_port);
3432 if (!error) {
3433 ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
3434 }
3435 return error;
3436 }
3437
3438 static int
3439 port_add(struct ofproto *ofproto_, struct netdev *netdev)
3440 {
3441 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3442 const char *devname = netdev_get_name(netdev);
3443 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
3444 const char *dp_port_name;
3445
3446 if (netdev_vport_is_patch(netdev)) {
3447 sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
3448 return 0;
3449 }
3450
3451 dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
3452 if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
3453 odp_port_t port_no = ODPP_NONE;
3454 int error;
3455
3456 error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
3457 if (error) {
3458 return error;
3459 }
3460 if (netdev_get_tunnel_config(netdev)) {
3461 simap_put(&ofproto->backer->tnl_backers,
3462 dp_port_name, odp_to_u32(port_no));
3463 }
3464 }
3465
3466 if (netdev_get_tunnel_config(netdev)) {
3467 sset_add(&ofproto->ghost_ports, devname);
3468 } else {
3469 sset_add(&ofproto->ports, devname);
3470 }
3471 return 0;
3472 }
3473
3474 static int
3475 port_del(struct ofproto *ofproto_, ofp_port_t ofp_port)
3476 {
3477 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3478 struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
3479 int error = 0;
3480
3481 if (!ofport) {
3482 return 0;
3483 }
3484
3485 sset_find_and_delete(&ofproto->ghost_ports,
3486 netdev_get_name(ofport->up.netdev));
3487 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3488 if (!ofport->is_tunnel && !netdev_vport_is_patch(ofport->up.netdev)) {
3489 error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
3490 if (!error) {
3491 /* The caller is going to close ofport->up.netdev. If this is a
3492 * bonded port, then the bond is using that netdev, so remove it
3493 * from the bond. The client will need to reconfigure everything
3494 * after deleting ports, so then the slave will get re-added. */
3495 bundle_remove(&ofport->up);
3496 }
3497 }
3498 return error;
3499 }
3500
3501 static int
3502 port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
3503 {
3504 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3505 int error;
3506
3507 error = netdev_get_stats(ofport->up.netdev, stats);
3508
3509 if (!error && ofport_->ofp_port == OFPP_LOCAL) {
3510 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3511
3512 ovs_mutex_lock(&ofproto->stats_mutex);
3513 /* ofproto->stats.tx_packets represents packets that we created
3514 * internally and sent to some port (e.g. packets sent with
3515 * ofproto_dpif_send_packet()). Account for them as if they had
3516 * come from OFPP_LOCAL and got forwarded. */
3517
3518 if (stats->rx_packets != UINT64_MAX) {
3519 stats->rx_packets += ofproto->stats.tx_packets;
3520 }
3521
3522 if (stats->rx_bytes != UINT64_MAX) {
3523 stats->rx_bytes += ofproto->stats.tx_bytes;
3524 }
3525
3526 /* ofproto->stats.rx_packets represents packets that were received on
3527 * some port and that we processed internally and dropped (e.g. STP).
3528 * Account for them as if they had been forwarded to OFPP_LOCAL. */
3529
3530 if (stats->tx_packets != UINT64_MAX) {
3531 stats->tx_packets += ofproto->stats.rx_packets;
3532 }
3533
3534 if (stats->tx_bytes != UINT64_MAX) {
3535 stats->tx_bytes += ofproto->stats.rx_bytes;
3536 }
3537 ovs_mutex_unlock(&ofproto->stats_mutex);
3538 }
3539
3540 return error;
3541 }
3542
3543 static int
3544 port_get_lacp_stats(const struct ofport *ofport_, struct lacp_slave_stats *stats)
3545 {
3546 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3547 if (ofport->bundle && ofport->bundle->lacp) {
3548 if (lacp_get_slave_stats(ofport->bundle->lacp, ofport, stats)) {
3549 return 0;
3550 }
3551 }
3552 return -1;
3553 }
3554
3555 struct port_dump_state {
3556 uint32_t bucket;
3557 uint32_t offset;
3558 bool ghost;
3559
3560 struct ofproto_port port;
3561 bool has_port;
3562 };
3563
3564 static int
3565 port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
3566 {
3567 *statep = xzalloc(sizeof(struct port_dump_state));
3568 return 0;
3569 }
3570
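/* Returns the next port in the dump.  Walks the 'ports' sset first, then
 * flips 'state->ghost' and restarts on 'ghost_ports' (note the recursive
 * call below), finally returning EOF once both sets are exhausted. */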
3571 static int
3572 port_dump_next(const struct ofproto *ofproto_, void *state_,
3573 struct ofproto_port *port)
3574 {
3575 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3576 struct port_dump_state *state = state_;
3577 const struct sset *sset;
3578 struct sset_node *node;
3579
3580 if (state->has_port) {
3581 ofproto_port_destroy(&state->port);
3582 state->has_port = false;
3583 }
3584 sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
3585 while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
3586 int error;
3587
3588 error = port_query_by_name(ofproto_, node->name, &state->port);
3589 if (!error) {
3590 *port = state->port;
3591 state->has_port = true;
3592 return 0;
3593 } else if (error != ENODEV) {
3594 return error;
3595 }
3596 }
3597
3598 if (!state->ghost) {
3599 state->ghost = true;
3600 state->bucket = 0;
3601 state->offset = 0;
3602 return port_dump_next(ofproto_, state_, port);
3603 }
3604
3605 return EOF;
3606 }
3607
3608 static int
3609 port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
3610 {
3611 struct port_dump_state *state = state_;
3612
3613 if (state->has_port) {
3614 ofproto_port_destroy(&state->port);
3615 }
3616 free(state);
3617 return 0;
3618 }
3619
3620 static int
3621 port_poll(const struct ofproto *ofproto_, char **devnamep)
3622 {
3623 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3624
3625 if (ofproto->port_poll_errno) {
3626 int error = ofproto->port_poll_errno;
3627 ofproto->port_poll_errno = 0;
3628 return error;
3629 }
3630
3631 if (sset_is_empty(&ofproto->port_poll_set)) {
3632 return EAGAIN;
3633 }
3634
3635 *devnamep = sset_pop(&ofproto->port_poll_set);
3636 return 0;
3637 }
3638
3639 static void
3640 port_poll_wait(const struct ofproto *ofproto_)
3641 {
3642 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3643 dpif_port_poll_wait(ofproto->backer->dpif);
3644 }
3645
3646 static int
3647 port_is_lacp_current(const struct ofport *ofport_)
3648 {
3649 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3650 return (ofport->bundle && ofport->bundle->lacp
3651 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
3652 : -1);
3653 }
3654 \f
3655 /* If 'rule' is an OpenFlow rule that has expired according to its OpenFlow
3656 * hard or idle timeout, then deletes it entirely. */
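/* (OpenFlow timeouts are in seconds, while 'now', 'modified' and 'used'
 * below are in milliseconds, hence the '* 1000' conversions.) */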
3657 static void
3658 rule_expire(struct rule_dpif *rule)
3659 OVS_REQUIRES(ofproto_mutex)
3660 {
3661 uint16_t hard_timeout, idle_timeout;
3662 long long int now = time_msec();
3663 int reason = -1;
3664
3665 hard_timeout = rule->up.hard_timeout;
3666 idle_timeout = rule->up.idle_timeout;
3667
3668 /* Has 'rule' expired? */
3669 if (hard_timeout) {
3670 long long int modified;
3671
3672 ovs_mutex_lock(&rule->up.mutex);
3673 modified = rule->up.modified;
3674 ovs_mutex_unlock(&rule->up.mutex);
3675
3676 if (now > modified + hard_timeout * 1000) {
3677 reason = OFPRR_HARD_TIMEOUT;
3678 }
3679 }
3680
3681 if (reason < 0 && idle_timeout) {
3682 long long int used;
3683
3684 ovs_mutex_lock(&rule->stats_mutex);
3685 used = rule->stats.used;
3686 ovs_mutex_unlock(&rule->stats_mutex);
3687
3688 if (now > used + idle_timeout * 1000) {
3689 reason = OFPRR_IDLE_TIMEOUT;
3690 }
3691 }
3692
3693 if (reason >= 0) {
3694 COVERAGE_INC(ofproto_dpif_expired);
3695 ofproto_rule_expire(&rule->up, reason);
3696 }
3697 }
3698
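/* Internal form of ofproto_dpif_execute_actions() that additionally lets the
 * caller seed the translation's 'recurse' and 'resubmits' counters,
 * presumably for executions triggered from within another translation.
 * Exactly one of 'rule' and 'ofpacts' must be nonnull, as asserted below. */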
3699 int
3700 ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
3701 const struct flow *flow,
3702 struct rule_dpif *rule,
3703 const struct ofpact *ofpacts, size_t ofpacts_len,
3704 int recurse, int resubmits,
3705 struct dp_packet *packet)
3706 {
3707 struct dpif_flow_stats stats;
3708 struct xlate_out xout;
3709 struct xlate_in xin;
3710 ofp_port_t in_port;
3711 struct dpif_execute execute;
3712 int error;
3713
3714 ovs_assert((rule != NULL) != (ofpacts != NULL));
3715
3716 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
3717
3718 if (rule) {
3719 rule_dpif_credit_stats(rule, &stats);
3720 }
3721
3722 uint64_t odp_actions_stub[1024 / 8];
3723 struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
3724 xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
3725 stats.tcp_flags, packet, NULL, &odp_actions);
3726 xin.ofpacts = ofpacts;
3727 xin.ofpacts_len = ofpacts_len;
3728 xin.resubmit_stats = &stats;
3729 xin.recurse = recurse;
3730 xin.resubmits = resubmits;
3731 if (xlate_actions(&xin, &xout) != XLATE_OK) {
3732 error = EINVAL;
3733 goto out;
3734 }
3735
3736 execute.actions = odp_actions.data;
3737 execute.actions_len = odp_actions.size;
3738
3739 pkt_metadata_from_flow(&packet->md, flow);
3740 execute.packet = packet;
3741 execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
3742 execute.probe = false;
3743 execute.mtu = 0;
3744
3745 /* Fix up in_port. */
3746 in_port = flow->in_port.ofp_port;
3747 if (in_port == OFPP_NONE) {
3748 in_port = OFPP_LOCAL;
3749 }
3750 execute.packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
3751
3752 error = dpif_execute(ofproto->backer->dpif, &execute);
3753 out:
3754 xlate_out_uninit(&xout);
3755 ofpbuf_uninit(&odp_actions);
3756
3757 return error;
3758 }
3759
3760 /* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
3761 * 'flow' must reflect the data in 'packet'. */
3762 int
3763 ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
3764 const struct flow *flow,
3765 struct rule_dpif *rule,
3766 const struct ofpact *ofpacts, size_t ofpacts_len,
3767 struct dp_packet *packet)
3768 {
3769 return ofproto_dpif_execute_actions__(ofproto, flow, rule, ofpacts,
3770 ofpacts_len, 0, 0, packet);
3771 }
3772
3773 void
3774 rule_dpif_credit_stats(struct rule_dpif *rule,
3775 const struct dpif_flow_stats *stats)
3776 {
3777 ovs_mutex_lock(&rule->stats_mutex);
3778 if (OVS_UNLIKELY(rule->new_rule)) {
3779 rule_dpif_credit_stats(rule->new_rule, stats);
3780 } else {
3781 rule->stats.n_packets += stats->n_packets;
3782 rule->stats.n_bytes += stats->n_bytes;
3783 rule->stats.used = MAX(rule->stats.used, stats->used);
3784 }
3785 ovs_mutex_unlock(&rule->stats_mutex);
3786 }
3787
3788 ovs_be64
3789 rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
3790 OVS_REQUIRES(rule->up.mutex)
3791 {
3792 return rule->up.flow_cookie;
3793 }
3794
3795 void
3796 rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
3797 uint16_t hard_timeout)
3798 {
3799 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
3800 }
3801
3802 /* Returns 'rule''s actions. The returned actions are RCU-protected, and can
3803 * be read until the calling thread quiesces. */
3804 const struct rule_actions *
3805 rule_dpif_get_actions(const struct rule_dpif *rule)
3806 {
3807 return rule_get_actions(&rule->up);
3808 }
3809
3810 /* Sets 'rule''s recirculation id. */
3811 static void
3812 rule_dpif_set_recirc_id(struct rule_dpif *rule, uint32_t id)
3813 OVS_REQUIRES(rule->up.mutex)
3814 {
3815 ovs_assert(!rule->recirc_id || rule->recirc_id == id);
3816 if (rule->recirc_id == id) {
3817 /* Release the new reference to the same id. */
3818 recirc_free_id(id);
3819 } else {
3820 rule->recirc_id = id;
3821 }
3822 }
3823
3824 /* Sets 'rule''s recirculation id. */
3825 void
3826 rule_set_recirc_id(struct rule *rule_, uint32_t id)
3827 {
3828 struct rule_dpif *rule = rule_dpif_cast(rule_);
3829
3830 ovs_mutex_lock(&rule->up.mutex);
3831 rule_dpif_set_recirc_id(rule, id);
3832 ovs_mutex_unlock(&rule->up.mutex);
3833 }
3834
3835 cls_version_t
3836 ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto)
3837 {
3838 cls_version_t version;
3839
3840 atomic_read_relaxed(&ofproto->tables_version, &version);
3841
3842 return version;
3843 }
3844
3845 /* The returned rule (if any) is valid at least until the next RCU quiescent
3846 * period. If the rule needs to stay around longer, the caller should take
3847 * a reference.
3848 *
3849 * 'flow' is non-const to allow for temporary modifications during the lookup.
3850 * Any changes are restored before returning. */
3851 static struct rule_dpif *
3852 rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, cls_version_t version,
3853 uint8_t table_id, struct flow *flow,
3854 struct flow_wildcards *wc)
3855 {
3856 struct classifier *cls = &ofproto->up.tables[table_id].cls;
3857 return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls, version,
3858 flow, wc)));
3859 }
3860
3861 /* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
3862 * table '*table_id'. Returns the rule that was found, which may be one of the
3863 * special rules according to packet miss handling. If 'may_packet_in' is
3864 * false, the function avoids returning the miss_rule (which sends packet-ins
3865 * to the controller). Updates 'wc', if nonnull, to reflect the fields
3866 * that were used during the lookup.
3867 *
3868 * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
3869 * if none is found then the table miss configuration for that table is
3870 * honored, which can result in additional lookups in other OpenFlow tables.
3871 * In this case the function updates '*table_id' to reflect the final OpenFlow
3872 * table that was searched.
3873 *
3874 * If 'honor_table_miss' is false, then only one table lookup occurs, in
3875 * '*table_id'.
3876 *
3877 * The rule is returned in '*rule', which is valid at least until the next
3878 * RCU quiescent period. If the '*rule' needs to stay around longer, the
3879 * caller must take a reference.
3880 *
3881 * 'in_port' allows the lookup to take place as if the in port had the value
3882 * 'in_port'. This is needed for resubmit action support.
3883 *
3884 * 'flow' is non-const to allow for temporary modifications during the lookup.
3885 * Any changes are restored before returning. */
3886 struct rule_dpif *
3887 rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto,
3888 cls_version_t version, struct flow *flow,
3889 struct flow_wildcards *wc,
3890 const struct dpif_flow_stats *stats,
3891 uint8_t *table_id, ofp_port_t in_port,
3892 bool may_packet_in, bool honor_table_miss)
3893 {
3894 ovs_be16 old_tp_src = flow->tp_src, old_tp_dst = flow->tp_dst;
3895 ofp_port_t old_in_port = flow->in_port.ofp_port;
3896 enum ofputil_table_miss miss_config;
3897 struct rule_dpif *rule;
3898 uint8_t next_id;
3899
3900 /* We always unwildcard nw_frag (for IP), so it
3901 * need not be unwildcarded here. */
3902 if (flow->nw_frag & FLOW_NW_FRAG_ANY
3903 && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
3904 if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
3905 /* We must pretend that transport ports are unavailable. */
3906 flow->tp_src = htons(0);
3907 flow->tp_dst = htons(0);
3908 } else {
3909 /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
3910 * Use the drop_frags_rule (which cannot disappear). */
3911 rule = ofproto->drop_frags_rule;
3912 if (stats) {
3913 struct oftable *tbl = &ofproto->up.tables[*table_id];
3914 unsigned long orig;
3915
3916 atomic_add_relaxed(&tbl->n_matched, stats->n_packets, &orig);
3917 }
3918 return rule;
3919 }
3920 }
3921
3922 /* Look up a flow with 'in_port' as the input port. Then restore the
3923 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
3924 * have surprising behavior). */
3925 flow->in_port.ofp_port = in_port;
3926
3927 /* Our current implementation depends on n_tables == N_TABLES, and
3928 * TBL_INTERNAL being the last table. */
3929 BUILD_ASSERT_DECL(N_TABLES == TBL_INTERNAL + 1);
3930
3931 miss_config = OFPUTIL_TABLE_MISS_CONTINUE;
3932
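/* Try tables from '*table_id' upward. The extra increment below bumps
 * 'next_id' past TBL_INTERNAL, which is reserved for hidden internal
 * rules. */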
3933 for (next_id = *table_id;
3934 next_id < ofproto->up.n_tables;
3935 next_id++, next_id += (next_id == TBL_INTERNAL))
3936 {
3937 *table_id = next_id;
3938 rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc);
3939 if (stats) {
3940 struct oftable *tbl = &ofproto->up.tables[next_id];
3941 unsigned long orig;
3942
3943 atomic_add_relaxed(rule ? &tbl->n_matched : &tbl->n_missed,
3944 stats->n_packets, &orig);
3945 }
3946 if (rule) {
3947 goto out; /* Match. */
3948 }
3949 if (honor_table_miss) {
3950 miss_config = ofproto_table_get_miss_config(&ofproto->up,
3951 *table_id);
3952 if (miss_config == OFPUTIL_TABLE_MISS_CONTINUE) {
3953 continue;
3954 }
3955 }
3956 break;
3957 }
3958 /* Miss. */
3959 rule = ofproto->no_packet_in_rule;
3960 if (may_packet_in) {
3961 if (miss_config == OFPUTIL_TABLE_MISS_CONTINUE
3962 || miss_config == OFPUTIL_TABLE_MISS_CONTROLLER) {
3963 struct ofport_dpif *port;
3964
3965 port = ofp_port_to_ofport(ofproto, old_in_port);
3966 if (!port) {
3967 VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
3968 old_in_port);
3969 } else if (!(port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN)) {
3970 rule = ofproto->miss_rule;
3971 }
3972 } else if (miss_config == OFPUTIL_TABLE_MISS_DEFAULT &&
3973 connmgr_wants_packet_in_on_miss(ofproto->up.connmgr)) {
3974 rule = ofproto->miss_rule;
3975 }
3976 }
3977 out:
3978 /* Restore port numbers, as they may have been modified above. */
3979 flow->tp_src = old_tp_src;
3980 flow->tp_dst = old_tp_dst;
3981 /* Restore the old in port. */
3982 flow->in_port.ofp_port = old_in_port;
3983
3984 return rule;
3985 }
3986
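/* Called after 'rule' has been inserted, modified, or deleted; forces the
 * datapath flows to be revalidated against the updated flow table. */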
3987 static void
3988 complete_operation(struct rule_dpif *rule)
3989 OVS_REQUIRES(ofproto_mutex)
3990 {
3991 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3992
3993 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
3994 }
3995
3996 static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
3997 {
3998 return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
3999 }
4000
4001 static struct rule *
4002 rule_alloc(void)
4003 {
4004 struct rule_dpif *rule = xzalloc(sizeof *rule);
4005 return &rule->up;
4006 }
4007
4008 static void
4009 rule_dealloc(struct rule *rule_)
4010 {
4011 struct rule_dpif *rule = rule_dpif_cast(rule_);
4012 free(rule);
4013 }
4014
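/* Returns 0 if 'rule' is allowed, or an OpenFlow error if it matches on
 * connection tracking fields (ct_state, ct_zone, ct_mark, ct_label) that the
 * underlying datapath does not support. */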
4015 static enum ofperr
4016 rule_check(struct rule *rule)
4017 {
4018 uint16_t ct_state, ct_zone;
4019 const ovs_u128 *labelp;
4020 ovs_u128 ct_label = { { 0, 0 } };
4021 uint32_t ct_mark;
4022
4023 ct_state = MINIFLOW_GET_U16(rule->cr.match.flow, ct_state);
4024 ct_zone = MINIFLOW_GET_U16(rule->cr.match.flow, ct_zone);
4025 ct_mark = MINIFLOW_GET_U32(rule->cr.match.flow, ct_mark);
4026 labelp = MINIFLOW_GET_U128_PTR(rule->cr.match.flow, ct_label);
4027 if (labelp) {
4028 ct_label = *labelp;
4029 }
4030
4031 if (ct_state || ct_zone || ct_mark
4032 || !ovs_u128_is_zero(&ct_label)) {
4033 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->ofproto);
4034 const struct odp_support *support = &ofproto_dpif_get_support(ofproto)->odp;
4035
4036 if ((ct_state && !support->ct_state)
4037 || (ct_zone && !support->ct_zone)
4038 || (ct_mark && !support->ct_mark)
4039 || (!ovs_u128_is_zero(&ct_label) && !support->ct_label)) {
4040 return OFPERR_OFPBMC_BAD_FIELD;
4041 }
4042 if (ct_state & CS_UNSUPPORTED_MASK) {
4043 return OFPERR_OFPBMC_BAD_MASK;
4044 }
4045 }
4046 return 0;
4047 }
4048
4049 static enum ofperr
4050 rule_construct(struct rule *rule_)
4051 OVS_NO_THREAD_SAFETY_ANALYSIS
4052 {
4053 struct rule_dpif *rule = rule_dpif_cast(rule_);
4054 int error;
4055
4056 error = rule_check(rule_);
4057 if (error) {
4058 return error;
4059 }
4060
4061 ovs_mutex_init_adaptive(&rule->stats_mutex);
4062 rule->stats.n_packets = 0;
4063 rule->stats.n_bytes = 0;
4064 rule->stats.used = rule->up.modified;
4065 rule->recirc_id = 0;
4066 rule->new_rule = NULL;
4067
4068 return 0;
4069 }
4070
4071 static void
4072 rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_stats)
4073 OVS_REQUIRES(ofproto_mutex)
4074 {
4075 struct rule_dpif *rule = rule_dpif_cast(rule_);
4076
4077 if (old_rule_ && forward_stats) {
4078 struct rule_dpif *old_rule = rule_dpif_cast(old_rule_);
4079
4080 ovs_assert(!old_rule->new_rule);
4081
4082 /* Take a reference to the new rule, and refer all stats updates from
4083 * the old rule to the new rule. */
4084 rule_dpif_ref(rule);
4085
4086 ovs_mutex_lock(&old_rule->stats_mutex);
4087 ovs_mutex_lock(&rule->stats_mutex);
4088 old_rule->new_rule = rule; /* Forward future stats. */
4089 rule->stats = old_rule->stats; /* Transfer stats to the new rule. */
4090 ovs_mutex_unlock(&rule->stats_mutex);
4091 ovs_mutex_unlock(&old_rule->stats_mutex);
4092 }
4093
4094 complete_operation(rule);
4095 }
4096
4097 static void
4098 rule_delete(struct rule *rule_)
4099 OVS_REQUIRES(ofproto_mutex)
4100 {
4101 struct rule_dpif *rule = rule_dpif_cast(rule_);
4102 complete_operation(rule);
4103 }
4104
4105 static void
4106 rule_destruct(struct rule *rule_)
4107 OVS_NO_THREAD_SAFETY_ANALYSIS
4108 {
4109 struct rule_dpif *rule = rule_dpif_cast(rule_);
4110
4111 ovs_mutex_destroy(&rule->stats_mutex);
4112 /* Release reference to the new rule, if any. */
4113 if (rule->new_rule) {
4114 rule_dpif_unref(rule->new_rule);
4115 }
4116 if (rule->recirc_id) {
4117 recirc_free_id(rule->recirc_id);
4118 }
4119 }
4120
4121 static void
4122 rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
4123 long long int *used)
4124 {
4125 struct rule_dpif *rule = rule_dpif_cast(rule_);
4126
4127 ovs_mutex_lock(&rule->stats_mutex);
4128 if (OVS_UNLIKELY(rule->new_rule)) {
4129 rule_get_stats(&rule->new_rule->up, packets, bytes, used);
4130 } else {
4131 *packets = rule->stats.n_packets;
4132 *bytes = rule->stats.n_bytes;
4133 *used = rule->stats.used;
4134 }
4135 ovs_mutex_unlock(&rule->stats_mutex);
4136 }
4137
4138 static void
4139 rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
4140 struct dp_packet *packet)
4141 {
4142 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4143
4144 ofproto_dpif_execute_actions(ofproto, flow, rule, NULL, 0, packet);
4145 }
4146
4147 static enum ofperr
4148 rule_execute(struct rule *rule, const struct flow *flow,
4149 struct dp_packet *packet)
4150 {
4151 rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
4152 dp_packet_delete(packet);
4153 return 0;
4154 }
4155
4156 static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
4157 {
4158 return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
4159 }
4160
4161 static struct ofgroup *
4162 group_alloc(void)
4163 {
4164 struct group_dpif *group = xzalloc(sizeof *group);
4165 return &group->up;
4166 }
4167
4168 static void
4169 group_dealloc(struct ofgroup *group_)
4170 {
4171 struct group_dpif *group = group_dpif_cast(group_);
4172 free(group);
4173 }
4174
4175 static void
4176 group_construct_stats(struct group_dpif *group)
4177 OVS_REQUIRES(group->stats_mutex)
4178 {
4179 struct ofputil_bucket *bucket;
4180 const struct ovs_list *buckets;
4181
4182 group->packet_count = 0;
4183 group->byte_count = 0;
4184
4185 group_dpif_get_buckets(group, &buckets);
4186 LIST_FOR_EACH (bucket, list_node, buckets) {
4187 bucket->stats.packet_count = 0;
4188 bucket->stats.byte_count = 0;
4189 }
4190 }
4191
4192 void
4193 group_dpif_credit_stats(struct group_dpif *group,
4194 struct ofputil_bucket *bucket,
4195 const struct dpif_flow_stats *stats)
4196 {
4197 ovs_mutex_lock(&group->stats_mutex);
4198 group->packet_count += stats->n_packets;
4199 group->byte_count += stats->n_bytes;
4200 if (bucket) {
4201 bucket->stats.packet_count += stats->n_packets;
4202 bucket->stats.byte_count += stats->n_bytes;
4203 } else { /* Credit to all buckets */
4204 const struct ovs_list *buckets;
4205
4206 group_dpif_get_buckets(group, &buckets);
4207 LIST_FOR_EACH (bucket, list_node, buckets) {
4208 bucket->stats.packet_count += stats->n_packets;
4209 bucket->stats.byte_count += stats->n_bytes;
4210 }
4211 }
4212 ovs_mutex_unlock(&group->stats_mutex);
4213 }
4214
4215 static enum ofperr
4216 group_construct(struct ofgroup *group_)
4217 {
4218 struct group_dpif *group = group_dpif_cast(group_);
4219
4220 ovs_mutex_init_adaptive(&group->stats_mutex);
4221 ovs_mutex_lock(&group->stats_mutex);
4222 group_construct_stats(group);
4223 ovs_mutex_unlock(&group->stats_mutex);
4224 return 0;
4225 }
4226
4227 static void
4228 group_destruct(struct ofgroup *group_)
4229 {
4230 struct group_dpif *group = group_dpif_cast(group_);
4231 ovs_mutex_destroy(&group->stats_mutex);
4232 }
4233
4234 static enum ofperr
4235 group_modify(struct ofgroup *group_)
4236 {
4237 struct ofproto_dpif *ofproto = ofproto_dpif_cast(group_->ofproto);
4238
4239 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
4240
4241 return 0;
4242 }
4243
4244 static enum ofperr
4245 group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
4246 {
4247 struct group_dpif *group = group_dpif_cast(group_);
4248 struct ofputil_bucket *bucket;
4249 const struct ovs_list *buckets;
4250 struct bucket_counter *bucket_stats;
4251
4252 ovs_mutex_lock(&group->stats_mutex);
4253 ogs->packet_count = group->packet_count;
4254 ogs->byte_count = group->byte_count;
4255
4256 group_dpif_get_buckets(group, &buckets);
4257 bucket_stats = ogs->bucket_stats;
4258 LIST_FOR_EACH (bucket, list_node, buckets) {
4259 bucket_stats->packet_count = bucket->stats.packet_count;
4260 bucket_stats->byte_count = bucket->stats.byte_count;
4261 bucket_stats++;
4262 }
4263 ovs_mutex_unlock(&group->stats_mutex);
4264
4265 return 0;
4266 }
4267
4268 /* If the group exists, this function increments the group's reference count.
4269 *
4270 * Make sure to call group_dpif_unref() after no longer needing to maintain
4271 * a reference to the group. */
4272 bool
4273 group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
4274 struct group_dpif **group)
4275 {
4276 struct ofgroup *ofgroup;
4277 bool found;
4278
4279 found = ofproto_group_lookup(&ofproto->up, group_id, &ofgroup);
4280 *group = found ? group_dpif_cast(ofgroup) : NULL;
4281
4282 return found;
4283 }
4284
4285 void
4286 group_dpif_get_buckets(const struct group_dpif *group,
4287 const struct ovs_list **buckets)
4288 {
4289 *buckets = &group->up.buckets;
4290 }
4291
4292 enum ofp11_group_type
4293 group_dpif_get_type(const struct group_dpif *group)
4294 {
4295 return group->up.type;
4296 }
4297
4298 const char *
4299 group_dpif_get_selection_method(const struct group_dpif *group)
4300 {
4301 return group->up.props.selection_method;
4302 }
4303 \f
4304 /* Sends 'packet' out 'ofport'.
4305 * May modify 'packet'.
4306 * Returns 0 if successful, otherwise a positive errno value. */
4307 int
4308 ofproto_dpif_send_packet(const struct ofport_dpif *ofport, struct dp_packet *packet)
4309 {
4310 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
4311 int error;
4312
4313 error = xlate_send_packet(ofport, packet);
4314
4315 ovs_mutex_lock(&ofproto->stats_mutex);
4316 ofproto->stats.tx_packets++;
4317 ofproto->stats.tx_bytes += dp_packet_size(packet);
4318 ovs_mutex_unlock(&ofproto->stats_mutex);
4319 return error;
4320 }
4321
4322 uint64_t
4323 group_dpif_get_selection_method_param(const struct group_dpif *group)
4324 {
4325 return group->up.props.selection_method_param;
4326 }
4327
4328 const struct field_array *
4329 group_dpif_get_fields(const struct group_dpif *group)
4330 {
4331 return &group->up.props.fields;
4332 }
4333 \f
4334 /* Returns the version string of the datapath that backs
4335 * this 'ofproto'. */
4337 static const char *
4338 get_datapath_version(const struct ofproto *ofproto_)
4339 {
4340 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4341
4342 return ofproto->backer->dp_version_string;
4343 }
4344
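/* Configures the IP fragment handling mode. Returns true if the requested
 * mode is supported; OFPC_FRAG_REASM is not implemented, so it is
 * rejected. */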
4345 static bool
4346 set_frag_handling(struct ofproto *ofproto_,
4347 enum ofp_config_flags frag_handling)
4348 {
4349 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4350 if (frag_handling != OFPC_FRAG_REASM) {
4351 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4352 return true;
4353 } else {
4354 return false;
4355 }
4356 }
4357
4358 static enum ofperr
4359 packet_out(struct ofproto *ofproto_, struct dp_packet *packet,
4360 const struct flow *flow,
4361 const struct ofpact *ofpacts, size_t ofpacts_len)
4362 {
4363 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4364
4365 ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
4366 ofpacts_len, packet);
4367 return 0;
4368 }
4369 \f
4370 /* NetFlow. */
4371
4372 static int
4373 set_netflow(struct ofproto *ofproto_,
4374 const struct netflow_options *netflow_options)
4375 {
4376 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4377
4378 if (netflow_options) {
4379 if (!ofproto->netflow) {
4380 ofproto->netflow = netflow_create();
4381 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4382 }
4383 return netflow_set_options(ofproto->netflow, netflow_options);
4384 } else if (ofproto->netflow) {
4385 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4386 netflow_unref(ofproto->netflow);
4387 ofproto->netflow = NULL;
4388 }
4389
4390 return 0;
4391 }
4392
4393 static void
4394 get_netflow_ids(const struct ofproto *ofproto_,
4395 uint8_t *engine_type, uint8_t *engine_id)
4396 {
4397 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4398
4399 dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
4400 }
4401 \f
4402 static struct ofproto_dpif *
4403 ofproto_dpif_lookup(const char *name)
4404 {
4405 struct ofproto_dpif *ofproto;
4406
4407 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
4408 hash_string(name, 0), &all_ofproto_dpifs) {
4409 if (!strcmp(ofproto->up.name, name)) {
4410 return ofproto;
4411 }
4412 }
4413 return NULL;
4414 }
4415
4416 static void
4417 ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
4418 const char *argv[], void *aux OVS_UNUSED)
4419 {
4420 struct ofproto_dpif *ofproto;
4421
4422 if (argc > 1) {
4423 ofproto = ofproto_dpif_lookup(argv[1]);
4424 if (!ofproto) {
4425 unixctl_command_reply_error(conn, "no such bridge");
4426 return;
4427 }
4428 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
4429 mac_learning_flush(ofproto->ml);
4430 ovs_rwlock_unlock(&ofproto->ml->rwlock);
4431 } else {
4432 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
4433 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
4434 mac_learning_flush(ofproto->ml);
4435 ovs_rwlock_unlock(&ofproto->ml->rwlock);
4436 }
4437 }
4438
4439 unixctl_command_reply(conn, "table successfully flushed");
4440 }
4441
4442 static void
4443 ofproto_unixctl_mcast_snooping_flush(struct unixctl_conn *conn, int argc,
4444 const char *argv[], void *aux OVS_UNUSED)
4445 {
4446 struct ofproto_dpif *ofproto;
4447
4448 if (argc > 1) {
4449 ofproto = ofproto_dpif_lookup(argv[1]);
4450 if (!ofproto) {
4451 unixctl_command_reply_error(conn, "no such bridge");
4452 return;
4453 }
4454
4455 if (!mcast_snooping_enabled(ofproto->ms)) {
4456 unixctl_command_reply_error(conn, "multicast snooping is disabled");
4457 return;
4458 }
4459 mcast_snooping_mdb_flush(ofproto->ms);
4460 } else {
4461 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
4462 if (!mcast_snooping_enabled(ofproto->ms)) {
4463 continue;
4464 }
4465 mcast_snooping_mdb_flush(ofproto->ms);
4466 }
4467 }
4468
4469 unixctl_command_reply(conn, "table successfully flushed");
4470 }
4471
4472 static struct ofport_dpif *
4473 ofbundle_get_a_port(const struct ofbundle *bundle)
4474 {
4475 return CONTAINER_OF(list_front(&bundle->ports), struct ofport_dpif,
4476 bundle_node);
4477 }
4478
4479 static void
4480 ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
4481 const char *argv[], void *aux OVS_UNUSED)
4482 {
4483 struct ds ds = DS_EMPTY_INITIALIZER;
4484 const struct ofproto_dpif *ofproto;
4485 const struct mac_entry *e;
4486
4487 ofproto = ofproto_dpif_lookup(argv[1]);
4488 if (!ofproto) {
4489 unixctl_command_reply_error(conn, "no such bridge");
4490 return;
4491 }
4492
4493 ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
4494 ovs_rwlock_rdlock(&ofproto->ml->rwlock);
4495 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
4496 struct ofbundle *bundle = mac_entry_get_port(ofproto->ml, e);
4497 char name[OFP_MAX_PORT_NAME_LEN];
4498
4499 ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
4500 name, sizeof name);
4501 ds_put_format(&ds, "%5s  %4d  "ETH_ADDR_FMT"  %3d\n",
4502 name, e->vlan, ETH_ADDR_ARGS(e->mac),
4503 mac_entry_age(ofproto->ml, e));
4504 }
4505 ovs_rwlock_unlock(&ofproto->ml->rwlock);
4506 unixctl_command_reply(conn, ds_cstr(&ds));
4507 ds_destroy(&ds);
4508 }
4509
4510 static void
4511 ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
4512 int argc OVS_UNUSED,
4513 const char *argv[],
4514 void *aux OVS_UNUSED)
4515 {
4516 struct ds ds = DS_EMPTY_INITIALIZER;
4517 const struct ofproto_dpif *ofproto;
4518 const struct ofbundle *bundle;
4519 const struct mcast_group *grp;
4520 struct mcast_group_bundle *b;
4521 struct mcast_mrouter_bundle *mrouter;
4522
4523 ofproto = ofproto_dpif_lookup(argv[1]);
4524 if (!ofproto) {
4525 unixctl_command_reply_error(conn, "no such bridge");
4526 return;
4527 }
4528
4529 if (!mcast_snooping_enabled(ofproto->ms)) {
4530 unixctl_command_reply_error(conn, "multicast snooping is disabled");
4531 return;
4532 }
4533
4534 ds_put_cstr(&ds, " port  VLAN  GROUP                Age\n");
4535 ovs_rwlock_rdlock(&ofproto->ms->rwlock);
4536 LIST_FOR_EACH (grp, group_node, &ofproto->ms->group_lru) {
4537 LIST_FOR_EACH (b, bundle_node, &grp->bundle_lru) {
4538 char name[OFP_MAX_PORT_NAME_LEN];
4539
4540 bundle = b->port;
4541 ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
4542 name, sizeof name);
4543 ds_put_format(&ds, "%5s  %4d  ", name, grp->vlan);
4544 ipv6_format_mapped(&grp->addr, &ds);
4545 ds_put_format(&ds, " %3d\n",
4546 mcast_bundle_age(ofproto->ms, b));
4547 }
4548 }
4549
4550 /* ports connected to multicast routers */
4551 LIST_FOR_EACH (mrouter, mrouter_node, &ofproto->ms->mrouter_lru) {
4552 char name[OFP_MAX_PORT_NAME_LEN];
4553
4554 bundle = mrouter->port;
4555 ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
4556 name, sizeof name);
4557 ds_put_format(&ds, "%5s  %4d  querier             %3d\n",
4558 name, mrouter->vlan,
4559 mcast_mrouter_age(ofproto->ms, mrouter));
4560 }
4561 ovs_rwlock_unlock(&ofproto->ms->rwlock);
4562 unixctl_command_reply(conn, ds_cstr(&ds));
4563 ds_destroy(&ds);
4564 }
4565
4566 struct trace_ctx {
4567 struct xlate_out xout;
4568 struct xlate_in xin;
4569 const struct flow *key;
4570 struct flow flow;
4571 struct ds *result;
4572 struct flow_wildcards wc;
4573 struct ofpbuf odp_actions;
4574 };
4575
4576 static void
4577 trace_format_rule(struct ds *result, int level, const struct rule_dpif *rule)
4578 {
4579 const struct rule_actions *actions;
4580 ovs_be64 cookie;
4581
4582 ds_put_char_multiple(result, '\t', level);
4583 if (!rule) {
4584 ds_put_cstr(result, "No match\n");
4585 return;
4586 }
4587
4588 ovs_mutex_lock(&rule->up.mutex);
4589 cookie = rule->up.flow_cookie;
4590 ovs_mutex_unlock(&rule->up.mutex);
4591
4592 ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
4593 rule->up.table_id, ntohll(cookie));
4594 cls_rule_format(&rule->up.cr, result);
4595 ds_put_char(result, '\n');
4596
4597 actions = rule_dpif_get_actions(rule);
4598
4599 ds_put_char_multiple(result, '\t', level);
4600 ds_put_cstr(result, "OpenFlow actions=");
4601 ofpacts_format(actions->ofpacts, actions->ofpacts_len, result);
4602 ds_put_char(result, '\n');
4603 }
4604
4605 static void
4606 trace_format_flow(struct ds *result, int level, const char *title,
4607 struct trace_ctx *trace)
4608 {
4609 ds_put_char_multiple(result, '\t', level);
4610 ds_put_format(result, "%s: ", title);
4611 /* Do not report unchanged flows for resubmits. */
4612 if ((level > 0 && flow_equal(&trace->xin.flow, &trace->flow))
4613 || (level == 0 && flow_equal(&trace->xin.flow, trace->key))) {
4614 ds_put_cstr(result, "unchanged");
4615 } else {
4616 flow_format(result, &trace->xin.flow);
4617 trace->flow = trace->xin.flow;
4618 }
4619 ds_put_char(result, '\n');
4620 }
4621
4622 static void
4623 trace_format_regs(struct ds *result, int level, const char *title,
4624 struct trace_ctx *trace)
4625 {
4626 size_t i;
4627
4628 ds_put_char_multiple(result, '\t', level);
4629 ds_put_format(result, "%s:", title);
4630 for (i = 0; i < FLOW_N_REGS; i++) {
4631 ds_put_format(result, " reg%"PRIuSIZE"=0x%"PRIx32, i, trace->flow.regs[i]);
4632 }
4633 ds_put_char(result, '\n');
4634 }
4635
4636 static void
4637 trace_format_odp(struct ds *result, int level, const char *title,
4638 struct trace_ctx *trace)
4639 {
4640 struct ofpbuf *odp_actions = &trace->odp_actions;
4641
4642 ds_put_char_multiple(result, '\t', level);
4643 ds_put_format(result, "%s: ", title);
4644 format_odp_actions(result, odp_actions->data, odp_actions->size);
4645 ds_put_char(result, '\n');
4646 }
4647
4648 static void
4649 trace_format_megaflow(struct ds *result, int level, const char *title,
4650 struct trace_ctx *trace)
4651 {
4652 struct match match;
4653
4654 ds_put_char_multiple(result, '\t', level);
4655 ds_put_format(result, "%s: ", title);
4656 match_init(&match, trace->key, &trace->wc);
4657 match_format(&match, result, OFP_DEFAULT_PRIORITY);
4658 ds_put_char(result, '\n');
4659 }
4660
4661 static void trace_report(struct xlate_in *, int recurse,
4662 const char *format, ...)
4663 OVS_PRINTF_FORMAT(3, 4);
4664 static void trace_report_valist(struct xlate_in *, int recurse,
4665 const char *format, va_list args)
4666 OVS_PRINTF_FORMAT(3, 0);
4667
4668 static void
4669 trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
4670 {
4671 struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
4672 struct ds *result = trace->result;
4673
4674 if (!recurse) {
4675 if (rule == xin->ofproto->miss_rule) {
4676 trace_report(xin, recurse,
4677 "No match, flow generates \"packet in\"s.");
4678 } else if (rule == xin->ofproto->no_packet_in_rule) {
4679 trace_report(xin, recurse, "No match, packets dropped because "
4680 "OFPPC_NO_PACKET_IN is set on in_port.");
4681 } else if (rule == xin->ofproto->drop_frags_rule) {
4682 trace_report(xin, recurse, "Packets dropped because they are IP "
4683 "fragments and the fragment handling mode is "
4684 "\"drop\".");
4685 }
4686 }
4687
4688 ds_put_char(result, '\n');
4689 if (recurse) {
4690 trace_format_flow(result, recurse, "Resubmitted flow", trace);
4691 trace_format_regs(result, recurse, "Resubmitted regs", trace);
4692 trace_format_odp(result, recurse, "Resubmitted odp", trace);
4693 trace_format_megaflow(result, recurse, "Resubmitted megaflow", trace);
4694 }
4695 trace_format_rule(result, recurse, rule);
4696 }
4697
4698 static void
4699 trace_report_valist(struct xlate_in *xin, int recurse,
4700 const char *format, va_list args)
4701 {
4702 struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
4703 struct ds *result = trace->result;
4704
4705 ds_put_char_multiple(result, '\t', recurse);
4706 ds_put_format_valist(result, format, args);
4707 ds_put_char(result, '\n');
4708 }
4709
4710 static void
4711 trace_report(struct xlate_in *xin, int recurse, const char *format, ...)
4712 {
4713 va_list args;
4714
4715 va_start(args, format);
4716 trace_report_valist(xin, recurse, format, args);
4717 va_end(args);
4718 }
4719
4720 /* Parses the 'argc' elements of 'argv', ignoring argv[0]. The following
4721 * forms are supported:
4722 *
4723 * - [dpname] odp_flow [-generate | packet]
4724 * - bridge br_flow [-generate | packet]
4725 *
4726 * On success, initializes '*ofprotop' and 'flow' and returns NULL. On failure
4727 * returns a nonnull malloced error message. */
4728 static char * OVS_WARN_UNUSED_RESULT
4729 parse_flow_and_packet(int argc, const char *argv[],
4730 struct ofproto_dpif **ofprotop, struct flow *flow,
4731 struct dp_packet **packetp)
4732 {
4733 const struct dpif_backer *backer = NULL;
4734 const char *error = NULL;
4735 char *m_err = NULL;
4736 struct simap port_names = SIMAP_INITIALIZER(&port_names);
4737 struct dp_packet *packet;
4738 struct ofpbuf odp_key;
4739 struct ofpbuf odp_mask;
4740
4741 ofpbuf_init(&odp_key, 0);
4742 ofpbuf_init(&odp_mask, 0);
4743
4744 /* Handle "-generate" or a hex string as the last argument. */
4745 if (!strcmp(argv[argc - 1], "-generate")) {
4746 packet = dp_packet_new(0);
4747 argc--;
4748 } else {
4749 error = eth_from_hex(argv[argc - 1], &packet);
4750 if (!error) {
4751 argc--;
4752 } else if (argc == 4) {
4753 /* The 3-argument form must end in "-generate" or a hex string. */
4754 goto exit;
4755 }
4756 error = NULL;
4757 }
4758
4759 /* odp_flow can have its in_port specified as a name instead of a port no.
4760 * We do not yet know whether a given flow is an odp_flow or a br_flow.
4761 * But, to test whether a flow is an odp_flow through odp_flow_from_string(),
4762 * we need to create a simap of name to port no. */
4763 if (argc == 3) {
4764 const char *dp_type;
4765 if (!strncmp(argv[1], "ovs-", 4)) {
4766 dp_type = argv[1] + 4;
4767 } else {
4768 dp_type = argv[1];
4769 }
4770 backer = shash_find_data(&all_dpif_backers, dp_type);
4771 } else if (argc == 2) {
4772 struct shash_node *node;
4773 if (shash_count(&all_dpif_backers) == 1) {
4774 node = shash_first(&all_dpif_backers);
4775 backer = node->data;
4776 }
4777 } else {
4778 error = "Syntax error";
4779 goto exit;
4780 }
4781 if (backer && backer->dpif) {
4782 struct dpif_port dpif_port;
4783 struct dpif_port_dump port_dump;
4784 DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, backer->dpif) {
4785 simap_put(&port_names, dpif_port.name,
4786 odp_to_u32(dpif_port.port_no));
4787 }
4788 }
4789
4790 /* Parse the flow and determine whether a datapath or
4791 * bridge is specified. If odp_flow_from_string()
4792 * returns 0, the flow is an odp_flow. If
4793 * parse_ofp_exact_flow() returns NULL, the flow is a br_flow. */
4794 if (!odp_flow_from_string(argv[argc - 1], &port_names,
4795 &odp_key, &odp_mask)) {
4796 if (!backer) {
4797 error = "Cannot find the datapath";
4798 goto exit;
4799 }
4800
4801 if (odp_flow_key_to_flow(odp_key.data, odp_key.size, flow) == ODP_FIT_ERROR) {
4802 error = "Failed to parse datapath flow key";
4803 goto exit;
4804 }
4805
4806 *ofprotop = xlate_lookup_ofproto(backer, flow,
4807 &flow->in_port.ofp_port);
4808 if (*ofprotop == NULL) {
4809 error = "Invalid datapath flow";
4810 goto exit;
4811 }
4812
4813 vsp_adjust_flow(*ofprotop, flow, NULL);
4814
4815 } else {
4816 char *err = parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL);
4817
4818 if (err) {
4819 m_err = xasprintf("Bad openflow flow syntax: %s", err);
4820 free(err);
4821 goto exit;
4822 } else {
4823 if (argc != 3) {
4824 error = "Must specify bridge name";
4825 goto exit;
4826 }
4827
4828 *ofprotop = ofproto_dpif_lookup(argv[1]);
4829 if (!*ofprotop) {
4830 error = "Unknown bridge name";
4831 goto exit;
4832 }
4833 }
4834 }
4835
4836 /* Generate a packet, if requested. */
4837 if (packet) {
4838 if (!dp_packet_size(packet)) {
4839 flow_compose(packet, flow);
4840 } else {
4841 /* Use the metadata from the flow and the packet argument
4842 * to reconstruct the flow. */
4843 pkt_metadata_from_flow(&packet->md, flow);
4844 flow_extract(packet, flow);
4845 }
4846 }
4847
4848 exit:
4849 if (error && !m_err) {
4850 m_err = xstrdup(error);
4851 }
4852 if (m_err) {
4853 dp_packet_delete(packet);
4854 packet = NULL;
4855 }
4856 *packetp = packet;
4857 ofpbuf_uninit(&odp_key);
4858 ofpbuf_uninit(&odp_mask);
4859 simap_destroy(&port_names);
4860 return m_err;
4861 }
4862
4863 static void
4864 ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
4865 void *aux OVS_UNUSED)
4866 {
4867 struct ofproto_dpif *ofproto;
4868 struct dp_packet *packet;
4869 char *error;
4870 struct flow flow;
4871
4872 error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
4873 if (!error) {
4874 struct ds result;
4875
4876 ds_init(&result);
4877 ofproto_trace(ofproto, &flow, packet, NULL, 0, &result);
4878 unixctl_command_reply(conn, ds_cstr(&result));
4879 ds_destroy(&result);
4880 dp_packet_delete(packet);
4881 } else {
4882 unixctl_command_reply_error(conn, error);
4883 free(error);
4884 }
4885 }
4886
4887 static void
4888 ofproto_unixctl_trace_actions(struct unixctl_conn *conn, int argc,
4889 const char *argv[], void *aux OVS_UNUSED)
4890 {
4891 enum ofputil_protocol usable_protocols;
4892 struct ofproto_dpif *ofproto;
4893 bool enforce_consistency;
4894 struct ofpbuf ofpacts;
4895 struct dp_packet *packet;
4896 struct ds result;
4897 struct flow flow;
4898 uint16_t in_port;
4899
4900 /* Three kinds of error return values! */
4901 enum ofperr retval;
4902 char *error;
4903
4904 packet = NULL;
4905 ds_init(&result);
4906 ofpbuf_init(&ofpacts, 0);
4907
4908 /* Parse actions. */
4909 error = ofpacts_parse_actions(argv[--argc], &ofpacts, &usable_protocols);
4910 if (error) {
4911 unixctl_command_reply_error(conn, error);
4912 free(error);
4913 goto exit;
4914 }
4915
4916 /* OpenFlow 1.1 and later suggest that the switch enforces certain forms of
4917 * consistency between the flow and the actions. With -consistent, we
4918 * enforce consistency even for a flow supported in OpenFlow 1.0. */
4919 if (!strcmp(argv[1], "-consistent")) {
4920 enforce_consistency = true;
4921 argv++;
4922 argc--;
4923 } else {
4924 enforce_consistency = false;
4925 }
4926
4927 error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
4928 if (error) {
4929 unixctl_command_reply_error(conn, error);
4930 free(error);
4931 goto exit;
4932 }
4933
4934 /* Do the same checks as handle_packet_out() in ofproto.c.
4935 *
4936 * We pass a 'table_id' of 0 to ofpacts_check(), which isn't
4937 * strictly correct because these actions aren't in any table, but it's OK
4938 * because 'table_id' is used only to check goto_table instructions, and
4939 * packet-outs take a list of actions, which therefore can't include
4940 * instructions.
4941 *
4942 * We skip the "meter" check here because meter is an instruction, not an
4943 * action, and thus cannot appear in ofpacts. */
4944 in_port = ofp_to_u16(flow.in_port.ofp_port);
4945 if (in_port >= ofproto->up.max_ports && in_port < ofp_to_u16(OFPP_MAX)) {
4946 unixctl_command_reply_error(conn, "invalid in_port");
4947 goto exit;
4948 }
4949 if (enforce_consistency) {
4950 retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
4951 u16_to_ofp(ofproto->up.max_ports),
4952 0, ofproto->up.n_tables,
4953 usable_protocols);
4954 } else {
4955 retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
4956 u16_to_ofp(ofproto->up.max_ports), 0,
4957 ofproto->up.n_tables, &usable_protocols);
4958 }
4959 if (!retval) {
4960 retval = ofproto_check_ofpacts(&ofproto->up, ofpacts.data,
4961 ofpacts.size);
4962 }
4963
4964 if (retval) {
4965 ds_clear(&result);
4966 ds_put_format(&result, "Bad actions: %s", ofperr_to_string(retval));
4967 unixctl_command_reply_error(conn, ds_cstr(&result));
4968 goto exit;
4969 }
4970
4971 ofproto_trace(ofproto, &flow, packet,
4972 ofpacts.data, ofpacts.size, &result);
4973 unixctl_command_reply(conn, ds_cstr(&result));
4974
4975 exit:
4976 ds_destroy(&result);
4977 dp_packet_delete(packet);
4978 ofpbuf_uninit(&ofpacts);
4979 }
4980
4981 /* Implements a "trace" through 'ofproto''s flow table, appending a textual
4982 * description of the results to 'ds'.
4983 *
4984 * The trace follows a packet with the specified 'flow' through the flow
4985 * table. 'packet' may be nonnull to trace an actual packet, with consequent
4986 * side effects (if it is nonnull then its flow must be 'flow').
4987 *
4988 * If 'ofpacts' is nonnull then its 'ofpacts_len' bytes specify the actions to
4989 * trace, otherwise the actions are determined by a flow table lookup. */
4990 static void
4991 ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
4992 const struct dp_packet *packet,
4993 const struct ofpact ofpacts[], size_t ofpacts_len,
4994 struct ds *ds)
4995 {
4996 struct trace_ctx trace;
4997 enum xlate_error error;
4998
4999 ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
5000 ds_put_cstr(ds, "Flow: ");
5001 flow_format(ds, flow);
5002 ds_put_char(ds, '\n');
5003
5004 ofpbuf_init(&trace.odp_actions, 0);
5005
5006 trace.result = ds;
5007 trace.key = flow; /* Original flow key, used for megaflow. */
5008 trace.flow = *flow; /* May be modified by actions. */
5009 xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
5010 ntohs(flow->tcp_flags), packet, &trace.wc,
5011 &trace.odp_actions);
5012 trace.xin.ofpacts = ofpacts;
5013 trace.xin.ofpacts_len = ofpacts_len;
5014 trace.xin.resubmit_hook = trace_resubmit;
5015 trace.xin.report_hook = trace_report_valist;
5016
5017 error = xlate_actions(&trace.xin, &trace.xout);
5018 ds_put_char(ds, '\n');
5019 trace_format_flow(ds, 0, "Final flow", &trace);
5020 trace_format_megaflow(ds, 0, "Megaflow", &trace);
5021
5022 ds_put_cstr(ds, "Datapath actions: ");
5023 format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size);
5024
5025 if (error != XLATE_OK) {
5026 ds_put_format(ds, "\nTranslation failed (%s), packet is dropped.\n",
5027 xlate_strerror(error));
5028 } else if (trace.xout.slow) {
5029 enum slow_path_reason slow;
5030
5031 ds_put_cstr(ds, "\nThis flow is handled by the userspace "
5032 "slow path because it:");
5033
5034 slow = trace.xout.slow;
5035 while (slow) {
5036 enum slow_path_reason bit = rightmost_1bit(slow);
5037
5038 ds_put_format(ds, "\n\t- %s.",
5039 slow_path_reason_to_explanation(bit));
5040
5041 slow &= ~bit;
5042 }
5043 }
5044
5045 xlate_out_uninit(&trace.xout);
5046 ofpbuf_uninit(&trace.odp_actions);
5047 }
5048
5049 /* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
5050 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
5051 * to destroy 'ofproto_shash' and free the returned value. */
5052 static const struct shash_node **
5053 get_ofprotos(struct shash *ofproto_shash)
5054 {
5055 const struct ofproto_dpif *ofproto;
5056
5057 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5058 char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
5059 shash_add_nocopy(ofproto_shash, name, ofproto);
5060 }
5061
5062 return shash_sort(ofproto_shash);
5063 }
5064
5065 static void
5066 ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
5067 const char *argv[] OVS_UNUSED,
5068 void *aux OVS_UNUSED)
5069 {
5070 struct ds ds = DS_EMPTY_INITIALIZER;
5071 struct shash ofproto_shash;
5072 const struct shash_node **sorted_ofprotos;
5073 int i;
5074
5075 shash_init(&ofproto_shash);
5076 sorted_ofprotos = get_ofprotos(&ofproto_shash);
5077 for (i = 0; i < shash_count(&ofproto_shash); i++) {
5078 const struct shash_node *node = sorted_ofprotos[i];
5079 ds_put_format(&ds, "%s\n", node->name);
5080 }
5081
5082 shash_destroy(&ofproto_shash);
5083 free(sorted_ofprotos);
5084
5085 unixctl_command_reply(conn, ds_cstr(&ds));
5086 ds_destroy(&ds);
5087 }
5088
5089 static void
5090 dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
5091 {
5092 const struct shash_node **ofprotos;
5093 struct dpif_dp_stats dp_stats;
5094 struct shash ofproto_shash;
5095 size_t i;
5096
5097 dpif_get_dp_stats(backer->dpif, &dp_stats);
5098
5099 ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
5100 dpif_name(backer->dpif), dp_stats.n_hit, dp_stats.n_missed);
5101
5102 shash_init(&ofproto_shash);
5103 ofprotos = get_ofprotos(&ofproto_shash);
5104 for (i = 0; i < shash_count(&ofproto_shash); i++) {
5105 struct ofproto_dpif *ofproto = ofprotos[i]->data;
5106 const struct shash_node **ports;
5107 size_t j;
5108
5109 if (ofproto->backer != backer) {
5110 continue;
5111 }
5112
5113 ds_put_format(ds, "\t%s:\n", ofproto->up.name);
5114
5115 ports = shash_sort(&ofproto->up.port_by_name);
5116 for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
5117 const struct shash_node *node = ports[j];
5118 struct ofport *ofport = node->data;
5119 struct smap config;
5120 odp_port_t odp_port;
5121
5122 ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
5123 ofport->ofp_port);
5124
5125 odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
5126 if (odp_port != ODPP_NONE) {
5127 ds_put_format(ds, "%"PRIu32":", odp_port);
5128 } else {
5129 ds_put_cstr(ds, "none:");
5130 }
5131
5132 ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));
5133
5134 smap_init(&config);
5135 if (!netdev_get_config(ofport->netdev, &config)) {
5136 const struct smap_node **nodes;
5137 size_t i;
5138
5139 nodes = smap_sort(&config);
5140 for (i = 0; i < smap_count(&config); i++) {
5141 const struct smap_node *node = nodes[i];
5142 ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
5143 node->key, node->value);
5144 }
5145 free(nodes);
5146 }
5147 smap_destroy(&config);
5148
5149 ds_put_char(ds, ')');
5150 ds_put_char(ds, '\n');
5151 }
5152 free(ports);
5153 }
5154 shash_destroy(&ofproto_shash);
5155 free(ofprotos);
5156 }
5157
5158 static void
5159 ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
5160 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
5161 {
5162 struct ds ds = DS_EMPTY_INITIALIZER;
5163 const struct shash_node **backers;
5164 int i;
5165
5166 backers = shash_sort(&all_dpif_backers);
5167 for (i = 0; i < shash_count(&all_dpif_backers); i++) {
5168 dpif_show_backer(backers[i]->data, &ds);
5169 }
5170 free(backers);
5171
5172 unixctl_command_reply(conn, ds_cstr(&ds));
5173 ds_destroy(&ds);
5174 }
5175
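/* Implements the "dpif/dump-flows" unixctl command: dumps the datapath flows
 * belonging to the bridge named by the last argument, prefixing each flow
 * with its UFID when "-m" is given. */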
5176 static void
5177 ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
5178 int argc OVS_UNUSED, const char *argv[],
5179 void *aux OVS_UNUSED)
5180 {
5181 const struct ofproto_dpif *ofproto;
5182
5183 struct ds ds = DS_EMPTY_INITIALIZER;
5184 bool verbosity = false;
5185
5186 struct dpif_port dpif_port;
5187 struct dpif_port_dump port_dump;
5188 struct hmap portno_names;
5189
5190 struct dpif_flow_dump *flow_dump;
5191 struct dpif_flow_dump_thread *flow_dump_thread;
5192 struct dpif_flow f;
5193 int error;
5194
5195 ofproto = ofproto_dpif_lookup(argv[argc - 1]);
5196 if (!ofproto) {
5197 unixctl_command_reply_error(conn, "no such bridge");
5198 return;
5199 }
5200
5201 if (argc > 2 && !strcmp(argv[1], "-m")) {
5202 verbosity = true;
5203 }
5204
5205 hmap_init(&portno_names);
5206 DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, ofproto->backer->dpif) {
5207 odp_portno_names_set(&portno_names, dpif_port.port_no, dpif_port.name);
5208 }
5209
5210 ds_init(&ds);
5211 flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false);
5212 flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
5213 while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
5214 struct flow flow;
5215
5216 if (odp_flow_key_to_flow(f.key, f.key_len, &flow) == ODP_FIT_ERROR
5217 || xlate_lookup_ofproto(ofproto->backer, &flow, NULL) != ofproto) {
5218 continue;
5219 }
5220
5221 if (verbosity) {
5222 odp_format_ufid(&f.ufid, &ds);
5223 ds_put_cstr(&ds, " ");
5224 }
5225 odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
5226 &portno_names, &ds, verbosity);
5227 ds_put_cstr(&ds, ", ");
5228 dpif_flow_stats_format(&f.stats, &ds);
5229 ds_put_cstr(&ds, ", actions:");
5230 format_odp_actions(&ds, f.actions, f.actions_len);
5231 ds_put_char(&ds, '\n');
5232 }
5233 dpif_flow_dump_thread_destroy(flow_dump_thread);
5234 error = dpif_flow_dump_destroy(flow_dump);
5235
5236 if (error) {
5237 ds_clear(&ds);
5238 ds_put_format(&ds, "dpif/dump-flows failed: %s", ovs_strerror(error));
5239 unixctl_command_reply_error(conn, ds_cstr(&ds));
5240 } else {
5241 unixctl_command_reply(conn, ds_cstr(&ds));
5242 }
5243 odp_portno_names_destroy(&portno_names);
5244 hmap_destroy(&portno_names);
5245 ds_destroy(&ds);
5246 }
5247
5248 static void
5249 ofproto_revalidate_all_backers(void)
5250 {
5251 const struct shash_node **backers;
5252 int i;
5253
5254 backers = shash_sort(&all_dpif_backers);
5255 for (i = 0; i < shash_count(&all_dpif_backers); i++) {
5256 struct dpif_backer *backer = backers[i]->data;
5257 backer->need_revalidate = REV_RECONFIGURE;
5258 }
5259 free(backers);
5260 }
5261
5262 static void
5263 disable_tnl_push_pop(struct unixctl_conn *conn, int argc OVS_UNUSED,
5264 const char *argv[], void *aux OVS_UNUSED)
5265 {
5266 if (!strcasecmp(argv[1], "off")) {
5267 ofproto_use_tnl_push_pop = false;
5268 unixctl_command_reply(conn, "Tunnel push-pop off");
5269 ofproto_revalidate_all_backers();
5270 } else if (!strcasecmp(argv[1], "on")) {
5271 ofproto_use_tnl_push_pop = true;
5272 unixctl_command_reply(conn, "Tunnel push-pop on");
5273 ofproto_revalidate_all_backers();
5274 }
5275 }
5276
5277 static void
5278 ofproto_unixctl_init(void)
5279 {
5280 static bool registered;
5281 if (registered) {
5282 return;
5283 }
5284 registered = true;
5285
5286 unixctl_command_register(
5287 "ofproto/trace",
5288 "{[dp_name] odp_flow | bridge br_flow} [-generate|packet]",
5289 1, 3, ofproto_unixctl_trace, NULL);
5290 unixctl_command_register(
5291 "ofproto/trace-packet-out",
5292 "[-consistent] {[dp_name] odp_flow | bridge br_flow} [-generate|packet] actions",
5293 2, 6, ofproto_unixctl_trace_actions, NULL);
5294 unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
5295 ofproto_unixctl_fdb_flush, NULL);
5296 unixctl_command_register("fdb/show", "bridge", 1, 1,
5297 ofproto_unixctl_fdb_show, NULL);
5298 unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
5299 ofproto_unixctl_mcast_snooping_flush, NULL);
5300 unixctl_command_register("mdb/show", "bridge", 1, 1,
5301 ofproto_unixctl_mcast_snooping_show, NULL);
5302 unixctl_command_register("dpif/dump-dps", "", 0, 0,
5303 ofproto_unixctl_dpif_dump_dps, NULL);
5304 unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
5305 NULL);
5306 unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
5307 ofproto_unixctl_dpif_dump_flows, NULL);
5308
5309 unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
5310 disable_tnl_push_pop, NULL);
5311 }
5312
5313 /* Returns true if 'table_id' is the table used for internal rules,
5314 * false otherwise. */
5315 bool
5316 table_is_internal(uint8_t table_id)
5317 {
5318 return table_id == TBL_INTERNAL;
5319 }
5320 \f
5321 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
5322 *
5323 * This is deprecated. It is only for compatibility with broken device drivers
5324 * in old versions of Linux that do not properly support VLANs when VLAN
5325 * devices are not used. When broken device drivers are no longer in
5326 * widespread use, we will delete these interfaces. */
5327
5328 static int
5329 set_realdev(struct ofport *ofport_, ofp_port_t realdev_ofp_port, int vid)
5330 {
5331 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
5332 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
5333
5334 if (realdev_ofp_port == ofport->realdev_ofp_port
5335 && vid == ofport->vlandev_vid) {
5336 return 0;
5337 }
5338
5339 ofproto->backer->need_revalidate = REV_RECONFIGURE;
5340
5341 if (ofport->realdev_ofp_port) {
5342 vsp_remove(ofport);
5343 }
5344 if (realdev_ofp_port && ofport->bundle) {
5345 /* vlandevs are enslaved to their realdevs, so they are not allowed to
5346 * themselves be part of a bundle. */
5347 bundle_set(ofport_->ofproto, ofport->bundle, NULL);
5348 }
5349
5350 ofport->realdev_ofp_port = realdev_ofp_port;
5351 ofport->vlandev_vid = vid;
5352
5353 if (realdev_ofp_port) {
5354 vsp_add(ofport, realdev_ofp_port, vid);
5355 }
5356
5357 return 0;
5358 }
5359
5360 static uint32_t
5361 hash_realdev_vid(ofp_port_t realdev_ofp_port, int vid)
5362 {
5363 return hash_2words(ofp_to_u16(realdev_ofp_port), vid);
5364 }
5365
5366 bool
5367 ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
5368 OVS_EXCLUDED(ofproto->vsp_mutex)
5369 {
5370 /* hmap_is_empty is thread safe. */
5371 return !hmap_is_empty(&ofproto->realdev_vid_map);
5372 }
5373
5374
5375 static ofp_port_t
5376 vsp_realdev_to_vlandev__(const struct ofproto_dpif *ofproto,
5377 ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
5378 OVS_REQUIRES(ofproto->vsp_mutex)
5379 {
5380 if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
5381 int vid = vlan_tci_to_vid(vlan_tci);
5382 const struct vlan_splinter *vsp;
5383
5384 HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
5385 hash_realdev_vid(realdev_ofp_port, vid),
5386 &ofproto->realdev_vid_map) {
5387 if (vsp->realdev_ofp_port == realdev_ofp_port
5388 && vsp->vid == vid) {
5389 return vsp->vlandev_ofp_port;
5390 }
5391 }
5392 }
5393 return realdev_ofp_port;
5394 }
5395
5396 /* Returns the OFP port number of the Linux VLAN device that corresponds to
5397 * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
5398 * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and
5399 * 'vlan_tci' 9, it would return the port number of eth0.9.
5400 *
5401 * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
5402 * function just returns its 'realdev_ofp_port' argument. */
5403 ofp_port_t
5404 vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
5405 ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
5406 OVS_EXCLUDED(ofproto->vsp_mutex)
5407 {
5408 ofp_port_t ret;
5409
5410 /* hmap_is_empty is thread safe; see if we can return immediately. */
5411 if (hmap_is_empty(&ofproto->realdev_vid_map)) {
5412 return realdev_ofp_port;
5413 }
5414 ovs_mutex_lock(&ofproto->vsp_mutex);
5415 ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
5416 ovs_mutex_unlock(&ofproto->vsp_mutex);
5417 return ret;
5418 }
5419
5420 static struct vlan_splinter *
5421 vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port)
5422 {
5423 struct vlan_splinter *vsp;
5424
5425 HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node,
5426 hash_ofp_port(vlandev_ofp_port),
5427 &ofproto->vlandev_map) {
5428 if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
5429 return vsp;
5430 }
5431 }
5432
5433 return NULL;
5434 }
5435
5436 /* Returns the OpenFlow port number of the "real" device underlying the Linux
5437 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
5438 * VLAN VID of the Linux VLAN device in '*vid'. For example, given
5439 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
5440 * eth0 and store 9 in '*vid'.
5441 *
5442 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
5443 * VLAN device. Unless VLAN splinters are enabled, this is what this function
5444 * always does. */
5445 static ofp_port_t
5446 vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
5447 ofp_port_t vlandev_ofp_port, int *vid)
5448 OVS_REQUIRES(ofproto->vsp_mutex)
5449 {
5450 if (!hmap_is_empty(&ofproto->vlandev_map)) {
5451 const struct vlan_splinter *vsp;
5452
5453 vsp = vlandev_find(ofproto, vlandev_ofp_port);
5454 if (vsp) {
5455 if (vid) {
5456 *vid = vsp->vid;
5457 }
5458 return vsp->realdev_ofp_port;
5459 }
5460 }
5461 return 0;
5462 }
5463
5464 /* Given 'flow', a flow representing a packet received on 'ofproto', checks
5465 * whether 'flow->in_port' represents a Linux VLAN device. If so, changes
5466 * 'flow->in_port' to the "real" device backing the VLAN device, sets
5467 * 'flow->vlan_tci' to the VLAN VID, and returns true. Optionally pushes the
5468 * appropriate VLAN on 'packet' if provided. Otherwise (which is always the
5469 * case unless VLAN splinters are enabled), returns false without making any
5470 * changes. */
5471 bool
5472 vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow,
5473 struct dp_packet *packet)
5474 OVS_EXCLUDED(ofproto->vsp_mutex)
5475 {
5476 ofp_port_t realdev;
5477 int vid;
5478
5479 /* hmap_is_empty is thread safe. */
5480 if (hmap_is_empty(&ofproto->vlandev_map)) {
5481 return false;
5482 }
5483
5484 ovs_mutex_lock(&ofproto->vsp_mutex);
5485 realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
5486 ovs_mutex_unlock(&ofproto->vsp_mutex);
5487 if (!realdev) {
5488 return false;
5489 }
5490
5491 /* Cause the flow to be processed as if it came in on the real device with
5492 * the VLAN device's VLAN ID. */
5493 flow->in_port.ofp_port = realdev;
5494 flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
5495
5496 if (packet) {
5497 /* Make the packet resemble the flow, so that it gets sent to an
5498 * OpenFlow controller properly, so that it looks correct for sFlow,
5499 * and so that flow_extract() will get the correct vlan_tci if it is
5500 * called on 'packet'. */
5501 eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
5502 }
5503
5504 return true;
5505 }
5506
5507 static void
5508 vsp_remove(struct ofport_dpif *port)
5509 {
5510 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
5511 struct vlan_splinter *vsp;
5512
5513 ovs_mutex_lock(&ofproto->vsp_mutex);
5514 vsp = vlandev_find(ofproto, port->up.ofp_port);
5515 if (vsp) {
5516 hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
5517 hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
5518 free(vsp);
5519
5520 port->realdev_ofp_port = 0;
5521 } else {
5522 VLOG_ERR("missing vlan device record");
5523 }
5524 ovs_mutex_unlock(&ofproto->vsp_mutex);
5525 }
5526
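/* Records 'port' as a VLAN splinter: a Linux VLAN device with VLAN ID 'vid'
 * on top of the real device with port number 'realdev_ofp_port'. Logs an
 * error if a conflicting record already exists. */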
5527 static void
5528 vsp_add(struct ofport_dpif *port, ofp_port_t realdev_ofp_port, int vid)
5529 {
5530 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
5531
5532 ovs_mutex_lock(&ofproto->vsp_mutex);
5533 if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
5534 && (vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, htons(vid))
5535 == realdev_ofp_port)) {
5536 struct vlan_splinter *vsp;
5537
5538 vsp = xmalloc(sizeof *vsp);
5539 vsp->realdev_ofp_port = realdev_ofp_port;
5540 vsp->vlandev_ofp_port = port->up.ofp_port;
5541 vsp->vid = vid;
5542
5543 port->realdev_ofp_port = realdev_ofp_port;
5544
5545 hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
5546 hash_ofp_port(port->up.ofp_port));
5547 hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
5548 hash_realdev_vid(realdev_ofp_port, vid));
5549 } else {
5550 VLOG_ERR("duplicate vlan device record");
5551 }
5552 ovs_mutex_unlock(&ofproto->vsp_mutex);
5553 }
5554
5555 static odp_port_t
5556 ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
5557 {
5558 const struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
5559 return ofport ? ofport->odp_port : ODPP_NONE;
5560 }
5561
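/* Returns the ofport within 'backer' whose datapath port number is
 * 'odp_port', or NULL if there is none. */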
5562 struct ofport_dpif *
5563 odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port)
5564 {
5565 struct ofport_dpif *port;
5566
5567 ovs_rwlock_rdlock(&backer->odp_to_ofport_lock);
5568 HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
5569 &backer->odp_to_ofport_map) {
5570 if (port->odp_port == odp_port) {
5571 ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
5572 return port;
5573 }
5574 }
5575
5576 ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
5577 return NULL;
5578 }
5579
5580 static ofp_port_t
5581 odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
5582 {
5583 struct ofport_dpif *port;
5584
5585 port = odp_port_to_ofport(ofproto->backer, odp_port);
5586 if (port && &ofproto->up == port->up.ofproto) {
5587 return port->up.ofp_port;
5588 } else {
5589 return OFPP_NONE;
5590 }
5591 }
5592
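/* Illustrative sketch (not in the original file): translating a datapath
 * port number from an upcall back to the OpenFlow port number that
 * controllers see, and checking the reverse mapping.  Purely an example of
 * how the two helpers above compose. */
static void
example_port_translation(const struct ofproto_dpif *ofproto,
                         odp_port_t odp_port)
{
    ofp_port_t ofp_port = odp_port_to_ofp_port(ofproto, odp_port);

    if (ofp_port != OFPP_NONE) {
        /* The reverse lookup recovers the original datapath port. */
        ovs_assert(ofp_port_to_odp_port(ofproto, ofp_port) == odp_port);
    }
}
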
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
                               const struct match *match, int priority,
                               uint16_t idle_timeout,
                               const struct ofpbuf *ofpacts,
                               struct rule **rulep)
{
    struct ofproto_flow_mod ofm;
    struct rule_dpif *rule;
    int error;

    ofm.fm.match = *match;
    ofm.fm.priority = priority;
    ofm.fm.new_cookie = htonll(0);
    ofm.fm.cookie = htonll(0);
    ofm.fm.cookie_mask = htonll(0);
    ofm.fm.modify_cookie = false;
    ofm.fm.table_id = TBL_INTERNAL;
    ofm.fm.command = OFPFC_ADD;
    ofm.fm.idle_timeout = idle_timeout;
    ofm.fm.hard_timeout = 0;
    ofm.fm.importance = 0;
    ofm.fm.buffer_id = 0;
    ofm.fm.out_port = 0;
    ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
    ofm.fm.ofpacts = ofpacts->data;
    ofm.fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &ofm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
                    ofperr_to_string(error));
        *rulep = NULL;
        return error;
    }

    rule = rule_dpif_lookup_in_table(ofproto,
                                     ofproto_dpif_get_tables_version(ofproto),
                                     TBL_INTERNAL, &ofm.fm.match.flow,
                                     &ofm.fm.match.wc);
    if (rule) {
        *rulep = &rule->up;
    } else {
        OVS_NOT_REACHED();
    }
    return 0;
}

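/* A minimal usage sketch (an assumption, not code from the original file):
 * install a hidden TBL_INTERNAL flow that forwards packets arriving on
 * 'in_port' to 'out_port'.  Priority 100 and the zero idle timeout are
 * illustrative choices. */
static int
example_add_internal_flow(struct ofproto_dpif *ofproto, ofp_port_t in_port,
                          ofp_port_t out_port, struct rule **rulep)
{
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    struct match match;
    int error;

    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    ofpact_put_OUTPUT(&ofpacts)->port = out_port;

    match_init_catchall(&match);
    match_set_in_port(&match, in_port);

    error = ofproto_dpif_add_internal_flow(ofproto, &match, 100, 0, &ofpacts,
                                           rulep);
    ofpbuf_uninit(&ofpacts);
    return error;
}
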
int
ofproto_dpif_delete_internal_flow(struct ofproto_dpif *ofproto,
                                  struct match *match, int priority)
{
    struct ofproto_flow_mod ofm;
    int error;

    ofm.fm.match = *match;
    ofm.fm.priority = priority;
    ofm.fm.new_cookie = htonll(0);
    ofm.fm.cookie = htonll(0);
    ofm.fm.cookie_mask = htonll(0);
    ofm.fm.modify_cookie = false;
    ofm.fm.table_id = TBL_INTERNAL;
    ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
    ofm.fm.command = OFPFC_DELETE_STRICT;

    error = ofproto_flow_mod(&ofproto->up, &ofm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
                    ofperr_to_string(error));
        return error;
    }

    return 0;
}

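/* Companion sketch to the example above: remove the flow it installed.
 * Deletion uses OFPFC_DELETE_STRICT, so the match and priority must be
 * exactly the ones used at insertion. */
static int
example_delete_internal_flow(struct ofproto_dpif *ofproto, ofp_port_t in_port)
{
    struct match match;

    match_init_catchall(&match);
    match_set_in_port(&match, in_port);
    return ofproto_dpif_delete_internal_flow(ofproto, &match, 100);
}
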
const struct ofproto_class ofproto_dpif_class = {
    init,
    enumerate_types,
    enumerate_names,
    del,
    port_open_type,
    type_run,
    type_wait,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    wait,
    NULL,                       /* get_memory_usage. */
    type_get_memory_usage,
    flush,
    query_tables,
    set_tables_version,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    port_get_lacp_stats,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_insert,
    rule_delete,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_ipfix,
    set_cfm,
    cfm_status_changed,
    get_cfm_status,
    set_lldp,
    get_lldp_status,
    set_aa,
    aa_mapping_set,
    aa_mapping_unset,
    aa_vlan_get_queued,
    aa_vlan_get_queue_size,
    set_bfd,
    bfd_status_changed,
    get_bfd_status,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    get_stp_port_stats,
    set_rstp,
    get_rstp_status,
    set_rstp_port,
    get_rstp_port_status,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set__,
    mirror_get_stats__,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,
    set_mcast_snooping,
    set_mcast_snooping_port,
    set_realdev,
    NULL,                       /* meter_get_features */
    NULL,                       /* meter_set */
    NULL,                       /* meter_get */
    NULL,                       /* meter_del */
    group_alloc,                /* group_alloc */
    group_construct,            /* group_construct */
    group_destruct,             /* group_destruct */
    group_dealloc,              /* group_dealloc */
    group_modify,               /* group_modify */
    group_get_stats,            /* group_get_stats */
    get_datapath_version,       /* get_datapath_version */
};