/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "byte-order.h"
#include "connectivity.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-mirror.h"
#include "ofproto-dpif-monitor.h"
#include "ofproto-dpif-rid.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-trace.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "ofproto-dpif-xlate-cache.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-router.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(packet_in_overflow);

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *, long long now);

    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    struct ovs_list ports;      /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */
    bool protected;             /* Protected port mode */

    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static void bundle_flush_macs(struct ofbundle *, bool);
static void bundle_move(struct ofbundle *, struct ofbundle *);

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static void rstp_run(struct ofproto_dpif *ofproto);
static void set_rstp_port(struct ofport *,
                          const struct ofproto_port_rstp_settings *);

    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */

    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct ovs_list bundle_node; /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    struct lldp *lldp;          /* lldp, if any. */
    bool may_enable;            /* May be enabled in bonds. */
    bool is_tunnel;             /* This port is a tunnel. */
    bool is_layer3;             /* This is a layer 3 port. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct ofport_dpif *peer;   /* Peer if patch port. */

    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    /* Rapid Spanning Tree. */
    struct rstp_port *rstp_port; /* Rapid Spanning Tree Protocol, if any. */
    enum rstp_state rstp_state; /* Always RSTP_DISABLED if RSTP not in use. */

    /* Queue to DSCP mapping. */
    struct ofproto_port_queue *qdscp;

static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;

static void port_run(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
static void ofport_update_peer(struct ofport_dpif *);

COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_rstp);
COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);

/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

/* All existing ofproto_dpif instances, indexed by ->up.name. */
struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);

static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of port to bridge mappings. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Initialize 'ofm' for a learn action.  If the rule already existed, reference
 * to that rule is taken, otherwise a new rule is created.  'ofm' keeps the
 * rule reference in both cases. */
ofproto_dpif_flow_mod_init_for_learn(struct ofproto_dpif *ofproto,
                                     const struct ofputil_flow_mod *fm,
                                     struct ofproto_flow_mod *ofm)
    /* This will not take the global 'ofproto_mutex'. */
    return ofproto_flow_mod_init_for_learn(&ofproto->up, fm, ofm);
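/* Illustrative only -- not part of the original file.  A caller (such as the
 * learn-action translation code) is expected to fill in a struct
 * ofputil_flow_mod first and then hand it to the helper above; 'ofm' then
 * carries the rule reference until the caller releases it.  A minimal,
 * hypothetical sketch:
 *
 *     struct ofputil_flow_mod fm;       // match, priority, ofpacts, table_id
 *     struct ofproto_flow_mod ofm;
 *
 *     fm.command = OFPFC_MODIFY_STRICT; // typical for learned flows
 *     // ... fill in fm.match, fm.priority, fm.ofpacts, fm.ofpacts_len ...
 *     error = ofproto_dpif_flow_mod_init_for_learn(ofproto, &fm, &ofm);
 */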
/* Appends 'am' to the queue of asynchronous messages to be sent to the
 * controller.  Takes ownership of 'am' and any data it points to. */
ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
                            struct ofproto_async_msg *am)
    if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
        COVERAGE_INC(packet_in_overflow);
        ofproto_async_msg_free(am);

    /* Wakes up main thread for packet-in I/O. */
    seq_change(ofproto->ams_seq);

/* Factory functions. */

init(const struct shash *iface_hints)
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);

    ofproto_unixctl_init();
    ofproto_dpif_trace_init();

enumerate_types(struct sset *types)
    dp_enumerate_types(types);

enumerate_names(const char *type, struct sset *names)
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {

        sset_add(names, ofproto->up.name);

del(const char *type, const char *name)
    error = dpif_open(name, type, &dpif);
        error = dpif_delete(dpif);

port_open_type(const char *datapath_type, const char *port_type)
    return dpif_port_open_type(datapath_type, port_type);

/* Type functions. */

static void process_dpif_port_changes(struct dpif_backer *);
static void process_dpif_all_ports_changed(struct dpif_backer *);
static void process_dpif_port_change(struct dpif_backer *,
                                     const char *devname);
static void process_dpif_port_error(struct dpif_backer *, int error);

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {

type_run(const char *type)
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
        /* This is not necessarily a problem, since backers are only
         * created on demand. */

    /* This must be called before dpif_run() */
    dpif_poll_threads_set(backer->dpif, pmd_cpu_mask);

    if (dpif_run(backer->dpif)) {
        backer->need_revalidate = REV_RECONFIGURE;

    udpif_run(backer->udpif);

    /* If vswitchd started with other_config:flow_restore_wait set as "true",
     * and the configuration has now changed to "false", enable receiving
     * packets from the datapath. */
    if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
        backer->recv_set_enable = true;

        error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
            VLOG_ERR("Failed to enable receiving packets in dpif.");
        dpif_flow_flush(backer->dpif);
        backer->need_revalidate = REV_RECONFIGURE;

    if (backer->recv_set_enable) {
        udpif_set_threads(backer->udpif, n_handlers, n_revalidators);

    if (backer->need_revalidate) {
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                char namebuf[NETDEV_VPORT_NAME_BUFSIZE];

                if (!iter->is_tunnel) {

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
                                                     namebuf, sizeof namebuf);
                node = simap_find(&tmp_backers, dp_port);
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                    node = simap_find(&backer->tnl_backers, dp_port);
                        odp_port_t odp_port = ODPP_NONE;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                            simap_put(&backer->tnl_backers, dp_port,
                                      odp_to_u32(odp_port));
                            node = simap_find(&backer->tnl_backers, dp_port);

                iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
                if (tnl_port_reconfigure(iter, iter->up.netdev,
                        ovs_native_tunneling_is_on(ofproto), dp_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, u32_to_odp(node->data));
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:    COVERAGE_INC(rev_reconfigure);    break;
        case REV_STP:            COVERAGE_INC(rev_stp);            break;
        case REV_RSTP:           COVERAGE_INC(rev_rstp);           break;
        case REV_BOND:           COVERAGE_INC(rev_bond);           break;
        case REV_PORT_TOGGLED:   COVERAGE_INC(rev_port_toggled);   break;
        case REV_FLOW_TABLE:     COVERAGE_INC(rev_flow_table);     break;
        case REV_MAC_LEARNING:   COVERAGE_INC(rev_mac_learning);   break;
        case REV_MCAST_SNOOPING: COVERAGE_INC(rev_mcast_snooping); break;
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *ofport;
            struct ofbundle *bundle;

            if (ofproto->backer != backer) {

            xlate_ofproto_set(ofproto, ofproto->up.name,
                              ofproto->backer->dpif, ofproto->ml,
                              ofproto->stp, ofproto->rstp, ofproto->ms,
                              ofproto->mbridge, ofproto->sflow, ofproto->ipfix,
                              ofproto->up.forward_bpdu,
                              connmgr_has_in_band(ofproto->up.connmgr),
                              &ofproto->backer->support);

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                xlate_bundle_set(ofproto, bundle, bundle->name,
                                 bundle->vlan_mode, bundle->vlan,
                                 bundle->trunks, bundle->use_priority_tags,
                                 bundle->bond, bundle->lacp,
                                 bundle->floodable, bundle->protected);

            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                int stp_port = ofport->stp_port
                    ? stp_port_no(ofport->stp_port)
                xlate_ofport_set(ofproto, ofport->bundle, ofport,
                                 ofport->up.ofp_port, ofport->odp_port,
                                 ofport->up.netdev, ofport->cfm, ofport->bfd,
                                 ofport->lldp, ofport->peer, stp_port,
                                 ofport->rstp_port, ofport->qdscp,
                                 ofport->n_qdscp, ofport->up.pp.config,
                                 ofport->up.pp.state, ofport->is_tunnel,

        udpif_revalidate(backer->udpif);

    process_dpif_port_changes(backer);
/* Check for and handle port changes in 'backer''s dpif. */
process_dpif_port_changes(struct dpif_backer *backer)
        error = dpif_port_poll(backer->dpif, &devname);
            process_dpif_all_ports_changed(backer);
            process_dpif_port_change(backer, devname);
            process_dpif_port_error(backer, error);

process_dpif_all_ports_changed(struct dpif_backer *backer)
    struct ofproto_dpif *ofproto;
    struct dpif_port dpif_port;
    struct dpif_port_dump dump;
    struct sset devnames;

    sset_init(&devnames);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            struct ofport *ofport;

            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
                sset_add(&devnames, netdev_get_name(ofport->netdev));

    DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
        sset_add(&devnames, dpif_port.name);

    SSET_FOR_EACH (devname, &devnames) {
        process_dpif_port_change(backer, devname);
    sset_destroy(&devnames);

process_dpif_port_change(struct dpif_backer *backer, const char *devname)
    struct ofproto_dpif *ofproto;
    struct dpif_port port;

    /* Don't report on the datapath's device. */
    if (!strcmp(devname, dpif_base_name(backer->dpif))) {

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                   &all_ofproto_dpifs) {
        if (simap_contains(&ofproto->backer->tnl_backers, devname)) {

    ofproto = lookup_ofproto_dpif_by_port_name(devname);
    if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
        /* The port was removed.  If we know the datapath,
         * report it through poll_set().  If we don't, it may be
         * notifying us of a removal we initiated, so ignore it.
         * If there's a pending ENOBUFS, let it stand, since
         * everything will be reevaluated. */
        if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
            sset_add(&ofproto->port_poll_set, devname);
            ofproto->port_poll_errno = 0;
    } else if (!ofproto) {
        /* The port was added, but we don't know with which
         * ofproto we should associate it.  Delete it. */
        dpif_port_del(backer->dpif, port.port_no);
        struct ofport_dpif *ofport;

        ofport = ofport_dpif_cast(shash_find_data(
                     &ofproto->up.port_by_name, devname));
            && ofport->odp_port != port.port_no
            && !odp_port_to_ofport(backer, port.port_no))
            /* 'ofport''s datapath port number has changed from
             * 'ofport->odp_port' to 'port.port_no'.  Update our internal data
             * structures to match. */
            ovs_rwlock_wrlock(&backer->odp_to_ofport_lock);
            hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
            ofport->odp_port = port.port_no;
            hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
                        hash_odp_port(port.port_no));
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            backer->need_revalidate = REV_RECONFIGURE;

    dpif_port_destroy(&port);

/* Propagate 'error' to all ofprotos based on 'backer'. */
process_dpif_port_error(struct dpif_backer *backer, int error)
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            sset_clear(&ofproto->port_poll_set);
            ofproto->port_poll_errno = error;

type_wait(const char *type)
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
        /* This is not necessarily a problem, since backers are only
         * created on demand. */

    dpif_wait(backer->dpif);

/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
    struct ofproto_dpif *ofproto = xzalloc(sizeof *ofproto);

dealloc(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

close_dpif_backer(struct dpif_backer *backer)
    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {

    udpif_destroy(backer->udpif);

    simap_destroy(&backer->tnl_backers);
    ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
    hmap_destroy(&backer->odp_to_ofport_map);
    shash_find_and_delete(&all_dpif_backers, backer->type);
    free(backer->dp_version_string);
    dpif_close(backer->dpif);

/* Datapath port slated for removal from datapath. */
    struct ovs_list list_node;

static bool check_variable_length_userdata(struct dpif_backer *backer);
static void check_support(struct dpif_backer *backer);

open_dpif_backer(const char *type, struct dpif_backer **backerp)
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct ovs_list garbage_list;
    struct odp_garbage *garbage;

    backer = shash_find_data(&all_dpif_backers, type);

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 ovs_strerror(error));

    backer->udpif = udpif_create(backer, backer->dpif);

    backer->type = xstrdup(type);
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    ovs_rwlock_init(&backer->odp_to_ofport_lock);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    backer->recv_set_enable = !ofproto_get_flow_restore_wait();

    if (backer->recv_set_enable) {
        dpif_flow_flush(backer->dpif);

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    ovs_list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            ovs_list_push_front(&garbage_list, &garbage->list_node);
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);

    shash_add(&all_dpif_backers, type, backer);

    check_support(backer);
    atomic_count_init(&backer->tnl_count, 0);

    error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, ovs_strerror(error));
        close_dpif_backer(backer);

    if (backer->recv_set_enable) {
        udpif_set_threads(backer->udpif, n_handlers, n_revalidators);

    /* This check fails if performed before udpif threads have been set,
     * as the kernel module checks that the 'pid' in userspace action
     * fits a live netlink socket. */
    backer->support.variable_length_userdata
        = check_variable_length_userdata(backer);
    backer->dp_version_string = dpif_get_dp_version(backer->dpif);
ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
    return ofproto_use_tnl_push_pop && ofproto->backer->support.tnl_push_pop &&
        atomic_count_get(&ofproto->backer->tnl_count);

/* Tests whether 'backer''s datapath supports recirculation.  Only newer
 * datapaths support OVS_KEY_ATTR_RECIRC_ID in keys.  We need to disable some
 * features on older datapaths that don't support this feature.
 *
 * Returns false if 'backer' definitely does not support recirculation, true if
 * it seems to support recirculation or if at least the error we get is
 * ambiguous. */
check_recirc(struct dpif_backer *backer)
    struct odputil_keybuf keybuf;
    struct odp_flow_key_parms odp_parms = {

    memset(&flow, 0, sizeof flow);

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&odp_parms, &key);
    enable_recirc = dpif_probe_feature(backer->dpif, "recirculation", &key,

        VLOG_INFO("%s: Datapath supports recirculation",
                  dpif_name(backer->dpif));
        VLOG_INFO("%s: Datapath does not support recirculation",
                  dpif_name(backer->dpif));

    return enable_recirc;

/* Tests whether 'dpif' supports unique flow ids.  We can skip serializing
 * some flow attributes for datapaths that support this feature.
 *
 * Returns true if 'dpif' supports UFID for flow operations.
 * Returns false if 'dpif' does not support UFID. */
check_ufid(struct dpif_backer *backer)
    struct odputil_keybuf keybuf;
    struct odp_flow_key_parms odp_parms = {

    memset(&flow, 0, sizeof flow);
    flow.dl_type = htons(0x1234);

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&odp_parms, &key);
    dpif_flow_hash(backer->dpif, key.data, key.size, &ufid);

    enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);

        VLOG_INFO("%s: Datapath supports unique flow ids",
                  dpif_name(backer->dpif));
        VLOG_INFO("%s: Datapath does not support unique flow ids",
                  dpif_name(backer->dpif));

/* Tests whether 'backer''s datapath supports variable-length
 * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.  We need
 * to disable some features on older datapaths that don't support this
 * feature.
 *
 * Returns false if 'backer' definitely does not support variable-length
 * userdata, true if it seems to support them or if at least the error we get
 * is ambiguous. */
check_variable_length_userdata(struct dpif_backer *backer)
    struct eth_header *eth;
    struct ofpbuf actions;
    struct dpif_execute execute;
    struct dp_packet packet;

    /* Compose a userspace action that will cause an ERANGE error on older
     * datapaths that don't support variable-length userdata.
     *
     * We really test for using userdata longer than 8 bytes, but older
     * datapaths accepted these, silently truncating the userdata to 8 bytes.
     * The same older datapaths rejected userdata shorter than 8 bytes, so we
     * test for that instead as a proxy for longer userdata support. */
    ofpbuf_init(&actions, 64);
    start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_USERSPACE);
    nl_msg_put_u32(&actions, OVS_USERSPACE_ATTR_PID,
                   dpif_port_get_pid(backer->dpif, ODPP_NONE, 0));
    nl_msg_put_unspec_zero(&actions, OVS_USERSPACE_ATTR_USERDATA, 4);
    nl_msg_end_nested(&actions, start);

    /* Compose a dummy ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    flow_extract(&packet, &flow);

    /* Execute the actions.  On older datapaths this fails with ERANGE, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.flow = &flow;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

        /* Variable-length userdata is not supported. */
        VLOG_WARN("%s: datapath does not support variable-length userdata "
                  "feature (needs Linux 3.10+ or kernel module from OVS "
                  "1.11+).  The NXAST_SAMPLE action will be ignored.",
                  dpif_name(backer->dpif));

        /* Something odd happened.  We're not sure whether variable-length
         * userdata is supported.  Default to "yes". */
        VLOG_WARN("%s: variable-length userdata feature probe failed (%s)",
                  dpif_name(backer->dpif), ovs_strerror(error));

/* Tests the MPLS label stack depth supported by 'backer''s datapath.
 *
 * Returns the number of elements in a struct flow's mpls_lse field
 * if the datapath supports at least that many entries in an
 * OVS_KEY_ATTR_MPLS field.
 * Otherwise returns the number of MPLS push actions supported by
 * the datapath. */
check_max_mpls_depth(struct dpif_backer *backer)
    for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
        struct odputil_keybuf keybuf;
        struct odp_flow_key_parms odp_parms = {

        memset(&flow, 0, sizeof flow);
        flow.dl_type = htons(ETH_TYPE_MPLS);
        flow_set_mpls_bos(&flow, n, 1);

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&odp_parms, &key);
        if (!dpif_probe_feature(backer->dpif, "MPLS", &key, NULL)) {

    VLOG_INFO("%s: MPLS label stack length probed as %d",
              dpif_name(backer->dpif), n);

/* Tests whether 'backer''s datapath supports masked data in
 * OVS_ACTION_ATTR_SET actions.  We need to disable some features on older
 * datapaths that don't support this feature. */
check_masked_set_action(struct dpif_backer *backer)
    struct eth_header *eth;
    struct ofpbuf actions;
    struct dpif_execute execute;
    struct dp_packet packet;
    struct ovs_key_ethernet key, mask;

    /* Compose a set action that will cause an EINVAL error on older
     * datapaths that don't support masked set actions.
     * Avoid using a full mask, as it could be translated to a non-masked
     * set action instead. */
    ofpbuf_init(&actions, 64);
    memset(&key, 0x53, sizeof key);
    memset(&mask, 0x7f, sizeof mask);
    commit_masked_set_action(&actions, OVS_KEY_ATTR_ETHERNET, &key, &mask,

    /* Compose a dummy ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    flow_extract(&packet, &flow);

    /* Execute the actions.  On older datapaths this fails with EINVAL, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.flow = &flow;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

        /* Masked set action is not supported. */
        VLOG_INFO("%s: datapath does not support masked set action feature.",
                  dpif_name(backer->dpif));

/* Tests whether 'backer''s datapath supports truncation of a packet in
 * OVS_ACTION_ATTR_TRUNC.  We need to disable some features on older
 * datapaths that don't support this feature. */
check_trunc_action(struct dpif_backer *backer)
    struct eth_header *eth;
    struct ofpbuf actions;
    struct dpif_execute execute;
    struct dp_packet packet;
    struct ovs_action_trunc *trunc;

    /* Compose an action with output(port:1,
     * max_len:OVS_ACTION_OUTPUT_MIN + 1).
     * This translates to one truncate action and one output action. */
    ofpbuf_init(&actions, 64);
    trunc = nl_msg_put_unspec_uninit(&actions,
                OVS_ACTION_ATTR_TRUNC, sizeof *trunc);

    trunc->max_len = ETH_HEADER_LEN + 1;
    nl_msg_put_odp_port(&actions, OVS_ACTION_ATTR_OUTPUT, u32_to_odp(1));

    /* Compose a dummy Ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    flow_extract(&packet, &flow);

    /* Execute the actions.  On older datapaths this fails with EINVAL, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.flow = &flow;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

        VLOG_INFO("%s: Datapath does not support truncate action",
                  dpif_name(backer->dpif));
        VLOG_INFO("%s: Datapath supports truncate action",
                  dpif_name(backer->dpif));

/* Tests whether 'backer''s datapath supports the clone action
 * OVS_ACTION_ATTR_CLONE. */
check_clone(struct dpif_backer *backer)
    struct dpif_execute execute;
    struct eth_header *eth;
    struct dp_packet packet;
    struct ofpbuf actions;
    /* Compose a clone with an empty action list
     * and check if the datapath can decode the message. */
    ofpbuf_init(&actions, 64);
    clone_start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_CLONE);
    nl_msg_end_nested(&actions, clone_start);

    /* Compose a dummy Ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    flow_extract(&packet, &flow);

    /* Execute the actions.  On older datapaths this fails with EINVAL, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.flow = &flow;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

        VLOG_INFO("%s: Datapath does not support clone action",
                  dpif_name(backer->dpif));
        VLOG_INFO("%s: Datapath supports clone action",
                  dpif_name(backer->dpif));

#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE)                        \
check_##NAME(struct dpif_backer *backer)                                    \
    struct odputil_keybuf keybuf;                                           \
    struct ofpbuf key;                                                      \
    struct odp_flow_key_parms odp_parms = {                                 \
    memset(&flow, 0, sizeof flow);                                          \
    flow.FIELD = VALUE;                                                     \
    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);                         \
    odp_flow_key_from_flow(&odp_parms, &key);                               \
    enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL);           \
    VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif));      \
    VLOG_INFO("%s: Datapath does not support "#NAME,                        \
              dpif_name(backer->dpif));                                     \

#define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD, FIELD, 1)

CHECK_FEATURE(ct_state)
CHECK_FEATURE(ct_zone)
CHECK_FEATURE(ct_mark)
CHECK_FEATURE__(ct_label, ct_label, ct_label.u64.lo, 1)
CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, CS_TRACKED|CS_SRC_NAT)

#undef CHECK_FEATURE
#undef CHECK_FEATURE__
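/* Illustrative sketch, not part of the original source: each CHECK_FEATURE
 * invocation above generates one probe function.  For example,
 * CHECK_FEATURE(ct_state) expands to roughly the following (the local
 * declarations and the elided initializer are assumptions based on the macro
 * body):
 *
 *     static bool
 *     check_ct_state(struct dpif_backer *backer)
 *     {
 *         struct flow flow;
 *         struct odputil_keybuf keybuf;
 *         struct ofpbuf key;
 *         bool enable;
 *         struct odp_flow_key_parms odp_parms = { .flow = &flow, ... };
 *
 *         memset(&flow, 0, sizeof flow);
 *         flow.ct_state = 1;
 *         ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
 *         odp_flow_key_from_flow(&odp_parms, &key);
 *         enable = dpif_probe_feature(backer->dpif, "ct_state", &key, NULL);
 *         ... log the result and return 'enable' ...
 *     }
 *
 * check_support() below calls each generated check_*() probe once when the
 * backer is opened. */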
check_support(struct dpif_backer *backer)
    /* This feature needs to be tested after udpif threads are set. */
    backer->support.variable_length_userdata = false;

    backer->support.odp.recirc = check_recirc(backer);
    backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
    backer->support.masked_set_action = check_masked_set_action(backer);
    backer->support.trunc = check_trunc_action(backer);
    backer->support.ufid = check_ufid(backer);
    backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
    backer->support.clone = check_clone(backer);

    backer->support.odp.ct_state = check_ct_state(backer);
    backer->support.odp.ct_zone = check_ct_zone(backer);
    backer->support.odp.ct_mark = check_ct_mark(backer);
    backer->support.odp.ct_label = check_ct_label(backer);

    backer->support.odp.ct_state_nat = check_ct_state_nat(backer);

construct(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;

    /* Tunnel module can get used right after the udpif threads are running. */
    ofproto_tunnel_init();

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);

    uuid_generate(&ofproto->uuid);
    atomic_init(&ofproto->tables_version, OVS_VERSION_MIN);
    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->ipfix = NULL;
    ofproto->stp = NULL;
    ofproto->rstp = NULL;
    ofproto->dump_seq = 0;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    ofproto->mbridge = mbridge_create();
    ofproto->has_bonded_bundles = false;
    ofproto->lacp_enabled = false;
    ovs_mutex_init_adaptive(&ofproto->stats_mutex);

    guarded_list_init(&ofproto->ams);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;
    ofproto->change_seq = 0;
    ofproto->ams_seq = seq_create();
    ofproto->ams_seqno = seq_read(ofproto->ams_seq);

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            shash_delete(&init_ofp_ports, node);

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);

    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

add_internal_miss_flow(struct ofproto_dpif *ofproto, int id,
                       const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
    match_init_catchall(&match);
    match_set_reg(&match, 0, id);

    error = ofproto_dpif_add_internal_flow(ofproto, &match, 0, 0, ofpacts,
    *rulep = error ? NULL : rule_dpif_cast(rule);

add_internal_flows(struct ofproto_dpif *ofproto)
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    struct rule *unused_rulep OVS_UNUSED;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_IMPLICIT_MISS;
    ofpact_finish_CONTROLLER(&ofpacts, &controller);

    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->miss_rule);

    ofpbuf_clear(&ofpacts);
    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->no_packet_in_rule);

    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->drop_frags_rule);

    /* Drop any run away non-recirc rule lookups. Recirc_id has to be
     * zero when reaching this rule.
     *
     * (priority=2), recirc_id=0, actions=drop */
    ofpbuf_clear(&ofpacts);
    match_init_catchall(&match);
    match_set_recirc_id(&match, 0);
    error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, 0, &ofpacts,
destruct(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofproto_async_msg *am;
    struct rule_dpif *rule;
    struct oftable *table;
    struct ovs_list ams;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;

    xlate_remove_ofproto(ofproto);

    /* Ensure that the upcall processing threads have no remaining references
     * to the ofproto or anything in it. */
    udpif_synchronize(ofproto->backer->udpif);

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        CLS_FOR_EACH (rule, up.cr, &table->cls) {
            ofproto_rule_delete(&ofproto->up, &rule->up);
    ofproto_group_delete_all(&ofproto->up);

    guarded_list_pop_all(&ofproto->ams, &ams);
    LIST_FOR_EACH_POP (am, list_node, &ams) {
        ofproto_async_msg_free(am);
    guarded_list_destroy(&ofproto->ams);

    recirc_free_ofproto(ofproto, ofproto->up.name);

    mbridge_unref(ofproto->mbridge);

    netflow_unref(ofproto->netflow);
    dpif_sflow_unref(ofproto->sflow);
    dpif_ipfix_unref(ofproto->ipfix);
    hmap_destroy(&ofproto->bundles);
    mac_learning_unref(ofproto->ml);
    mcast_snooping_unref(ofproto->ms);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    ovs_mutex_destroy(&ofproto->stats_mutex);

    seq_destroy(ofproto->ams_seq);

    close_dpif_backer(ofproto->backer);

run(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    uint64_t new_seq, new_dump_seq;

    if (mbridge_need_revalidate(ofproto->mbridge)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
        mac_learning_flush(ofproto->ml);
        ovs_rwlock_unlock(&ofproto->ml->rwlock);
        mcast_snooping_mdb_flush(ofproto->ms);

    /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
     * flow restore.  Even though nothing is processed during flow restore,
     * all queued 'ams' will be handled immediately when flow restore
     * is completed. */
    ofproto->ams_seqno = seq_read(ofproto->ams_seq);

    /* Do not perform any periodic activity required by 'ofproto' while
     * waiting for flow restore to complete. */
    if (!ofproto_get_flow_restore_wait()) {
        struct ofproto_async_msg *am;
        struct ovs_list ams;

        guarded_list_pop_all(&ofproto->ams, &ams);
        LIST_FOR_EACH_POP (am, list_node, &ams) {
            connmgr_send_async_msg(ofproto->up.connmgr, am);
            ofproto_async_msg_free(am);

    if (ofproto->netflow) {
        netflow_run(ofproto->netflow);
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    if (ofproto->ipfix) {
        dpif_ipfix_run(ofproto->ipfix);

    new_seq = seq_read(connectivity_seq_get());
    if (ofproto->change_seq != new_seq) {
        struct ofport_dpif *ofport;

        HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {

        ofproto->change_seq = new_seq;

    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {

    ovs_rwlock_wrlock(&ofproto->ml->rwlock);
    if (mac_learning_run(ofproto->ml)) {
        ofproto->backer->need_revalidate = REV_MAC_LEARNING;
    ovs_rwlock_unlock(&ofproto->ml->rwlock);

    if (mcast_snooping_run(ofproto->ms)) {
        ofproto->backer->need_revalidate = REV_MCAST_SNOOPING;

    new_dump_seq = seq_read(udpif_dump_seq(ofproto->backer->udpif));
    if (ofproto->dump_seq != new_dump_seq) {
        struct rule *rule, *next_rule;
        long long now = time_msec();

        /* We know stats are relatively fresh, so now is a good time to do some
         * periodic work. */
        ofproto->dump_seq = new_dump_seq;

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        ovs_mutex_lock(&ofproto_mutex);
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule), now);
        ovs_mutex_unlock(&ofproto_mutex);

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                bond_rebalance(bundle->bond);

ofproto_dpif_wait(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto_get_flow_restore_wait()) {

    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    if (ofproto->ipfix) {
        dpif_ipfix_wait(ofproto->ipfix);
    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            bundle_wait(bundle);
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    ovs_rwlock_rdlock(&ofproto->ml->rwlock);
    mac_learning_wait(ofproto->ml);
    ovs_rwlock_unlock(&ofproto->ml->rwlock);
    mcast_snooping_wait(ofproto->ms);

    if (ofproto->backer->need_revalidate) {
        poll_immediate_wake();

    seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
    seq_wait(ofproto->ams_seq, ofproto->ams_seqno);

type_get_memory_usage(const char *type, struct simap *usage)
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
        udpif_get_memory_usage(backer->udpif, usage);

flush(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_backer *backer = ofproto->backer;

    udpif_flush(backer->udpif);

query_tables(struct ofproto *ofproto,
             struct ofputil_table_features *features,
             struct ofputil_table_stats *stats)
    strcpy(features->name, "classifier");

    for (i = 0; i < ofproto->n_tables; i++) {
        unsigned long missed, matched;

        atomic_read_relaxed(&ofproto->tables[i].n_matched, &matched);
        atomic_read_relaxed(&ofproto->tables[i].n_missed, &missed);

        stats[i].matched_count = matched;
        stats[i].lookup_count = matched + missed;

set_tables_version(struct ofproto *ofproto_, ovs_version_t version)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Use memory_order_release to signify that any prior memory accesses can
     * not be reordered to happen after this atomic store.  This makes sure the
     * new version is properly set up when the readers can read this 'version'
     * value. */
    atomic_store_explicit(&ofproto->tables_version, version,
                          memory_order_release);
    /* 'need_revalidate' can be reordered to happen before the atomic_store
     * above, but it does not matter as this variable is not accessed by other
     * threads. */
    ofproto->backer->need_revalidate = REV_FLOW_TABLE;
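/* Sketch, not part of the original source: the matching reader side pairs
 * the release store above with an acquire load, along these lines (the exact
 * reader helper is an assumption and is not shown in this file):
 *
 *     ovs_version_t version;
 *     atomic_read_explicit(&ofproto->tables_version, &version,
 *                          memory_order_acquire);
 *
 * The acquire/release pair guarantees that a reader that observes the new
 * version also observes all table updates made before the store. */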
static struct ofport *
    struct ofport_dpif *port = xzalloc(sizeof *port);

port_dealloc(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);

port_construct(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const struct netdev *netdev = port->up.netdev;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;
    struct dpif_port dpif_port;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    port->bundle = NULL;
    port->may_enable = false;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    port->rstp_port = NULL;
    port->rstp_state = RSTP_DISABLED;
    port->is_tunnel = false;
    port->carrier_seq = netdev_get_carrier_resets(netdev);
    port->is_layer3 = netdev_vport_is_layer3(netdev);

    if (netdev_vport_is_patch(netdev)) {
        /* By bailing out here, we don't submit the port to the sFlow module
         * to be considered for counter polling export.  This is correct
         * because the patch port represents an interface that sFlow considers
         * to be "internal" to the switch as a whole, and therefore not a
         * candidate for counter polling. */
        port->odp_port = ODPP_NONE;
        ofport_update_peer(port);

    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_port_query_by_name(ofproto->backer->dpif, dp_port_name,

    port->odp_port = dpif_port.port_no;

    if (netdev_get_tunnel_config(netdev)) {
        atomic_count_inc(&ofproto->backer->tnl_count);
        error = tnl_port_add(port, port->up.netdev, port->odp_port,
                             ovs_native_tunneling_is_on(ofproto), dp_port_name);
            atomic_count_dec(&ofproto->backer->tnl_count);
            dpif_port_destroy(&dpif_port);

        port->is_tunnel = true;
        if (ofproto->ipfix) {
            dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);

        /* Sanity-check that a mapping doesn't already exist.  This
         * shouldn't happen for non-tunnel ports. */
        if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
            VLOG_ERR("port %s already has an OpenFlow port number",
            dpif_port_destroy(&dpif_port);

        ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
        hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
                    hash_odp_port(port->odp_port));
        ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);

    dpif_port_destroy(&dpif_port);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);

port_destruct(struct ofport *port_, bool del)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *devname = netdev_get_name(port->up.netdev);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;

    xlate_ofport_remove(port);

    dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
    if (del && dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destruction. */
        if (!port->is_tunnel) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);

        port->peer->peer = NULL;

    if (port->odp_port != ODPP_NONE && !port->is_tunnel) {
        ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
        ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);

    if (port->is_tunnel) {
        atomic_count_dec(&ofproto->backer->tnl_count);

    if (port->is_tunnel && ofproto->ipfix) {
        dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);

    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    bundle_remove(port_);
    set_cfm(port_, NULL);
    set_bfd(port_, NULL);
    set_lldp(port_, NULL);
    if (port->stp_port) {
        stp_port_disable(port->stp_port);
    set_rstp_port(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);

port_modified(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;
    struct netdev *netdev = port->up.netdev;

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, netdev);

        cfm_set_netdev(port->cfm, netdev);

        bfd_set_netdev(port->bfd, netdev);

    ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
                                     port->lldp, &port->up.pp.hw_addr);

    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    if (port->is_tunnel) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

        if (tnl_port_reconfigure(port, netdev, port->odp_port,
                                 ovs_native_tunneling_is_on(ofproto),
            ofproto->backer->need_revalidate = REV_RECONFIGURE;

    ofport_update_peer(port);

port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);

set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);

        dpif_sflow_set_options(ds, sflow_options);
        if (dpif_sflow_get_probability(ds) != old_probability) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;

            dpif_sflow_unref(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
1870 struct ofproto
*ofproto_
,
1871 const struct ofproto_ipfix_bridge_exporter_options
*bridge_exporter_options
,
1872 const struct ofproto_ipfix_flow_exporter_options
*flow_exporters_options
,
1873 size_t n_flow_exporters_options
)
1875 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1876 struct dpif_ipfix
*di
= ofproto
->ipfix
;
1877 bool has_options
= bridge_exporter_options
|| flow_exporters_options
;
1878 bool new_di
= false;
1880 if (has_options
&& !di
) {
1881 di
= ofproto
->ipfix
= dpif_ipfix_create();
1886 /* Call set_options in any case to cleanly flush the flow
1887 * caches in the last exporters that are to be destroyed. */
1888 dpif_ipfix_set_options(
1889 di
, bridge_exporter_options
, flow_exporters_options
,
1890 n_flow_exporters_options
);
1892 /* Add tunnel ports only when a new ipfix created */
1893 if (new_di
== true) {
1894 struct ofport_dpif
*ofport
;
1895 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1896 if (ofport
->is_tunnel
== true) {
1897 dpif_ipfix_add_tunnel_port(di
, &ofport
->up
, ofport
->odp_port
);
1903 dpif_ipfix_unref(di
);
1904 ofproto
->ipfix
= NULL
;
1912 get_ipfix_stats(const struct ofproto
*ofproto_
,
1914 struct ovs_list
*replies
)
1916 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1917 struct dpif_ipfix
*di
= ofproto
->ipfix
;
1920 return OFPERR_NXST_NOT_CONFIGURED
;
1923 return dpif_ipfix_get_stats(di
, bridge_ipfix
, replies
);
1927 set_cfm(struct ofport
*ofport_
, const struct cfm_settings
*s
)
1929 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1930 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1931 struct cfm
*old
= ofport
->cfm
;
1936 ofport
->cfm
= cfm_create(ofport
->up
.netdev
);
1939 if (cfm_configure(ofport
->cfm
, s
)) {
1946 cfm_unref(ofport
->cfm
);
1949 if (ofport
->cfm
!= old
) {
1950 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1952 ofproto_dpif_monitor_port_update(ofport
, ofport
->bfd
, ofport
->cfm
,
1953 ofport
->lldp
, &ofport
->up
.pp
.hw_addr
);
1958 cfm_status_changed(struct ofport
*ofport_
)
1960 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1962 return ofport
->cfm
? cfm_check_status_change(ofport
->cfm
) : true;
1966 get_cfm_status(const struct ofport
*ofport_
,
1967 struct cfm_status
*status
)
1969 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1973 cfm_get_status(ofport
->cfm
, status
);
1982 set_bfd(struct ofport
*ofport_
, const struct smap
*cfg
)
1984 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport_
->ofproto
);
1985 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1989 ofport
->bfd
= bfd_configure(old
, netdev_get_name(ofport
->up
.netdev
),
1990 cfg
, ofport
->up
.netdev
);
1991 if (ofport
->bfd
!= old
) {
1992 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1994 ofproto_dpif_monitor_port_update(ofport
, ofport
->bfd
, ofport
->cfm
,
1995 ofport
->lldp
, &ofport
->up
.pp
.hw_addr
);
2000 bfd_status_changed(struct ofport
*ofport_
)
2002 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2004 return ofport
->bfd
? bfd_check_status_change(ofport
->bfd
) : true;
2008 get_bfd_status(struct ofport
*ofport_
, struct smap
*smap
)
2010 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2014 bfd_get_status(ofport
->bfd
, smap
);
2023 set_lldp(struct ofport
*ofport_
,
2024 const struct smap
*cfg
)
2026 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2030 if (!ofport
->lldp
) {
2031 struct ofproto_dpif
*ofproto
;
2033 ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2034 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2035 ofport
->lldp
= lldp_create(ofport
->up
.netdev
, ofport_
->mtu
, cfg
);
2038 if (!lldp_configure(ofport
->lldp
, cfg
)) {
2043 lldp_unref(ofport
->lldp
);
2044 ofport
->lldp
= NULL
;
2047 ofproto_dpif_monitor_port_update(ofport
,
2051 &ofport
->up
.pp
.hw_addr
);
2056 get_lldp_status(const struct ofport
*ofport_
,
2057 struct lldp_status
*status OVS_UNUSED
)
2059 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2061 return ofport
->lldp
? true : false;
2065 set_aa(struct ofproto
*ofproto OVS_UNUSED
,
2066 const struct aa_settings
*s
)
2068 return aa_configure(s
);
2072 aa_mapping_set(struct ofproto
*ofproto_ OVS_UNUSED
, void *aux
,
2073 const struct aa_mapping_settings
*s
)
2075 return aa_mapping_register(aux
, s
);
2079 aa_mapping_unset(struct ofproto
*ofproto OVS_UNUSED
, void *aux
)
2081 return aa_mapping_unregister(aux
);
2085 aa_vlan_get_queued(struct ofproto
*ofproto OVS_UNUSED
, struct ovs_list
*list
)
2087 return aa_get_vlan_queued(list
);
2091 aa_vlan_get_queue_size(struct ofproto
*ofproto OVS_UNUSED
)
2093 return aa_get_vlan_queue_size();
2097 /* Spanning Tree. */
2099 /* Called while rstp_mutex is held. */
2101 rstp_send_bpdu_cb(struct dp_packet
*pkt
, void *ofport_
, void *ofproto_
)
2103 struct ofproto_dpif
*ofproto
= ofproto_
;
2104 struct ofport_dpif
*ofport
= ofport_
;
2105 struct eth_header
*eth
= dp_packet_l2(pkt
);
2107 netdev_get_etheraddr(ofport
->up
.netdev
, ð
->eth_src
);
2108 if (eth_addr_is_zero(eth
->eth_src
)) {
2109 VLOG_WARN_RL(&rl
, "%s port %d: cannot send RSTP BPDU on a port which "
2110 "does not have a configured source MAC address.",
2111 ofproto
->up
.name
, ofp_to_u16(ofport
->up
.ofp_port
));
2113 ofproto_dpif_send_packet(ofport
, false, pkt
);
2115 dp_packet_delete(pkt
);
2119 send_bpdu_cb(struct dp_packet
*pkt
, int port_num
, void *ofproto_
)
2121 struct ofproto_dpif
*ofproto
= ofproto_
;
2122 struct stp_port
*sp
= stp_get_port(ofproto
->stp
, port_num
);
2123 struct ofport_dpif
*ofport
;
2125 ofport
= stp_port_get_aux(sp
);
2127 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on unknown port %d",
2128 ofproto
->up
.name
, port_num
);
2130 struct eth_header
*eth
= dp_packet_l2(pkt
);
2132 netdev_get_etheraddr(ofport
->up
.netdev
, ð
->eth_src
);
2133 if (eth_addr_is_zero(eth
->eth_src
)) {
2134 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on port %d "
2135 "with unknown MAC", ofproto
->up
.name
, port_num
);
2137 ofproto_dpif_send_packet(ofport
, false, pkt
);
2140 dp_packet_delete(pkt
);
/* Configures RSTP on 'ofproto_' using the settings defined in 's'. */
static void
set_rstp(struct ofproto *ofproto_, const struct ofproto_rstp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->rstp) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->rstp) {
            ofproto->rstp = rstp_create(ofproto_->name, s->address,
                                        rstp_send_bpdu_cb, ofproto);
            ofproto->rstp_last_tick = time_msec();
        }
        rstp_set_bridge_address(ofproto->rstp, s->address);
        rstp_set_bridge_priority(ofproto->rstp, s->priority);
        rstp_set_bridge_ageing_time(ofproto->rstp, s->ageing_time);
        rstp_set_bridge_force_protocol_version(ofproto->rstp,
                                               s->force_protocol_version);
        rstp_set_bridge_max_age(ofproto->rstp, s->bridge_max_age);
        rstp_set_bridge_forward_delay(ofproto->rstp, s->bridge_forward_delay);
        rstp_set_bridge_transmit_hold_count(ofproto->rstp,
                                            s->transmit_hold_count);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_rstp_port(ofport, NULL);
        }
        rstp_unref(ofproto->rstp);
        ofproto->rstp = NULL;
    }
}
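/* Fills 's' with the bridge-level RSTP status of 'ofproto_'. */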
static void
get_rstp_status(struct ofproto *ofproto_, struct ofproto_rstp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->rstp) {
        s->enabled = true;
        s->root_id = rstp_get_root_id(ofproto->rstp);
        s->bridge_id = rstp_get_bridge_id(ofproto->rstp);
        s->designated_id = rstp_get_designated_id(ofproto->rstp);
        s->root_path_cost = rstp_get_root_path_cost(ofproto->rstp);
        s->designated_port_id = rstp_get_designated_port_id(ofproto->rstp);
        s->bridge_port_id = rstp_get_bridge_port_id(ofproto->rstp);
    } else {
        s->enabled = false;
    }
}
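/* Recomputes the RSTP state of 'ofport' and applies any change: learned MACs
 * are flushed when the learning state changes, the backer is marked for
 * revalidation, and the RSTP bits in the OpenFlow port state are updated. */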
2198 update_rstp_port_state(struct ofport_dpif
*ofport
)
2200 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2201 enum rstp_state state
;
2203 /* Figure out new state. */
2204 state
= ofport
->rstp_port
? rstp_port_get_state(ofport
->rstp_port
)
2208 if (ofport
->rstp_state
!= state
) {
2209 enum ofputil_port_state of_state
;
2212 VLOG_DBG("port %s: RSTP state changed from %s to %s",
2213 netdev_get_name(ofport
->up
.netdev
),
2214 rstp_state_name(ofport
->rstp_state
),
2215 rstp_state_name(state
));
2217 if (rstp_learn_in_state(ofport
->rstp_state
)
2218 != rstp_learn_in_state(state
)) {
2219 /* XXX: Learning action flows should also be flushed. */
2220 if (ofport
->bundle
) {
2221 if (!rstp_shift_root_learned_address(ofproto
->rstp
)
2222 || rstp_get_old_root_aux(ofproto
->rstp
) != ofport
) {
2223 bundle_flush_macs(ofport
->bundle
, false);
2227 fwd_change
= rstp_forward_in_state(ofport
->rstp_state
)
2228 != rstp_forward_in_state(state
);
2230 ofproto
->backer
->need_revalidate
= REV_RSTP
;
2231 ofport
->rstp_state
= state
;
2233 if (fwd_change
&& ofport
->bundle
) {
2234 bundle_update(ofport
->bundle
);
2237 /* Update the RSTP state bits in the OpenFlow port description. */
2238 of_state
= ofport
->up
.pp
.state
& ~OFPUTIL_PS_STP_MASK
;
2239 of_state
|= (state
== RSTP_LEARNING
? OFPUTIL_PS_STP_LEARN
2240 : state
== RSTP_FORWARDING
? OFPUTIL_PS_STP_FORWARD
2241 : state
== RSTP_DISCARDING
? OFPUTIL_PS_STP_LISTEN
2243 ofproto_port_set_state(&ofport
->up
, of_state
);
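/* Periodic RSTP processing: ticks the RSTP timers roughly once per second,
 * propagates port state changes, and handles FDB flush and root bridge
 * change events. */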
2248 rstp_run(struct ofproto_dpif
*ofproto
)
2250 if (ofproto
->rstp
) {
2251 long long int now
= time_msec();
2252 long long int elapsed
= now
- ofproto
->rstp_last_tick
;
2253 struct rstp_port
*rp
;
2254 struct ofport_dpif
*ofport
;
2256 /* Every second, decrease the values of the timers. */
2257 if (elapsed
>= 1000) {
2258 rstp_tick_timers(ofproto
->rstp
);
2259 ofproto
->rstp_last_tick
= now
;
2262 while ((ofport
= rstp_get_next_changed_port_aux(ofproto
->rstp
, &rp
))) {
2263 update_rstp_port_state(ofport
);
2267 /* FIXME: This check should be done on-event (i.e., when setting
2268 * p->fdb_flush) and not periodically.
2270 while ((ofport
= rstp_check_and_reset_fdb_flush(ofproto
->rstp
, &rp
))) {
2271 if (!rstp_shift_root_learned_address(ofproto
->rstp
)
2272 || rstp_get_old_root_aux(ofproto
->rstp
) != ofport
) {
2273 bundle_flush_macs(ofport
->bundle
, false);
2277 if (rstp_shift_root_learned_address(ofproto
->rstp
)) {
2278 struct ofport_dpif
*old_root_aux
=
2279 (struct ofport_dpif
*)rstp_get_old_root_aux(ofproto
->rstp
);
2280 struct ofport_dpif
*new_root_aux
=
2281 (struct ofport_dpif
*)rstp_get_new_root_aux(ofproto
->rstp
);
2282 if (old_root_aux
!= NULL
&& new_root_aux
!= NULL
) {
2283 bundle_move(old_root_aux
->bundle
, new_root_aux
->bundle
);
2284 rstp_reset_root_changed(ofproto
->rstp
);
2290 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
2292 set_stp(struct ofproto
*ofproto_
, const struct ofproto_stp_settings
*s
)
2294 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2296 /* Only revalidate flows if the configuration changed. */
2297 if (!s
!= !ofproto
->stp
) {
2298 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2302 if (!ofproto
->stp
) {
2303 ofproto
->stp
= stp_create(ofproto_
->name
, s
->system_id
,
2304 send_bpdu_cb
, ofproto
);
2305 ofproto
->stp_last_tick
= time_msec();
2308 stp_set_bridge_id(ofproto
->stp
, s
->system_id
);
2309 stp_set_bridge_priority(ofproto
->stp
, s
->priority
);
2310 stp_set_hello_time(ofproto
->stp
, s
->hello_time
);
2311 stp_set_max_age(ofproto
->stp
, s
->max_age
);
2312 stp_set_forward_delay(ofproto
->stp
, s
->fwd_delay
);
2314 struct ofport
*ofport
;
2316 HMAP_FOR_EACH (ofport
, hmap_node
, &ofproto
->up
.ports
) {
2317 set_stp_port(ofport
, NULL
);
2320 stp_unref(ofproto
->stp
);
2321 ofproto
->stp
= NULL
;
static void
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }
}
2345 update_stp_port_state(struct ofport_dpif
*ofport
)
2347 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2348 enum stp_state state
;
2350 /* Figure out new state. */
2351 state
= ofport
->stp_port
? stp_port_get_state(ofport
->stp_port
)
2355 if (ofport
->stp_state
!= state
) {
2356 enum ofputil_port_state of_state
;
2359 VLOG_DBG("port %s: STP state changed from %s to %s",
2360 netdev_get_name(ofport
->up
.netdev
),
2361 stp_state_name(ofport
->stp_state
),
2362 stp_state_name(state
));
2363 if (stp_learn_in_state(ofport
->stp_state
)
2364 != stp_learn_in_state(state
)) {
2365 /* xxx Learning action flows should also be flushed. */
2366 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
2367 mac_learning_flush(ofproto
->ml
);
2368 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
2369 mcast_snooping_mdb_flush(ofproto
->ms
);
2371 fwd_change
= stp_forward_in_state(ofport
->stp_state
)
2372 != stp_forward_in_state(state
);
2374 ofproto
->backer
->need_revalidate
= REV_STP
;
2375 ofport
->stp_state
= state
;
2376 ofport
->stp_state_entered
= time_msec();
2378 if (fwd_change
&& ofport
->bundle
) {
2379 bundle_update(ofport
->bundle
);
2382 /* Update the STP state bits in the OpenFlow port description. */
2383 of_state
= ofport
->up
.pp
.state
& ~OFPUTIL_PS_STP_MASK
;
2384 of_state
|= (state
== STP_LISTENING
? OFPUTIL_PS_STP_LISTEN
2385 : state
== STP_LEARNING
? OFPUTIL_PS_STP_LEARN
2386 : state
== STP_FORWARDING
? OFPUTIL_PS_STP_FORWARD
2387 : state
== STP_BLOCKING
? OFPUTIL_PS_STP_BLOCK
2389 ofproto_port_set_state(&ofport
->up
, of_state
);
2393 /* Configures STP on 'ofport_' using the settings defined in 's'. The
2394 * caller is responsible for assigning STP port numbers and ensuring
2395 * there are no duplicates. */
2397 set_stp_port(struct ofport
*ofport_
,
2398 const struct ofproto_port_stp_settings
*s
)
2400 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2401 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2402 struct stp_port
*sp
= ofport
->stp_port
;
2404 if (!s
|| !s
->enable
) {
2406 ofport
->stp_port
= NULL
;
2407 stp_port_disable(sp
);
2408 update_stp_port_state(ofport
);
2411 } else if (sp
&& stp_port_no(sp
) != s
->port_num
2412 && ofport
== stp_port_get_aux(sp
)) {
2413 /* The port-id changed, so disable the old one if it's not
2414 * already in use by another port. */
2415 stp_port_disable(sp
);
2418 sp
= ofport
->stp_port
= stp_get_port(ofproto
->stp
, s
->port_num
);
2420 /* Set name before enabling the port so that debugging messages can print
2422 stp_port_set_name(sp
, netdev_get_name(ofport
->up
.netdev
));
2423 stp_port_enable(sp
);
2425 stp_port_set_aux(sp
, ofport
);
2426 stp_port_set_priority(sp
, s
->priority
);
2427 stp_port_set_path_cost(sp
, s
->path_cost
);
2429 update_stp_port_state(ofport
);
2435 get_stp_port_status(struct ofport
*ofport_
,
2436 struct ofproto_port_stp_status
*s
)
2438 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2439 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2440 struct stp_port
*sp
= ofport
->stp_port
;
2442 if (!ofproto
->stp
|| !sp
) {
2448 s
->port_id
= stp_port_get_id(sp
);
2449 s
->state
= stp_port_get_state(sp
);
2450 s
->sec_in_state
= (time_msec() - ofport
->stp_state_entered
) / 1000;
2451 s
->role
= stp_port_get_role(sp
);
2457 get_stp_port_stats(struct ofport
*ofport_
,
2458 struct ofproto_port_stp_stats
*s
)
2460 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2461 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2462 struct stp_port
*sp
= ofport
->stp_port
;
2464 if (!ofproto
->stp
|| !sp
) {
2470 stp_port_get_counts(sp
, &s
->tx_count
, &s
->rx_count
, &s
->error_count
);
2476 stp_run(struct ofproto_dpif
*ofproto
)
2479 long long int now
= time_msec();
2480 long long int elapsed
= now
- ofproto
->stp_last_tick
;
2481 struct stp_port
*sp
;
2484 stp_tick(ofproto
->stp
, MIN(INT_MAX
, elapsed
));
2485 ofproto
->stp_last_tick
= now
;
2487 while (stp_get_changed_port(ofproto
->stp
, &sp
)) {
2488 struct ofport_dpif
*ofport
= stp_port_get_aux(sp
);
2491 update_stp_port_state(ofport
);
2495 if (stp_check_and_reset_fdb_flush(ofproto
->stp
)) {
2496 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
2497 mac_learning_flush(ofproto
->ml
);
2498 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
2499 mcast_snooping_mdb_flush(ofproto
->ms
);
static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
2512 /* Configures RSTP on 'ofport_' using the settings defined in 's'. The
2513 * caller is responsible for assigning RSTP port numbers and ensuring
2514 * there are no duplicates. */
2516 set_rstp_port(struct ofport
*ofport_
,
2517 const struct ofproto_port_rstp_settings
*s
)
2519 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2520 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2521 struct rstp_port
*rp
= ofport
->rstp_port
;
2523 if (!s
|| !s
->enable
) {
2525 rstp_port_set_aux(rp
, NULL
);
2526 rstp_port_set_state(rp
, RSTP_DISABLED
);
2527 rstp_port_set_mac_operational(rp
, false);
2528 ofport
->rstp_port
= NULL
;
2529 rstp_port_unref(rp
);
2530 update_rstp_port_state(ofport
);
2535 /* Check if need to add a new port. */
2537 rp
= ofport
->rstp_port
= rstp_add_port(ofproto
->rstp
);
2540 rstp_port_set(rp
, s
->port_num
, s
->priority
, s
->path_cost
,
2541 s
->admin_edge_port
, s
->auto_edge
,
2542 s
->admin_p2p_mac_state
, s
->admin_port_state
, s
->mcheck
,
2544 update_rstp_port_state(ofport
);
2545 /* Synchronize operational status. */
2546 rstp_port_set_mac_operational(rp
, ofport
->may_enable
);
2550 get_rstp_port_status(struct ofport
*ofport_
,
2551 struct ofproto_port_rstp_status
*s
)
2553 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2554 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2555 struct rstp_port
*rp
= ofport
->rstp_port
;
2557 if (!ofproto
->rstp
|| !rp
) {
2563 rstp_port_get_status(rp
, &s
->port_id
, &s
->state
, &s
->role
,
2564 &s
->designated_bridge_id
, &s
->designated_port_id
,
2565 &s
->designated_path_cost
, &s
->tx_count
,
2566 &s
->rx_count
, &s
->error_count
, &s
->uptime
);
2571 set_queues(struct ofport
*ofport_
, const struct ofproto_port_queue
*qdscp
,
2574 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2575 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2577 if (ofport
->n_qdscp
!= n_qdscp
2578 || (n_qdscp
&& memcmp(ofport
->qdscp
, qdscp
,
2579 n_qdscp
* sizeof *qdscp
))) {
2580 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2581 free(ofport
->qdscp
);
2582 ofport
->qdscp
= n_qdscp
2583 ? xmemdup(qdscp
, n_qdscp
* sizeof *qdscp
)
2585 ofport
->n_qdscp
= n_qdscp
;
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ovs_rwlock_wrlock(&ml->rwlock);
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac_entry_get_port(ml, mac) == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        ovs_rwlock_wrlock(&o->ml->rwlock);
                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                        ovs_rwlock_unlock(&o->ml->rwlock);
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
    ovs_rwlock_unlock(&ml->rwlock);
}
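/* Re-points all MAC learning entries for 'old' at 'new'.  Both bundles must
 * belong to the same ofproto. */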
2637 bundle_move(struct ofbundle
*old
, struct ofbundle
*new)
2639 struct ofproto_dpif
*ofproto
= old
->ofproto
;
2640 struct mac_learning
*ml
= ofproto
->ml
;
2641 struct mac_entry
*mac
, *next_mac
;
2643 ovs_assert(new->ofproto
== old
->ofproto
);
2645 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2646 ovs_rwlock_wrlock(&ml
->rwlock
);
2647 LIST_FOR_EACH_SAFE (mac
, next_mac
, lru_node
, &ml
->lrus
) {
2648 if (mac_entry_get_port(ml
, mac
) == old
) {
2649 mac_entry_set_port(ml
, mac
, new);
2652 ovs_rwlock_unlock(&ml
->rwlock
);
2655 static struct ofbundle
*
2656 bundle_lookup(const struct ofproto_dpif
*ofproto
, void *aux
)
2658 struct ofbundle
*bundle
;
2660 HMAP_FOR_EACH_IN_BUCKET (bundle
, hmap_node
, hash_pointer(aux
, 0),
2661 &ofproto
->bundles
) {
2662 if (bundle
->aux
== aux
) {
2670 bundle_update(struct ofbundle
*bundle
)
2672 struct ofport_dpif
*port
;
2674 bundle
->floodable
= true;
2675 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
2676 if (port
->up
.pp
.config
& OFPUTIL_PC_NO_FLOOD
2678 || (bundle
->ofproto
->stp
&& !stp_forward_in_state(port
->stp_state
))
2679 || (bundle
->ofproto
->rstp
&& !rstp_forward_in_state(port
->rstp_state
))) {
2680 bundle
->floodable
= false;
2687 bundle_del_port(struct ofport_dpif
*port
)
2689 struct ofbundle
*bundle
= port
->bundle
;
2691 bundle
->ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2693 ovs_list_remove(&port
->bundle_node
);
2694 port
->bundle
= NULL
;
2697 lacp_slave_unregister(bundle
->lacp
, port
);
2700 bond_slave_unregister(bundle
->bond
, port
);
2703 bundle_update(bundle
);
2707 bundle_add_port(struct ofbundle
*bundle
, ofp_port_t ofp_port
,
2708 struct lacp_slave_settings
*lacp
)
2710 struct ofport_dpif
*port
;
2712 port
= ofp_port_to_ofport(bundle
->ofproto
, ofp_port
);
2717 if (port
->bundle
!= bundle
) {
2718 bundle
->ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2720 bundle_remove(&port
->up
);
2723 port
->bundle
= bundle
;
2724 ovs_list_push_back(&bundle
->ports
, &port
->bundle_node
);
2725 if (port
->up
.pp
.config
& OFPUTIL_PC_NO_FLOOD
2727 || (bundle
->ofproto
->stp
&& !stp_forward_in_state(port
->stp_state
))
2728 || (bundle
->ofproto
->rstp
&& !rstp_forward_in_state(port
->rstp_state
))) {
2729 bundle
->floodable
= false;
2733 bundle
->ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2734 lacp_slave_register(bundle
->lacp
, port
, lacp
);
2741 bundle_destroy(struct ofbundle
*bundle
)
2743 struct ofproto_dpif
*ofproto
;
2744 struct ofport_dpif
*port
, *next_port
;
2750 ofproto
= bundle
->ofproto
;
2751 mbridge_unregister_bundle(ofproto
->mbridge
, bundle
);
2754 xlate_bundle_remove(bundle
);
2757 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
2758 bundle_del_port(port
);
2761 bundle_flush_macs(bundle
, true);
2762 hmap_remove(&ofproto
->bundles
, &bundle
->hmap_node
);
2764 free(bundle
->trunks
);
2765 lacp_unref(bundle
->lacp
);
2766 bond_unref(bundle
->bond
);
2771 bundle_set(struct ofproto
*ofproto_
, void *aux
,
2772 const struct ofproto_bundle_settings
*s
)
2774 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2775 bool need_flush
= false;
2776 struct ofport_dpif
*port
;
2777 struct ofbundle
*bundle
;
2778 unsigned long *trunks
;
2784 bundle_destroy(bundle_lookup(ofproto
, aux
));
2788 ovs_assert(s
->n_slaves
== 1 || s
->bond
!= NULL
);
2789 ovs_assert((s
->lacp
!= NULL
) == (s
->lacp_slaves
!= NULL
));
2791 bundle
= bundle_lookup(ofproto
, aux
);
2793 bundle
= xmalloc(sizeof *bundle
);
2795 bundle
->ofproto
= ofproto
;
2796 hmap_insert(&ofproto
->bundles
, &bundle
->hmap_node
,
2797 hash_pointer(aux
, 0));
2799 bundle
->name
= NULL
;
2801 ovs_list_init(&bundle
->ports
);
2802 bundle
->vlan_mode
= PORT_VLAN_TRUNK
;
2804 bundle
->trunks
= NULL
;
2805 bundle
->use_priority_tags
= s
->use_priority_tags
;
2806 bundle
->lacp
= NULL
;
2807 bundle
->bond
= NULL
;
2809 bundle
->floodable
= true;
2810 bundle
->protected = false;
2811 mbridge_register_bundle(ofproto
->mbridge
, bundle
);
2814 if (!bundle
->name
|| strcmp(s
->name
, bundle
->name
)) {
2816 bundle
->name
= xstrdup(s
->name
);
2821 ofproto
->lacp_enabled
= true;
2822 if (!bundle
->lacp
) {
2823 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2824 bundle
->lacp
= lacp_create();
2826 lacp_configure(bundle
->lacp
, s
->lacp
);
2828 lacp_unref(bundle
->lacp
);
2829 bundle
->lacp
= NULL
;
2832 /* Update set of ports. */
2834 for (i
= 0; i
< s
->n_slaves
; i
++) {
2835 if (!bundle_add_port(bundle
, s
->slaves
[i
],
2836 s
->lacp
? &s
->lacp_slaves
[i
] : NULL
)) {
2840 if (!ok
|| ovs_list_size(&bundle
->ports
) != s
->n_slaves
) {
2841 struct ofport_dpif
*next_port
;
2843 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
2844 for (i
= 0; i
< s
->n_slaves
; i
++) {
2845 if (s
->slaves
[i
] == port
->up
.ofp_port
) {
2850 bundle_del_port(port
);
2854 ovs_assert(ovs_list_size(&bundle
->ports
) <= s
->n_slaves
);
2856 if (ovs_list_is_empty(&bundle
->ports
)) {
2857 bundle_destroy(bundle
);
2861 /* Set VLAN tagging mode */
2862 if (s
->vlan_mode
!= bundle
->vlan_mode
2863 || s
->use_priority_tags
!= bundle
->use_priority_tags
) {
2864 bundle
->vlan_mode
= s
->vlan_mode
;
2865 bundle
->use_priority_tags
= s
->use_priority_tags
;
2870 vlan
= (s
->vlan_mode
== PORT_VLAN_TRUNK
? -1
2871 : s
->vlan
>= 0 && s
->vlan
<= 4095 ? s
->vlan
2873 if (vlan
!= bundle
->vlan
) {
2874 bundle
->vlan
= vlan
;
2878 /* Get trunked VLANs. */
2879 switch (s
->vlan_mode
) {
2880 case PORT_VLAN_ACCESS
:
2884 case PORT_VLAN_TRUNK
:
2885 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
2888 case PORT_VLAN_NATIVE_UNTAGGED
:
2889 case PORT_VLAN_NATIVE_TAGGED
:
2890 if (vlan
!= 0 && (!s
->trunks
2891 || !bitmap_is_set(s
->trunks
, vlan
)
2892 || bitmap_is_set(s
->trunks
, 0))) {
2893 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2895 trunks
= bitmap_clone(s
->trunks
, 4096);
2897 trunks
= bitmap_allocate1(4096);
2899 bitmap_set1(trunks
, vlan
);
2900 bitmap_set0(trunks
, 0);
2902 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
2909 if (!vlan_bitmap_equal(trunks
, bundle
->trunks
)) {
2910 free(bundle
->trunks
);
2911 if (trunks
== s
->trunks
) {
2912 bundle
->trunks
= vlan_bitmap_clone(trunks
);
2914 bundle
->trunks
= trunks
;
2919 if (trunks
!= s
->trunks
) {
2924 if (!ovs_list_is_short(&bundle
->ports
)) {
2925 bundle
->ofproto
->has_bonded_bundles
= true;
2927 if (bond_reconfigure(bundle
->bond
, s
->bond
)) {
2928 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2931 bundle
->bond
= bond_create(s
->bond
, ofproto
);
2932 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2935 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
2936 bond_slave_register(bundle
->bond
, port
,
2937 port
->up
.ofp_port
, port
->up
.netdev
);
2940 bond_unref(bundle
->bond
);
2941 bundle
->bond
= NULL
;
2944 /* Set proteced port mode */
2945 if (s
->protected != bundle
->protected) {
2946 bundle
->protected = s
->protected;
2950 /* If we changed something that would affect MAC learning, un-learn
2951 * everything on this port and force flow revalidation. */
2953 bundle_flush_macs(bundle
, false);
2960 bundle_remove(struct ofport
*port_
)
2962 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
2963 struct ofbundle
*bundle
= port
->bundle
;
2966 bundle_del_port(port
);
2967 if (ovs_list_is_empty(&bundle
->ports
)) {
2968 bundle_destroy(bundle
);
2969 } else if (ovs_list_is_short(&bundle
->ports
)) {
2970 bond_unref(bundle
->bond
);
2971 bundle
->bond
= NULL
;
2977 send_pdu_cb(void *port_
, const void *pdu
, size_t pdu_size
)
2979 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 10);
2980 struct ofport_dpif
*port
= port_
;
2984 error
= netdev_get_etheraddr(port
->up
.netdev
, &ea
);
2986 struct dp_packet packet
;
2989 dp_packet_init(&packet
, 0);
2990 packet_pdu
= eth_compose(&packet
, eth_addr_lacp
, ea
, ETH_TYPE_LACP
,
2992 memcpy(packet_pdu
, pdu
, pdu_size
);
2994 ofproto_dpif_send_packet(port
, false, &packet
);
2995 dp_packet_uninit(&packet
);
2997 VLOG_ERR_RL(&rl
, "port %s: cannot obtain Ethernet address of iface "
2998 "%s (%s)", port
->bundle
->name
,
2999 netdev_get_name(port
->up
.netdev
), ovs_strerror(error
));
3004 bundle_send_learning_packets(struct ofbundle
*bundle
)
3006 struct ofproto_dpif
*ofproto
= bundle
->ofproto
;
3007 int error
, n_packets
, n_errors
;
3008 struct mac_entry
*e
;
3010 struct ovs_list list_node
;
3011 struct ofport_dpif
*port
;
3012 struct dp_packet
*pkt
;
3014 struct ovs_list packets
;
3016 ovs_list_init(&packets
);
3017 ovs_rwlock_rdlock(&ofproto
->ml
->rwlock
);
3018 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
3019 if (mac_entry_get_port(ofproto
->ml
, e
) != bundle
) {
3020 pkt_node
= xmalloc(sizeof *pkt_node
);
3021 pkt_node
->pkt
= bond_compose_learning_packet(bundle
->bond
,
3023 (void **)&pkt_node
->port
);
3024 ovs_list_push_back(&packets
, &pkt_node
->list_node
);
3027 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
3029 error
= n_packets
= n_errors
= 0;
3030 LIST_FOR_EACH_POP (pkt_node
, list_node
, &packets
) {
3033 ret
= ofproto_dpif_send_packet(pkt_node
->port
, false, pkt_node
->pkt
);
3034 dp_packet_delete(pkt_node
->pkt
);
3044 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3045 VLOG_WARN_RL(&rl
, "bond %s: %d errors sending %d gratuitous learning "
3046 "packets, last error was: %s",
3047 bundle
->name
, n_errors
, n_packets
, ovs_strerror(error
));
3049 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
3050 bundle
->name
, n_packets
);
3055 bundle_run(struct ofbundle
*bundle
)
3058 lacp_run(bundle
->lacp
, send_pdu_cb
);
3061 struct ofport_dpif
*port
;
3063 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
3064 bond_slave_set_may_enable(bundle
->bond
, port
, port
->may_enable
);
3067 if (bond_run(bundle
->bond
, lacp_status(bundle
->lacp
))) {
3068 bundle
->ofproto
->backer
->need_revalidate
= REV_BOND
;
3071 if (bond_should_send_learning_packets(bundle
->bond
)) {
3072 bundle_send_learning_packets(bundle
);
3078 bundle_wait(struct ofbundle
*bundle
)
3081 lacp_wait(bundle
->lacp
);
3084 bond_wait(bundle
->bond
);
3091 mirror_set__(struct ofproto
*ofproto_
, void *aux
,
3092 const struct ofproto_mirror_settings
*s
)
3094 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3095 struct ofbundle
**srcs
, **dsts
;
3100 mirror_destroy(ofproto
->mbridge
, aux
);
3104 srcs
= xmalloc(s
->n_srcs
* sizeof *srcs
);
3105 dsts
= xmalloc(s
->n_dsts
* sizeof *dsts
);
3107 for (i
= 0; i
< s
->n_srcs
; i
++) {
3108 srcs
[i
] = bundle_lookup(ofproto
, s
->srcs
[i
]);
3111 for (i
= 0; i
< s
->n_dsts
; i
++) {
3112 dsts
[i
] = bundle_lookup(ofproto
, s
->dsts
[i
]);
3115 error
= mirror_set(ofproto
->mbridge
, aux
, s
->name
, srcs
, s
->n_srcs
, dsts
,
3116 s
->n_dsts
, s
->src_vlans
,
3117 bundle_lookup(ofproto
, s
->out_bundle
),
3118 s
->snaplen
, s
->out_vlan
);
3125 mirror_get_stats__(struct ofproto
*ofproto
, void *aux
,
3126 uint64_t *packets
, uint64_t *bytes
)
3128 return mirror_get_stats(ofproto_dpif_cast(ofproto
)->mbridge
, aux
, packets
,
3133 set_flood_vlans(struct ofproto
*ofproto_
, unsigned long *flood_vlans
)
3135 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3136 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
3137 if (mac_learning_set_flood_vlans(ofproto
->ml
, flood_vlans
)) {
3138 mac_learning_flush(ofproto
->ml
);
3140 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
3145 is_mirror_output_bundle(const struct ofproto
*ofproto_
, void *aux
)
3147 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3148 struct ofbundle
*bundle
= bundle_lookup(ofproto
, aux
);
3149 return bundle
&& mirror_bundle_out(ofproto
->mbridge
, bundle
) != 0;
3153 forward_bpdu_changed(struct ofproto
*ofproto_
)
3155 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3156 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3160 set_mac_table_config(struct ofproto
*ofproto_
, unsigned int idle_time
,
3163 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3164 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
3165 mac_learning_set_idle_time(ofproto
->ml
, idle_time
);
3166 mac_learning_set_max_entries(ofproto
->ml
, max_entries
);
3167 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
3170 /* Configures multicast snooping on 'ofport' using the settings
3171 * defined in 's'. */
3173 set_mcast_snooping(struct ofproto
*ofproto_
,
3174 const struct ofproto_mcast_snooping_settings
*s
)
3176 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3178 /* Only revalidate flows if the configuration changed. */
3179 if (!s
!= !ofproto
->ms
) {
3180 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3185 ofproto
->ms
= mcast_snooping_create();
3188 ovs_rwlock_wrlock(&ofproto
->ms
->rwlock
);
3189 mcast_snooping_set_idle_time(ofproto
->ms
, s
->idle_time
);
3190 mcast_snooping_set_max_entries(ofproto
->ms
, s
->max_entries
);
3191 if (mcast_snooping_set_flood_unreg(ofproto
->ms
, s
->flood_unreg
)) {
3192 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3194 ovs_rwlock_unlock(&ofproto
->ms
->rwlock
);
3196 mcast_snooping_unref(ofproto
->ms
);
3203 /* Configures multicast snooping port's flood settings on 'ofproto'. */
3205 set_mcast_snooping_port(struct ofproto
*ofproto_
, void *aux
,
3206 const struct ofproto_mcast_snooping_port_settings
*s
)
3208 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3209 struct ofbundle
*bundle
= bundle_lookup(ofproto
, aux
);
3211 if (ofproto
->ms
&& s
) {
3212 ovs_rwlock_wrlock(&ofproto
->ms
->rwlock
);
3213 mcast_snooping_set_port_flood(ofproto
->ms
, bundle
, s
->flood
);
3214 mcast_snooping_set_port_flood_reports(ofproto
->ms
, bundle
,
3216 ovs_rwlock_unlock(&ofproto
->ms
->rwlock
);
3224 struct ofport_dpif
*
3225 ofp_port_to_ofport(const struct ofproto_dpif
*ofproto
, ofp_port_t ofp_port
)
3227 struct ofport
*ofport
= ofproto_get_port(&ofproto
->up
, ofp_port
);
3228 return ofport
? ofport_dpif_cast(ofport
) : NULL
;
3232 ofproto_port_from_dpif_port(struct ofproto_dpif
*ofproto
,
3233 struct ofproto_port
*ofproto_port
,
3234 struct dpif_port
*dpif_port
)
3236 ofproto_port
->name
= dpif_port
->name
;
3237 ofproto_port
->type
= dpif_port
->type
;
3238 ofproto_port
->ofp_port
= odp_port_to_ofp_port(ofproto
, dpif_port
->port_no
);
3242 ofport_update_peer(struct ofport_dpif
*ofport
)
3244 const struct ofproto_dpif
*ofproto
;
3245 struct dpif_backer
*backer
;
3248 if (!netdev_vport_is_patch(ofport
->up
.netdev
)) {
3252 backer
= ofproto_dpif_cast(ofport
->up
.ofproto
)->backer
;
3253 backer
->need_revalidate
= REV_RECONFIGURE
;
3256 ofport
->peer
->peer
= NULL
;
3257 ofport
->peer
= NULL
;
3260 peer_name
= netdev_vport_patch_peer(ofport
->up
.netdev
);
3265 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_node
, &all_ofproto_dpifs
) {
3266 struct ofport
*peer_ofport
;
3267 struct ofport_dpif
*peer
;
3270 if (ofproto
->backer
!= backer
) {
3274 peer_ofport
= shash_find_data(&ofproto
->up
.port_by_name
, peer_name
);
3279 peer
= ofport_dpif_cast(peer_ofport
);
3280 peer_peer
= netdev_vport_patch_peer(peer
->up
.netdev
);
3281 if (peer_peer
&& !strcmp(netdev_get_name(ofport
->up
.netdev
),
3283 ofport
->peer
= peer
;
3284 ofport
->peer
->peer
= ofport
;
3294 port_run(struct ofport_dpif
*ofport
)
3296 long long int carrier_seq
= netdev_get_carrier_resets(ofport
->up
.netdev
);
3297 bool carrier_changed
= carrier_seq
!= ofport
->carrier_seq
;
3298 bool enable
= netdev_get_carrier(ofport
->up
.netdev
);
3299 bool cfm_enable
= false;
3300 bool bfd_enable
= false;
3302 ofport
->carrier_seq
= carrier_seq
;
3305 int cfm_opup
= cfm_get_opup(ofport
->cfm
);
3307 cfm_enable
= !cfm_get_fault(ofport
->cfm
);
3309 if (cfm_opup
>= 0) {
3310 cfm_enable
= cfm_enable
&& cfm_opup
;
3315 bfd_enable
= bfd_forwarding(ofport
->bfd
);
3318 if (ofport
->bfd
|| ofport
->cfm
) {
3319 enable
= enable
&& (cfm_enable
|| bfd_enable
);
3322 if (ofport
->bundle
) {
3323 enable
= enable
&& lacp_slave_may_enable(ofport
->bundle
->lacp
, ofport
);
3324 if (carrier_changed
) {
3325 lacp_slave_carrier_changed(ofport
->bundle
->lacp
, ofport
);
3329 if (ofport
->may_enable
!= enable
) {
3330 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3332 ofproto
->backer
->need_revalidate
= REV_PORT_TOGGLED
;
3334 if (ofport
->rstp_port
) {
3335 rstp_port_set_mac_operational(ofport
->rstp_port
, enable
);
3339 ofport
->may_enable
= enable
;
3343 port_query_by_name(const struct ofproto
*ofproto_
, const char *devname
,
3344 struct ofproto_port
*ofproto_port
)
3346 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3347 struct dpif_port dpif_port
;
3350 if (sset_contains(&ofproto
->ghost_ports
, devname
)) {
3351 const char *type
= netdev_get_type_from_name(devname
);
3353 /* We may be called before ofproto->up.port_by_name is populated with
3354 * the appropriate ofport. For this reason, we must get the name and
3355 * type from the netdev layer directly. */
3357 const struct ofport
*ofport
;
3359 ofport
= shash_find_data(&ofproto
->up
.port_by_name
, devname
);
3360 ofproto_port
->ofp_port
= ofport
? ofport
->ofp_port
: OFPP_NONE
;
3361 ofproto_port
->name
= xstrdup(devname
);
3362 ofproto_port
->type
= xstrdup(type
);
3368 if (!sset_contains(&ofproto
->ports
, devname
)) {
3371 error
= dpif_port_query_by_name(ofproto
->backer
->dpif
,
3372 devname
, &dpif_port
);
3374 ofproto_port_from_dpif_port(ofproto
, ofproto_port
, &dpif_port
);
3380 port_add(struct ofproto
*ofproto_
, struct netdev
*netdev
)
3382 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3383 const char *devname
= netdev_get_name(netdev
);
3384 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
3385 const char *dp_port_name
;
3387 if (netdev_vport_is_patch(netdev
)) {
3388 sset_add(&ofproto
->ghost_ports
, netdev_get_name(netdev
));
3392 dp_port_name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
3393 if (!dpif_port_exists(ofproto
->backer
->dpif
, dp_port_name
)) {
3394 odp_port_t port_no
= ODPP_NONE
;
3397 error
= dpif_port_add(ofproto
->backer
->dpif
, netdev
, &port_no
);
3401 if (netdev_get_tunnel_config(netdev
)) {
3402 simap_put(&ofproto
->backer
->tnl_backers
,
3403 dp_port_name
, odp_to_u32(port_no
));
3407 if (netdev_get_tunnel_config(netdev
)) {
3408 sset_add(&ofproto
->ghost_ports
, devname
);
3410 sset_add(&ofproto
->ports
, devname
);
3416 port_del(struct ofproto
*ofproto_
, ofp_port_t ofp_port
)
3418 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3419 struct ofport_dpif
*ofport
= ofp_port_to_ofport(ofproto
, ofp_port
);
3426 sset_find_and_delete(&ofproto
->ghost_ports
,
3427 netdev_get_name(ofport
->up
.netdev
));
3428 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3429 if (!ofport
->is_tunnel
&& !netdev_vport_is_patch(ofport
->up
.netdev
)) {
3430 error
= dpif_port_del(ofproto
->backer
->dpif
, ofport
->odp_port
);
3432 /* The caller is going to close ofport->up.netdev. If this is a
3433 * bonded port, then the bond is using that netdev, so remove it
3434 * from the bond. The client will need to reconfigure everything
3435 * after deleting ports, so then the slave will get re-added. */
3436 bundle_remove(&ofport
->up
);
3443 port_set_config(const struct ofport
*ofport_
, const struct smap
*cfg
)
3445 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3446 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3448 if (sset_contains(&ofproto
->ghost_ports
,
3449 netdev_get_name(ofport
->up
.netdev
))) {
3453 return dpif_port_set_config(ofproto
->backer
->dpif
, ofport
->odp_port
, cfg
);
3457 port_get_stats(const struct ofport
*ofport_
, struct netdev_stats
*stats
)
3459 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3462 error
= netdev_get_stats(ofport
->up
.netdev
, stats
);
3464 if (!error
&& ofport_
->ofp_port
== OFPP_LOCAL
) {
3465 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3467 ovs_mutex_lock(&ofproto
->stats_mutex
);
3468 /* ofproto->stats.tx_packets represents packets that we created
3469 * internally and sent to some port (e.g. packets sent with
3470 * ofproto_dpif_send_packet()). Account for them as if they had
3471 * come from OFPP_LOCAL and got forwarded. */
3473 if (stats
->rx_packets
!= UINT64_MAX
) {
3474 stats
->rx_packets
+= ofproto
->stats
.tx_packets
;
3477 if (stats
->rx_bytes
!= UINT64_MAX
) {
3478 stats
->rx_bytes
+= ofproto
->stats
.tx_bytes
;
3481 /* ofproto->stats.rx_packets represents packets that were received on
3482 * some port and we processed internally and dropped (e.g. STP).
3483 * Account for them as if they had been forwarded to OFPP_LOCAL. */
3485 if (stats
->tx_packets
!= UINT64_MAX
) {
3486 stats
->tx_packets
+= ofproto
->stats
.rx_packets
;
3489 if (stats
->tx_bytes
!= UINT64_MAX
) {
3490 stats
->tx_bytes
+= ofproto
->stats
.rx_bytes
;
3492 ovs_mutex_unlock(&ofproto
->stats_mutex
);
3499 port_get_lacp_stats(const struct ofport
*ofport_
, struct lacp_slave_stats
*stats
)
3501 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3502 if (ofport
->bundle
&& ofport
->bundle
->lacp
) {
3503 if (lacp_get_slave_stats(ofport
->bundle
->lacp
, ofport
, stats
)) {
3510 struct port_dump_state
{
3511 struct sset_position pos
;
3514 struct ofproto_port port
;
3519 port_dump_start(const struct ofproto
*ofproto_ OVS_UNUSED
, void **statep
)
3521 *statep
= xzalloc(sizeof(struct port_dump_state
));
3526 port_dump_next(const struct ofproto
*ofproto_
, void *state_
,
3527 struct ofproto_port
*port
)
3529 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3530 struct port_dump_state
*state
= state_
;
3531 const struct sset
*sset
;
3532 struct sset_node
*node
;
3534 if (state
->has_port
) {
3535 ofproto_port_destroy(&state
->port
);
3536 state
->has_port
= false;
3538 sset
= state
->ghost
? &ofproto
->ghost_ports
: &ofproto
->ports
;
3539 while ((node
= sset_at_position(sset
, &state
->pos
))) {
3542 error
= port_query_by_name(ofproto_
, node
->name
, &state
->port
);
3544 *port
= state
->port
;
3545 state
->has_port
= true;
3547 } else if (error
!= ENODEV
) {
3552 if (!state
->ghost
) {
3553 state
->ghost
= true;
3554 memset(&state
->pos
, 0, sizeof state
->pos
);
3555 return port_dump_next(ofproto_
, state_
, port
);
3562 port_dump_done(const struct ofproto
*ofproto_ OVS_UNUSED
, void *state_
)
3564 struct port_dump_state
*state
= state_
;
3566 if (state
->has_port
) {
3567 ofproto_port_destroy(&state
->port
);
3574 port_poll(const struct ofproto
*ofproto_
, char **devnamep
)
3576 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3578 if (ofproto
->port_poll_errno
) {
3579 int error
= ofproto
->port_poll_errno
;
3580 ofproto
->port_poll_errno
= 0;
3584 if (sset_is_empty(&ofproto
->port_poll_set
)) {
3588 *devnamep
= sset_pop(&ofproto
->port_poll_set
);
3593 port_poll_wait(const struct ofproto
*ofproto_
)
3595 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3596 dpif_port_poll_wait(ofproto
->backer
->dpif
);
3600 port_is_lacp_current(const struct ofport
*ofport_
)
3602 const struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3603 return (ofport
->bundle
&& ofport
->bundle
->lacp
3604 ? lacp_slave_is_current(ofport
->bundle
->lacp
, ofport
)
/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
rule_expire(struct rule_dpif *rule, long long now)
    OVS_REQUIRES(ofproto_mutex)
{
    uint16_t hard_timeout, idle_timeout;
    int reason = -1;

    hard_timeout = rule->up.hard_timeout;
    idle_timeout = rule->up.idle_timeout;

    /* Has 'rule' expired? */
    if (hard_timeout) {
        long long int modified;

        ovs_mutex_lock(&rule->up.mutex);
        modified = rule->up.modified;
        ovs_mutex_unlock(&rule->up.mutex);

        if (now > modified + hard_timeout * 1000) {
            reason = OFPRR_HARD_TIMEOUT;
        }
    }

    if (reason < 0 && idle_timeout) {
        long long int used;

        ovs_mutex_lock(&rule->stats_mutex);
        used = rule->stats.used;
        ovs_mutex_unlock(&rule->stats_mutex);

        if (now > used + idle_timeout * 1000) {
            reason = OFPRR_IDLE_TIMEOUT;
        }
    }

    if (reason >= 0) {
        COVERAGE_INC(ofproto_dpif_expired);
        ofproto_rule_expire(&rule->up, reason);
    }
}
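/* Sets 'packet''s input port metadata to the datapath port corresponding to
 * 'in_port' (treating OFPP_NONE as OFPP_LOCAL). */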
3652 ofproto_dpif_set_packet_odp_port(const struct ofproto_dpif
*ofproto
,
3653 ofp_port_t in_port
, struct dp_packet
*packet
)
3655 if (in_port
== OFPP_NONE
) {
3656 in_port
= OFPP_LOCAL
;
3658 packet
->md
.in_port
.odp_port
= ofp_port_to_odp_port(ofproto
, in_port
);
3662 ofproto_dpif_execute_actions__(struct ofproto_dpif
*ofproto
,
3663 ovs_version_t version
, const struct flow
*flow
,
3664 struct rule_dpif
*rule
,
3665 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
3666 int depth
, int resubmits
,
3667 struct dp_packet
*packet
)
3669 struct dpif_flow_stats stats
;
3670 struct xlate_out xout
;
3671 struct xlate_in xin
;
3672 struct dpif_execute execute
;
3675 ovs_assert((rule
!= NULL
) != (ofpacts
!= NULL
));
3677 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
3680 rule_dpif_credit_stats(rule
, &stats
);
3683 uint64_t odp_actions_stub
[1024 / 8];
3684 struct ofpbuf odp_actions
= OFPBUF_STUB_INITIALIZER(odp_actions_stub
);
3685 xlate_in_init(&xin
, ofproto
, version
, flow
, flow
->in_port
.ofp_port
, rule
,
3686 stats
.tcp_flags
, packet
, NULL
, &odp_actions
);
3687 xin
.ofpacts
= ofpacts
;
3688 xin
.ofpacts_len
= ofpacts_len
;
3689 xin
.resubmit_stats
= &stats
;
3691 xin
.resubmits
= resubmits
;
3692 if (xlate_actions(&xin
, &xout
) != XLATE_OK
) {
3697 execute
.actions
= odp_actions
.data
;
3698 execute
.actions_len
= odp_actions
.size
;
3700 pkt_metadata_from_flow(&packet
->md
, flow
);
3701 execute
.packet
= packet
;
3702 execute
.flow
= flow
;
3703 execute
.needs_help
= (xout
.slow
& SLOW_ACTION
) != 0;
3704 execute
.probe
= false;
3707 /* Fix up in_port. */
3708 ofproto_dpif_set_packet_odp_port(ofproto
, flow
->in_port
.ofp_port
, packet
);
3710 error
= dpif_execute(ofproto
->backer
->dpif
, &execute
);
3712 xlate_out_uninit(&xout
);
3713 ofpbuf_uninit(&odp_actions
);
3718 /* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
3719 * 'flow' must reflect the data in 'packet'. */
3721 ofproto_dpif_execute_actions(struct ofproto_dpif
*ofproto
,
3722 ovs_version_t version
, const struct flow
*flow
,
3723 struct rule_dpif
*rule
,
3724 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
3725 struct dp_packet
*packet
)
3727 return ofproto_dpif_execute_actions__(ofproto
, version
, flow
, rule
,
3728 ofpacts
, ofpacts_len
, 0, 0, packet
);
3732 rule_dpif_credit_stats__(struct rule_dpif
*rule
,
3733 const struct dpif_flow_stats
*stats
,
3735 OVS_REQUIRES(rule
->stats_mutex
)
3737 if (credit_counts
) {
3738 rule
->stats
.n_packets
+= stats
->n_packets
;
3739 rule
->stats
.n_bytes
+= stats
->n_bytes
;
3741 rule
->stats
.used
= MAX(rule
->stats
.used
, stats
->used
);
3745 rule_dpif_credit_stats(struct rule_dpif
*rule
,
3746 const struct dpif_flow_stats
*stats
)
3748 ovs_mutex_lock(&rule
->stats_mutex
);
3749 if (OVS_UNLIKELY(rule
->new_rule
)) {
3750 ovs_mutex_lock(&rule
->new_rule
->stats_mutex
);
3751 rule_dpif_credit_stats__(rule
->new_rule
, stats
, rule
->forward_counts
);
3752 ovs_mutex_unlock(&rule
->new_rule
->stats_mutex
);
3754 rule_dpif_credit_stats__(rule
, stats
, true);
3756 ovs_mutex_unlock(&rule
->stats_mutex
);
3759 /* Sets 'rule''s recirculation id. */
3761 rule_dpif_set_recirc_id(struct rule_dpif
*rule
, uint32_t id
)
3762 OVS_REQUIRES(rule
->up
.mutex
)
3764 ovs_assert(!rule
->recirc_id
|| rule
->recirc_id
== id
);
3765 if (rule
->recirc_id
== id
) {
3766 /* Release the new reference to the same id. */
3769 rule
->recirc_id
= id
;
3773 /* Sets 'rule''s recirculation id. */
3775 rule_set_recirc_id(struct rule
*rule_
, uint32_t id
)
3777 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3779 ovs_mutex_lock(&rule
->up
.mutex
);
3780 rule_dpif_set_recirc_id(rule
, id
);
3781 ovs_mutex_unlock(&rule
->up
.mutex
);
3785 ofproto_dpif_get_tables_version(struct ofproto_dpif
*ofproto
)
3787 ovs_version_t version
;
3789 /* Use memory_order_acquire to signify that any following memory accesses
3790 * can not be reordered to happen before this atomic read. This makes sure
3791 * all following reads relate to this or a newer version, but never to an
3793 atomic_read_explicit(&ofproto
->tables_version
, &version
,
3794 memory_order_acquire
);
3798 /* The returned rule (if any) is valid at least until the next RCU quiescent
3799 * period. If the rule needs to stay around longer, the caller should take
3802 * 'flow' is non-const to allow for temporary modifications during the lookup.
3803 * Any changes are restored before returning. */
3804 static struct rule_dpif
*
3805 rule_dpif_lookup_in_table(struct ofproto_dpif
*ofproto
, ovs_version_t version
,
3806 uint8_t table_id
, struct flow
*flow
,
3807 struct flow_wildcards
*wc
)
3809 struct classifier
*cls
= &ofproto
->up
.tables
[table_id
].cls
;
3810 return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls
, version
,
3815 ofproto_dpif_credit_table_stats(struct ofproto_dpif
*ofproto
, uint8_t table_id
,
3816 uint64_t n_matches
, uint64_t n_misses
)
3818 struct oftable
*tbl
= &ofproto
->up
.tables
[table_id
];
3822 atomic_add_relaxed(&tbl
->n_matched
, n_matches
, &orig
);
3825 atomic_add_relaxed(&tbl
->n_missed
, n_misses
, &orig
);
3829 /* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
3830 * table '*table_id'. Returns the rule that was found, which may be one of the
3831 * special rules according to packet miss hadling. If 'may_packet_in' is
3832 * false, returning of the miss_rule (which issues packet ins for the
3833 * controller) is avoided. Updates 'wc', if nonnull, to reflect the fields
3834 * that were used during the lookup.
3836 * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
3837 * if none is found then the table miss configuration for that table is
3838 * honored, which can result in additional lookups in other OpenFlow tables.
3839 * In this case the function updates '*table_id' to reflect the final OpenFlow
3840 * table that was searched.
3842 * If 'honor_table_miss' is false, then only one table lookup occurs, in
3845 * The rule is returned in '*rule', which is valid at least until the next
3846 * RCU quiescent period. If the '*rule' needs to stay around longer, the
3847 * caller must take a reference.
3849 * 'in_port' allows the lookup to take place as if the in port had the value
3850 * 'in_port'. This is needed for resubmit action support.
3852 * 'flow' is non-const to allow for temporary modifications during the lookup.
3853 * Any changes are restored before returning. */
3855 rule_dpif_lookup_from_table(struct ofproto_dpif
*ofproto
,
3856 ovs_version_t version
, struct flow
*flow
,
3857 struct flow_wildcards
*wc
,
3858 const struct dpif_flow_stats
*stats
,
3859 uint8_t *table_id
, ofp_port_t in_port
,
3860 bool may_packet_in
, bool honor_table_miss
,
3861 struct xlate_cache
*xcache
)
3863 ovs_be16 old_tp_src
= flow
->tp_src
, old_tp_dst
= flow
->tp_dst
;
3864 ofp_port_t old_in_port
= flow
->in_port
.ofp_port
;
3865 enum ofputil_table_miss miss_config
;
3866 struct rule_dpif
*rule
;
3869 /* We always unwildcard nw_frag (for IP), so they
3870 * need not be unwildcarded here. */
3871 if (flow
->nw_frag
& FLOW_NW_FRAG_ANY
3872 && ofproto
->up
.frag_handling
!= OFPUTIL_FRAG_NX_MATCH
) {
3873 if (ofproto
->up
.frag_handling
== OFPUTIL_FRAG_NORMAL
) {
3874 /* We must pretend that transport ports are unavailable. */
3875 flow
->tp_src
= htons(0);
3876 flow
->tp_dst
= htons(0);
3878 /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
3879 * Use the drop_frags_rule (which cannot disappear). */
3880 rule
= ofproto
->drop_frags_rule
;
3882 struct oftable
*tbl
= &ofproto
->up
.tables
[*table_id
];
3885 atomic_add_relaxed(&tbl
->n_matched
, stats
->n_packets
, &orig
);
3888 struct xc_entry
*entry
;
3890 entry
= xlate_cache_add_entry(xcache
, XC_TABLE
);
3891 entry
->table
.ofproto
= ofproto
;
3892 entry
->table
.id
= *table_id
;
3893 entry
->table
.match
= true;
3899 /* Look up a flow with 'in_port' as the input port. Then restore the
3900 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
3901 * have surprising behavior). */
3902 flow
->in_port
.ofp_port
= in_port
;
3904 /* Our current implementation depends on n_tables == N_TABLES, and
3905 * TBL_INTERNAL being the last table. */
3906 BUILD_ASSERT_DECL(N_TABLES
== TBL_INTERNAL
+ 1);
3908 miss_config
= OFPUTIL_TABLE_MISS_CONTINUE
;
3910 for (next_id
= *table_id
;
3911 next_id
< ofproto
->up
.n_tables
;
3912 next_id
++, next_id
+= (next_id
== TBL_INTERNAL
))
3914 *table_id
= next_id
;
3915 rule
= rule_dpif_lookup_in_table(ofproto
, version
, next_id
, flow
, wc
);
3917 struct oftable
*tbl
= &ofproto
->up
.tables
[next_id
];
3920 atomic_add_relaxed(rule
? &tbl
->n_matched
: &tbl
->n_missed
,
3921 stats
->n_packets
, &orig
);
3924 struct xc_entry
*entry
;
3926 entry
= xlate_cache_add_entry(xcache
, XC_TABLE
);
3927 entry
->table
.ofproto
= ofproto
;
3928 entry
->table
.id
= next_id
;
3929 entry
->table
.match
= (rule
!= NULL
);
3932 goto out
; /* Match. */
3934 if (honor_table_miss
) {
3935 miss_config
= ofproto_table_get_miss_config(&ofproto
->up
,
3937 if (miss_config
== OFPUTIL_TABLE_MISS_CONTINUE
) {
3944 rule
= ofproto
->no_packet_in_rule
;
3945 if (may_packet_in
) {
3946 if (miss_config
== OFPUTIL_TABLE_MISS_CONTINUE
3947 || miss_config
== OFPUTIL_TABLE_MISS_CONTROLLER
) {
3948 struct ofport_dpif
*port
;
3950 port
= ofp_port_to_ofport(ofproto
, old_in_port
);
3952 VLOG_WARN_RL(&rl
, "packet-in on unknown OpenFlow port %"PRIu32
,
3954 } else if (!(port
->up
.pp
.config
& OFPUTIL_PC_NO_PACKET_IN
)) {
3955 rule
= ofproto
->miss_rule
;
3957 } else if (miss_config
== OFPUTIL_TABLE_MISS_DEFAULT
&&
3958 connmgr_wants_packet_in_on_miss(ofproto
->up
.connmgr
)) {
3959 rule
= ofproto
->miss_rule
;
3963 /* Restore port numbers, as they may have been modified above. */
3964 flow
->tp_src
= old_tp_src
;
3965 flow
->tp_dst
= old_tp_dst
;
3966 /* Restore the old in port. */
3967 flow
->in_port
.ofp_port
= old_in_port
;
3972 static struct rule_dpif
*rule_dpif_cast(const struct rule
*rule
)
3974 return rule
? CONTAINER_OF(rule
, struct rule_dpif
, up
) : NULL
;
3977 static struct rule
*
3980 struct rule_dpif
*rule
= xzalloc(sizeof *rule
);
3985 rule_dealloc(struct rule
*rule_
)
3987 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3992 check_mask(struct ofproto_dpif
*ofproto
, const struct miniflow
*flow
)
3994 const struct odp_support
*support
;
3995 uint16_t ct_state
, ct_zone
;
3999 support
= &ofproto
->backer
->support
.odp
;
4000 ct_state
= MINIFLOW_GET_U16(flow
, ct_state
);
4001 if (support
->ct_state
&& support
->ct_zone
&& support
->ct_mark
4002 && support
->ct_label
&& support
->ct_state_nat
) {
4003 return ct_state
& CS_UNSUPPORTED_MASK
? OFPERR_OFPBMC_BAD_MASK
: 0;
4006 ct_zone
= MINIFLOW_GET_U16(flow
, ct_zone
);
4007 ct_mark
= MINIFLOW_GET_U32(flow
, ct_mark
);
4008 ct_label
= MINIFLOW_GET_U128(flow
, ct_label
);
4010 if ((ct_state
&& !support
->ct_state
)
4011 || (ct_state
& CS_UNSUPPORTED_MASK
)
4012 || ((ct_state
& (CS_SRC_NAT
| CS_DST_NAT
)) && !support
->ct_state_nat
)
4013 || (ct_zone
&& !support
->ct_zone
)
4014 || (ct_mark
&& !support
->ct_mark
)
4015 || (!ovs_u128_is_zero(ct_label
) && !support
->ct_label
)) {
4016 return OFPERR_OFPBMC_BAD_MASK
;
4023 report_unsupported_ct(const char *detail
)
4025 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
4026 VLOG_WARN_RL(&rl
, "Rejecting ct action because datapath does not support "
4027 "ct action%s%s (your kernel module may be out of date)",
4029 detail
? detail
: "");
4033 check_actions(const struct ofproto_dpif
*ofproto
,
4034 const struct rule_actions
*const actions
)
4036 const struct ofpact
*ofpact
;
4038 OFPACT_FOR_EACH (ofpact
, actions
->ofpacts
, actions
->ofpacts_len
) {
4039 const struct odp_support
*support
;
4040 const struct ofpact_conntrack
*ct
;
4041 const struct ofpact
*a
;
4043 if (ofpact
->type
!= OFPACT_CT
) {
4047 ct
= CONTAINER_OF(ofpact
, struct ofpact_conntrack
, ofpact
);
4048 support
= &ofproto
->backer
->support
.odp
;
4050 if (!support
->ct_state
) {
4051 report_unsupported_ct(NULL
);
4052 return OFPERR_OFPBAC_BAD_TYPE
;
4054 if ((ct
->zone_imm
|| ct
->zone_src
.field
) && !support
->ct_zone
) {
4055 report_unsupported_ct("zone");
4056 return OFPERR_OFPBAC_BAD_ARGUMENT
;
4059 OFPACT_FOR_EACH(a
, ct
->actions
, ofpact_ct_get_action_len(ct
)) {
4060 const struct mf_field
*dst
= ofpact_get_mf_dst(a
);
4062 if (a
->type
== OFPACT_NAT
&& !support
->ct_state_nat
) {
4063 /* The backer doesn't seem to support the NAT bits in
4064 * 'ct_state': assume that it doesn't support the NAT
4066 report_unsupported_ct("nat");
4067 return OFPERR_OFPBAC_BAD_TYPE
;
4069 if (dst
&& ((dst
->id
== MFF_CT_MARK
&& !support
->ct_mark
)
4070 || (dst
->id
== MFF_CT_LABEL
&& !support
->ct_label
))) {
4071 report_unsupported_ct("setting mark and/or label");
4072 return OFPERR_OFPBAC_BAD_SET_ARGUMENT
;
4081 rule_check(struct rule
*rule
)
4083 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->ofproto
);
4086 err
= check_mask(ofproto
, &rule
->cr
.match
.mask
->masks
);
4090 return check_actions(ofproto
, rule
->actions
);
4094 rule_construct(struct rule
*rule_
)
4095 OVS_NO_THREAD_SAFETY_ANALYSIS
4097 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
4100 error
= rule_check(rule_
);
4105 ovs_mutex_init_adaptive(&rule
->stats_mutex
);
4106 rule
->stats
.n_packets
= 0;
4107 rule
->stats
.n_bytes
= 0;
4108 rule
->stats
.used
= rule
->up
.modified
;
4109 rule
->recirc_id
= 0;
4110 rule
->new_rule
= NULL
;
4111 rule
->forward_counts
= false;
4117 rule_insert(struct rule
*rule_
, struct rule
*old_rule_
, bool forward_counts
)
4118 OVS_REQUIRES(ofproto_mutex
)
4120 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
4123 struct rule_dpif
*old_rule
= rule_dpif_cast(old_rule_
);
4125 ovs_assert(!old_rule
->new_rule
);
4127 /* Take a reference to the new rule, and refer all stats updates from
4128 * the old rule to the new rule. */
4129 ofproto_rule_ref(&rule
->up
);
4131 ovs_mutex_lock(&old_rule
->stats_mutex
);
4132 ovs_mutex_lock(&rule
->stats_mutex
);
4133 old_rule
->new_rule
= rule
; /* Forward future stats. */
4134 old_rule
->forward_counts
= forward_counts
;
4136 if (forward_counts
) {
4137 rule
->stats
= old_rule
->stats
; /* Transfer stats to the new
4140 /* Used timestamp must be forwarded whenever a rule is modified. */
4141 rule
->stats
.used
= old_rule
->stats
.used
;
4143 ovs_mutex_unlock(&rule
->stats_mutex
);
4144 ovs_mutex_unlock(&old_rule
->stats_mutex
);
4149 rule_destruct(struct rule
*rule_
)
4150 OVS_NO_THREAD_SAFETY_ANALYSIS
4152 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
4154 ovs_mutex_destroy(&rule
->stats_mutex
);
4155 /* Release reference to the new rule, if any. */
4156 if (rule
->new_rule
) {
4157 ofproto_rule_unref(&rule
->new_rule
->up
);
4159 if (rule
->recirc_id
) {
4160 recirc_free_id(rule
->recirc_id
);
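/* Reports 'rule_''s packet count, byte count, and last-used time, following
 * the statistics of the replacement rule if 'rule_' has been superseded. */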
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
               long long int *used)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    ovs_mutex_lock(&rule->stats_mutex);
    if (OVS_UNLIKELY(rule->new_rule)) {
        rule_get_stats(&rule->new_rule->up, packets, bytes, used);
    } else {
        *packets = rule->stats.n_packets;
        *bytes = rule->stats.n_bytes;
        *used = rule->stats.used;
    }
    ovs_mutex_unlock(&rule->stats_mutex);
}
4181 struct ofproto_dpif_packet_out
{
4182 struct xlate_cache xcache
;
4183 struct ofpbuf odp_actions
;
4184 struct recirc_refs rr
;
4189 static struct ofproto_dpif_packet_out
*
4190 ofproto_dpif_packet_out_new(void)
4192 struct ofproto_dpif_packet_out
*aux
= xmalloc(sizeof *aux
);
4193 xlate_cache_init(&aux
->xcache
);
4194 ofpbuf_init(&aux
->odp_actions
, 64);
4195 aux
->rr
= RECIRC_REFS_EMPTY_INITIALIZER
;
4196 aux
->needs_help
= false;
4202 ofproto_dpif_packet_out_delete(struct ofproto_dpif_packet_out
*aux
)
4205 xlate_cache_uninit(&aux
->xcache
);
4206 ofpbuf_uninit(&aux
->odp_actions
);
4207 recirc_refs_unref(&aux
->rr
);
4213 packet_xlate(struct ofproto
*ofproto_
, struct ofproto_packet_out
*opo
)
4214 OVS_REQUIRES(ofproto_mutex
)
4216 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4217 struct xlate_out xout
;
4218 struct xlate_in xin
;
4219 enum ofperr error
= 0;
4221 struct ofproto_dpif_packet_out
*aux
= ofproto_dpif_packet_out_new();
4223 xlate_in_init(&xin
, ofproto
, opo
->version
, opo
->flow
,
4224 opo
->flow
->in_port
.ofp_port
, NULL
, 0, opo
->packet
, NULL
,
4226 xin
.ofpacts
= opo
->ofpacts
;
4227 xin
.ofpacts_len
= opo
->ofpacts_len
;
4228 /* No learning or stats, but collect side effects to xcache. */
4229 xin
.allow_side_effects
= false;
4230 xin
.resubmit_stats
= NULL
;
4231 xin
.xcache
= &aux
->xcache
;
4233 if (xlate_actions(&xin
, &xout
) != XLATE_OK
) {
4234 error
= OFPERR_OFPFMFC_UNKNOWN
; /* Error processing actions. */
4237 /* Prepare learn actions. */
4238 struct xc_entry
*entry
;
4239 struct ofpbuf entries
= aux
->xcache
.entries
;
4241 XC_ENTRY_FOR_EACH (entry
, &entries
) {
4242 if (entry
->type
== XC_LEARN
) {
4243 struct ofproto_flow_mod
*ofm
= entry
->learn
.ofm
;
4245 error
= ofproto_flow_mod_learn_refresh(ofm
);
4249 struct rule
*rule
= ofm
->temp_rule
;
4250 ofm
->learn_adds_rule
= (rule
->state
== RULE_INITIALIZED
);
4251 if (ofm
->learn_adds_rule
) {
4252 /* If learning on a different bridge, must use its next
4253 * version number. */
4254 ofm
->version
= (rule
->ofproto
== ofproto_
)
4255 ? opo
->version
: rule
->ofproto
->tables_version
+ 1;
4256 error
= ofproto_flow_mod_learn_start(ofm
);
4265 aux
->needs_help
= (xout
.slow
& SLOW_ACTION
) != 0;
4266 recirc_refs_swap(&aux
->rr
, &xout
.recircs
); /* Hold recirc refs. */
4268 xlate_out_uninit(&xout
);
4273 xlate_out_uninit(&xout
);
4274 ofproto_dpif_packet_out_delete(aux
);
4280 packet_xlate_revert(struct ofproto
*ofproto OVS_UNUSED
,
4281 struct ofproto_packet_out
*opo
)
4282 OVS_REQUIRES(ofproto_mutex
)
4284 struct ofproto_dpif_packet_out
*aux
= opo
->aux
;
4287 /* Revert the learned flows. */
4288 struct xc_entry
*entry
;
4289 struct ofpbuf entries
= aux
->xcache
.entries
;
4291 XC_ENTRY_FOR_EACH (entry
, &entries
) {
4292 if (entry
->type
== XC_LEARN
&& entry
->learn
.ofm
->learn_adds_rule
) {
4293 ofproto_flow_mod_learn_revert(entry
->learn
.ofm
);
4297 ofproto_dpif_packet_out_delete(aux
);
/* Push stats and perform side effects of flow translation. */
static void
ofproto_dpif_xcache_execute(struct ofproto_dpif *ofproto,
                            struct xlate_cache *xcache,
                            const struct dpif_flow_stats *stats)
    OVS_REQUIRES(ofproto_mutex)
{
    struct xc_entry *entry;
    struct ofpbuf entries = xcache->entries;

    XC_ENTRY_FOR_EACH (entry, &entries) {
        switch (entry->type) {
        case XC_LEARN:
            /* Finish the learned flows. */
            if (entry->learn.ofm->learn_adds_rule) {
                ofproto_flow_mod_learn_finish(entry->learn.ofm, &ofproto->up);
            }
            break;
        case XC_FIN_TIMEOUT:
            if (stats->tcp_flags & (TCP_FIN | TCP_RST)) {
                /* 'ofproto_mutex' already held */
                ofproto_rule_reduce_timeouts__(&entry->fin.rule->up,
                                               entry->fin.idle,
                                               entry->fin.hard);
            }
            break;
            /* All the rest can be dealt with by the xlate layer. */
        case XC_TABLE:
        case XC_RULE:
        case XC_BOND:
        case XC_NETDEV:
        case XC_NETFLOW:
        case XC_MIRROR:
        case XC_NORMAL:
        case XC_GROUP:
        case XC_TNL_NEIGH:
        case XC_CONTROLLER:
            xlate_push_stats_entry(entry, stats);
            break;
        default:
            OVS_NOT_REACHED();
        }
    }
}
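
/* Executes the datapath actions prepared by packet_xlate(), applying the
 * cached side effects and statistics first. */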
static void
packet_execute(struct ofproto *ofproto_, struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_flow_stats stats;
    struct dpif_execute execute;

    struct ofproto_dpif_packet_out *aux = opo->aux;
    ovs_assert(aux);

    /* Run the side effects from the xcache. */
    dpif_flow_stats_extract(opo->flow, opo->packet, time_msec(), &stats);
    ofproto_dpif_xcache_execute(ofproto, &aux->xcache, &stats);

    execute.actions = aux->odp_actions.data;
    execute.actions_len = aux->odp_actions.size;

    pkt_metadata_from_flow(&opo->packet->md, opo->flow);
    execute.packet = opo->packet;
    execute.flow = opo->flow;
    execute.needs_help = aux->needs_help;
    execute.probe = false;
    execute.mtu = 0;

    /* Fix up in_port. */
    ofproto_dpif_set_packet_odp_port(ofproto, opo->flow->in_port.ofp_port,
                                     opo->packet);

    dpif_execute(ofproto->backer->dpif, &execute);
    ofproto_dpif_packet_out_delete(aux);
    opo->aux = NULL;
}
static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
{
    return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
}
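
/* Group allocation and deallocation hooks for the ofproto provider
 * interface. */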
static struct ofgroup *
group_alloc(void)
{
    struct group_dpif *group = xzalloc(sizeof *group);
    return &group->up;
}

static void
group_dealloc(struct ofgroup *group_)
{
    struct group_dpif *group = group_dpif_cast(group_);
    free(group);
}
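
/* Resets the group's packet and byte counters, as well as the counters of
 * every bucket. */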
static void
group_construct_stats(struct group_dpif *group)
    OVS_REQUIRES(group->stats_mutex)
{
    group->packet_count = 0;
    group->byte_count = 0;

    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        bucket->stats.packet_count = 0;
        bucket->stats.byte_count = 0;
    }
}
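
/* Credits 'stats' to 'group' and to 'bucket', or to every bucket in the group
 * if 'bucket' is NULL. */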
void
group_dpif_credit_stats(struct group_dpif *group,
                        struct ofputil_bucket *bucket,
                        const struct dpif_flow_stats *stats)
{
    ovs_mutex_lock(&group->stats_mutex);
    group->packet_count += stats->n_packets;
    group->byte_count += stats->n_bytes;
    if (bucket) {
        bucket->stats.packet_count += stats->n_packets;
        bucket->stats.byte_count += stats->n_bytes;
    } else { /* Credit to all buckets */
        struct ofputil_bucket *bucket;
        LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
            bucket->stats.packet_count += stats->n_packets;
            bucket->stats.byte_count += stats->n_bytes;
        }
    }
    ovs_mutex_unlock(&group->stats_mutex);
}
static enum ofperr
group_construct(struct ofgroup *group_)
{
    struct group_dpif *group = group_dpif_cast(group_);

    ovs_mutex_init_adaptive(&group->stats_mutex);
    ovs_mutex_lock(&group->stats_mutex);
    group_construct_stats(group);
    ovs_mutex_unlock(&group->stats_mutex);
    return 0;
}

static void
group_destruct(struct ofgroup *group_)
{
    struct group_dpif *group = group_dpif_cast(group_);
    ovs_mutex_destroy(&group->stats_mutex);
}
static enum ofperr
group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
{
    struct group_dpif *group = group_dpif_cast(group_);

    ovs_mutex_lock(&group->stats_mutex);
    ogs->packet_count = group->packet_count;
    ogs->byte_count = group->byte_count;

    struct bucket_counter *bucket_stats = ogs->bucket_stats;
    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        bucket_stats->packet_count = bucket->stats.packet_count;
        bucket_stats->byte_count = bucket->stats.byte_count;
        bucket_stats++;
    }
    ovs_mutex_unlock(&group->stats_mutex);

    return 0;
}
/* If the group exists, this function increments the group's reference count.
 *
 * Make sure to call ofproto_group_unref() after no longer needing to maintain
 * a reference to the group. */
struct group_dpif *
group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
                  ovs_version_t version, bool take_ref)
{
    struct ofgroup *ofgroup = ofproto_group_lookup(&ofproto->up, group_id,
                                                   version, take_ref);
    return ofgroup ? group_dpif_cast(ofgroup) : NULL;
}
/* Sends 'packet' out 'ofport'.  If 'port' is a tunnel and that tunnel type
 * supports a notion of an OAM flag, sets it if 'oam' is true.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
int
ofproto_dpif_send_packet(const struct ofport_dpif *ofport, bool oam,
                         struct dp_packet *packet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    int error;

    error = xlate_send_packet(ofport, oam, packet);

    ovs_mutex_lock(&ofproto->stats_mutex);
    ofproto->stats.tx_packets++;
    ofproto->stats.tx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&ofproto->stats_mutex);
    return error;
}
/* Return the version string of the datapath that backs up
 * this 'ofproto'. */
static const char *
get_datapath_version(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    return ofproto->backer->dp_version_string;
}
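
/* Flushes the connection-tracking entries of the datapath that backs
 * 'ofproto_', limited to '*zone' if 'zone' is nonnull. */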
static void
ct_flush(const struct ofproto *ofproto_, const uint16_t *zone)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    ct_dpif_flush(ofproto->backer->dpif, zone);
}
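
/* Accepts or rejects the requested IP fragment handling mode; reassembly is
 * not supported by the datapath. */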
static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofputil_frag_handling frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (frag_handling != OFPUTIL_FRAG_REASM) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        return true;
    } else {
        return false;
    }
}
static enum ofperr
nxt_resume(struct ofproto *ofproto_,
           const struct ofputil_packet_in_private *pin)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Translate pin into datapath actions. */
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
    enum slow_path_reason slow;
    enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);

    /* Steal 'pin->packet' and put it into a dp_packet. */
    struct dp_packet packet;
    dp_packet_init(&packet, pin->public.packet_len);
    dp_packet_put(&packet, pin->public.packet, pin->public.packet_len);

    pkt_metadata_from_flow(&packet.md, &pin->public.flow_metadata.flow);

    /* Fix up in_port. */
    ofproto_dpif_set_packet_odp_port(ofproto,
                                     pin->public.flow_metadata.flow.in_port.ofp_port,
                                     &packet);

    struct flow headers;
    flow_extract(&packet, &headers);

    /* Execute the datapath actions on the packet. */
    struct dpif_execute execute = {
        .actions = odp_actions.data,
        .actions_len = odp_actions.size,
        .needs_help = (slow & SLOW_ACTION) != 0,
        .packet = &packet,
        .flow = &headers,
    };
    dpif_execute(ofproto->backer->dpif, &execute);

    /* Clean up. */
    ofpbuf_uninit(&odp_actions);
    dp_packet_uninit(&packet);

    return error;
}
static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else if (ofproto->netflow) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        netflow_unref(ofproto->netflow);
        ofproto->netflow = NULL;
    }

    return 0;
}
static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}
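
/* Implements the "fdb/flush" unixctl command: flushes the MAC learning table
 * of the named bridge, or of every bridge if none is given. */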
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
        mac_learning_flush(ofproto->ml);
        ovs_rwlock_unlock(&ofproto->ml->rwlock);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ovs_rwlock_wrlock(&ofproto->ml->rwlock);
            mac_learning_flush(ofproto->ml);
            ovs_rwlock_unlock(&ofproto->ml->rwlock);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}
static void
ofproto_unixctl_mcast_snooping_flush(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }

        if (!mcast_snooping_enabled(ofproto->ms)) {
            unixctl_command_reply_error(conn, "multicast snooping is disabled");
            return;
        }
        mcast_snooping_mdb_flush(ofproto->ms);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            if (!mcast_snooping_enabled(ofproto->ms)) {
                continue;
            }
            mcast_snooping_mdb_flush(ofproto->ms);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(ovs_list_front(&bundle->ports), struct ofport_dpif,
                        bundle_node);
}
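
/* Implements the "fdb/show" unixctl command: prints one line per learned MAC
 * entry with its port, VLAN and age. */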
static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    ovs_rwlock_rdlock(&ofproto->ml->rwlock);
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = mac_entry_get_port(ofproto->ml, e);
        char name[OFP_MAX_PORT_NAME_LEN];

        ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                               name, sizeof name);
        ds_put_format(&ds, "%5s  %4d  "ETH_ADDR_FMT"  %3d\n",
                      name, e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    ovs_rwlock_unlock(&ofproto->ml->rwlock);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
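
/* Implements the "mdb/show" unixctl command: prints the snooped multicast
 * groups and the ports connected to multicast routers. */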
static void
ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
                                    int argc OVS_UNUSED,
                                    const char *argv[],
                                    void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct ofbundle *bundle;
    const struct mcast_group *grp;
    struct mcast_group_bundle *b;
    struct mcast_mrouter_bundle *mrouter;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    if (!mcast_snooping_enabled(ofproto->ms)) {
        unixctl_command_reply_error(conn, "multicast snooping is disabled");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  GROUP                Age\n");
    ovs_rwlock_rdlock(&ofproto->ms->rwlock);
    LIST_FOR_EACH (grp, group_node, &ofproto->ms->group_lru) {
        LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
            char name[OFP_MAX_PORT_NAME_LEN];

            bundle = b->port;
            ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                                   name, sizeof name);
            ds_put_format(&ds, "%5s %4d ", name, grp->vlan);
            ipv6_format_mapped(&grp->addr, &ds);
            ds_put_format(&ds, " %3d\n",
                          mcast_bundle_age(ofproto->ms, b));
        }
    }

    /* ports connected to multicast routers */
    LIST_FOR_EACH(mrouter, mrouter_node, &ofproto->ms->mrouter_lru) {
        char name[OFP_MAX_PORT_NAME_LEN];

        bundle = mrouter->port;
        ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                               name, sizeof name);
        ds_put_format(&ds, "%5s %4d querier %3d\n",
                      name, mrouter->vlan,
                      mcast_mrouter_age(ofproto->ms, mrouter));
    }
    ovs_rwlock_unlock(&ofproto->ms->rwlock);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
/* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
 * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
 * to destroy 'ofproto_shash' and free the returned value. */
static const struct shash_node **
get_ofprotos(struct shash *ofproto_shash)
{
    const struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
        shash_add_nocopy(ofproto_shash, name, ofproto);
    }

    return shash_sort(ofproto_shash);
}
static void
ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct shash ofproto_shash;
    const struct shash_node **sorted_ofprotos;
    int i;

    shash_init(&ofproto_shash);
    sorted_ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        const struct shash_node *node = sorted_ofprotos[i];
        ds_put_format(&ds, "%s\n", node->name);
    }

    shash_destroy(&ofproto_shash);
    free(sorted_ofprotos);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
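
/* Appends a description of 'backer' to 'ds': its datapath statistics plus
 * every bridge and port built on top of it. */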
static void
dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
{
    const struct shash_node **ofprotos;
    struct dpif_dp_stats dp_stats;
    struct shash ofproto_shash;
    size_t i;

    dpif_get_dp_stats(backer->dpif, &dp_stats);
    ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
                  dpif_name(backer->dpif), dp_stats.n_hit, dp_stats.n_missed);

    shash_init(&ofproto_shash);
    ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        struct ofproto_dpif *ofproto = ofprotos[i]->data;
        const struct shash_node **ports;
        size_t j;

        if (ofproto->backer != backer) {
            continue;
        }

        ds_put_format(ds, "\t%s:\n", ofproto->up.name);

        ports = shash_sort(&ofproto->up.port_by_name);
        for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
            const struct shash_node *node = ports[j];
            struct ofport *ofport = node->data;
            struct smap config;
            odp_port_t odp_port;

            ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
                          ofport->ofp_port);

            odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
            if (odp_port != ODPP_NONE) {
                ds_put_format(ds, "%"PRIu32":", odp_port);
            } else {
                ds_put_cstr(ds, "none:");
            }

            ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));

            smap_init(&config);
            if (!netdev_get_config(ofport->netdev, &config)) {
                const struct smap_node **nodes;
                size_t i;

                nodes = smap_sort(&config);
                for (i = 0; i < smap_count(&config); i++) {
                    const struct smap_node *node = nodes[i];
                    ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
                                  node->key, node->value);
                }
                free(nodes);
            }
            smap_destroy(&config);

            ds_put_char(ds, ')');
            ds_put_char(ds, '\n');
        }
        free(ports);
    }
    shash_destroy(&ofproto_shash);
    free(ofprotos);
}
static void
ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                          const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct shash_node **backers;
    int i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        dpif_show_backer(backers[i]->data, &ds);
    }
    free(backers);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
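
/* Implements the "dpif/dump-flows" unixctl command: dumps the datapath flows
 * that belong to the named bridge, optionally ("-m") in a more verbose
 * format. */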
static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED, const char *argv[],
                                void *aux OVS_UNUSED)
{
    const struct ofproto_dpif *ofproto;
    int error;
    struct ds ds = DS_EMPTY_INITIALIZER;
    bool verbosity = false;

    struct dpif_port dpif_port;
    struct dpif_port_dump port_dump;
    struct hmap portno_names;

    struct dpif_flow_dump *flow_dump;
    struct dpif_flow_dump_thread *flow_dump_thread;
    struct dpif_flow f;

    ofproto = ofproto_dpif_lookup(argv[argc - 1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    if (argc > 2 && !strcmp(argv[1], "-m")) {
        verbosity = true;
    }

    hmap_init(&portno_names);
    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, ofproto->backer->dpif) {
        odp_portno_names_set(&portno_names, dpif_port.port_no, dpif_port.name);
    }

    flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false);
    flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
    while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
        struct flow flow;

        if (odp_flow_key_to_flow(f.key, f.key_len, &flow) == ODP_FIT_ERROR
            || xlate_lookup_ofproto(ofproto->backer, &flow, NULL) != ofproto) {
            continue;
        }

        if (verbosity) {
            odp_format_ufid(&f.ufid, &ds);
            ds_put_cstr(&ds, " ");
        }
        odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
                        &portno_names, &ds, verbosity);
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(&f.stats, &ds);
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, f.actions, f.actions_len);
        ds_put_char(&ds, '\n');
    }
    dpif_flow_dump_thread_destroy(flow_dump_thread);
    error = dpif_flow_dump_destroy(flow_dump);

    if (error) {
        ds_clear(&ds);
        ds_put_format(&ds, "dpif/dump_flows failed: %s", ovs_strerror(errno));
        unixctl_command_reply_error(conn, ds_cstr(&ds));
    } else {
        unixctl_command_reply(conn, ds_cstr(&ds));
    }
    odp_portno_names_destroy(&portno_names);
    hmap_destroy(&portno_names);
    ds_destroy(&ds);
}
static void
ofproto_revalidate_all_backers(void)
{
    const struct shash_node **backers;
    int i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        struct dpif_backer *backer = backers[i]->data;
        backer->need_revalidate = REV_RECONFIGURE;
    }
    free(backers);
}
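
/* Implements the "ofproto/tnl-push-pop" unixctl command, which turns datapath
 * tunnel push-pop support on or off and triggers revalidation. */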
static void
disable_tnl_push_pop(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    if (!strcasecmp(argv[1], "off")) {
        ofproto_use_tnl_push_pop = false;
        unixctl_command_reply(conn, "Tunnel push-pop off");
        ofproto_revalidate_all_backers();
    } else if (!strcasecmp(argv[1], "on")) {
        ofproto_use_tnl_push_pop = true;
        unixctl_command_reply(conn, "Tunnel push-pop on");
        ofproto_revalidate_all_backers();
    } else {
        unixctl_command_reply_error(conn, "Invalid argument");
    }
}
static void
disable_datapath_truncate(struct unixctl_conn *conn OVS_UNUSED,
                          int argc OVS_UNUSED,
                          const char *argv[] OVS_UNUSED,
                          void *aux OVS_UNUSED)
{
    const struct shash_node **backers;
    int i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        struct dpif_backer *backer = backers[i]->data;
        backer->support.trunc = false;
    }
    free(backers);
    unixctl_command_reply(conn, "Datapath truncate action disabled");
}
static void
ofproto_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_mcast_snooping_flush, NULL);
    unixctl_command_register("mdb/show", "bridge", 1, 1,
                             ofproto_unixctl_mcast_snooping_show, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
                             NULL);
    unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
                             ofproto_unixctl_dpif_dump_flows, NULL);
    unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
                             disable_tnl_push_pop, NULL);
    unixctl_command_register("dpif/disable-truncate", "", 0, 0,
                             disable_datapath_truncate, NULL);
}
static odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
    const struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
    return ofport ? ofport->odp_port : ODPP_NONE;
}
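
/* Returns the ofport that 'backer' maps datapath port 'odp_port' to, or NULL
 * if there is none. */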
struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    ovs_rwlock_rdlock(&backer->odp_to_ofport_lock);
    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            return port;
        }
    }
    ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
    return NULL;
}
static ofp_port_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
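
/* Adds a hidden flow with the given 'match', 'priority' and 'ofpacts' to the
 * internal table and returns the resulting rule in '*rulep'.  Returns 0 on
 * success, otherwise an OpenFlow error code. */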
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
                               const struct match *match, int priority,
                               uint16_t idle_timeout,
                               const struct ofpbuf *ofpacts,
                               struct rule **rulep)
{
    struct ofputil_flow_mod fm;
    struct rule_dpif *rule;
    int error;

    fm = (struct ofputil_flow_mod) {
        .buffer_id = UINT32_MAX,
        .match = *match,
        .priority = priority,
        .table_id = TBL_INTERNAL,
        .command = OFPFC_ADD,
        .idle_timeout = idle_timeout,
        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
        .ofpacts = ofpacts->data,
        .ofpacts_len = ofpacts->size,
    };

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
                    ofperr_to_string(error));
        *rulep = NULL;
        return error;
    }

    rule = rule_dpif_lookup_in_table(ofproto,
                                     ofproto_dpif_get_tables_version(ofproto),
                                     TBL_INTERNAL, &fm.match.flow,
                                     &fm.match.wc);
    if (rule) {
        *rulep = &rule->up;
    } else {
        OVS_NOT_REACHED();
    }
    return 0;
}
int
ofproto_dpif_delete_internal_flow(struct ofproto_dpif *ofproto,
                                  struct match *match, int priority)
{
    struct ofputil_flow_mod fm;
    int error;

    fm = (struct ofputil_flow_mod) {
        .buffer_id = UINT32_MAX,
        .match = *match,
        .priority = priority,
        .table_id = TBL_INTERNAL,
        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
        .command = OFPFC_DELETE_STRICT,
    };

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
                    ofperr_to_string(error));
        return error;
    }

    return 0;
}
const struct ofproto_class ofproto_dpif_class = {

    NULL,                       /* get_memory_usage. */
    type_get_memory_usage,

    port_is_lacp_current,
    port_get_lacp_stats,
    NULL,                       /* rule_choose_table */

    NULL,                       /* rule_delete */

    packet_xlate_revert,

    aa_vlan_get_queue_size,

    get_stp_port_status,

    get_rstp_port_status,

    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,

    set_mcast_snooping_port,
    NULL,                       /* meter_get_features */
    NULL,                       /* meter_set */
    NULL,                       /* meter_get */
    NULL,                       /* meter_del */
    group_alloc,                /* group_alloc */
    group_construct,            /* group_construct */
    group_destruct,             /* group_destruct */
    group_dealloc,              /* group_dealloc */
    NULL,                       /* group_modify */
    group_get_stats,            /* group_get_stats */
    get_datapath_version,       /* get_datapath_version */
    ct_flush,                   /* ct_flush */