/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
23 #include "byte-order.h"
24 #include "connectivity.h"
29 #include "fail-open.h"
30 #include "guarded-list.h"
34 #include "mac-learning.h"
35 #include "mcast-snooping.h"
36 #include "multipath.h"
37 #include "netdev-vport.h"
42 #include "odp-execute.h"
43 #include "ofproto/ofproto-dpif.h"
44 #include "ofproto/ofproto-provider.h"
45 #include "ofproto-dpif-ipfix.h"
46 #include "ofproto-dpif-mirror.h"
47 #include "ofproto-dpif-monitor.h"
48 #include "ofproto-dpif-rid.h"
49 #include "ofproto-dpif-sflow.h"
50 #include "ofproto-dpif-trace.h"
51 #include "ofproto-dpif-upcall.h"
52 #include "ofproto-dpif-xlate.h"
53 #include "ofproto-dpif-xlate-cache.h"
54 #include "openvswitch/ofp-actions.h"
55 #include "openvswitch/dynamic-string.h"
56 #include "openvswitch/meta-flow.h"
57 #include "openvswitch/ofp-print.h"
58 #include "openvswitch/ofpbuf.h"
59 #include "openvswitch/uuid.h"
60 #include "openvswitch/vlog.h"
63 #include "ovs-router.h"
64 #include "openvswitch/poll-loop.h"
70 #include "unaligned.h"
74 #include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(packet_in_overflow);

static void rule_get_stats(struct rule *, uint64_t *packets,
                           uint64_t *bytes, long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *, long long now);
struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    struct ovs_list ports;      /* Contains "struct ofport_dpif"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    uint16_t qinq_ethtype;
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    unsigned long *cvlans;
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    bool protected;             /* Protected port mode. */

    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
};
static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static void bundle_flush_macs(struct ofbundle *, bool);
static void bundle_move(struct ofbundle *, struct ofbundle *);

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static void rstp_run(struct ofproto_dpif *ofproto);
static void set_rstp_port(struct ofport *,
                          const struct ofproto_port_rstp_settings *);
struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;
    odp_port_t odp_port;

    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct ovs_list bundle_node; /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    struct lldp *lldp;          /* LLDP, if any. */
    bool may_enable;            /* May be enabled in bonds. */
    bool is_tunnel;             /* This port is a tunnel. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct ofport_dpif *peer;   /* Peer if patch port. */

    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    /* Rapid Spanning Tree. */
    struct rstp_port *rstp_port; /* Rapid Spanning Tree Protocol, if any. */
    enum rstp_state rstp_state; /* Always RSTP_DISABLED if RSTP not in use. */

    /* Queue to DSCP mapping. */
    struct ofproto_port_queue *qdscp;
};
static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                       ofp_port_t);
static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                       odp_port_t);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
static void ofport_update_peer(struct ofport_dpif *);
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_rstp);
COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs_by_name =
    HMAP_INITIALIZER(&all_ofproto_dpifs_by_name);

/* All existing ofproto_dpif instances, indexed by ->uuid. */
static struct hmap all_ofproto_dpifs_by_uuid =
    HMAP_INITIALIZER(&all_ofproto_dpifs_by_uuid);

static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);

static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
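
/* Added commentary (not in the original file): both cast helpers above rely
 * on the standard container-of idiom -- 'struct ofport_dpif' embeds its
 * generic base as a member named 'up', so a pointer to the base can be
 * converted back to the derived structure by subtracting the member offset:
 *
 *     struct ofport *base = &port->up;
 *     struct ofport_dpif *derived = ofport_dpif_cast(base);
 *     ovs_assert(derived == port);
 *
 * ofproto_dpif_cast() additionally asserts that the ofproto really belongs
 * to the dpif class before downcasting. */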
/* Initialize 'ofm' for a learn action.  If the rule already existed, a
 * reference to that rule is taken, otherwise a new rule is created.  'ofm'
 * keeps the rule reference in both cases. */
enum ofperr
ofproto_dpif_flow_mod_init_for_learn(struct ofproto_dpif *ofproto,
                                     const struct ofputil_flow_mod *fm,
                                     struct ofproto_flow_mod *ofm)
{
    /* This will not take the global 'ofproto_mutex'. */
    return ofproto_flow_mod_init_for_learn(&ofproto->up, fm, ofm);
}
/* Appends 'am' to the queue of asynchronous messages to be sent to the
 * controller.  Takes ownership of 'am' and any data it points to. */
void
ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
                            struct ofproto_async_msg *am)
{
    if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
        COVERAGE_INC(packet_in_overflow);
        ofproto_async_msg_free(am);
    }

    /* Wakes up main thread for packet-in I/O. */
    seq_change(ofproto->ams_seq);
}
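
/* Added commentary (not in the original file): messages queued here are
 * drained on the main thread with the matching guarded-list pattern, as
 * run() and destruct() below do:
 *
 *     struct ovs_list ams;
 *     struct ofproto_async_msg *am;
 *
 *     guarded_list_pop_all(&ofproto->ams, &ams);
 *     LIST_FOR_EACH_POP (am, list_node, &ams) {
 *         connmgr_send_async_msg(ofproto->up.connmgr, am);
 *         ofproto_async_msg_free(am);
 *     }
 *
 * The 1024-entry limit above simply drops (and counts) messages when the
 * consumer falls behind. */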
/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }

    ofproto_unixctl_init();
    ofproto_dpif_trace_init();
}
265 enumerate_types(struct sset
*types
)
267 dp_enumerate_types(types
);
271 enumerate_names(const char *type
, struct sset
*names
)
273 struct ofproto_dpif
*ofproto
;
276 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
277 &all_ofproto_dpifs_by_name
) {
278 if (strcmp(type
, ofproto
->up
.type
)) {
281 sset_add(names
, ofproto
->up
.name
);
288 del(const char *type
, const char *name
)
293 error
= dpif_open(name
, type
, &dpif
);
295 error
= dpif_delete(dpif
);
302 port_open_type(const char *datapath_type
, const char *port_type
)
304 return dpif_port_open_type(datapath_type
, port_type
);
307 /* Type functions. */
309 static void process_dpif_port_changes(struct dpif_backer
*);
310 static void process_dpif_all_ports_changed(struct dpif_backer
*);
311 static void process_dpif_port_change(struct dpif_backer
*,
312 const char *devname
);
313 static void process_dpif_port_error(struct dpif_backer
*, int error
);
315 static struct ofproto_dpif
*
316 lookup_ofproto_dpif_by_port_name(const char *name
)
318 struct ofproto_dpif
*ofproto
;
320 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
321 &all_ofproto_dpifs_by_name
) {
322 if (sset_contains(&ofproto
->ports
, name
)) {
331 type_run(const char *type
)
333 struct dpif_backer
*backer
;
335 backer
= shash_find_data(&all_dpif_backers
, type
);
337 /* This is not necessarily a problem, since backers are only
338 * created on demand. */
342 if (dpif_run(backer
->dpif
)) {
343 backer
->need_revalidate
= REV_RECONFIGURE
;
346 udpif_run(backer
->udpif
);
348 /* If vswitchd started with other_config:flow_restore_wait set as "true",
349 * and the configuration has now changed to "false", enable receiving
350 * packets from the datapath. */
351 if (!backer
->recv_set_enable
&& !ofproto_get_flow_restore_wait()) {
354 backer
->recv_set_enable
= true;
356 error
= dpif_recv_set(backer
->dpif
, backer
->recv_set_enable
);
358 VLOG_ERR("Failed to enable receiving packets in dpif.");
361 dpif_flow_flush(backer
->dpif
);
362 backer
->need_revalidate
= REV_RECONFIGURE
;
365 if (backer
->recv_set_enable
) {
366 udpif_set_threads(backer
->udpif
, n_handlers
, n_revalidators
);
369 if (backer
->need_revalidate
) {
370 struct ofproto_dpif
*ofproto
;
371 struct simap_node
*node
;
372 struct simap tmp_backers
;
374 /* Handle tunnel garbage collection. */
375 simap_init(&tmp_backers
);
376 simap_swap(&backer
->tnl_backers
, &tmp_backers
);
378 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
379 &all_ofproto_dpifs_by_name
) {
380 struct ofport_dpif
*iter
;
382 if (backer
!= ofproto
->backer
) {
386 HMAP_FOR_EACH (iter
, up
.hmap_node
, &ofproto
->up
.ports
) {
387 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
389 odp_port_t old_odp_port
;
391 if (!iter
->is_tunnel
) {
395 dp_port
= netdev_vport_get_dpif_port(iter
->up
.netdev
,
396 namebuf
, sizeof namebuf
);
397 old_odp_port
= iter
->odp_port
;
398 node
= simap_find(&tmp_backers
, dp_port
);
400 simap_put(&backer
->tnl_backers
, dp_port
, node
->data
);
401 simap_delete(&tmp_backers
, node
);
402 node
= simap_find(&backer
->tnl_backers
, dp_port
);
404 node
= simap_find(&backer
->tnl_backers
, dp_port
);
406 odp_port_t odp_port
= ODPP_NONE
;
408 if (!dpif_port_add(backer
->dpif
, iter
->up
.netdev
,
410 simap_put(&backer
->tnl_backers
, dp_port
,
411 odp_to_u32(odp_port
));
412 node
= simap_find(&backer
->tnl_backers
, dp_port
);
417 iter
->odp_port
= node
? u32_to_odp(node
->data
) : ODPP_NONE
;
418 if (tnl_port_reconfigure(iter
, iter
->up
.netdev
,
419 iter
->odp_port
, old_odp_port
,
420 ovs_native_tunneling_is_on(ofproto
), dp_port
)) {
421 backer
->need_revalidate
= REV_RECONFIGURE
;
426 SIMAP_FOR_EACH (node
, &tmp_backers
) {
427 dpif_port_del(backer
->dpif
, u32_to_odp(node
->data
), false);
429 simap_destroy(&tmp_backers
);
431 switch (backer
->need_revalidate
) {
432 case REV_RECONFIGURE
: COVERAGE_INC(rev_reconfigure
); break;
433 case REV_STP
: COVERAGE_INC(rev_stp
); break;
434 case REV_RSTP
: COVERAGE_INC(rev_rstp
); break;
435 case REV_BOND
: COVERAGE_INC(rev_bond
); break;
436 case REV_PORT_TOGGLED
: COVERAGE_INC(rev_port_toggled
); break;
437 case REV_FLOW_TABLE
: COVERAGE_INC(rev_flow_table
); break;
438 case REV_MAC_LEARNING
: COVERAGE_INC(rev_mac_learning
); break;
439 case REV_MCAST_SNOOPING
: COVERAGE_INC(rev_mcast_snooping
); break;
441 backer
->need_revalidate
= 0;
444 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
445 &all_ofproto_dpifs_by_name
) {
446 struct ofport_dpif
*ofport
;
447 struct ofbundle
*bundle
;
449 if (ofproto
->backer
!= backer
) {
453 xlate_ofproto_set(ofproto
, ofproto
->up
.name
,
454 ofproto
->backer
->dpif
, ofproto
->ml
,
455 ofproto
->stp
, ofproto
->rstp
, ofproto
->ms
,
456 ofproto
->mbridge
, ofproto
->sflow
, ofproto
->ipfix
,
458 ofproto
->up
.forward_bpdu
,
459 connmgr_has_in_band(ofproto
->up
.connmgr
),
460 &ofproto
->backer
->rt_support
);
462 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
463 xlate_bundle_set(ofproto
, bundle
, bundle
->name
,
464 bundle
->vlan_mode
, bundle
->qinq_ethtype
,
465 bundle
->vlan
, bundle
->trunks
, bundle
->cvlans
,
466 bundle
->use_priority_tags
,
467 bundle
->bond
, bundle
->lacp
,
468 bundle
->floodable
, bundle
->protected);
471 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
472 int stp_port
= ofport
->stp_port
473 ? stp_port_no(ofport
->stp_port
)
475 xlate_ofport_set(ofproto
, ofport
->bundle
, ofport
,
476 ofport
->up
.ofp_port
, ofport
->odp_port
,
477 ofport
->up
.netdev
, ofport
->cfm
, ofport
->bfd
,
478 ofport
->lldp
, ofport
->peer
, stp_port
,
479 ofport
->rstp_port
, ofport
->qdscp
,
480 ofport
->n_qdscp
, ofport
->up
.pp
.config
,
481 ofport
->up
.pp
.state
, ofport
->is_tunnel
,
487 udpif_revalidate(backer
->udpif
);
490 process_dpif_port_changes(backer
);
495 /* Check for and handle port changes in 'backer''s dpif. */
497 process_dpif_port_changes(struct dpif_backer
*backer
)
503 error
= dpif_port_poll(backer
->dpif
, &devname
);
509 process_dpif_all_ports_changed(backer
);
513 process_dpif_port_change(backer
, devname
);
518 process_dpif_port_error(backer
, error
);
525 process_dpif_all_ports_changed(struct dpif_backer
*backer
)
527 struct ofproto_dpif
*ofproto
;
528 struct dpif_port dpif_port
;
529 struct dpif_port_dump dump
;
530 struct sset devnames
;
533 sset_init(&devnames
);
534 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
535 &all_ofproto_dpifs_by_name
) {
536 if (ofproto
->backer
== backer
) {
537 struct ofport
*ofport
;
539 HMAP_FOR_EACH (ofport
, hmap_node
, &ofproto
->up
.ports
) {
540 sset_add(&devnames
, netdev_get_name(ofport
->netdev
));
544 DPIF_PORT_FOR_EACH (&dpif_port
, &dump
, backer
->dpif
) {
545 sset_add(&devnames
, dpif_port
.name
);
548 SSET_FOR_EACH (devname
, &devnames
) {
549 process_dpif_port_change(backer
, devname
);
551 sset_destroy(&devnames
);
555 process_dpif_port_change(struct dpif_backer
*backer
, const char *devname
)
557 struct ofproto_dpif
*ofproto
;
558 struct dpif_port port
;
560 /* Don't report on the datapath's device. */
561 if (!strcmp(devname
, dpif_base_name(backer
->dpif
))) {
565 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
566 &all_ofproto_dpifs_by_name
) {
567 if (simap_contains(&ofproto
->backer
->tnl_backers
, devname
)) {
572 ofproto
= lookup_ofproto_dpif_by_port_name(devname
);
573 if (dpif_port_query_by_name(backer
->dpif
, devname
, &port
)) {
574 /* The port was removed. If we know the datapath,
575 * report it through poll_set(). If we don't, it may be
576 * notifying us of a removal we initiated, so ignore it.
577 * If there's a pending ENOBUFS, let it stand, since
578 * everything will be reevaluated. */
579 if (ofproto
&& ofproto
->port_poll_errno
!= ENOBUFS
) {
580 sset_add(&ofproto
->port_poll_set
, devname
);
581 ofproto
->port_poll_errno
= 0;
583 } else if (!ofproto
) {
584 /* The port was added, but we don't know with which
585 * ofproto we should associate it. Delete it. */
586 dpif_port_del(backer
->dpif
, port
.port_no
, false);
588 struct ofport_dpif
*ofport
;
590 ofport
= ofport_dpif_cast(shash_find_data(
591 &ofproto
->up
.port_by_name
, devname
));
593 && ofport
->odp_port
!= port
.port_no
594 && !odp_port_to_ofport(backer
, port
.port_no
))
596 /* 'ofport''s datapath port number has changed from
597 * 'ofport->odp_port' to 'port.port_no'. Update our internal data
598 * structures to match. */
599 ovs_rwlock_wrlock(&backer
->odp_to_ofport_lock
);
600 hmap_remove(&backer
->odp_to_ofport_map
, &ofport
->odp_port_node
);
601 ofport
->odp_port
= port
.port_no
;
602 hmap_insert(&backer
->odp_to_ofport_map
, &ofport
->odp_port_node
,
603 hash_odp_port(port
.port_no
));
604 ovs_rwlock_unlock(&backer
->odp_to_ofport_lock
);
605 backer
->need_revalidate
= REV_RECONFIGURE
;
608 dpif_port_destroy(&port
);
611 /* Propagate 'error' to all ofprotos based on 'backer'. */
613 process_dpif_port_error(struct dpif_backer
*backer
, int error
)
615 struct ofproto_dpif
*ofproto
;
617 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
618 &all_ofproto_dpifs_by_name
) {
619 if (ofproto
->backer
== backer
) {
620 sset_clear(&ofproto
->port_poll_set
);
621 ofproto
->port_poll_errno
= error
;
627 type_wait(const char *type
)
629 struct dpif_backer
*backer
;
631 backer
= shash_find_data(&all_dpif_backers
, type
);
633 /* This is not necessarily a problem, since backers are only
634 * created on demand. */
638 dpif_wait(backer
->dpif
);
641 /* Basic life-cycle. */
643 static int add_internal_flows(struct ofproto_dpif
*);
645 static struct ofproto
*
648 struct ofproto_dpif
*ofproto
= xzalloc(sizeof *ofproto
);
653 dealloc(struct ofproto
*ofproto_
)
655 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
660 close_dpif_backer(struct dpif_backer
*backer
, bool del
)
662 struct simap_node
*node
;
664 ovs_assert(backer
->refcount
> 0);
666 if (--backer
->refcount
) {
670 udpif_destroy(backer
->udpif
);
672 SIMAP_FOR_EACH (node
, &backer
->tnl_backers
) {
673 dpif_port_del(backer
->dpif
, u32_to_odp(node
->data
), false);
675 simap_destroy(&backer
->tnl_backers
);
676 ovs_rwlock_destroy(&backer
->odp_to_ofport_lock
);
677 hmap_destroy(&backer
->odp_to_ofport_map
);
678 shash_find_and_delete(&all_dpif_backers
, backer
->type
);
680 free(backer
->dp_version_string
);
682 dpif_delete(backer
->dpif
);
684 dpif_close(backer
->dpif
);
685 id_pool_destroy(backer
->meter_ids
);
689 /* Datapath port slated for removal from datapath. */
691 struct ovs_list list_node
;
695 static void check_support(struct dpif_backer
*backer
);
698 open_dpif_backer(const char *type
, struct dpif_backer
**backerp
)
700 struct dpif_backer
*backer
;
701 struct dpif_port_dump port_dump
;
702 struct dpif_port port
;
703 struct shash_node
*node
;
704 struct ovs_list garbage_list
;
705 struct odp_garbage
*garbage
;
712 backer
= shash_find_data(&all_dpif_backers
, type
);
719 backer_name
= xasprintf("ovs-%s", type
);
721 /* Remove any existing datapaths, since we assume we're the only
722 * userspace controlling the datapath. */
724 dp_enumerate_names(type
, &names
);
725 SSET_FOR_EACH(name
, &names
) {
726 struct dpif
*old_dpif
;
728 /* Don't remove our backer if it exists. */
729 if (!strcmp(name
, backer_name
)) {
733 if (dpif_open(name
, type
, &old_dpif
)) {
734 VLOG_WARN("couldn't open old datapath %s to remove it", name
);
736 dpif_delete(old_dpif
);
737 dpif_close(old_dpif
);
740 sset_destroy(&names
);
742 backer
= xmalloc(sizeof *backer
);
744 error
= dpif_create_and_open(backer_name
, type
, &backer
->dpif
);
747 VLOG_ERR("failed to open datapath of type %s: %s", type
,
748 ovs_strerror(error
));
752 backer
->udpif
= udpif_create(backer
, backer
->dpif
);
754 backer
->type
= xstrdup(type
);
755 backer
->refcount
= 1;
756 hmap_init(&backer
->odp_to_ofport_map
);
757 ovs_rwlock_init(&backer
->odp_to_ofport_lock
);
758 backer
->need_revalidate
= 0;
759 simap_init(&backer
->tnl_backers
);
760 backer
->recv_set_enable
= !ofproto_get_flow_restore_wait();
763 if (backer
->recv_set_enable
) {
764 dpif_flow_flush(backer
->dpif
);
767 /* Loop through the ports already on the datapath and remove any
768 * that we don't need anymore. */
769 ovs_list_init(&garbage_list
);
770 dpif_port_dump_start(&port_dump
, backer
->dpif
);
771 while (dpif_port_dump_next(&port_dump
, &port
)) {
772 node
= shash_find(&init_ofp_ports
, port
.name
);
773 if (!node
&& strcmp(port
.name
, dpif_base_name(backer
->dpif
))) {
774 garbage
= xmalloc(sizeof *garbage
);
775 garbage
->odp_port
= port
.port_no
;
776 ovs_list_push_front(&garbage_list
, &garbage
->list_node
);
779 dpif_port_dump_done(&port_dump
);
781 LIST_FOR_EACH_POP (garbage
, list_node
, &garbage_list
) {
782 dpif_port_del(backer
->dpif
, garbage
->odp_port
, false);
786 shash_add(&all_dpif_backers
, type
, backer
);
788 check_support(backer
);
789 atomic_count_init(&backer
->tnl_count
, 0);
791 error
= dpif_recv_set(backer
->dpif
, backer
->recv_set_enable
);
793 VLOG_ERR("failed to listen on datapath of type %s: %s",
794 type
, ovs_strerror(error
));
795 close_dpif_backer(backer
, false);
799 if (backer
->recv_set_enable
) {
800 udpif_set_threads(backer
->udpif
, n_handlers
, n_revalidators
);
803 backer
->dp_version_string
= dpif_get_dp_version(backer
->dpif
);
    /* Manage Datapath meter IDs if supported. */
    struct ofputil_meter_features features;
    dpif_meter_get_features(backer->dpif, &features);
    if (features.max_meters) {
        backer->meter_ids = id_pool_create(0, features.max_meters);
    } else {
        backer->meter_ids = NULL;
    }
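
    /* Illustrative sketch (not part of the original file): once 'meter_ids'
     * is set up, datapath meter IDs are expected to be handed out with the
     * id-pool API, e.g.:
     *
     *     uint32_t meter_id;
     *     if (backer->meter_ids
     *         && id_pool_alloc_id(backer->meter_ids, &meter_id)) {
     *         ...program the datapath meter, and later...
     *         id_pool_free_id(backer->meter_ids, meter_id);
     *     }
     *
     * id_pool_alloc_id() returns false once all 'features.max_meters' IDs
     * are in use. */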
    /* Make a pristine snapshot of 'support' into 'boottime_support'.
     * 'boottime_support' can be checked to prevent 'support' from being
     * changed beyond the datapath capabilities.  In case 'support' is
     * changed by the user, 'boottime_support' can be used to restore it. */
    backer->bt_support = backer->rt_support;
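
    /* Hypothetical illustration (not in the original source): a later
     * configuration change could be clamped against this boot-time snapshot,
     * e.g.:
     *
     *     if (requested.trunc && !backer->bt_support.trunc) {
     *         requested.trunc = false;
     *     }
     *
     * so that overrides can only disable, never enable, a feature that the
     * datapath lacked at boot. */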
824 ovs_native_tunneling_is_on(struct ofproto_dpif
*ofproto
)
826 return ofproto_use_tnl_push_pop
827 && ofproto
->backer
->rt_support
.tnl_push_pop
828 && atomic_count_get(&ofproto
->backer
->tnl_count
);
/* Tests whether 'backer''s datapath supports recirculation.  Only newer
 * datapaths support OVS_KEY_ATTR_RECIRC_ID in keys.  We need to disable some
 * features on older datapaths that don't support this feature.
 *
 * Returns false if 'backer' definitely does not support recirculation, true
 * if it seems to support recirculation or if at least the error we get is
 * ambiguous. */
839 check_recirc(struct dpif_backer
*backer
)
842 struct odputil_keybuf keybuf
;
845 struct odp_flow_key_parms odp_parms
= {
852 memset(&flow
, 0, sizeof flow
);
856 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
857 odp_flow_key_from_flow(&odp_parms
, &key
);
858 enable_recirc
= dpif_probe_feature(backer
->dpif
, "recirculation", &key
,
862 VLOG_INFO("%s: Datapath supports recirculation",
863 dpif_name(backer
->dpif
));
865 VLOG_INFO("%s: Datapath does not support recirculation",
866 dpif_name(backer
->dpif
));
869 return enable_recirc
;
872 /* Tests whether 'dpif' supports unique flow ids. We can skip serializing
873 * some flow attributes for datapaths that support this feature.
875 * Returns true if 'dpif' supports UFID for flow operations.
876 * Returns false if 'dpif' does not support UFID. */
878 check_ufid(struct dpif_backer
*backer
)
881 struct odputil_keybuf keybuf
;
885 struct odp_flow_key_parms odp_parms
= {
889 memset(&flow
, 0, sizeof flow
);
890 flow
.dl_type
= htons(0x1234);
892 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
893 odp_flow_key_from_flow(&odp_parms
, &key
);
894 dpif_flow_hash(backer
->dpif
, key
.data
, key
.size
, &ufid
);
896 enable_ufid
= dpif_probe_feature(backer
->dpif
, "UFID", &key
, NULL
, &ufid
);
899 VLOG_INFO("%s: Datapath supports unique flow ids",
900 dpif_name(backer
->dpif
));
902 VLOG_INFO("%s: Datapath does not support unique flow ids",
903 dpif_name(backer
->dpif
));
908 /* Tests number of 802.1q VLAN headers supported by 'backer''s datapath.
910 * Returns the number of elements in a struct flow's vlan
911 * if the datapath supports at least that many VLAN headers. */
913 check_max_vlan_headers(struct dpif_backer
*backer
)
916 struct odp_flow_key_parms odp_parms
= {
922 memset(&flow
, 0, sizeof flow
);
923 flow
.dl_type
= htons(ETH_TYPE_IP
);
924 for (n
= 0; n
< FLOW_MAX_VLAN_HEADERS
; n
++) {
925 struct odputil_keybuf keybuf
;
928 flow_push_vlan_uninit(&flow
, NULL
);
929 flow
.vlans
[0].tpid
= htons(ETH_TYPE_VLAN
);
930 flow
.vlans
[0].tci
= htons(1) | htons(VLAN_CFI
);
932 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
933 odp_flow_key_from_flow(&odp_parms
, &key
);
934 if (!dpif_probe_feature(backer
->dpif
, "VLAN", &key
, NULL
, NULL
)) {
939 VLOG_INFO("%s: VLAN header stack length probed as %d",
940 dpif_name(backer
->dpif
), n
);
943 /* Tests the MPLS label stack depth supported by 'backer''s datapath.
945 * Returns the number of elements in a struct flow's mpls_lse field
946 * if the datapath supports at least that many entries in an
948 * Otherwise returns the number of MPLS push actions supported by
951 check_max_mpls_depth(struct dpif_backer
*backer
)
956 for (n
= 0; n
< FLOW_MAX_MPLS_LABELS
; n
++) {
957 struct odputil_keybuf keybuf
;
959 struct odp_flow_key_parms odp_parms
= {
963 memset(&flow
, 0, sizeof flow
);
964 flow
.dl_type
= htons(ETH_TYPE_MPLS
);
965 flow_set_mpls_bos(&flow
, n
, 1);
967 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
968 odp_flow_key_from_flow(&odp_parms
, &key
);
969 if (!dpif_probe_feature(backer
->dpif
, "MPLS", &key
, NULL
, NULL
)) {
974 VLOG_INFO("%s: MPLS label stack length probed as %d",
975 dpif_name(backer
->dpif
), n
);
static void
add_sample_actions(struct ofpbuf *actions, int nesting)
{
    if (nesting == 0) {
        nl_msg_put_odp_port(actions, OVS_ACTION_ATTR_OUTPUT, u32_to_odp(1));
        return;
    }

    size_t start, actions_start;

    start = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SAMPLE);
    actions_start = nl_msg_start_nested(actions, OVS_SAMPLE_ATTR_ACTIONS);
    add_sample_actions(actions, nesting - 1);
    nl_msg_end_nested(actions, actions_start);
    nl_msg_put_u32(actions, OVS_SAMPLE_ATTR_PROBABILITY, UINT32_MAX);
    nl_msg_end_nested(actions, start);
}
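
/* For reference (added commentary, not in the original file): with
 * nesting == 2 the recursion above emits a netlink attribute tree shaped
 * like
 *
 *     sample(actions(sample(actions(output(1)),
 *                           probability(UINT32_MAX))),
 *            probability(UINT32_MAX))
 *
 * i.e. each level wraps one OVS_ACTION_ATTR_SAMPLE around the next, and the
 * innermost level is a plain output to port 1. */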
997 /* Tests the nested sample actions levels supported by 'backer''s datapath.
999 * Returns the number of nested sample actions accepted by the datapath. */
1001 check_max_sample_nesting(struct dpif_backer
*backer
)
1003 struct odputil_keybuf keybuf
;
1008 struct odp_flow_key_parms odp_parms
= {
1012 memset(&flow
, 0, sizeof flow
);
1013 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
1014 odp_flow_key_from_flow(&odp_parms
, &key
);
1016 /* OVS datapath has always supported at least 3 nested levels. */
1017 for (n
= 3; n
< FLOW_MAX_SAMPLE_NESTING
; n
++) {
1018 struct ofpbuf actions
;
1021 ofpbuf_init(&actions
, 300);
1022 add_sample_actions(&actions
, n
);
1023 ok
= dpif_probe_feature(backer
->dpif
, "Sample action nesting", &key
,
1025 ofpbuf_uninit(&actions
);
1031 VLOG_INFO("%s: Max sample nesting level probed as %d",
1032 dpif_name(backer
->dpif
), n
);
1036 /* Tests whether 'backer''s datapath supports masked data in
1037 * OVS_ACTION_ATTR_SET actions. We need to disable some features on older
1038 * datapaths that don't support this feature. */
1040 check_masked_set_action(struct dpif_backer
*backer
)
1042 struct eth_header
*eth
;
1043 struct ofpbuf actions
;
1044 struct dpif_execute execute
;
1045 struct dp_packet packet
;
1048 struct ovs_key_ethernet key
, mask
;
1050 /* Compose a set action that will cause an EINVAL error on older
1051 * datapaths that don't support masked set actions.
1052 * Avoid using a full mask, as it could be translated to a non-masked
1053 * set action instead. */
1054 ofpbuf_init(&actions
, 64);
1055 memset(&key
, 0x53, sizeof key
);
1056 memset(&mask
, 0x7f, sizeof mask
);
1057 commit_masked_set_action(&actions
, OVS_KEY_ATTR_ETHERNET
, &key
, &mask
,
1060 /* Compose a dummy ethernet packet. */
1061 dp_packet_init(&packet
, ETH_HEADER_LEN
);
1062 eth
= dp_packet_put_zeros(&packet
, ETH_HEADER_LEN
);
1063 eth
->eth_type
= htons(0x1234);
1065 flow_extract(&packet
, &flow
);
1067 /* Execute the actions. On older datapaths this fails with EINVAL, on
1068 * newer datapaths it succeeds. */
1069 execute
.actions
= actions
.data
;
1070 execute
.actions_len
= actions
.size
;
1071 execute
.packet
= &packet
;
1072 execute
.flow
= &flow
;
1073 execute
.needs_help
= false;
1074 execute
.probe
= true;
1077 error
= dpif_execute(backer
->dpif
, &execute
);
1079 dp_packet_uninit(&packet
);
1080 ofpbuf_uninit(&actions
);
1083 /* Masked set action is not supported. */
1084 VLOG_INFO("%s: datapath does not support masked set action feature.",
1085 dpif_name(backer
->dpif
));
1090 /* Tests whether 'backer''s datapath supports truncation of a packet in
1091 * OVS_ACTION_ATTR_TRUNC. We need to disable some features on older
1092 * datapaths that don't support this feature. */
1094 check_trunc_action(struct dpif_backer
*backer
)
1096 struct eth_header
*eth
;
1097 struct ofpbuf actions
;
1098 struct dpif_execute execute
;
1099 struct dp_packet packet
;
1100 struct ovs_action_trunc
*trunc
;
1104 /* Compose an action with output(port:1,
1105 * max_len:OVS_ACTION_OUTPUT_MIN + 1).
1106 * This translates to one truncate action and one output action. */
1107 ofpbuf_init(&actions
, 64);
1108 trunc
= nl_msg_put_unspec_uninit(&actions
,
1109 OVS_ACTION_ATTR_TRUNC
, sizeof *trunc
);
1111 trunc
->max_len
= ETH_HEADER_LEN
+ 1;
1112 nl_msg_put_odp_port(&actions
, OVS_ACTION_ATTR_OUTPUT
, u32_to_odp(1));
1114 /* Compose a dummy Ethernet packet. */
1115 dp_packet_init(&packet
, ETH_HEADER_LEN
);
1116 eth
= dp_packet_put_zeros(&packet
, ETH_HEADER_LEN
);
1117 eth
->eth_type
= htons(0x1234);
1119 flow_extract(&packet
, &flow
);
1121 /* Execute the actions. On older datapaths this fails with EINVAL, on
1122 * newer datapaths it succeeds. */
1123 execute
.actions
= actions
.data
;
1124 execute
.actions_len
= actions
.size
;
1125 execute
.packet
= &packet
;
1126 execute
.flow
= &flow
;
1127 execute
.needs_help
= false;
1128 execute
.probe
= true;
1131 error
= dpif_execute(backer
->dpif
, &execute
);
1133 dp_packet_uninit(&packet
);
1134 ofpbuf_uninit(&actions
);
1137 VLOG_INFO("%s: Datapath does not support truncate action",
1138 dpif_name(backer
->dpif
));
1140 VLOG_INFO("%s: Datapath supports truncate action",
1141 dpif_name(backer
->dpif
));
1147 /* Tests whether 'backer''s datapath supports the clone action
1148 * OVS_ACTION_ATTR_CLONE. */
1150 check_clone(struct dpif_backer
*backer
)
1152 struct dpif_execute execute
;
1153 struct eth_header
*eth
;
1155 struct dp_packet packet
;
1156 struct ofpbuf actions
;
    /* Compose a clone action with an empty nested action list and check
     * whether the datapath can decode the message. */
1162 ofpbuf_init(&actions
, 64);
1163 clone_start
= nl_msg_start_nested(&actions
, OVS_ACTION_ATTR_CLONE
);
1164 nl_msg_end_nested(&actions
, clone_start
);
1166 /* Compose a dummy Ethernet packet. */
1167 dp_packet_init(&packet
, ETH_HEADER_LEN
);
1168 eth
= dp_packet_put_zeros(&packet
, ETH_HEADER_LEN
);
1169 eth
->eth_type
= htons(0x1234);
1171 flow_extract(&packet
, &flow
);
1173 /* Execute the actions. On older datapaths this fails with EINVAL, on
1174 * newer datapaths it succeeds. */
1175 execute
.actions
= actions
.data
;
1176 execute
.actions_len
= actions
.size
;
1177 execute
.packet
= &packet
;
1178 execute
.flow
= &flow
;
1179 execute
.needs_help
= false;
1180 execute
.probe
= true;
1183 error
= dpif_execute(backer
->dpif
, &execute
);
1185 dp_packet_uninit(&packet
);
1186 ofpbuf_uninit(&actions
);
1189 VLOG_INFO("%s: Datapath does not support clone action",
1190 dpif_name(backer
->dpif
));
1192 VLOG_INFO("%s: Datapath supports clone action",
1193 dpif_name(backer
->dpif
));
1199 /* Tests whether 'backer''s datapath supports the OVS_CT_ATTR_EVENTMASK
1200 * attribute in OVS_ACTION_ATTR_CT. */
1202 check_ct_eventmask(struct dpif_backer
*backer
)
1204 struct dpif_execute execute
;
1205 struct dp_packet packet
;
1206 struct ofpbuf actions
;
1207 struct flow flow
= {
1208 .dl_type
= CONSTANT_HTONS(ETH_TYPE_IP
),
1209 .nw_proto
= IPPROTO_UDP
,
1211 /* Use the broadcast address on the loopback address range 127/8 to
1212 * avoid hitting any real conntrack entries. We leave the UDP ports to
1213 * zeroes for the same purpose. */
1214 .nw_src
= CONSTANT_HTONL(0x7fffffff),
1215 .nw_dst
= CONSTANT_HTONL(0x7fffffff),
1220 /* Compose CT action with eventmask attribute and check if datapath can
1221 * decode the message. */
1222 ofpbuf_init(&actions
, 64);
1223 ct_start
= nl_msg_start_nested(&actions
, OVS_ACTION_ATTR_CT
);
    /* Eventmask has no effect without the commit flag, but currently the
     * datapath will accept an eventmask even without commit.  This is useful
     * as we do not want to persist the probe connection in the conntrack
     * table. */
1228 nl_msg_put_u32(&actions
, OVS_CT_ATTR_EVENTMASK
, ~0);
1229 nl_msg_end_nested(&actions
, ct_start
);
1231 /* Compose a dummy UDP packet. */
1232 dp_packet_init(&packet
, 0);
1233 flow_compose(&packet
, &flow
, NULL
, 64);
1235 /* Execute the actions. On older datapaths this fails with EINVAL, on
1236 * newer datapaths it succeeds. */
1237 execute
.actions
= actions
.data
;
1238 execute
.actions_len
= actions
.size
;
1239 execute
.packet
= &packet
;
1240 execute
.flow
= &flow
;
1241 execute
.needs_help
= false;
1242 execute
.probe
= true;
1245 error
= dpif_execute(backer
->dpif
, &execute
);
1247 dp_packet_uninit(&packet
);
1248 ofpbuf_uninit(&actions
);
1251 VLOG_INFO("%s: Datapath does not support eventmask in conntrack action",
1252 dpif_name(backer
->dpif
));
1254 VLOG_INFO("%s: Datapath supports eventmask in conntrack action",
1255 dpif_name(backer
->dpif
));
1261 /* Tests whether 'backer''s datapath supports the OVS_ACTION_ATTR_CT_CLEAR
1264 check_ct_clear(struct dpif_backer
*backer
)
1266 struct odputil_keybuf keybuf
;
1267 uint8_t actbuf
[NL_A_FLAG_SIZE
];
1268 struct ofpbuf actions
;
1273 struct odp_flow_key_parms odp_parms
= {
1278 memset(&flow
, 0, sizeof flow
);
1279 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
1280 odp_flow_key_from_flow(&odp_parms
, &key
);
1282 ofpbuf_use_stack(&actions
, &actbuf
, sizeof actbuf
);
1283 nl_msg_put_flag(&actions
, OVS_ACTION_ATTR_CT_CLEAR
);
1285 supported
= dpif_probe_feature(backer
->dpif
, "ct_clear", &key
,
1288 VLOG_INFO("%s: Datapath %s ct_clear action",
1289 dpif_name(backer
->dpif
), (supported
) ? "supports"
1290 : "does not support");
#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE, ETHTYPE)               \
static bool                                                                 \
check_##NAME(struct dpif_backer *backer)                                    \
{                                                                           \
    bool enable;                                                            \
    struct odputil_keybuf keybuf;                                           \
    struct ofpbuf key;                                                      \
    struct flow flow;                                                       \
    struct odp_flow_key_parms odp_parms = {                                 \
        .flow = &flow,                                                      \
        .support = {                                                        \
            .SUPPORT = true,                                                \
        },                                                                  \
    };                                                                      \
                                                                            \
    memset(&flow, 0, sizeof flow);                                          \
    flow.FIELD = VALUE;                                                     \
    flow.dl_type = htons(ETHTYPE);                                          \
                                                                            \
    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);                         \
    odp_flow_key_from_flow(&odp_parms, &key);                               \
    enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL, NULL);     \
                                                                            \
    if (enable) {                                                           \
        VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif));  \
    } else {                                                                \
        VLOG_INFO("%s: Datapath does not support "#NAME,                    \
                  dpif_name(backer->dpif));                                 \
    }                                                                       \
                                                                            \
    return enable;                                                          \
}

#define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD, FIELD, 1, \
                                             ETH_TYPE_IP)

CHECK_FEATURE(ct_state)
CHECK_FEATURE(ct_zone)
CHECK_FEATURE(ct_mark)
CHECK_FEATURE__(ct_label, ct_label, ct_label.u64.lo, 1, ETH_TYPE_IP)
CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, \
                CS_TRACKED|CS_SRC_NAT, ETH_TYPE_IP)
CHECK_FEATURE__(ct_orig_tuple, ct_orig_tuple, ct_nw_proto, 1, ETH_TYPE_IP)
CHECK_FEATURE__(ct_orig_tuple6, ct_orig_tuple6, ct_nw_proto, 1, ETH_TYPE_IPV6)

#undef CHECK_FEATURE
#undef CHECK_FEATURE__
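
/* Added explanatory note (not in the original file): each CHECK_FEATURE
 * invocation above stamps out one probe function.  For instance,
 * CHECK_FEATURE(ct_state) expands to roughly:
 *
 *     static bool
 *     check_ct_state(struct dpif_backer *backer)
 *     {
 *         ...
 *         memset(&flow, 0, sizeof flow);
 *         flow.ct_state = 1;
 *         flow.dl_type = htons(ETH_TYPE_IP);
 *         ...
 *         enable = dpif_probe_feature(backer->dpif, "ct_state", &key,
 *                                     NULL, NULL);
 *         ...
 *         return enable;
 *     }
 *
 * which check_support() below then calls to fill in
 * backer->rt_support.odp.ct_state. */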
static void
check_support(struct dpif_backer *backer)
{
    backer->rt_support.odp.recirc = check_recirc(backer);
    backer->rt_support.odp.max_vlan_headers = check_max_vlan_headers(backer);
    backer->rt_support.odp.max_mpls_depth = check_max_mpls_depth(backer);
    backer->rt_support.masked_set_action = check_masked_set_action(backer);
    backer->rt_support.trunc = check_trunc_action(backer);
    backer->rt_support.ufid = check_ufid(backer);
    backer->rt_support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
    backer->rt_support.clone = check_clone(backer);
    backer->rt_support.sample_nesting = check_max_sample_nesting(backer);
    backer->rt_support.ct_eventmask = check_ct_eventmask(backer);
    backer->rt_support.ct_clear = check_ct_clear(backer);

    backer->rt_support.odp.ct_state = check_ct_state(backer);
    backer->rt_support.odp.ct_zone = check_ct_zone(backer);
    backer->rt_support.odp.ct_mark = check_ct_mark(backer);
    backer->rt_support.odp.ct_label = check_ct_label(backer);

    backer->rt_support.odp.ct_state_nat = check_ct_state_nat(backer);
    backer->rt_support.odp.ct_orig_tuple = check_ct_orig_tuple(backer);
    backer->rt_support.odp.ct_orig_tuple6 = check_ct_orig_tuple6(backer);
}
1369 construct(struct ofproto
*ofproto_
)
1371 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1372 struct shash_node
*node
, *next
;
1375 /* Tunnel module can get used right after the udpif threads are running. */
1376 ofproto_tunnel_init();
1378 error
= open_dpif_backer(ofproto
->up
.type
, &ofproto
->backer
);
1383 uuid_generate(&ofproto
->uuid
);
1384 atomic_init(&ofproto
->tables_version
, OVS_VERSION_MIN
);
1385 ofproto
->netflow
= NULL
;
1386 ofproto
->sflow
= NULL
;
1387 ofproto
->ipfix
= NULL
;
1388 ofproto
->stp
= NULL
;
1389 ofproto
->rstp
= NULL
;
1390 ofproto
->dump_seq
= 0;
1391 hmap_init(&ofproto
->bundles
);
1392 ofproto
->ml
= mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME
);
1394 ofproto
->mbridge
= mbridge_create();
1395 ofproto
->has_bonded_bundles
= false;
1396 ofproto
->lacp_enabled
= false;
1397 ovs_mutex_init_adaptive(&ofproto
->stats_mutex
);
1399 guarded_list_init(&ofproto
->ams
);
1401 sset_init(&ofproto
->ports
);
1402 sset_init(&ofproto
->ghost_ports
);
1403 sset_init(&ofproto
->port_poll_set
);
1404 ofproto
->port_poll_errno
= 0;
1405 ofproto
->change_seq
= 0;
1406 ofproto
->ams_seq
= seq_create();
1407 ofproto
->ams_seqno
= seq_read(ofproto
->ams_seq
);
1410 SHASH_FOR_EACH_SAFE (node
, next
, &init_ofp_ports
) {
1411 struct iface_hint
*iface_hint
= node
->data
;
1413 if (!strcmp(iface_hint
->br_name
, ofproto
->up
.name
)) {
1414 /* Check if the datapath already has this port. */
1415 if (dpif_port_exists(ofproto
->backer
->dpif
, node
->name
)) {
1416 sset_add(&ofproto
->ports
, node
->name
);
1419 free(iface_hint
->br_name
);
1420 free(iface_hint
->br_type
);
1422 shash_delete(&init_ofp_ports
, node
);
1426 hmap_insert(&all_ofproto_dpifs_by_name
,
1427 &ofproto
->all_ofproto_dpifs_by_name_node
,
1428 hash_string(ofproto
->up
.name
, 0));
1429 hmap_insert(&all_ofproto_dpifs_by_uuid
,
1430 &ofproto
->all_ofproto_dpifs_by_uuid_node
,
1431 uuid_hash(&ofproto
->uuid
));
1432 memset(&ofproto
->stats
, 0, sizeof ofproto
->stats
);
1434 ofproto_init_tables(ofproto_
, N_TABLES
);
1435 error
= add_internal_flows(ofproto
);
1437 ofproto
->up
.tables
[TBL_INTERNAL
].flags
= OFTABLE_HIDDEN
| OFTABLE_READONLY
;
1443 add_internal_miss_flow(struct ofproto_dpif
*ofproto
, int id
,
1444 const struct ofpbuf
*ofpacts
, struct rule_dpif
**rulep
)
1450 match_init_catchall(&match
);
1451 match_set_reg(&match
, 0, id
);
1453 error
= ofproto_dpif_add_internal_flow(ofproto
, &match
, 0, 0, ofpacts
,
1455 *rulep
= error
? NULL
: rule_dpif_cast(rule
);
1461 add_internal_flows(struct ofproto_dpif
*ofproto
)
1463 struct ofpact_controller
*controller
;
1464 uint64_t ofpacts_stub
[128 / 8];
1465 struct ofpbuf ofpacts
;
1466 struct rule
*unused_rulep OVS_UNUSED
;
1471 ofpbuf_use_stack(&ofpacts
, ofpacts_stub
, sizeof ofpacts_stub
);
1474 controller
= ofpact_put_CONTROLLER(&ofpacts
);
1475 controller
->max_len
= UINT16_MAX
;
1476 controller
->controller_id
= 0;
1477 controller
->reason
= OFPR_IMPLICIT_MISS
;
1478 ofpact_finish_CONTROLLER(&ofpacts
, &controller
);
1480 error
= add_internal_miss_flow(ofproto
, id
++, &ofpacts
,
1481 &ofproto
->miss_rule
);
1486 ofpbuf_clear(&ofpacts
);
1487 error
= add_internal_miss_flow(ofproto
, id
++, &ofpacts
,
1488 &ofproto
->no_packet_in_rule
);
1493 error
= add_internal_miss_flow(ofproto
, id
++, &ofpacts
,
1494 &ofproto
->drop_frags_rule
);
1499 /* Drop any run away non-recirc rule lookups. Recirc_id has to be
1500 * zero when reaching this rule.
1502 * (priority=2), recirc_id=0, actions=drop
1504 ofpbuf_clear(&ofpacts
);
1505 match_init_catchall(&match
);
1506 match_set_recirc_id(&match
, 0);
1507 error
= ofproto_dpif_add_internal_flow(ofproto
, &match
, 2, 0, &ofpacts
,
1513 destruct(struct ofproto
*ofproto_
, bool del
)
1515 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1516 struct ofproto_async_msg
*am
;
1517 struct rule_dpif
*rule
;
1518 struct oftable
*table
;
1519 struct ovs_list ams
;
1521 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1523 xlate_remove_ofproto(ofproto
);
1526 /* Ensure that the upcall processing threads have no remaining references
1527 * to the ofproto or anything in it. */
1528 udpif_synchronize(ofproto
->backer
->udpif
);
1530 hmap_remove(&all_ofproto_dpifs_by_name
,
1531 &ofproto
->all_ofproto_dpifs_by_name_node
);
1532 hmap_remove(&all_ofproto_dpifs_by_uuid
,
1533 &ofproto
->all_ofproto_dpifs_by_uuid_node
);
1535 OFPROTO_FOR_EACH_TABLE (table
, &ofproto
->up
) {
1536 CLS_FOR_EACH (rule
, up
.cr
, &table
->cls
) {
1537 ofproto_rule_delete(&ofproto
->up
, &rule
->up
);
1540 ofproto_group_delete_all(&ofproto
->up
);
1542 guarded_list_pop_all(&ofproto
->ams
, &ams
);
1543 LIST_FOR_EACH_POP (am
, list_node
, &ams
) {
1544 ofproto_async_msg_free(am
);
1546 guarded_list_destroy(&ofproto
->ams
);
1548 recirc_free_ofproto(ofproto
, ofproto
->up
.name
);
1550 mbridge_unref(ofproto
->mbridge
);
1552 netflow_unref(ofproto
->netflow
);
1553 dpif_sflow_unref(ofproto
->sflow
);
1554 dpif_ipfix_unref(ofproto
->ipfix
);
1555 hmap_destroy(&ofproto
->bundles
);
1556 mac_learning_unref(ofproto
->ml
);
1557 mcast_snooping_unref(ofproto
->ms
);
1558 stp_unref(ofproto
->stp
);
1559 rstp_unref(ofproto
->rstp
);
1561 sset_destroy(&ofproto
->ports
);
1562 sset_destroy(&ofproto
->ghost_ports
);
1563 sset_destroy(&ofproto
->port_poll_set
);
1565 ovs_mutex_destroy(&ofproto
->stats_mutex
);
1567 seq_destroy(ofproto
->ams_seq
);
1569 close_dpif_backer(ofproto
->backer
, del
);
1573 run(struct ofproto
*ofproto_
)
1575 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1576 uint64_t new_seq
, new_dump_seq
;
1578 if (mbridge_need_revalidate(ofproto
->mbridge
)) {
1579 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1580 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
1581 mac_learning_flush(ofproto
->ml
);
1582 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
1583 mcast_snooping_mdb_flush(ofproto
->ms
);
    /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
     * flow restore.  Even though nothing is processed during flow restore,
     * all queued 'ams' will be handled immediately when flow restore
     * is completed. */
1590 ofproto
->ams_seqno
= seq_read(ofproto
->ams_seq
);
1592 /* Do not perform any periodic activity required by 'ofproto' while
1593 * waiting for flow restore to complete. */
1594 if (!ofproto_get_flow_restore_wait()) {
1595 struct ofproto_async_msg
*am
;
1596 struct ovs_list ams
;
1598 guarded_list_pop_all(&ofproto
->ams
, &ams
);
1599 LIST_FOR_EACH_POP (am
, list_node
, &ams
) {
1600 connmgr_send_async_msg(ofproto
->up
.connmgr
, am
);
1601 ofproto_async_msg_free(am
);
1605 if (ofproto
->netflow
) {
1606 netflow_run(ofproto
->netflow
);
1608 if (ofproto
->sflow
) {
1609 dpif_sflow_run(ofproto
->sflow
);
1611 if (ofproto
->ipfix
) {
1612 dpif_ipfix_run(ofproto
->ipfix
);
1615 new_seq
= seq_read(connectivity_seq_get());
1616 if (ofproto
->change_seq
!= new_seq
) {
1617 struct ofport_dpif
*ofport
;
1619 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
1623 ofproto
->change_seq
= new_seq
;
1625 if (ofproto
->lacp_enabled
|| ofproto
->has_bonded_bundles
) {
1626 struct ofbundle
*bundle
;
1628 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1635 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
1636 if (mac_learning_run(ofproto
->ml
)) {
1637 ofproto
->backer
->need_revalidate
= REV_MAC_LEARNING
;
1639 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
1641 if (mcast_snooping_run(ofproto
->ms
)) {
1642 ofproto
->backer
->need_revalidate
= REV_MCAST_SNOOPING
;
1645 new_dump_seq
= seq_read(udpif_dump_seq(ofproto
->backer
->udpif
));
1646 if (ofproto
->dump_seq
!= new_dump_seq
) {
1647 struct rule
*rule
, *next_rule
;
1648 long long now
= time_msec();
        /* We know stats are relatively fresh, so now is a good time to do some
         * periodic work. */
1652 ofproto
->dump_seq
= new_dump_seq
;
        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
1656 ovs_mutex_lock(&ofproto_mutex
);
1657 LIST_FOR_EACH_SAFE (rule
, next_rule
, expirable
,
1658 &ofproto
->up
.expirable
) {
1659 rule_expire(rule_dpif_cast(rule
), now
);
1661 ovs_mutex_unlock(&ofproto_mutex
);
1663 /* All outstanding data in existing flows has been accounted, so it's a
1664 * good time to do bond rebalancing. */
1665 if (ofproto
->has_bonded_bundles
) {
1666 struct ofbundle
*bundle
;
1668 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1670 bond_rebalance(bundle
->bond
);
1679 ofproto_dpif_wait(struct ofproto
*ofproto_
)
1681 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1683 if (ofproto_get_flow_restore_wait()) {
1687 if (ofproto
->sflow
) {
1688 dpif_sflow_wait(ofproto
->sflow
);
1690 if (ofproto
->ipfix
) {
1691 dpif_ipfix_wait(ofproto
->ipfix
);
1693 if (ofproto
->lacp_enabled
|| ofproto
->has_bonded_bundles
) {
1694 struct ofbundle
*bundle
;
1696 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1697 bundle_wait(bundle
);
1700 if (ofproto
->netflow
) {
1701 netflow_wait(ofproto
->netflow
);
1703 ovs_rwlock_rdlock(&ofproto
->ml
->rwlock
);
1704 mac_learning_wait(ofproto
->ml
);
1705 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
1706 mcast_snooping_wait(ofproto
->ms
);
1708 if (ofproto
->backer
->need_revalidate
) {
1709 poll_immediate_wake();
1712 seq_wait(udpif_dump_seq(ofproto
->backer
->udpif
), ofproto
->dump_seq
);
1713 seq_wait(ofproto
->ams_seq
, ofproto
->ams_seqno
);
1717 type_get_memory_usage(const char *type
, struct simap
*usage
)
1719 struct dpif_backer
*backer
;
1721 backer
= shash_find_data(&all_dpif_backers
, type
);
1723 udpif_get_memory_usage(backer
->udpif
, usage
);
1728 flush(struct ofproto
*ofproto_
)
1730 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1731 struct dpif_backer
*backer
= ofproto
->backer
;
1734 udpif_flush(backer
->udpif
);
1739 query_tables(struct ofproto
*ofproto
,
1740 struct ofputil_table_features
*features OVS_UNUSED
,
1741 struct ofputil_table_stats
*stats
)
1746 for (i
= 0; i
< ofproto
->n_tables
; i
++) {
1747 unsigned long missed
, matched
;
1749 atomic_read_relaxed(&ofproto
->tables
[i
].n_matched
, &matched
);
1750 atomic_read_relaxed(&ofproto
->tables
[i
].n_missed
, &missed
);
1752 stats
[i
].matched_count
= matched
;
1753 stats
[i
].lookup_count
= matched
+ missed
;
static void
set_tables_version(struct ofproto *ofproto_, ovs_version_t version)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Use memory_order_release to signify that any prior memory accesses
     * cannot be reordered to happen after this atomic store.  This makes
     * sure the new version is properly set up when the readers can read
     * this 'version' value. */
    atomic_store_explicit(&ofproto->tables_version, version,
                          memory_order_release);
    /* 'need_revalidate' can be reordered to happen before the atomic_store
     * above, but it does not matter as this variable is not accessed by
     * other threads. */
    ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
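
/* Added note (not part of the original): the release store above pairs with
 * an acquire (or stronger) load on the reader side, conceptually:
 *
 *     ovs_version_t version;
 *     atomic_read_explicit(&ofproto->tables_version, &version,
 *                          memory_order_acquire);
 *
 * so that a reader observing the new version also observes every table
 * update made before set_tables_version() was called.  The exact reader
 * lives elsewhere in the tree; this is only an illustration of the
 * pairing. */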
1775 static struct ofport
*
1778 struct ofport_dpif
*port
= xzalloc(sizeof *port
);
1783 port_dealloc(struct ofport
*port_
)
1785 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1790 port_construct(struct ofport
*port_
)
1792 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1793 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1794 const struct netdev
*netdev
= port
->up
.netdev
;
1795 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1796 const char *dp_port_name
;
1797 struct dpif_port dpif_port
;
1800 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1801 port
->bundle
= NULL
;
1805 port
->may_enable
= false;
1806 port
->stp_port
= NULL
;
1807 port
->stp_state
= STP_DISABLED
;
1808 port
->rstp_port
= NULL
;
1809 port
->rstp_state
= RSTP_DISABLED
;
1810 port
->is_tunnel
= false;
1814 port
->carrier_seq
= netdev_get_carrier_resets(netdev
);
1816 if (netdev_vport_is_patch(netdev
)) {
1817 /* By bailing out here, we don't submit the port to the sFlow module
1818 * to be considered for counter polling export. This is correct
1819 * because the patch port represents an interface that sFlow considers
1820 * to be "internal" to the switch as a whole, and therefore not a
1821 * candidate for counter polling. */
1822 port
->odp_port
= ODPP_NONE
;
1823 ofport_update_peer(port
);
1827 dp_port_name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1828 error
= dpif_port_query_by_name(ofproto
->backer
->dpif
, dp_port_name
,
1834 port
->odp_port
= dpif_port
.port_no
;
1836 if (netdev_get_tunnel_config(netdev
)) {
1837 atomic_count_inc(&ofproto
->backer
->tnl_count
);
1838 error
= tnl_port_add(port
, port
->up
.netdev
, port
->odp_port
,
1839 ovs_native_tunneling_is_on(ofproto
), dp_port_name
);
1841 atomic_count_dec(&ofproto
->backer
->tnl_count
);
1842 dpif_port_destroy(&dpif_port
);
1846 port
->is_tunnel
= true;
1848 /* Sanity-check that a mapping doesn't already exist. This
1849 * shouldn't happen for non-tunnel ports. */
1850 if (odp_port_to_ofp_port(ofproto
, port
->odp_port
) != OFPP_NONE
) {
1851 VLOG_ERR("port %s already has an OpenFlow port number",
1853 dpif_port_destroy(&dpif_port
);
1857 ovs_rwlock_wrlock(&ofproto
->backer
->odp_to_ofport_lock
);
1858 hmap_insert(&ofproto
->backer
->odp_to_ofport_map
, &port
->odp_port_node
,
1859 hash_odp_port(port
->odp_port
));
1860 ovs_rwlock_unlock(&ofproto
->backer
->odp_to_ofport_lock
);
1862 dpif_port_destroy(&dpif_port
);
1864 if (ofproto
->sflow
) {
1865 dpif_sflow_add_port(ofproto
->sflow
, port_
, port
->odp_port
);
1867 if (ofproto
->ipfix
) {
1868 dpif_ipfix_add_port(ofproto
->ipfix
, port_
, port
->odp_port
);
1875 port_destruct(struct ofport
*port_
, bool del
)
1877 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1878 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1879 const char *devname
= netdev_get_name(port
->up
.netdev
);
1880 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1881 const char *dp_port_name
;
1883 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1885 xlate_ofport_remove(port
);
1888 dp_port_name
= netdev_vport_get_dpif_port(port
->up
.netdev
, namebuf
,
1890 if (del
&& dpif_port_exists(ofproto
->backer
->dpif
, dp_port_name
)) {
1891 /* The underlying device is still there, so delete it. This
1892 * happens when the ofproto is being destroyed, since the caller
1893 * assumes that removal of attached ports will happen as part of
1895 if (!port
->is_tunnel
) {
1896 dpif_port_del(ofproto
->backer
->dpif
, port
->odp_port
, false);
1899 /* The underlying device is already deleted (e.g. tunctl -d).
1900 * Calling dpif_port_remove to do local cleanup for the netdev */
1901 if (!port
->is_tunnel
) {
1902 dpif_port_del(ofproto
->backer
->dpif
, port
->odp_port
, true);
1907 port
->peer
->peer
= NULL
;
1911 if (port
->odp_port
!= ODPP_NONE
&& !port
->is_tunnel
) {
1912 ovs_rwlock_wrlock(&ofproto
->backer
->odp_to_ofport_lock
);
1913 hmap_remove(&ofproto
->backer
->odp_to_ofport_map
, &port
->odp_port_node
);
1914 ovs_rwlock_unlock(&ofproto
->backer
->odp_to_ofport_lock
);
1917 if (port
->is_tunnel
) {
1918 atomic_count_dec(&ofproto
->backer
->tnl_count
);
1921 tnl_port_del(port
, port
->odp_port
);
1922 sset_find_and_delete(&ofproto
->ports
, devname
);
1923 sset_find_and_delete(&ofproto
->ghost_ports
, devname
);
1924 bundle_remove(port_
);
1925 set_cfm(port_
, NULL
);
1926 set_bfd(port_
, NULL
);
1927 set_lldp(port_
, NULL
);
1928 if (port
->stp_port
) {
1929 stp_port_disable(port
->stp_port
);
1931 set_rstp_port(port_
, NULL
);
1932 if (ofproto
->sflow
) {
1933 dpif_sflow_del_port(ofproto
->sflow
, port
->odp_port
);
1935 if (ofproto
->ipfix
) {
1936 dpif_ipfix_del_port(ofproto
->ipfix
, port
->odp_port
);
1943 port_modified(struct ofport
*port_
)
1945 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1946 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1947 const char *dp_port_name
;
1948 struct netdev
*netdev
= port
->up
.netdev
;
1950 if (port
->bundle
&& port
->bundle
->bond
) {
1951 bond_slave_set_netdev(port
->bundle
->bond
, port
, netdev
);
1955 cfm_set_netdev(port
->cfm
, netdev
);
1959 bfd_set_netdev(port
->bfd
, netdev
);
    /* Set liveness, unless the link is administratively or
     * operationally down or link monitoring is false. */
1964 if (!(port
->up
.pp
.config
& OFPUTIL_PC_PORT_DOWN
) &&
1965 !(port
->up
.pp
.state
& OFPUTIL_PS_LINK_DOWN
) &&
1967 port
->up
.pp
.state
|= OFPUTIL_PS_LIVE
;
1969 port
->up
.pp
.state
&= ~OFPUTIL_PS_LIVE
;
1972 ofproto_dpif_monitor_port_update(port
, port
->bfd
, port
->cfm
,
1973 port
->lldp
, &port
->up
.pp
.hw_addr
);
1975 dp_port_name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1977 if (port
->is_tunnel
) {
1978 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1980 if (tnl_port_reconfigure(port
, netdev
, port
->odp_port
, port
->odp_port
,
1981 ovs_native_tunneling_is_on(ofproto
),
1983 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
1987 ofport_update_peer(port
);
1991 port_reconfigured(struct ofport
*port_
, enum ofputil_port_config old_config
)
1993 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1994 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
1995 enum ofputil_port_config changed
= old_config
^ port
->up
.pp
.config
;
1997 if (changed
& (OFPUTIL_PC_NO_RECV
| OFPUTIL_PC_NO_RECV_STP
|
1998 OFPUTIL_PC_NO_FWD
| OFPUTIL_PC_NO_FLOOD
|
1999 OFPUTIL_PC_NO_PACKET_IN
)) {
2000 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2002 if (changed
& OFPUTIL_PC_NO_FLOOD
&& port
->bundle
) {
2003 bundle_update(port
->bundle
);
2009 set_sflow(struct ofproto
*ofproto_
,
2010 const struct ofproto_sflow_options
*sflow_options
)
2012 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2013 struct dpif_sflow
*ds
= ofproto
->sflow
;
2015 if (sflow_options
) {
2016 uint32_t old_probability
= ds
? dpif_sflow_get_probability(ds
) : 0;
2018 struct ofport_dpif
*ofport
;
2020 ds
= ofproto
->sflow
= dpif_sflow_create();
2021 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
2022 dpif_sflow_add_port(ds
, &ofport
->up
, ofport
->odp_port
);
2025 dpif_sflow_set_options(ds
, sflow_options
);
2026 if (dpif_sflow_get_probability(ds
) != old_probability
) {
2027 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2031 dpif_sflow_unref(ds
);
2032 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2033 ofproto
->sflow
= NULL
;
2041 struct ofproto
*ofproto_
,
2042 const struct ofproto_ipfix_bridge_exporter_options
*bridge_exporter_options
,
2043 const struct ofproto_ipfix_flow_exporter_options
*flow_exporters_options
,
2044 size_t n_flow_exporters_options
)
2046 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2047 struct dpif_ipfix
*di
= ofproto
->ipfix
;
2048 bool has_options
= bridge_exporter_options
|| flow_exporters_options
;
2049 bool new_di
= false;
2051 if (has_options
&& !di
) {
2052 di
= ofproto
->ipfix
= dpif_ipfix_create();
2057 /* Call set_options in any case to cleanly flush the flow
2058 * caches in the last exporters that are to be destroyed. */
2059 dpif_ipfix_set_options(
2060 di
, bridge_exporter_options
, flow_exporters_options
,
2061 n_flow_exporters_options
);
        /* Add ports only when a new ipfix is created. */
2064 if (new_di
== true) {
2065 struct ofport_dpif
*ofport
;
2066 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
2067 dpif_ipfix_add_port(di
, &ofport
->up
, ofport
->odp_port
);
2072 dpif_ipfix_unref(di
);
2073 ofproto
->ipfix
= NULL
;
static int
get_ipfix_stats(const struct ofproto *ofproto_,
                bool bridge_ipfix,
                struct ovs_list *replies)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_ipfix *di = ofproto->ipfix;

    if (!di) {
        return OFPERR_NXST_NOT_CONFIGURED;
    }

    return dpif_ipfix_get_stats(di, bridge_ipfix, replies);
}
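
/* Applies the CFM settings in 's' to 'ofport_', creating a CFM object for
 * the port if it does not have one yet and releasing it again if
 * configuration fails or 's' is NULL.  Any change of the CFM object forces
 * flow revalidation. */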
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct cfm *old = ofport->cfm;
    int error = 0;

    if (s) {
        if (!ofport->cfm) {
            ofport->cfm = cfm_create(ofport->up.netdev);
        }

        if (cfm_configure(ofport->cfm, s)) {
            error = 0;
            goto out;
        }

        error = EINVAL;
    }
    cfm_unref(ofport->cfm);
    ofport->cfm = NULL;
out:
    if (ofport->cfm != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }
    ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
                                     ofport->lldp, &ofport->up.pp.hw_addr);
    return error;
}

static bool
cfm_status_changed(struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_check_status_change(ofport->cfm) : true;
}

static int
get_cfm_status(const struct ofport *ofport_,
               struct cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int ret = 0;

    if (ofport->cfm) {
        cfm_get_status(ofport->cfm, status);
    } else {
        ret = ENOENT;
    }

    return ret;
}
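
/* Applies the BFD configuration in 'cfg' to 'ofport_'.  A change of the
 * underlying bfd object forces flow revalidation; the port monitor is
 * refreshed either way. */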
static int
set_bfd(struct ofport *ofport_, const struct smap *cfg)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct bfd *old = ofport->bfd;

    ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev),
                                cfg, ofport->up.netdev);
    if (ofport->bfd != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }
    ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
                                     ofport->lldp, &ofport->up.pp.hw_addr);
    return 0;
}

static bool
bfd_status_changed(struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->bfd ? bfd_check_status_change(ofport->bfd) : true;
}

static int
get_bfd_status(struct ofport *ofport_, struct smap *smap)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int ret = 0;

    if (ofport->bfd) {
        bfd_get_status(ofport->bfd, smap);
    } else {
        ret = ENOENT;
    }

    return ret;
}
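
/* Enables, reconfigures, or disables LLDP on 'ofport_' according to 'cfg'.
 * A non-empty 'cfg' creates the LLDP object on demand; a configuration
 * failure (or an empty 'cfg') releases it again. */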
static int
set_lldp(struct ofport *ofport_,
         const struct smap *cfg)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error = 0;

    if (!smap_is_empty(cfg)) {
        if (!ofport->lldp) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->lldp = lldp_create(ofport->up.netdev, ofport_->mtu, cfg);
        }

        if (!lldp_configure(ofport->lldp, cfg)) {
            error = EINVAL;
        }
    }
    if (error) {
        lldp_unref(ofport->lldp);
        ofport->lldp = NULL;
    }

    ofproto_dpif_monitor_port_update(ofport,
                                     ofport->bfd,
                                     ofport->cfm,
                                     ofport->lldp,
                                     &ofport->up.pp.hw_addr);
    return error;
}

static bool
get_lldp_status(const struct ofport *ofport_,
                struct lldp_status *status OVS_UNUSED)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->lldp ? true : false;
}
static int
set_aa(struct ofproto *ofproto OVS_UNUSED,
       const struct aa_settings *s)
{
    return aa_configure(s);
}

static int
aa_mapping_set(struct ofproto *ofproto_ OVS_UNUSED, void *aux,
               const struct aa_mapping_settings *s)
{
    return aa_mapping_register(aux, s);
}

static int
aa_mapping_unset(struct ofproto *ofproto OVS_UNUSED, void *aux)
{
    return aa_mapping_unregister(aux);
}

static int
aa_vlan_get_queued(struct ofproto *ofproto OVS_UNUSED, struct ovs_list *list)
{
    return aa_get_vlan_queued(list);
}

static unsigned int
aa_vlan_get_queue_size(struct ofproto *ofproto OVS_UNUSED)
{
    return aa_get_vlan_queue_size();
}
/* Spanning Tree. */

/* Called while rstp_mutex is held. */
static void
rstp_send_bpdu_cb(struct dp_packet *pkt, void *ofport_, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *ofport = ofport_;
    struct eth_header *eth = dp_packet_eth(pkt);

    netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
    if (eth_addr_is_zero(eth->eth_src)) {
        VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
                     "does not have a configured source MAC address.",
                     ofproto->up.name, ofp_to_u16(ofport->up.ofp_port));
    } else {
        ofproto_dpif_send_packet(ofport, false, pkt);
    }
    dp_packet_delete(pkt);
}

static void
send_bpdu_cb(struct dp_packet *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = dp_packet_eth(pkt);

        netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            ofproto_dpif_send_packet(ofport, false, pkt);
        }
    }
    dp_packet_delete(pkt);
}
2312 /* Configure RSTP on 'ofproto_' using the settings defined in 's'. */
2314 set_rstp(struct ofproto
*ofproto_
, const struct ofproto_rstp_settings
*s
)
2316 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2318 /* Only revalidate flows if the configuration changed. */
2319 if (!s
!= !ofproto
->rstp
) {
2320 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2324 if (!ofproto
->rstp
) {
2325 ofproto
->rstp
= rstp_create(ofproto_
->name
, s
->address
,
2326 rstp_send_bpdu_cb
, ofproto
);
2327 ofproto
->rstp_last_tick
= time_msec();
2329 rstp_set_bridge_address(ofproto
->rstp
, s
->address
);
2330 rstp_set_bridge_priority(ofproto
->rstp
, s
->priority
);
2331 rstp_set_bridge_ageing_time(ofproto
->rstp
, s
->ageing_time
);
2332 rstp_set_bridge_force_protocol_version(ofproto
->rstp
,
2333 s
->force_protocol_version
);
2334 rstp_set_bridge_max_age(ofproto
->rstp
, s
->bridge_max_age
);
2335 rstp_set_bridge_forward_delay(ofproto
->rstp
, s
->bridge_forward_delay
);
2336 rstp_set_bridge_transmit_hold_count(ofproto
->rstp
,
2337 s
->transmit_hold_count
);
2339 struct ofport
*ofport
;
2340 HMAP_FOR_EACH (ofport
, hmap_node
, &ofproto
->up
.ports
) {
2341 set_rstp_port(ofport
, NULL
);
2343 rstp_unref(ofproto
->rstp
);
2344 ofproto
->rstp
= NULL
;
static void
get_rstp_status(struct ofproto *ofproto_, struct ofproto_rstp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->rstp) {
        s->enabled = true;
        s->root_id = rstp_get_root_id(ofproto->rstp);
        s->bridge_id = rstp_get_bridge_id(ofproto->rstp);
        s->designated_id = rstp_get_designated_id(ofproto->rstp);
        s->root_path_cost = rstp_get_root_path_cost(ofproto->rstp);
        s->designated_port_id = rstp_get_designated_port_id(ofproto->rstp);
        s->bridge_port_id = rstp_get_bridge_port_id(ofproto->rstp);
    } else {
        s->enabled = false;
    }
}
2367 update_rstp_port_state(struct ofport_dpif
*ofport
)
2369 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2370 enum rstp_state state
;
2372 /* Figure out new state. */
2373 state
= ofport
->rstp_port
? rstp_port_get_state(ofport
->rstp_port
)
2377 if (ofport
->rstp_state
!= state
) {
2378 enum ofputil_port_state of_state
;
2381 VLOG_DBG("port %s: RSTP state changed from %s to %s",
2382 netdev_get_name(ofport
->up
.netdev
),
2383 rstp_state_name(ofport
->rstp_state
),
2384 rstp_state_name(state
));
2386 if (rstp_learn_in_state(ofport
->rstp_state
)
2387 != rstp_learn_in_state(state
)) {
2388 /* XXX: Learning action flows should also be flushed. */
2389 if (ofport
->bundle
) {
2390 if (!rstp_shift_root_learned_address(ofproto
->rstp
)
2391 || rstp_get_old_root_aux(ofproto
->rstp
) != ofport
) {
2392 bundle_flush_macs(ofport
->bundle
, false);
2396 fwd_change
= rstp_forward_in_state(ofport
->rstp_state
)
2397 != rstp_forward_in_state(state
);
2399 ofproto
->backer
->need_revalidate
= REV_RSTP
;
2400 ofport
->rstp_state
= state
;
2402 if (fwd_change
&& ofport
->bundle
) {
2403 bundle_update(ofport
->bundle
);
2406 /* Update the RSTP state bits in the OpenFlow port description. */
2407 of_state
= ofport
->up
.pp
.state
& ~OFPUTIL_PS_STP_MASK
;
2408 of_state
|= (state
== RSTP_LEARNING
? OFPUTIL_PS_STP_LEARN
2409 : state
== RSTP_FORWARDING
? OFPUTIL_PS_STP_FORWARD
2410 : state
== RSTP_DISCARDING
? OFPUTIL_PS_STP_LISTEN
2412 ofproto_port_set_state(&ofport
->up
, of_state
);
2417 rstp_run(struct ofproto_dpif
*ofproto
)
2419 if (ofproto
->rstp
) {
2420 long long int now
= time_msec();
2421 long long int elapsed
= now
- ofproto
->rstp_last_tick
;
2422 struct rstp_port
*rp
;
2423 struct ofport_dpif
*ofport
;
2425 /* Every second, decrease the values of the timers. */
2426 if (elapsed
>= 1000) {
2427 rstp_tick_timers(ofproto
->rstp
);
2428 ofproto
->rstp_last_tick
= now
;
2431 while ((ofport
= rstp_get_next_changed_port_aux(ofproto
->rstp
, &rp
))) {
2432 update_rstp_port_state(ofport
);
2436 /* FIXME: This check should be done on-event (i.e., when setting
2437 * p->fdb_flush) and not periodically.
2439 while ((ofport
= rstp_check_and_reset_fdb_flush(ofproto
->rstp
, &rp
))) {
2440 if (!rstp_shift_root_learned_address(ofproto
->rstp
)
2441 || rstp_get_old_root_aux(ofproto
->rstp
) != ofport
) {
2442 bundle_flush_macs(ofport
->bundle
, false);
2446 if (rstp_shift_root_learned_address(ofproto
->rstp
)) {
2447 struct ofport_dpif
*old_root_aux
=
2448 (struct ofport_dpif
*)rstp_get_old_root_aux(ofproto
->rstp
);
2449 struct ofport_dpif
*new_root_aux
=
2450 (struct ofport_dpif
*)rstp_get_new_root_aux(ofproto
->rstp
);
2451 if (old_root_aux
!= NULL
&& new_root_aux
!= NULL
) {
2452 bundle_move(old_root_aux
->bundle
, new_root_aux
->bundle
);
2453 rstp_reset_root_changed(ofproto
->rstp
);
2459 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
2461 set_stp(struct ofproto
*ofproto_
, const struct ofproto_stp_settings
*s
)
2463 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2465 /* Only revalidate flows if the configuration changed. */
2466 if (!s
!= !ofproto
->stp
) {
2467 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2471 if (!ofproto
->stp
) {
2472 ofproto
->stp
= stp_create(ofproto_
->name
, s
->system_id
,
2473 send_bpdu_cb
, ofproto
);
2474 ofproto
->stp_last_tick
= time_msec();
2477 stp_set_bridge_id(ofproto
->stp
, s
->system_id
);
2478 stp_set_bridge_priority(ofproto
->stp
, s
->priority
);
2479 stp_set_hello_time(ofproto
->stp
, s
->hello_time
);
2480 stp_set_max_age(ofproto
->stp
, s
->max_age
);
2481 stp_set_forward_delay(ofproto
->stp
, s
->fwd_delay
);
2483 struct ofport
*ofport
;
2485 HMAP_FOR_EACH (ofport
, hmap_node
, &ofproto
->up
.ports
) {
2486 set_stp_port(ofport
, NULL
);
2489 stp_unref(ofproto
->stp
);
2490 ofproto
->stp
= NULL
;
static void
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }
}
2514 update_stp_port_state(struct ofport_dpif
*ofport
)
2516 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2517 enum stp_state state
;
2519 /* Figure out new state. */
2520 state
= ofport
->stp_port
? stp_port_get_state(ofport
->stp_port
)
2524 if (ofport
->stp_state
!= state
) {
2525 enum ofputil_port_state of_state
;
2528 VLOG_DBG("port %s: STP state changed from %s to %s",
2529 netdev_get_name(ofport
->up
.netdev
),
2530 stp_state_name(ofport
->stp_state
),
2531 stp_state_name(state
));
2532 if (stp_learn_in_state(ofport
->stp_state
)
2533 != stp_learn_in_state(state
)) {
2534 /* xxx Learning action flows should also be flushed. */
2535 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
2536 mac_learning_flush(ofproto
->ml
);
2537 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
2538 mcast_snooping_mdb_flush(ofproto
->ms
);
2540 fwd_change
= stp_forward_in_state(ofport
->stp_state
)
2541 != stp_forward_in_state(state
);
2543 ofproto
->backer
->need_revalidate
= REV_STP
;
2544 ofport
->stp_state
= state
;
2545 ofport
->stp_state_entered
= time_msec();
2547 if (fwd_change
&& ofport
->bundle
) {
2548 bundle_update(ofport
->bundle
);
2551 /* Update the STP state bits in the OpenFlow port description. */
2552 of_state
= ofport
->up
.pp
.state
& ~OFPUTIL_PS_STP_MASK
;
2553 of_state
|= (state
== STP_LISTENING
? OFPUTIL_PS_STP_LISTEN
2554 : state
== STP_LEARNING
? OFPUTIL_PS_STP_LEARN
2555 : state
== STP_FORWARDING
? OFPUTIL_PS_STP_FORWARD
2556 : state
== STP_BLOCKING
? OFPUTIL_PS_STP_BLOCK
2558 ofproto_port_set_state(&ofport
->up
, of_state
);
2563 stp_check_and_update_link_state(struct ofproto_dpif
*ofproto
)
2565 struct ofport_dpif
*ofport
;
2567 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
2568 bool up
= netdev_get_carrier(ofport
->up
.netdev
);
2570 if (ofport
->stp_port
&&
2571 up
!= (stp_port_get_state(ofport
->stp_port
) != STP_DISABLED
)) {
2573 VLOG_DBG("bridge %s, port %s is %s, %s it.",
2574 ofproto
->up
.name
, netdev_get_name(ofport
->up
.netdev
),
2576 up
? "enabling" : "disabling");
2579 stp_port_enable(ofport
->stp_port
);
2580 stp_port_set_aux(ofport
->stp_port
, ofport
);
2582 stp_port_disable(ofport
->stp_port
);
2585 update_stp_port_state(ofport
);
2590 /* Configures STP on 'ofport_' using the settings defined in 's'. The
2591 * caller is responsible for assigning STP port numbers and ensuring
2592 * there are no duplicates. */
2594 set_stp_port(struct ofport
*ofport_
,
2595 const struct ofproto_port_stp_settings
*s
)
2597 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2598 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2599 struct stp_port
*sp
= ofport
->stp_port
;
2601 if (!s
|| !s
->enable
) {
2603 ofport
->stp_port
= NULL
;
2604 stp_port_disable(sp
);
2605 update_stp_port_state(ofport
);
2608 } else if (sp
&& stp_port_no(sp
) != s
->port_num
2609 && ofport
== stp_port_get_aux(sp
)) {
2610 /* The port-id changed, so disable the old one if it's not
2611 * already in use by another port. */
2612 stp_port_disable(sp
);
2615 sp
= ofport
->stp_port
= stp_get_port(ofproto
->stp
, s
->port_num
);
2617 /* Set name before enabling the port so that debugging messages can print
2619 stp_port_set_name(sp
, netdev_get_name(ofport
->up
.netdev
));
2621 if (netdev_get_carrier(ofport_
->netdev
)) {
2622 stp_port_enable(sp
);
2624 stp_port_disable(sp
);
2627 stp_port_set_aux(sp
, ofport
);
2628 stp_port_set_priority(sp
, s
->priority
);
2629 stp_port_set_path_cost(sp
, s
->path_cost
);
2631 update_stp_port_state(ofport
);
2637 get_stp_port_status(struct ofport
*ofport_
,
2638 struct ofproto_port_stp_status
*s
)
2640 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2641 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2642 struct stp_port
*sp
= ofport
->stp_port
;
2644 if (!ofproto
->stp
|| !sp
) {
2650 stp_port_get_status(sp
, &s
->port_id
, &s
->state
, &s
->role
);
2651 s
->sec_in_state
= (time_msec() - ofport
->stp_state_entered
) / 1000;
2657 get_stp_port_stats(struct ofport
*ofport_
,
2658 struct ofproto_port_stp_stats
*s
)
2660 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2661 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2662 struct stp_port
*sp
= ofport
->stp_port
;
2664 if (!ofproto
->stp
|| !sp
) {
2670 stp_port_get_counts(sp
, &s
->tx_count
, &s
->rx_count
, &s
->error_count
);
2676 stp_run(struct ofproto_dpif
*ofproto
)
2679 long long int now
= time_msec();
2680 long long int elapsed
= now
- ofproto
->stp_last_tick
;
2681 struct stp_port
*sp
;
2684 stp_tick(ofproto
->stp
, MIN(INT_MAX
, elapsed
));
2685 ofproto
->stp_last_tick
= now
;
2688 stp_check_and_update_link_state(ofproto
);
2690 while (stp_get_changed_port(ofproto
->stp
, &sp
)) {
2691 struct ofport_dpif
*ofport
= stp_port_get_aux(sp
);
2694 update_stp_port_state(ofport
);
2698 if (stp_check_and_reset_fdb_flush(ofproto
->stp
)) {
2699 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
2700 mac_learning_flush(ofproto
->ml
);
2701 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
2702 mcast_snooping_mdb_flush(ofproto
->ms
);
static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
2715 /* Configures RSTP on 'ofport_' using the settings defined in 's'. The
2716 * caller is responsible for assigning RSTP port numbers and ensuring
2717 * there are no duplicates. */
2719 set_rstp_port(struct ofport
*ofport_
,
2720 const struct ofproto_port_rstp_settings
*s
)
2722 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2723 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2724 struct rstp_port
*rp
= ofport
->rstp_port
;
2726 if (!s
|| !s
->enable
) {
2728 rstp_port_set_aux(rp
, NULL
);
2729 rstp_port_set_state(rp
, RSTP_DISABLED
);
2730 rstp_port_set_mac_operational(rp
, false);
2731 ofport
->rstp_port
= NULL
;
2732 rstp_port_unref(rp
);
2733 update_rstp_port_state(ofport
);
2738 /* Check if need to add a new port. */
2740 rp
= ofport
->rstp_port
= rstp_add_port(ofproto
->rstp
);
2743 rstp_port_set(rp
, s
->port_num
, s
->priority
, s
->path_cost
,
2744 s
->admin_edge_port
, s
->auto_edge
,
2745 s
->admin_p2p_mac_state
, s
->admin_port_state
, s
->mcheck
,
2746 ofport
, netdev_get_name(ofport
->up
.netdev
));
2747 update_rstp_port_state(ofport
);
2748 /* Synchronize operational status. */
2749 rstp_port_set_mac_operational(rp
, ofport
->may_enable
);
2753 get_rstp_port_status(struct ofport
*ofport_
,
2754 struct ofproto_port_rstp_status
*s
)
2756 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2757 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2758 struct rstp_port
*rp
= ofport
->rstp_port
;
2760 if (!ofproto
->rstp
|| !rp
) {
2766 rstp_port_get_status(rp
, &s
->port_id
, &s
->state
, &s
->role
,
2767 &s
->designated_bridge_id
, &s
->designated_port_id
,
2768 &s
->designated_path_cost
, &s
->tx_count
,
2769 &s
->rx_count
, &s
->error_count
, &s
->uptime
);
2774 set_queues(struct ofport
*ofport_
, const struct ofproto_port_queue
*qdscp
,
2777 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2778 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
2780 if (ofport
->n_qdscp
!= n_qdscp
2781 || (n_qdscp
&& memcmp(ofport
->qdscp
, qdscp
,
2782 n_qdscp
* sizeof *qdscp
))) {
2783 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2784 free(ofport
->qdscp
);
2785 ofport
->qdscp
= n_qdscp
2786 ? xmemdup(qdscp
, n_qdscp
* sizeof *qdscp
)
2788 ofport
->n_qdscp
= n_qdscp
;
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ovs_rwlock_wrlock(&ml->rwlock);
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac_entry_get_port(ml, mac) == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_by_name_node,
                               &all_ofproto_dpifs_by_name) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        ovs_rwlock_wrlock(&o->ml->rwlock);
                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                        ovs_rwlock_unlock(&o->ml->rwlock);
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
    ovs_rwlock_unlock(&ml->rwlock);
}
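
/* Moves the MAC learning entries that currently point at bundle 'old' so
 * that they point at bundle 'new' instead, forcing flow revalidation.  Both
 * bundles must belong to the same ofproto. */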
static void
bundle_move(struct ofbundle *old, struct ofbundle *new)
{
    struct ofproto_dpif *ofproto = old->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ovs_assert(new->ofproto == old->ofproto);

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ovs_rwlock_wrlock(&ml->rwlock);
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac_entry_get_port(ml, mac) == old) {
            mac_entry_set_port(ml, mac, new);
        }
    }
    ovs_rwlock_unlock(&ml->rwlock);
}

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}
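
/* Recomputes 'bundle->floodable': a bundle may be flooded only if none of
 * its ports has OFPUTIL_PC_NO_FLOOD set, is a legacy L3 port, or is blocked
 * by STP or RSTP. */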
static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || netdev_get_pt_mode(port->up.netdev) == NETDEV_PT_LEGACY_L3
            || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
            || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    ovs_list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}
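
/* Adds the port with OpenFlow port number 'ofp_port' to 'bundle', moving it
 * out of any bundle it currently belongs to, and registers it with the
 * bundle's LACP instance if 'lacp' is nonnull. */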
2911 bundle_add_port(struct ofbundle
*bundle
, ofp_port_t ofp_port
,
2912 struct lacp_slave_settings
*lacp
)
2914 struct ofport_dpif
*port
;
2916 port
= ofp_port_to_ofport(bundle
->ofproto
, ofp_port
);
2921 if (port
->bundle
!= bundle
) {
2922 bundle
->ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2924 bundle_remove(&port
->up
);
2927 port
->bundle
= bundle
;
2928 ovs_list_push_back(&bundle
->ports
, &port
->bundle_node
);
2929 if (port
->up
.pp
.config
& OFPUTIL_PC_NO_FLOOD
2930 || netdev_get_pt_mode(port
->up
.netdev
) == NETDEV_PT_LEGACY_L3
2931 || (bundle
->ofproto
->stp
&& !stp_forward_in_state(port
->stp_state
))
2932 || (bundle
->ofproto
->rstp
&& !rstp_forward_in_state(port
->rstp_state
))) {
2933 bundle
->floodable
= false;
2937 bundle
->ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
2938 lacp_slave_register(bundle
->lacp
, port
, lacp
);
2945 bundle_destroy(struct ofbundle
*bundle
)
2947 struct ofproto_dpif
*ofproto
;
2948 struct ofport_dpif
*port
, *next_port
;
2954 ofproto
= bundle
->ofproto
;
2955 mbridge_unregister_bundle(ofproto
->mbridge
, bundle
);
2958 xlate_bundle_remove(bundle
);
2961 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
2962 bundle_del_port(port
);
2965 bundle_flush_macs(bundle
, true);
2966 mcast_snooping_flush_bundle(ofproto
->ms
, bundle
);
2967 hmap_remove(&ofproto
->bundles
, &bundle
->hmap_node
);
2969 free(bundle
->trunks
);
2970 free(bundle
->cvlans
);
2971 lacp_unref(bundle
->lacp
);
2972 bond_unref(bundle
->bond
);
2977 bundle_set(struct ofproto
*ofproto_
, void *aux
,
2978 const struct ofproto_bundle_settings
*s
)
2980 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2981 bool need_flush
= false;
2982 struct ofport_dpif
*port
;
2983 struct ofbundle
*bundle
;
2984 unsigned long *trunks
= NULL
;
2985 unsigned long *cvlans
= NULL
;
2990 bundle
= bundle_lookup(ofproto
, aux
);
2993 bundle_destroy(bundle
);
2997 ovs_assert(s
->n_slaves
== 1 || s
->bond
!= NULL
);
2998 ovs_assert((s
->lacp
!= NULL
) == (s
->lacp_slaves
!= NULL
));
3001 bundle
= xmalloc(sizeof *bundle
);
3003 bundle
->ofproto
= ofproto
;
3004 hmap_insert(&ofproto
->bundles
, &bundle
->hmap_node
,
3005 hash_pointer(aux
, 0));
3007 bundle
->name
= NULL
;
3009 ovs_list_init(&bundle
->ports
);
3010 bundle
->vlan_mode
= PORT_VLAN_TRUNK
;
3011 bundle
->qinq_ethtype
= ETH_TYPE_VLAN_8021AD
;
3013 bundle
->trunks
= NULL
;
3014 bundle
->cvlans
= NULL
;
3015 bundle
->use_priority_tags
= s
->use_priority_tags
;
3016 bundle
->lacp
= NULL
;
3017 bundle
->bond
= NULL
;
3019 bundle
->floodable
= true;
3020 bundle
->protected = false;
3021 mbridge_register_bundle(ofproto
->mbridge
, bundle
);
3024 if (!bundle
->name
|| strcmp(s
->name
, bundle
->name
)) {
3026 bundle
->name
= xstrdup(s
->name
);
3031 ofproto
->lacp_enabled
= true;
3032 if (!bundle
->lacp
) {
3033 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3034 bundle
->lacp
= lacp_create();
3036 lacp_configure(bundle
->lacp
, s
->lacp
);
3038 lacp_unref(bundle
->lacp
);
3039 bundle
->lacp
= NULL
;
3042 /* Update set of ports. */
3044 for (i
= 0; i
< s
->n_slaves
; i
++) {
3045 if (!bundle_add_port(bundle
, s
->slaves
[i
],
3046 s
->lacp
? &s
->lacp_slaves
[i
] : NULL
)) {
3050 if (!ok
|| ovs_list_size(&bundle
->ports
) != s
->n_slaves
) {
3051 struct ofport_dpif
*next_port
;
3053 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
3054 for (i
= 0; i
< s
->n_slaves
; i
++) {
3055 if (s
->slaves
[i
] == port
->up
.ofp_port
) {
3060 bundle_del_port(port
);
3064 ovs_assert(ovs_list_size(&bundle
->ports
) <= s
->n_slaves
);
3066 if (ovs_list_is_empty(&bundle
->ports
)) {
3067 bundle_destroy(bundle
);
3071 /* Set VLAN tagging mode */
3072 if (s
->vlan_mode
!= bundle
->vlan_mode
3073 || s
->use_priority_tags
!= bundle
->use_priority_tags
) {
3074 bundle
->vlan_mode
= s
->vlan_mode
;
3075 bundle
->use_priority_tags
= s
->use_priority_tags
;
3079 if (s
->qinq_ethtype
!= bundle
->qinq_ethtype
) {
3080 bundle
->qinq_ethtype
= s
->qinq_ethtype
;
3085 vlan
= (s
->vlan_mode
== PORT_VLAN_TRUNK
? -1
3086 : s
->vlan
>= 0 && s
->vlan
<= 4095 ? s
->vlan
3088 if (vlan
!= bundle
->vlan
) {
3089 bundle
->vlan
= vlan
;
3093 /* Get trunked VLANs. */
3094 switch (s
->vlan_mode
) {
3095 case PORT_VLAN_ACCESS
:
3099 case PORT_VLAN_TRUNK
:
3100 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
3103 case PORT_VLAN_NATIVE_UNTAGGED
:
3104 case PORT_VLAN_NATIVE_TAGGED
:
3105 if (vlan
!= 0 && (!s
->trunks
3106 || !bitmap_is_set(s
->trunks
, vlan
)
3107 || bitmap_is_set(s
->trunks
, 0))) {
3108 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
3110 trunks
= bitmap_clone(s
->trunks
, 4096);
3112 trunks
= bitmap_allocate1(4096);
3114 bitmap_set1(trunks
, vlan
);
3115 bitmap_set0(trunks
, 0);
3117 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
3121 case PORT_VLAN_DOT1Q_TUNNEL
:
3122 cvlans
= CONST_CAST(unsigned long *, s
->cvlans
);
3128 if (!vlan_bitmap_equal(trunks
, bundle
->trunks
)) {
3129 free(bundle
->trunks
);
3130 if (trunks
== s
->trunks
) {
3131 bundle
->trunks
= vlan_bitmap_clone(trunks
);
3133 bundle
->trunks
= trunks
;
3138 if (trunks
!= s
->trunks
) {
3142 if (!vlan_bitmap_equal(cvlans
, bundle
->cvlans
)) {
3143 free(bundle
->cvlans
);
3144 if (cvlans
== s
->cvlans
) {
3145 bundle
->cvlans
= vlan_bitmap_clone(cvlans
);
3147 bundle
->cvlans
= cvlans
;
3152 if (cvlans
!= s
->cvlans
) {
3157 if (!ovs_list_is_short(&bundle
->ports
)) {
3158 bundle
->ofproto
->has_bonded_bundles
= true;
3160 if (bond_reconfigure(bundle
->bond
, s
->bond
)) {
3161 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3164 bundle
->bond
= bond_create(s
->bond
, ofproto
);
3165 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3168 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
3169 bond_slave_register(bundle
->bond
, port
,
3170 port
->up
.ofp_port
, port
->up
.netdev
);
3173 bond_unref(bundle
->bond
);
3174 bundle
->bond
= NULL
;
    /* Set protected port mode */
3178 if (s
->protected != bundle
->protected) {
3179 bundle
->protected = s
->protected;
3183 /* If we changed something that would affect MAC learning, un-learn
3184 * everything on this port and force flow revalidation. */
3186 bundle_flush_macs(bundle
, false);
3187 mcast_snooping_flush_bundle(ofproto
->ms
, bundle
);
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (ovs_list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (ovs_list_is_short(&bundle->ports)) {
            bond_unref(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
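
/* Callback used by lacp_run() to transmit an LACP PDU: wraps 'pdu' in an
 * Ethernet frame that uses the port's own MAC address as the source and
 * sends it out on the port. */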
3211 send_pdu_cb(void *port_
, const void *pdu
, size_t pdu_size
)
3213 struct ofport_dpif
*port
= port_
;
3217 error
= netdev_get_etheraddr(port
->up
.netdev
, &ea
);
3219 struct dp_packet packet
;
3222 dp_packet_init(&packet
, 0);
3223 packet_pdu
= eth_compose(&packet
, eth_addr_lacp
, ea
, ETH_TYPE_LACP
,
3225 memcpy(packet_pdu
, pdu
, pdu_size
);
3227 ofproto_dpif_send_packet(port
, false, &packet
);
3228 dp_packet_uninit(&packet
);
3230 static struct vlog_rate_limit rll
= VLOG_RATE_LIMIT_INIT(1, 10);
3231 VLOG_ERR_RL(&rll
, "port %s: cannot obtain Ethernet address of iface "
3232 "%s (%s)", port
->bundle
->name
,
3233 netdev_get_name(port
->up
.netdev
), ovs_strerror(error
));
3238 bundle_send_learning_packets(struct ofbundle
*bundle
)
3240 struct ofproto_dpif
*ofproto
= bundle
->ofproto
;
3241 int error
, n_packets
, n_errors
;
3242 struct mac_entry
*e
;
3244 struct ovs_list list_node
;
3245 struct ofport_dpif
*port
;
3246 struct dp_packet
*pkt
;
3248 struct ovs_list packets
;
3250 ovs_list_init(&packets
);
3251 ovs_rwlock_rdlock(&ofproto
->ml
->rwlock
);
3252 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
3253 if (mac_entry_get_port(ofproto
->ml
, e
) != bundle
) {
3254 pkt_node
= xmalloc(sizeof *pkt_node
);
3255 pkt_node
->pkt
= bond_compose_learning_packet(bundle
->bond
,
3257 (void **)&pkt_node
->port
);
3258 ovs_list_push_back(&packets
, &pkt_node
->list_node
);
3261 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
3263 error
= n_packets
= n_errors
= 0;
3264 LIST_FOR_EACH_POP (pkt_node
, list_node
, &packets
) {
3267 ret
= ofproto_dpif_send_packet(pkt_node
->port
, false, pkt_node
->pkt
);
3268 dp_packet_delete(pkt_node
->pkt
);
3278 static struct vlog_rate_limit rll
= VLOG_RATE_LIMIT_INIT(1, 5);
3279 VLOG_WARN_RL(&rll
, "bond %s: %d errors sending %d gratuitous learning "
3280 "packets, last error was: %s",
3281 bundle
->name
, n_errors
, n_packets
, ovs_strerror(error
));
3283 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
3284 bundle
->name
, n_packets
);
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
            bundle->ofproto->backer->need_revalidate = REV_BOND;
        }

        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
3325 mirror_set__(struct ofproto
*ofproto_
, void *aux
,
3326 const struct ofproto_mirror_settings
*s
)
3328 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3329 struct ofbundle
**srcs
, **dsts
;
3334 mirror_destroy(ofproto
->mbridge
, aux
);
3338 srcs
= xmalloc(s
->n_srcs
* sizeof *srcs
);
3339 dsts
= xmalloc(s
->n_dsts
* sizeof *dsts
);
3341 for (i
= 0; i
< s
->n_srcs
; i
++) {
3342 srcs
[i
] = bundle_lookup(ofproto
, s
->srcs
[i
]);
3345 for (i
= 0; i
< s
->n_dsts
; i
++) {
3346 dsts
[i
] = bundle_lookup(ofproto
, s
->dsts
[i
]);
3349 error
= mirror_set(ofproto
->mbridge
, aux
, s
->name
, srcs
, s
->n_srcs
, dsts
,
3350 s
->n_dsts
, s
->src_vlans
,
3351 bundle_lookup(ofproto
, s
->out_bundle
),
3352 s
->snaplen
, s
->out_vlan
);
3359 mirror_get_stats__(struct ofproto
*ofproto
, void *aux
,
3360 uint64_t *packets
, uint64_t *bytes
)
3362 return mirror_get_stats(ofproto_dpif_cast(ofproto
)->mbridge
, aux
, packets
,
static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ovs_rwlock_wrlock(&ofproto->ml->rwlock);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml);
    }
    ovs_rwlock_unlock(&ofproto->ml->rwlock);
    return 0;
}

static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && mirror_bundle_out(ofproto->mbridge, bundle) != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
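
/* Updates the MAC learning table configuration of 'ofproto_': the idle time
 * after which learned entries expire and the maximum number of entries the
 * table may hold. */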
3394 set_mac_table_config(struct ofproto
*ofproto_
, unsigned int idle_time
,
3397 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3398 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
3399 mac_learning_set_idle_time(ofproto
->ml
, idle_time
);
3400 mac_learning_set_max_entries(ofproto
->ml
, max_entries
);
3401 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
3404 /* Configures multicast snooping on 'ofport' using the settings
3405 * defined in 's'. */
3407 set_mcast_snooping(struct ofproto
*ofproto_
,
3408 const struct ofproto_mcast_snooping_settings
*s
)
3410 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3412 /* Only revalidate flows if the configuration changed. */
3413 if (!s
!= !ofproto
->ms
) {
3414 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3419 ofproto
->ms
= mcast_snooping_create();
3422 ovs_rwlock_wrlock(&ofproto
->ms
->rwlock
);
3423 mcast_snooping_set_idle_time(ofproto
->ms
, s
->idle_time
);
3424 mcast_snooping_set_max_entries(ofproto
->ms
, s
->max_entries
);
3425 if (mcast_snooping_set_flood_unreg(ofproto
->ms
, s
->flood_unreg
)) {
3426 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3428 ovs_rwlock_unlock(&ofproto
->ms
->rwlock
);
3430 mcast_snooping_unref(ofproto
->ms
);
3437 /* Configures multicast snooping port's flood settings on 'ofproto'. */
3439 set_mcast_snooping_port(struct ofproto
*ofproto_
, void *aux
,
3440 const struct ofproto_mcast_snooping_port_settings
*s
)
3442 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3443 struct ofbundle
*bundle
= bundle_lookup(ofproto
, aux
);
3445 if (ofproto
->ms
&& s
) {
3446 ovs_rwlock_wrlock(&ofproto
->ms
->rwlock
);
3447 mcast_snooping_set_port_flood(ofproto
->ms
, bundle
, s
->flood
);
3448 mcast_snooping_set_port_flood_reports(ofproto
->ms
, bundle
,
3450 ovs_rwlock_unlock(&ofproto
->ms
->rwlock
);
struct ofport_dpif *
ofp_port_to_ofport(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
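
/* Refreshes the cached peer pointer of patch port 'ofport': clears any stale
 * peer relationship, then searches the other ofprotos on the same backer for
 * the port named by the netdev's 'peer' option and links the two ports. */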
3476 ofport_update_peer(struct ofport_dpif
*ofport
)
3478 const struct ofproto_dpif
*ofproto
;
3479 struct dpif_backer
*backer
;
3482 if (!netdev_vport_is_patch(ofport
->up
.netdev
)) {
3486 backer
= ofproto_dpif_cast(ofport
->up
.ofproto
)->backer
;
3487 backer
->need_revalidate
= REV_RECONFIGURE
;
3490 ofport
->peer
->peer
= NULL
;
3491 ofport
->peer
= NULL
;
3494 peer_name
= netdev_vport_patch_peer(ofport
->up
.netdev
);
3499 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
3500 &all_ofproto_dpifs_by_name
) {
3501 struct ofport
*peer_ofport
;
3502 struct ofport_dpif
*peer
;
3505 if (ofproto
->backer
!= backer
) {
3509 peer_ofport
= shash_find_data(&ofproto
->up
.port_by_name
, peer_name
);
3514 peer
= ofport_dpif_cast(peer_ofport
);
3515 peer_peer
= netdev_vport_patch_peer(peer
->up
.netdev
);
3516 if (peer_peer
&& !strcmp(netdev_get_name(ofport
->up
.netdev
),
3518 ofport
->peer
= peer
;
3519 ofport
->peer
->peer
= ofport
;
3529 port_run(struct ofport_dpif
*ofport
)
3531 long long int carrier_seq
= netdev_get_carrier_resets(ofport
->up
.netdev
);
3532 bool carrier_changed
= carrier_seq
!= ofport
->carrier_seq
;
3533 bool enable
= netdev_get_carrier(ofport
->up
.netdev
);
3534 bool cfm_enable
= false;
3535 bool bfd_enable
= false;
3537 ofport
->carrier_seq
= carrier_seq
;
3540 int cfm_opup
= cfm_get_opup(ofport
->cfm
);
3542 cfm_enable
= !cfm_get_fault(ofport
->cfm
);
3544 if (cfm_opup
>= 0) {
3545 cfm_enable
= cfm_enable
&& cfm_opup
;
3550 bfd_enable
= bfd_forwarding(ofport
->bfd
);
3553 if (ofport
->bfd
|| ofport
->cfm
) {
3554 enable
= enable
&& (cfm_enable
|| bfd_enable
);
3557 if (ofport
->bundle
) {
3558 enable
= enable
&& lacp_slave_may_enable(ofport
->bundle
->lacp
, ofport
);
3559 if (carrier_changed
) {
3560 lacp_slave_carrier_changed(ofport
->bundle
->lacp
, ofport
);
3564 if (ofport
->may_enable
!= enable
) {
3565 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3567 ofproto
->backer
->need_revalidate
= REV_PORT_TOGGLED
;
3569 if (ofport
->rstp_port
) {
3570 rstp_port_set_mac_operational(ofport
->rstp_port
, enable
);
3573 /* Propagate liveness, unless the link is administratively or
3574 * operationally down. */
3575 if (!(ofport
->up
.pp
.config
& OFPUTIL_PC_PORT_DOWN
) &&
3576 !(ofport
->up
.pp
.state
& OFPUTIL_PS_LINK_DOWN
)) {
3577 enum ofputil_port_state of_state
= ofport
->up
.pp
.state
;
3579 of_state
|= OFPUTIL_PS_LIVE
;
3581 of_state
&= ~OFPUTIL_PS_LIVE
;
3583 ofproto_port_set_state(&ofport
->up
, of_state
);
3587 ofport
->may_enable
= enable
;
3591 port_query_by_name(const struct ofproto
*ofproto_
, const char *devname
,
3592 struct ofproto_port
*ofproto_port
)
3594 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3595 struct dpif_port dpif_port
;
3598 if (sset_contains(&ofproto
->ghost_ports
, devname
)) {
3599 const char *type
= netdev_get_type_from_name(devname
);
3601 /* We may be called before ofproto->up.port_by_name is populated with
3602 * the appropriate ofport. For this reason, we must get the name and
3603 * type from the netdev layer directly. */
3605 const struct ofport
*ofport
;
3607 ofport
= shash_find_data(&ofproto
->up
.port_by_name
, devname
);
3608 ofproto_port
->ofp_port
= ofport
? ofport
->ofp_port
: OFPP_NONE
;
3609 ofproto_port
->name
= xstrdup(devname
);
3610 ofproto_port
->type
= xstrdup(type
);
3616 if (!sset_contains(&ofproto
->ports
, devname
)) {
3619 error
= dpif_port_query_by_name(ofproto
->backer
->dpif
,
3620 devname
, &dpif_port
);
3622 ofproto_port_from_dpif_port(ofproto
, ofproto_port
, &dpif_port
);
3628 port_add(struct ofproto
*ofproto_
, struct netdev
*netdev
)
3630 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3631 const char *devname
= netdev_get_name(netdev
);
3632 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
3633 const char *dp_port_name
;
3635 if (netdev_vport_is_patch(netdev
)) {
3636 sset_add(&ofproto
->ghost_ports
, netdev_get_name(netdev
));
3640 dp_port_name
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
3641 if (!dpif_port_exists(ofproto
->backer
->dpif
, dp_port_name
)) {
3642 odp_port_t port_no
= ODPP_NONE
;
3645 error
= dpif_port_add(ofproto
->backer
->dpif
, netdev
, &port_no
);
3649 if (netdev_get_tunnel_config(netdev
)) {
3650 simap_put(&ofproto
->backer
->tnl_backers
,
3651 dp_port_name
, odp_to_u32(port_no
));
3655 if (netdev_get_tunnel_config(netdev
)) {
3656 sset_add(&ofproto
->ghost_ports
, devname
);
3658 sset_add(&ofproto
->ports
, devname
);
3664 port_del(struct ofproto
*ofproto_
, ofp_port_t ofp_port
)
3666 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3667 struct ofport_dpif
*ofport
= ofp_port_to_ofport(ofproto
, ofp_port
);
3674 sset_find_and_delete(&ofproto
->ghost_ports
,
3675 netdev_get_name(ofport
->up
.netdev
));
3676 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
3677 if (!ofport
->is_tunnel
&& !netdev_vport_is_patch(ofport
->up
.netdev
)) {
3678 error
= dpif_port_del(ofproto
->backer
->dpif
, ofport
->odp_port
, false);
3680 /* The caller is going to close ofport->up.netdev. If this is a
3681 * bonded port, then the bond is using that netdev, so remove it
3682 * from the bond. The client will need to reconfigure everything
3683 * after deleting ports, so then the slave will get re-added. */
3684 bundle_remove(&ofport
->up
);
3691 port_set_config(const struct ofport
*ofport_
, const struct smap
*cfg
)
3693 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3694 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3696 if (sset_contains(&ofproto
->ghost_ports
,
3697 netdev_get_name(ofport
->up
.netdev
))) {
3701 return dpif_port_set_config(ofproto
->backer
->dpif
, ofport
->odp_port
, cfg
);
3705 port_get_stats(const struct ofport
*ofport_
, struct netdev_stats
*stats
)
3707 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3710 error
= netdev_get_stats(ofport
->up
.netdev
, stats
);
3712 if (!error
&& ofport_
->ofp_port
== OFPP_LOCAL
) {
3713 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
3715 ovs_mutex_lock(&ofproto
->stats_mutex
);
3716 /* ofproto->stats.tx_packets represents packets that we created
3717 * internally and sent to some port (e.g. packets sent with
3718 * ofproto_dpif_send_packet()). Account for them as if they had
3719 * come from OFPP_LOCAL and got forwarded. */
3721 if (stats
->rx_packets
!= UINT64_MAX
) {
3722 stats
->rx_packets
+= ofproto
->stats
.tx_packets
;
3725 if (stats
->rx_bytes
!= UINT64_MAX
) {
3726 stats
->rx_bytes
+= ofproto
->stats
.tx_bytes
;
3729 /* ofproto->stats.rx_packets represents packets that were received on
3730 * some port and we processed internally and dropped (e.g. STP).
3731 * Account for them as if they had been forwarded to OFPP_LOCAL. */
3733 if (stats
->tx_packets
!= UINT64_MAX
) {
3734 stats
->tx_packets
+= ofproto
->stats
.rx_packets
;
3737 if (stats
->tx_bytes
!= UINT64_MAX
) {
3738 stats
->tx_bytes
+= ofproto
->stats
.rx_bytes
;
3740 ovs_mutex_unlock(&ofproto
->stats_mutex
);
3747 port_get_lacp_stats(const struct ofport
*ofport_
, struct lacp_slave_stats
*stats
)
3749 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
3750 if (ofport
->bundle
&& ofport
->bundle
->lacp
) {
3751 if (lacp_get_slave_stats(ofport
->bundle
->lacp
, ofport
, stats
)) {
3758 struct port_dump_state
{
3759 struct sset_position pos
;
3762 struct ofproto_port port
;
3767 port_dump_start(const struct ofproto
*ofproto_ OVS_UNUSED
, void **statep
)
3769 *statep
= xzalloc(sizeof(struct port_dump_state
));
3774 port_dump_next(const struct ofproto
*ofproto_
, void *state_
,
3775 struct ofproto_port
*port
)
3777 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3778 struct port_dump_state
*state
= state_
;
3779 const struct sset
*sset
;
3780 struct sset_node
*node
;
3782 if (state
->has_port
) {
3783 ofproto_port_destroy(&state
->port
);
3784 state
->has_port
= false;
3786 sset
= state
->ghost
? &ofproto
->ghost_ports
: &ofproto
->ports
;
3787 while ((node
= sset_at_position(sset
, &state
->pos
))) {
3790 error
= port_query_by_name(ofproto_
, node
->name
, &state
->port
);
3792 *port
= state
->port
;
3793 state
->has_port
= true;
3795 } else if (error
!= ENODEV
) {
3800 if (!state
->ghost
) {
3801 state
->ghost
= true;
3802 memset(&state
->pos
, 0, sizeof state
->pos
);
3803 return port_dump_next(ofproto_
, state_
, port
);
3810 port_dump_done(const struct ofproto
*ofproto_ OVS_UNUSED
, void *state_
)
3812 struct port_dump_state
*state
= state_
;
3814 if (state
->has_port
) {
3815 ofproto_port_destroy(&state
->port
);
3822 port_poll(const struct ofproto
*ofproto_
, char **devnamep
)
3824 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
3826 if (ofproto
->port_poll_errno
) {
3827 int error
= ofproto
->port_poll_errno
;
3828 ofproto
->port_poll_errno
= 0;
3832 if (sset_is_empty(&ofproto
->port_poll_set
)) {
3836 *devnamep
= sset_pop(&ofproto
->port_poll_set
);
static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
rule_expire(struct rule_dpif *rule, long long now)
    OVS_REQUIRES(ofproto_mutex)
{
    uint16_t hard_timeout, idle_timeout;
    int reason = -1;

    hard_timeout = rule->up.hard_timeout;
    idle_timeout = rule->up.idle_timeout;

    /* Has 'rule' expired? */
    if (hard_timeout) {
        long long int modified;

        ovs_mutex_lock(&rule->up.mutex);
        modified = rule->up.modified;
        ovs_mutex_unlock(&rule->up.mutex);

        if (now > modified + hard_timeout * 1000) {
            reason = OFPRR_HARD_TIMEOUT;
        }
    }

    if (reason < 0 && idle_timeout) {
        long long int used;

        ovs_mutex_lock(&rule->stats_mutex);
        used = rule->stats.used;
        ovs_mutex_unlock(&rule->stats_mutex);

        if (now > used + idle_timeout * 1000) {
            reason = OFPRR_IDLE_TIMEOUT;
        }
    }

    if (reason >= 0) {
        COVERAGE_INC(ofproto_dpif_expired);
        ofproto_rule_expire(&rule->up, reason);
    }
}

static void
ofproto_dpif_set_packet_odp_port(const struct ofproto_dpif *ofproto,
                                 ofp_port_t in_port, struct dp_packet *packet)
{
    if (in_port == OFPP_NONE) {
        in_port = OFPP_LOCAL;
    }

    packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
}
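
/* Translates either 'rule' or the explicit 'ofpacts' for 'flow' into
 * datapath actions and executes them on 'packet' through the backer's dpif.
 * Statistics are credited to 'rule' when one is given. */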
3910 ofproto_dpif_execute_actions__(struct ofproto_dpif
*ofproto
,
3911 ovs_version_t version
, const struct flow
*flow
,
3912 struct rule_dpif
*rule
,
3913 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
3914 int depth
, int resubmits
,
3915 struct dp_packet
*packet
)
3917 struct dpif_flow_stats stats
;
3918 struct xlate_out xout
;
3919 struct xlate_in xin
;
3920 struct dpif_execute execute
;
3923 ovs_assert((rule
!= NULL
) != (ofpacts
!= NULL
));
3925 dpif_flow_stats_extract(flow
, packet
, time_msec(), &stats
);
3928 rule_dpif_credit_stats(rule
, &stats
);
3931 uint64_t odp_actions_stub
[1024 / 8];
3932 struct ofpbuf odp_actions
= OFPBUF_STUB_INITIALIZER(odp_actions_stub
);
3933 xlate_in_init(&xin
, ofproto
, version
, flow
, flow
->in_port
.ofp_port
, rule
,
3934 stats
.tcp_flags
, packet
, NULL
, &odp_actions
);
3935 xin
.ofpacts
= ofpacts
;
3936 xin
.ofpacts_len
= ofpacts_len
;
3937 xin
.resubmit_stats
= &stats
;
3939 xin
.resubmits
= resubmits
;
3940 if (xlate_actions(&xin
, &xout
) != XLATE_OK
) {
3945 execute
.actions
= odp_actions
.data
;
3946 execute
.actions_len
= odp_actions
.size
;
3948 pkt_metadata_from_flow(&packet
->md
, flow
);
3949 execute
.packet
= packet
;
3950 execute
.flow
= flow
;
3951 execute
.needs_help
= (xout
.slow
& SLOW_ACTION
) != 0;
3952 execute
.probe
= false;
3955 /* Fix up in_port. */
3956 ofproto_dpif_set_packet_odp_port(ofproto
, flow
->in_port
.ofp_port
, packet
);
3958 error
= dpif_execute(ofproto
->backer
->dpif
, &execute
);
3960 xlate_out_uninit(&xout
);
3961 ofpbuf_uninit(&odp_actions
);
3966 /* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
3967 * 'flow' must reflect the data in 'packet'. */
3969 ofproto_dpif_execute_actions(struct ofproto_dpif
*ofproto
,
3970 ovs_version_t version
, const struct flow
*flow
,
3971 struct rule_dpif
*rule
,
3972 const struct ofpact
*ofpacts
, size_t ofpacts_len
,
3973 struct dp_packet
*packet
)
3975 return ofproto_dpif_execute_actions__(ofproto
, version
, flow
, rule
,
3976 ofpacts
, ofpacts_len
, 0, 0, packet
);
3980 rule_dpif_credit_stats__(struct rule_dpif
*rule
,
3981 const struct dpif_flow_stats
*stats
,
3983 OVS_REQUIRES(rule
->stats_mutex
)
3985 if (credit_counts
) {
3986 rule
->stats
.n_packets
+= stats
->n_packets
;
3987 rule
->stats
.n_bytes
+= stats
->n_bytes
;
3989 rule
->stats
.used
= MAX(rule
->stats
.used
, stats
->used
);
3993 rule_dpif_credit_stats(struct rule_dpif
*rule
,
3994 const struct dpif_flow_stats
*stats
)
3996 ovs_mutex_lock(&rule
->stats_mutex
);
3997 if (OVS_UNLIKELY(rule
->new_rule
)) {
3998 ovs_mutex_lock(&rule
->new_rule
->stats_mutex
);
3999 rule_dpif_credit_stats__(rule
->new_rule
, stats
, rule
->forward_counts
);
4000 ovs_mutex_unlock(&rule
->new_rule
->stats_mutex
);
4002 rule_dpif_credit_stats__(rule
, stats
, true);
4004 ovs_mutex_unlock(&rule
->stats_mutex
);
4007 /* Sets 'rule''s recirculation id. */
4009 rule_dpif_set_recirc_id(struct rule_dpif
*rule
, uint32_t id
)
4010 OVS_REQUIRES(rule
->up
.mutex
)
4012 ovs_assert(!rule
->recirc_id
|| rule
->recirc_id
== id
);
4013 if (rule
->recirc_id
== id
) {
4014 /* Release the new reference to the same id. */
4017 rule
->recirc_id
= id
;
4021 /* Sets 'rule''s recirculation id. */
4023 rule_set_recirc_id(struct rule
*rule_
, uint32_t id
)
4025 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
4027 ovs_mutex_lock(&rule
->up
.mutex
);
4028 rule_dpif_set_recirc_id(rule
, id
);
4029 ovs_mutex_unlock(&rule
->up
.mutex
);
4033 ofproto_dpif_get_tables_version(struct ofproto_dpif
*ofproto
)
4035 ovs_version_t version
;
4037 /* Use memory_order_acquire to signify that any following memory accesses
4038 * can not be reordered to happen before this atomic read. This makes sure
4039 * all following reads relate to this or a newer version, but never to an
4041 atomic_read_explicit(&ofproto
->tables_version
, &version
,
4042 memory_order_acquire
);
4046 /* The returned rule (if any) is valid at least until the next RCU quiescent
4047 * period. If the rule needs to stay around longer, the caller should take
4050 * 'flow' is non-const to allow for temporary modifications during the lookup.
4051 * Any changes are restored before returning. */
4052 static struct rule_dpif
*
4053 rule_dpif_lookup_in_table(struct ofproto_dpif
*ofproto
, ovs_version_t version
,
4054 uint8_t table_id
, struct flow
*flow
,
4055 struct flow_wildcards
*wc
)
4057 struct classifier
*cls
= &ofproto
->up
.tables
[table_id
].cls
;
4058 return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls
, version
,
4063 ofproto_dpif_credit_table_stats(struct ofproto_dpif
*ofproto
, uint8_t table_id
,
4064 uint64_t n_matches
, uint64_t n_misses
)
4066 struct oftable
*tbl
= &ofproto
->up
.tables
[table_id
];
4070 atomic_add_relaxed(&tbl
->n_matched
, n_matches
, &orig
);
4073 atomic_add_relaxed(&tbl
->n_missed
, n_misses
, &orig
);
/* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
 * table '*table_id'.  Returns the rule that was found, which may be one of the
 * special rules according to packet miss handling.  If 'may_packet_in' is
 * false, returning of the miss_rule (which issues packet ins for the
 * controller) is avoided.  Updates 'wc', if nonnull, to reflect the fields
 * that were used during the lookup.
 *
 * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
 * if none is found then the table miss configuration for that table is
 * honored, which can result in additional lookups in other OpenFlow tables.
 * In this case the function updates '*table_id' to reflect the final OpenFlow
 * table that was searched.
 *
 * If 'honor_table_miss' is false, then only one table lookup occurs, in
 * '*table_id'.
 *
 * The rule is returned in '*rule', which is valid at least until the next
 * RCU quiescent period.  If the '*rule' needs to stay around longer, the
 * caller must take a reference.
 *
 * 'in_port' allows the lookup to take place as if the in port had the value
 * 'in_port'.  This is needed for resubmit action support.
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
4103 rule_dpif_lookup_from_table(struct ofproto_dpif
*ofproto
,
4104 ovs_version_t version
, struct flow
*flow
,
4105 struct flow_wildcards
*wc
,
4106 const struct dpif_flow_stats
*stats
,
4107 uint8_t *table_id
, ofp_port_t in_port
,
4108 bool may_packet_in
, bool honor_table_miss
,
4109 struct xlate_cache
*xcache
)
4111 ovs_be16 old_tp_src
= flow
->tp_src
, old_tp_dst
= flow
->tp_dst
;
4112 ofp_port_t old_in_port
= flow
->in_port
.ofp_port
;
4113 enum ofputil_table_miss miss_config
;
4114 struct rule_dpif
*rule
;
4117 /* We always unwildcard nw_frag (for IP), so they
4118 * need not be unwildcarded here. */
4119 if (flow
->nw_frag
& FLOW_NW_FRAG_ANY
4120 && ofproto
->up
.frag_handling
!= OFPUTIL_FRAG_NX_MATCH
) {
4121 if (ofproto
->up
.frag_handling
== OFPUTIL_FRAG_NORMAL
) {
4122 /* We must pretend that transport ports are unavailable. */
4123 flow
->tp_src
= htons(0);
4124 flow
->tp_dst
= htons(0);
4126 /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
4127 * Use the drop_frags_rule (which cannot disappear). */
4128 rule
= ofproto
->drop_frags_rule
;
4130 struct oftable
*tbl
= &ofproto
->up
.tables
[*table_id
];
4133 atomic_add_relaxed(&tbl
->n_matched
, stats
->n_packets
, &orig
);
4136 struct xc_entry
*entry
;
4138 entry
= xlate_cache_add_entry(xcache
, XC_TABLE
);
4139 entry
->table
.ofproto
= ofproto
;
4140 entry
->table
.id
= *table_id
;
4141 entry
->table
.match
= true;
4147 /* Look up a flow with 'in_port' as the input port. Then restore the
4148 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
4149 * have surprising behavior). */
4150 flow
->in_port
.ofp_port
= in_port
;
4152 /* Our current implementation depends on n_tables == N_TABLES, and
4153 * TBL_INTERNAL being the last table. */
4154 BUILD_ASSERT_DECL(N_TABLES
== TBL_INTERNAL
+ 1);
4156 miss_config
= OFPUTIL_TABLE_MISS_CONTINUE
;
4158 for (next_id
= *table_id
;
4159 next_id
< ofproto
->up
.n_tables
;
4160 next_id
++, next_id
+= (next_id
== TBL_INTERNAL
))
4162 *table_id
= next_id
;
4163 rule
= rule_dpif_lookup_in_table(ofproto
, version
, next_id
, flow
, wc
);
4165 struct oftable
*tbl
= &ofproto
->up
.tables
[next_id
];
4168 atomic_add_relaxed(rule
? &tbl
->n_matched
: &tbl
->n_missed
,
4169 stats
->n_packets
, &orig
);
4172 struct xc_entry
*entry
;
4174 entry
= xlate_cache_add_entry(xcache
, XC_TABLE
);
4175 entry
->table
.ofproto
= ofproto
;
4176 entry
->table
.id
= next_id
;
4177 entry
->table
.match
= (rule
!= NULL
);
4180 goto out
; /* Match. */
4182 if (honor_table_miss
) {
4183 miss_config
= ofproto_table_get_miss_config(&ofproto
->up
,
4185 if (miss_config
== OFPUTIL_TABLE_MISS_CONTINUE
) {
4192 rule
= ofproto
->no_packet_in_rule
;
4193 if (may_packet_in
) {
4194 if (miss_config
== OFPUTIL_TABLE_MISS_CONTINUE
4195 || miss_config
== OFPUTIL_TABLE_MISS_CONTROLLER
) {
4196 struct ofport_dpif
*port
;
4198 port
= ofp_port_to_ofport(ofproto
, old_in_port
);
4200 VLOG_WARN_RL(&rl
, "packet-in on unknown OpenFlow port %"PRIu32
,
4202 } else if (!(port
->up
.pp
.config
& OFPUTIL_PC_NO_PACKET_IN
)) {
4203 rule
= ofproto
->miss_rule
;
4205 } else if (miss_config
== OFPUTIL_TABLE_MISS_DEFAULT
&&
4206 connmgr_wants_packet_in_on_miss(ofproto
->up
.connmgr
)) {
4207 rule
= ofproto
->miss_rule
;
4211 /* Restore port numbers, as they may have been modified above. */
4212 flow
->tp_src
= old_tp_src
;
4213 flow
->tp_dst
= old_tp_dst
;
4214 /* Restore the old in port. */
4215 flow
->in_port
.ofp_port
= old_in_port
;
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xzalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}
static enum ofperr
check_mask(struct ofproto_dpif *ofproto, const struct miniflow *flow)
{
    const struct odp_support *support;
    uint16_t ct_state, ct_zone;
    ovs_u128 ct_label;
    uint32_t ct_mark;

    support = &ofproto->backer->rt_support.odp;
    ct_state = MINIFLOW_GET_U8(flow, ct_state);

    if (ct_state & CS_UNSUPPORTED_MASK) {
        return OFPERR_OFPBMC_BAD_MASK;
    }

    /* Do not bother dissecting the flow further if the datapath supports all
     * the features we know of. */
    if (support->ct_state && support->ct_zone && support->ct_mark
        && support->ct_label && support->ct_state_nat
        && support->ct_orig_tuple && support->ct_orig_tuple6) {
        return 0;
    }

    ct_zone = MINIFLOW_GET_U16(flow, ct_zone);
    ct_mark = MINIFLOW_GET_U32(flow, ct_mark);
    ct_label = MINIFLOW_GET_U128(flow, ct_label);

    if ((ct_state && !support->ct_state)
        || ((ct_state & (CS_SRC_NAT | CS_DST_NAT)) && !support->ct_state_nat)
        || (ct_zone && !support->ct_zone)
        || (ct_mark && !support->ct_mark)
        || (!ovs_u128_is_zero(ct_label) && !support->ct_label)) {
        return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
    }

    if (!support->ct_orig_tuple && !support->ct_orig_tuple6
        && (MINIFLOW_GET_U8(flow, ct_nw_proto)
            || MINIFLOW_GET_U16(flow, ct_tp_src)
            || MINIFLOW_GET_U16(flow, ct_tp_dst))) {
        return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
    }

    if (!support->ct_orig_tuple
        && (MINIFLOW_GET_U32(flow, ct_nw_src)
            || MINIFLOW_GET_U32(flow, ct_nw_dst))) {
        return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
    }

    if (!support->ct_orig_tuple6
        && (!ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_src))
            || !ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_dst)))) {
        return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
    }

    return 0;
}
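/* Illustrative example (not from the original file): on a datapath whose odp
 * support has 'ct_mark' == false, a flow whose match includes a ct_mark mask,
 * e.g.
 *
 *     ct_state=+trk, ct_mark=0x1/0xff, ...
 *
 * fails the check above with OFPERR_NXBMC_CT_DATAPATH_SUPPORT, because the
 * mask asks for conntrack metadata that the datapath cannot match on. */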
static void
report_unsupported_act(const char *action, const char *detail)
{
    static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
    VLOG_WARN_RL(&rll, "Rejecting %s action because datapath does not support"
                 "%s%s (your kernel module may be out of date)",
                 action, detail ? " " : "", detail ? detail : "");
}
static enum ofperr
check_actions(const struct ofproto_dpif *ofproto,
              const struct rule_actions *const actions)
{
    const struct ofpact *ofpact;
    const struct odp_support *support = &ofproto->backer->rt_support.odp;

    OFPACT_FOR_EACH (ofpact, actions->ofpacts, actions->ofpacts_len) {
        if (ofpact->type == OFPACT_CT) {
            const struct ofpact_conntrack *ct;
            const struct ofpact *a;

            ct = CONTAINER_OF(ofpact, struct ofpact_conntrack, ofpact);

            if (!support->ct_state) {
                report_unsupported_act("ct", "ct action");
                return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
            }
            if ((ct->zone_imm || ct->zone_src.field) && !support->ct_zone) {
                report_unsupported_act("ct", "ct zones");
                return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
            }
            /* So far the force commit feature is implemented together with the
             * original direction tuple feature by all datapaths, so we use the
             * support flag for the 'ct_orig_tuple' to indicate support for the
             * force commit feature as well. */
            if ((ct->flags & NX_CT_F_FORCE) && !support->ct_orig_tuple) {
                report_unsupported_act("ct", "force commit");
                return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
            }

            OFPACT_FOR_EACH (a, ct->actions, ofpact_ct_get_action_len(ct)) {
                const struct mf_field *dst = ofpact_get_mf_dst(a);

                if (a->type == OFPACT_NAT && !support->ct_state_nat) {
                    /* The backer doesn't seem to support the NAT bits in
                     * 'ct_state': assume that it doesn't support the NAT
                     * action either. */
                    report_unsupported_act("ct", "nat");
                    return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
                }
                if (dst && ((dst->id == MFF_CT_MARK && !support->ct_mark) ||
                            (dst->id == MFF_CT_LABEL && !support->ct_label))) {
                    report_unsupported_act("ct", "setting mark and/or label");
                    return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
                }
            }
        } else if (ofpact->type == OFPACT_RESUBMIT) {
            struct ofpact_resubmit *resubmit = ofpact_get_RESUBMIT(ofpact);

            if (resubmit->with_ct_orig && !support->ct_orig_tuple) {
                report_unsupported_act("resubmit",
                                       "ct original direction tuple");
                return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
            }
        }
    }

    return 0;
}
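/* Illustrative example (not from the original file): with 'ct_state_nat'
 * unsupported by the datapath, a flow mod whose actions include a nested NAT
 * action, e.g.
 *
 *     actions=ct(commit,nat(src=10.0.0.1),table=1)
 *
 * is rejected with OFPERR_NXBAC_CT_DATAPATH_SUPPORT, along with a
 * rate-limited "Rejecting ct action ..." warning from
 * report_unsupported_act(). */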
static enum ofperr
rule_check(struct rule *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->ofproto);
    enum ofperr err;

    err = check_mask(ofproto, &rule->cr.match.mask->masks);
    if (err) {
        return err;
    }

    return check_actions(ofproto, rule->actions);
}
static enum ofperr
rule_construct(struct rule *rule_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    int error;

    error = rule_check(rule_);
    if (error) {
        return error;
    }

    ovs_mutex_init_adaptive(&rule->stats_mutex);
    rule->stats.n_packets = 0;
    rule->stats.n_bytes = 0;
    rule->stats.used = rule->up.modified;
    rule->recirc_id = 0;
    rule->new_rule = NULL;
    rule->forward_counts = false;

    return 0;
}
static void
rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_counts)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    if (old_rule_) {
        struct rule_dpif *old_rule = rule_dpif_cast(old_rule_);

        ovs_assert(!old_rule->new_rule);

        /* Take a reference to the new rule, and refer all stats updates from
         * the old rule to the new rule. */
        ofproto_rule_ref(&rule->up);

        ovs_mutex_lock(&old_rule->stats_mutex);
        ovs_mutex_lock(&rule->stats_mutex);
        old_rule->new_rule = rule;                 /* Forward future stats. */
        old_rule->forward_counts = forward_counts;

        if (forward_counts) {
            rule->stats = old_rule->stats;         /* Transfer stats to the
                                                    * new rule. */
        } else {
            /* Used timestamp must be forwarded whenever a rule is modified. */
            rule->stats.used = old_rule->stats.used;
        }
        ovs_mutex_unlock(&rule->stats_mutex);
        ovs_mutex_unlock(&old_rule->stats_mutex);
    }
}
static void
rule_destruct(struct rule *rule_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    ovs_mutex_destroy(&rule->stats_mutex);
    /* Release reference to the new rule, if any. */
    if (rule->new_rule) {
        ofproto_rule_unref(&rule->new_rule->up);
    }
    if (rule->recirc_id) {
        recirc_free_id(rule->recirc_id);
    }
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
               long long int *used)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    ovs_mutex_lock(&rule->stats_mutex);
    if (OVS_UNLIKELY(rule->new_rule)) {
        rule_get_stats(&rule->new_rule->up, packets, bytes, used);
    } else {
        *packets = rule->stats.n_packets;
        *bytes = rule->stats.n_bytes;
        *used = rule->stats.used;
    }
    ovs_mutex_unlock(&rule->stats_mutex);
}
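/* Illustrative note (not part of the original file): once rule_insert() has
 * linked an old rule to its replacement, statistics reads follow the chain,
 * so a controller querying the superseded rule still sees current counters:
 *
 *     rule_insert(&new->up, &old->up, false);
 *     rule_get_stats(&old->up, &packets, &bytes, &used);
 *         -- recurses into rule_get_stats(&new->up, ...) via old->new_rule
 */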
4467 struct ofproto_dpif_packet_out
{
4468 struct xlate_cache xcache
;
4469 struct ofpbuf odp_actions
;
4470 struct recirc_refs rr
;
4475 static struct ofproto_dpif_packet_out
*
4476 ofproto_dpif_packet_out_new(void)
4478 struct ofproto_dpif_packet_out
*aux
= xmalloc(sizeof *aux
);
4479 xlate_cache_init(&aux
->xcache
);
4480 ofpbuf_init(&aux
->odp_actions
, 64);
4481 aux
->rr
= RECIRC_REFS_EMPTY_INITIALIZER
;
4482 aux
->needs_help
= false;
4488 ofproto_dpif_packet_out_delete(struct ofproto_dpif_packet_out
*aux
)
4491 xlate_cache_uninit(&aux
->xcache
);
4492 ofpbuf_uninit(&aux
->odp_actions
);
4493 recirc_refs_unref(&aux
->rr
);
4499 packet_xlate(struct ofproto
*ofproto_
, struct ofproto_packet_out
*opo
)
4500 OVS_REQUIRES(ofproto_mutex
)
4502 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4503 struct xlate_out xout
;
4504 struct xlate_in xin
;
4505 enum ofperr error
= 0;
4507 struct ofproto_dpif_packet_out
*aux
= ofproto_dpif_packet_out_new();
4509 xlate_in_init(&xin
, ofproto
, opo
->version
, opo
->flow
,
4510 opo
->flow
->in_port
.ofp_port
, NULL
, 0, opo
->packet
, NULL
,
4512 xin
.ofpacts
= opo
->ofpacts
;
4513 xin
.ofpacts_len
= opo
->ofpacts_len
;
4514 /* No learning or stats, but collect side effects to xcache. */
4515 xin
.allow_side_effects
= false;
4516 xin
.resubmit_stats
= NULL
;
4517 xin
.xcache
= &aux
->xcache
;
4518 xin
.in_packet_out
= true;
4520 if (xlate_actions(&xin
, &xout
) != XLATE_OK
) {
4521 error
= OFPERR_OFPFMFC_UNKNOWN
; /* Error processing actions. */
4524 /* Prepare learn actions. */
4525 struct xc_entry
*entry
;
4526 struct ofpbuf entries
= aux
->xcache
.entries
;
4528 XC_ENTRY_FOR_EACH (entry
, &entries
) {
4529 if (entry
->type
== XC_LEARN
) {
4530 struct ofproto_flow_mod
*ofm
= entry
->learn
.ofm
;
4532 error
= ofproto_flow_mod_learn_refresh(ofm
);
4536 struct rule
*rule
= ofm
->temp_rule
;
4537 ofm
->learn_adds_rule
= (rule
->state
== RULE_INITIALIZED
);
4538 if (ofm
->learn_adds_rule
) {
4539 /* If learning on a different bridge, must use its next
4540 * version number. */
4541 ofm
->version
= (rule
->ofproto
== ofproto_
)
4542 ? opo
->version
: rule
->ofproto
->tables_version
+ 1;
4543 error
= ofproto_flow_mod_learn_start(ofm
);
4552 aux
->needs_help
= (xout
.slow
& SLOW_ACTION
) != 0;
4553 recirc_refs_swap(&aux
->rr
, &xout
.recircs
); /* Hold recirc refs. */
4555 xlate_out_uninit(&xout
);
4560 xlate_out_uninit(&xout
);
4561 ofproto_dpif_packet_out_delete(aux
);
4567 packet_xlate_revert(struct ofproto
*ofproto OVS_UNUSED
,
4568 struct ofproto_packet_out
*opo
)
4569 OVS_REQUIRES(ofproto_mutex
)
4571 struct ofproto_dpif_packet_out
*aux
= opo
->aux
;
4574 /* Revert the learned flows. */
4575 struct xc_entry
*entry
;
4576 struct ofpbuf entries
= aux
->xcache
.entries
;
4578 XC_ENTRY_FOR_EACH (entry
, &entries
) {
4579 if (entry
->type
== XC_LEARN
&& entry
->learn
.ofm
->learn_adds_rule
) {
4580 ofproto_flow_mod_learn_revert(entry
->learn
.ofm
);
4584 ofproto_dpif_packet_out_delete(aux
);
4588 /* Push stats and perform side effects of flow translation. */
4590 ofproto_dpif_xcache_execute(struct ofproto_dpif
*ofproto
,
4591 struct xlate_cache
*xcache
,
4592 struct dpif_flow_stats
*stats
)
4593 OVS_REQUIRES(ofproto_mutex
)
4595 struct xc_entry
*entry
;
4596 struct ofpbuf entries
= xcache
->entries
;
4598 XC_ENTRY_FOR_EACH (entry
, &entries
) {
4599 switch (entry
->type
) {
4601 /* Finish the learned flows. */
4602 if (entry
->learn
.ofm
->learn_adds_rule
) {
4603 ofproto_flow_mod_learn_finish(entry
->learn
.ofm
, &ofproto
->up
);
4606 case XC_FIN_TIMEOUT
:
4607 if (stats
->tcp_flags
& (TCP_FIN
| TCP_RST
)) {
4608 /* 'ofproto_mutex' already held */
4609 ofproto_rule_reduce_timeouts__(&entry
->fin
.rule
->up
,
4614 /* All the rest can be dealt with by the xlate layer. */
4624 case XC_TUNNEL_HEADER
:
4625 xlate_push_stats_entry(entry
, stats
);
4634 packet_execute(struct ofproto
*ofproto_
, struct ofproto_packet_out
*opo
)
4635 OVS_REQUIRES(ofproto_mutex
)
4637 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4638 struct dpif_flow_stats stats
;
4639 struct dpif_execute execute
;
4641 struct ofproto_dpif_packet_out
*aux
= opo
->aux
;
4644 /* Run the side effects from the xcache. */
4645 dpif_flow_stats_extract(opo
->flow
, opo
->packet
, time_msec(), &stats
);
4646 ofproto_dpif_xcache_execute(ofproto
, &aux
->xcache
, &stats
);
4648 execute
.actions
= aux
->odp_actions
.data
;
4649 execute
.actions_len
= aux
->odp_actions
.size
;
4651 pkt_metadata_from_flow(&opo
->packet
->md
, opo
->flow
);
4652 execute
.packet
= opo
->packet
;
4653 execute
.flow
= opo
->flow
;
4654 execute
.needs_help
= aux
->needs_help
;
4655 execute
.probe
= false;
4658 /* Fix up in_port. */
4659 ofproto_dpif_set_packet_odp_port(ofproto
, opo
->flow
->in_port
.ofp_port
,
4662 dpif_execute(ofproto
->backer
->dpif
, &execute
);
4663 ofproto_dpif_packet_out_delete(aux
);
4667 static struct group_dpif
*group_dpif_cast(const struct ofgroup
*group
)
4669 return group
? CONTAINER_OF(group
, struct group_dpif
, up
) : NULL
;
4672 static struct ofgroup
*
4675 struct group_dpif
*group
= xzalloc(sizeof *group
);
4680 group_dealloc(struct ofgroup
*group_
)
4682 struct group_dpif
*group
= group_dpif_cast(group_
);
static void
group_construct_stats(struct group_dpif *group)
    OVS_REQUIRES(group->stats_mutex)
{
    group->packet_count = 0;
    group->byte_count = 0;

    struct ofputil_bucket *bucket;
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        bucket->stats.packet_count = 0;
        bucket->stats.byte_count = 0;
    }
}

void
group_dpif_credit_stats(struct group_dpif *group,
                        struct ofputil_bucket *bucket,
                        const struct dpif_flow_stats *stats)
{
    ovs_mutex_lock(&group->stats_mutex);
    group->packet_count += stats->n_packets;
    group->byte_count += stats->n_bytes;
    if (bucket) {
        bucket->stats.packet_count += stats->n_packets;
        bucket->stats.byte_count += stats->n_bytes;
    } else { /* Credit to all buckets */
        LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
            bucket->stats.packet_count += stats->n_packets;
            bucket->stats.byte_count += stats->n_bytes;
        }
    }
    ovs_mutex_unlock(&group->stats_mutex);
}
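/* Illustrative note (not part of the original file): translation passes the
 * specific bucket when it knows which one handled the packet, and NULL when
 * the group as a whole acted on it, in which case every bucket is credited:
 *
 *     group_dpif_credit_stats(group, bucket, &stats);  -- one bucket
 *     group_dpif_credit_stats(group, NULL, &stats);    -- all buckets
 */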
4721 group_construct(struct ofgroup
*group_
)
4723 struct group_dpif
*group
= group_dpif_cast(group_
);
4725 ovs_mutex_init_adaptive(&group
->stats_mutex
);
4726 ovs_mutex_lock(&group
->stats_mutex
);
4727 group_construct_stats(group
);
4728 ovs_mutex_unlock(&group
->stats_mutex
);
4733 group_destruct(struct ofgroup
*group_
)
4735 struct group_dpif
*group
= group_dpif_cast(group_
);
4736 ovs_mutex_destroy(&group
->stats_mutex
);
4740 group_get_stats(const struct ofgroup
*group_
, struct ofputil_group_stats
*ogs
)
4742 struct group_dpif
*group
= group_dpif_cast(group_
);
4744 ovs_mutex_lock(&group
->stats_mutex
);
4745 ogs
->packet_count
= group
->packet_count
;
4746 ogs
->byte_count
= group
->byte_count
;
4748 struct bucket_counter
*bucket_stats
= ogs
->bucket_stats
;
4749 struct ofputil_bucket
*bucket
;
4750 LIST_FOR_EACH (bucket
, list_node
, &group
->up
.buckets
) {
4751 bucket_stats
->packet_count
= bucket
->stats
.packet_count
;
4752 bucket_stats
->byte_count
= bucket
->stats
.byte_count
;
4755 ovs_mutex_unlock(&group
->stats_mutex
);
/* If the group exists, this function increments the group's reference count.
4762 * Make sure to call ofproto_group_unref() after no longer needing to maintain
4763 * a reference to the group. */
4765 group_dpif_lookup(struct ofproto_dpif
*ofproto
, uint32_t group_id
,
4766 ovs_version_t version
, bool take_ref
)
4768 struct ofgroup
*ofgroup
= ofproto_group_lookup(&ofproto
->up
, group_id
,
4770 return ofgroup
? group_dpif_cast(ofgroup
) : NULL
;
4773 /* Sends 'packet' out 'ofport'. If 'port' is a tunnel and that tunnel type
4774 * supports a notion of an OAM flag, sets it if 'oam' is true.
4775 * May modify 'packet'.
4776 * Returns 0 if successful, otherwise a positive errno value. */
4778 ofproto_dpif_send_packet(const struct ofport_dpif
*ofport
, bool oam
,
4779 struct dp_packet
*packet
)
4781 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
4784 error
= xlate_send_packet(ofport
, oam
, packet
);
4786 ovs_mutex_lock(&ofproto
->stats_mutex
);
4787 ofproto
->stats
.tx_packets
++;
4788 ofproto
->stats
.tx_bytes
+= dp_packet_size(packet
);
4789 ovs_mutex_unlock(&ofproto
->stats_mutex
);
/* Return the version string of the datapath that backs up
 * this 'ofproto'.
 */
static const char *
get_datapath_version(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    return ofproto->backer->dp_version_string;
}
4805 type_set_config(const char *type
, const struct smap
*other_config
)
4807 struct dpif_backer
*backer
;
4809 backer
= shash_find_data(&all_dpif_backers
, type
);
4811 /* This is not necessarily a problem, since backers are only
4812 * created on demand. */
4816 dpif_set_config(backer
->dpif
, other_config
);
static void
ct_flush(const struct ofproto *ofproto_, const uint16_t *zone)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    ct_dpif_flush(ofproto->backer->dpif, zone, NULL);
}
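/* Note (not part of the original file): the NULL third argument is the
 * conntrack tuple filter; with no tuple, ct_dpif_flush() removes every
 * connection, limited to '*zone' when 'zone' is non-NULL and spanning all
 * zones otherwise. */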
4828 set_frag_handling(struct ofproto
*ofproto_
,
4829 enum ofputil_frag_handling frag_handling
)
4831 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4832 if (frag_handling
!= OFPUTIL_FRAG_REASM
) {
4833 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
4841 nxt_resume(struct ofproto
*ofproto_
,
4842 const struct ofputil_packet_in_private
*pin
)
4844 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4846 /* Translate pin into datapath actions. */
4847 uint64_t odp_actions_stub
[1024 / 8];
4848 struct ofpbuf odp_actions
= OFPBUF_STUB_INITIALIZER(odp_actions_stub
);
4849 enum slow_path_reason slow
;
4850 enum ofperr error
= xlate_resume(ofproto
, pin
, &odp_actions
, &slow
);
4852 /* Steal 'pin->packet' and put it into a dp_packet. */
4853 struct dp_packet packet
;
4854 dp_packet_init(&packet
, pin
->base
.packet_len
);
4855 dp_packet_put(&packet
, pin
->base
.packet
, pin
->base
.packet_len
);
4857 pkt_metadata_from_flow(&packet
.md
, &pin
->base
.flow_metadata
.flow
);
4859 /* Fix up in_port. */
4860 ofproto_dpif_set_packet_odp_port(ofproto
,
4861 pin
->base
.flow_metadata
.flow
.in_port
.ofp_port
,
4864 struct flow headers
;
4865 flow_extract(&packet
, &headers
);
4867 /* Execute the datapath actions on the packet. */
4868 struct dpif_execute execute
= {
4869 .actions
= odp_actions
.data
,
4870 .actions_len
= odp_actions
.size
,
4871 .needs_help
= (slow
& SLOW_ACTION
) != 0,
4875 dpif_execute(ofproto
->backer
->dpif
, &execute
);
4878 ofpbuf_uninit(&odp_actions
);
4879 dp_packet_uninit(&packet
);
4887 set_netflow(struct ofproto
*ofproto_
,
4888 const struct netflow_options
*netflow_options
)
4890 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4892 if (netflow_options
) {
4893 if (!ofproto
->netflow
) {
4894 ofproto
->netflow
= netflow_create();
4895 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
4897 return netflow_set_options(ofproto
->netflow
, netflow_options
);
4898 } else if (ofproto
->netflow
) {
4899 ofproto
->backer
->need_revalidate
= REV_RECONFIGURE
;
4900 netflow_unref(ofproto
->netflow
);
4901 ofproto
->netflow
= NULL
;
4908 get_netflow_ids(const struct ofproto
*ofproto_
,
4909 uint8_t *engine_type
, uint8_t *engine_id
)
4911 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
4913 dpif_get_netflow_ids(ofproto
->backer
->dpif
, engine_type
, engine_id
);
4916 struct ofproto_dpif
*
4917 ofproto_dpif_lookup_by_name(const char *name
)
4919 struct ofproto_dpif
*ofproto
;
4921 HMAP_FOR_EACH_WITH_HASH (ofproto
, all_ofproto_dpifs_by_name_node
,
4922 hash_string(name
, 0),
4923 &all_ofproto_dpifs_by_name
) {
4924 if (!strcmp(ofproto
->up
.name
, name
)) {
4931 struct ofproto_dpif
*
4932 ofproto_dpif_lookup_by_uuid(const struct uuid
*uuid
)
4934 struct ofproto_dpif
*ofproto
;
4936 HMAP_FOR_EACH_WITH_HASH (ofproto
, all_ofproto_dpifs_by_uuid_node
,
4937 uuid_hash(uuid
), &all_ofproto_dpifs_by_uuid
) {
4938 if (uuid_equals(&ofproto
->uuid
, uuid
)) {
4946 ofproto_unixctl_fdb_flush(struct unixctl_conn
*conn
, int argc
,
4947 const char *argv
[], void *aux OVS_UNUSED
)
4949 struct ofproto_dpif
*ofproto
;
4952 ofproto
= ofproto_dpif_lookup_by_name(argv
[1]);
4954 unixctl_command_reply_error(conn
, "no such bridge");
4957 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
4958 mac_learning_flush(ofproto
->ml
);
4959 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
4961 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
4962 &all_ofproto_dpifs_by_name
) {
4963 ovs_rwlock_wrlock(&ofproto
->ml
->rwlock
);
4964 mac_learning_flush(ofproto
->ml
);
4965 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
4969 unixctl_command_reply(conn
, "table successfully flushed");
4973 ofproto_unixctl_mcast_snooping_flush(struct unixctl_conn
*conn
, int argc
,
4974 const char *argv
[], void *aux OVS_UNUSED
)
4976 struct ofproto_dpif
*ofproto
;
4979 ofproto
= ofproto_dpif_lookup_by_name(argv
[1]);
4981 unixctl_command_reply_error(conn
, "no such bridge");
4985 if (!mcast_snooping_enabled(ofproto
->ms
)) {
4986 unixctl_command_reply_error(conn
, "multicast snooping is disabled");
4989 mcast_snooping_mdb_flush(ofproto
->ms
);
4991 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
4992 &all_ofproto_dpifs_by_name
) {
4993 if (!mcast_snooping_enabled(ofproto
->ms
)) {
4996 mcast_snooping_mdb_flush(ofproto
->ms
);
5000 unixctl_command_reply(conn
, "table successfully flushed");
5003 static struct ofport_dpif
*
5004 ofbundle_get_a_port(const struct ofbundle
*bundle
)
5006 return CONTAINER_OF(ovs_list_front(&bundle
->ports
), struct ofport_dpif
,
5011 ofproto_unixctl_fdb_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
5012 const char *argv
[], void *aux OVS_UNUSED
)
5014 struct ds ds
= DS_EMPTY_INITIALIZER
;
5015 const struct ofproto_dpif
*ofproto
;
5016 const struct mac_entry
*e
;
5018 ofproto
= ofproto_dpif_lookup_by_name(argv
[1]);
5020 unixctl_command_reply_error(conn
, "no such bridge");
5024 ds_put_cstr(&ds
, " port VLAN MAC Age\n");
5025 ovs_rwlock_rdlock(&ofproto
->ml
->rwlock
);
5026 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
5027 struct ofbundle
*bundle
= mac_entry_get_port(ofproto
->ml
, e
);
5028 char name
[OFP10_MAX_PORT_NAME_LEN
];
5030 ofputil_port_to_string(ofbundle_get_a_port(bundle
)->up
.ofp_port
,
5031 NULL
, name
, sizeof name
);
5032 ds_put_format(&ds
, "%5s %4d "ETH_ADDR_FMT
" %3d\n",
5033 name
, e
->vlan
, ETH_ADDR_ARGS(e
->mac
),
5034 mac_entry_age(ofproto
->ml
, e
));
5036 ovs_rwlock_unlock(&ofproto
->ml
->rwlock
);
5037 unixctl_command_reply(conn
, ds_cstr(&ds
));
5042 ofproto_unixctl_mcast_snooping_show(struct unixctl_conn
*conn
,
5043 int argc OVS_UNUSED
,
5045 void *aux OVS_UNUSED
)
5047 struct ds ds
= DS_EMPTY_INITIALIZER
;
5048 const struct ofproto_dpif
*ofproto
;
5049 const struct ofbundle
*bundle
;
5050 const struct mcast_group
*grp
;
5051 struct mcast_group_bundle
*b
;
5052 struct mcast_mrouter_bundle
*mrouter
;
5054 ofproto
= ofproto_dpif_lookup_by_name(argv
[1]);
5056 unixctl_command_reply_error(conn
, "no such bridge");
5060 if (!mcast_snooping_enabled(ofproto
->ms
)) {
5061 unixctl_command_reply_error(conn
, "multicast snooping is disabled");
5065 ds_put_cstr(&ds
, " port VLAN GROUP Age\n");
5066 ovs_rwlock_rdlock(&ofproto
->ms
->rwlock
);
5067 LIST_FOR_EACH (grp
, group_node
, &ofproto
->ms
->group_lru
) {
5068 LIST_FOR_EACH(b
, bundle_node
, &grp
->bundle_lru
) {
5069 char name
[OFP10_MAX_PORT_NAME_LEN
];
5072 ofputil_port_to_string(ofbundle_get_a_port(bundle
)->up
.ofp_port
,
5073 NULL
, name
, sizeof name
);
5074 ds_put_format(&ds
, "%5s %4d ", name
, grp
->vlan
);
5075 ipv6_format_mapped(&grp
->addr
, &ds
);
5076 ds_put_format(&ds
, " %3d\n",
5077 mcast_bundle_age(ofproto
->ms
, b
));
5081 /* ports connected to multicast routers */
5082 LIST_FOR_EACH(mrouter
, mrouter_node
, &ofproto
->ms
->mrouter_lru
) {
5083 char name
[OFP10_MAX_PORT_NAME_LEN
];
5085 bundle
= mrouter
->port
;
5086 ofputil_port_to_string(ofbundle_get_a_port(bundle
)->up
.ofp_port
,
5087 NULL
, name
, sizeof name
);
5088 ds_put_format(&ds
, "%5s %4d querier %3d\n",
5089 name
, mrouter
->vlan
,
5090 mcast_mrouter_age(ofproto
->ms
, mrouter
));
5092 ovs_rwlock_unlock(&ofproto
->ms
->rwlock
);
5093 unixctl_command_reply(conn
, ds_cstr(&ds
));
5097 /* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
5098 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
5099 * to destroy 'ofproto_shash' and free the returned value. */
5100 static const struct shash_node
**
5101 get_ofprotos(struct shash
*ofproto_shash
)
5103 const struct ofproto_dpif
*ofproto
;
5105 HMAP_FOR_EACH (ofproto
, all_ofproto_dpifs_by_name_node
,
5106 &all_ofproto_dpifs_by_name
) {
5107 char *name
= xasprintf("%s@%s", ofproto
->up
.type
, ofproto
->up
.name
);
5108 shash_add_nocopy(ofproto_shash
, name
, ofproto
);
5111 return shash_sort(ofproto_shash
);
5115 ofproto_unixctl_dpif_dump_dps(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
5116 const char *argv
[] OVS_UNUSED
,
5117 void *aux OVS_UNUSED
)
5119 struct ds ds
= DS_EMPTY_INITIALIZER
;
5120 struct shash ofproto_shash
;
5121 const struct shash_node
**sorted_ofprotos
;
5124 shash_init(&ofproto_shash
);
5125 sorted_ofprotos
= get_ofprotos(&ofproto_shash
);
5126 for (i
= 0; i
< shash_count(&ofproto_shash
); i
++) {
5127 const struct shash_node
*node
= sorted_ofprotos
[i
];
5128 ds_put_format(&ds
, "%s\n", node
->name
);
5131 shash_destroy(&ofproto_shash
);
5132 free(sorted_ofprotos
);
5134 unixctl_command_reply(conn
, ds_cstr(&ds
));
static void
show_dp_feature_bool(struct ds *ds, const char *feature, bool b)
{
    ds_put_format(ds, "%s: %s\n", feature, b ? "Yes" : "No");
}

static void
show_dp_feature_size_t(struct ds *ds, const char *feature, size_t s)
{
    ds_put_format(ds, "%s: %"PRIuSIZE"\n", feature, s);
}

enum dpif_support_field_type {
    DPIF_SUPPORT_FIELD_bool,
    DPIF_SUPPORT_FIELD_size_t,
};

struct dpif_support_field {
    void *rt_ptr;        /* Points to the 'rt_support' field. */
    const void *bt_ptr;  /* Points to the 'bt_support' field. */
    const char *title;
    enum dpif_support_field_type type;
};

#define DPIF_SUPPORT_FIELD_INTIALIZER(RT_PTR, BT_PTR, TITLE, TYPE) \
    (struct dpif_support_field) {RT_PTR, BT_PTR, TITLE, TYPE}

static void
dpif_show_support(const struct dpif_backer_support *support, struct ds *ds)
{
#define DPIF_SUPPORT_FIELD(TYPE, NAME, TITLE) \
    show_dp_feature_##TYPE (ds, TITLE, support->NAME);
    DPIF_SUPPORT_FIELDS
#undef DPIF_SUPPORT_FIELD

#define ODP_SUPPORT_FIELD(TYPE, NAME, TITLE) \
    show_dp_feature_##TYPE (ds, TITLE, support->odp.NAME );
    ODP_SUPPORT_FIELDS
#undef ODP_SUPPORT_FIELD
}
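/* Illustrative note (not part of the original file): DPIF_SUPPORT_FIELDS and
 * ODP_SUPPORT_FIELDS are X-macro lists declared in the corresponding headers;
 * each entry supplies a (TYPE, NAME, TITLE) triple.  Assuming an entry such
 * as
 *
 *     DPIF_SUPPORT_FIELD(bool, recirc, "Recirc")
 *
 * the block above expands to
 *
 *     show_dp_feature_bool (ds, "Recirc", support->recirc);
 *
 * producing one formatted output line per feature flag with no hand-written
 * per-field code. */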
5180 display_support_field(const char *name
,
5181 const struct dpif_support_field
*field
,
5184 switch (field
->type
) {
5185 case DPIF_SUPPORT_FIELD_bool
: {
5186 bool v
= *(bool *)field
->rt_ptr
;
5187 bool b
= *(bool *)field
->bt_ptr
;
5188 ds_put_format(ds
, "%s (%s) : [run time]:%s, [boot time]:%s\n", name
,
5189 field
->title
, v
? "true" : "false",
5190 b
? "true" : "false");
5193 case DPIF_SUPPORT_FIELD_size_t
:
5194 ds_put_format(ds
, "%s (%s) : [run time]:%"PRIuSIZE
5195 ", [boot time]:%"PRIuSIZE
"\n", name
,
5196 field
->title
, *(size_t *)field
->rt_ptr
,
5197 *(size_t *)field
->bt_ptr
);
5204 /* Set a field of 'rt_support' to a new value.
5206 * Returns 'true' if the value is actually set. */
5208 dpif_set_support(struct dpif_backer_support
*rt_support
,
5209 struct dpif_backer_support
*bt_support
,
5210 const char *name
, const char *value
, struct ds
*ds
)
5212 struct shash all_fields
= SHASH_INITIALIZER(&all_fields
);
5213 struct dpif_support_field
*field
;
5214 struct shash_node
*node
;
5215 bool changed
= false;
5217 #define DPIF_SUPPORT_FIELD(TYPE, NAME, TITLE) \
5219 struct dpif_support_field *f = xmalloc(sizeof *f); \
5220 *f = DPIF_SUPPORT_FIELD_INTIALIZER(&rt_support->NAME, \
5221 &bt_support->NAME, \
5223 DPIF_SUPPORT_FIELD_##TYPE);\
5224 shash_add_once(&all_fields, #NAME, f); \
5226 DPIF_SUPPORT_FIELDS
;
5227 #undef DPIF_SUPPORT_FIELD
5229 #define ODP_SUPPORT_FIELD(TYPE, NAME, TITLE) \
5231 struct dpif_support_field *f = xmalloc(sizeof *f); \
5232 *f = DPIF_SUPPORT_FIELD_INTIALIZER(&rt_support->odp.NAME, \
5233 &bt_support->odp.NAME, \
5235 DPIF_SUPPORT_FIELD_##TYPE);\
5236 shash_add_once(&all_fields, #NAME, f); \
5239 #undef ODP_SUPPORT_FIELD
5242 SHASH_FOR_EACH (node
, &all_fields
) {
5243 display_support_field(node
->name
, node
->data
, ds
);
5248 node
= shash_find(&all_fields
, name
);
5250 ds_put_cstr(ds
, "Unexpected support field");
5256 display_support_field(node
->name
, field
, ds
);
5260 if (field
->type
== DPIF_SUPPORT_FIELD_bool
) {
5261 if (!strcasecmp(value
, "true")) {
5262 if (*(bool *)field
->bt_ptr
) {
5263 *(bool *)field
->rt_ptr
= true;
            ds_put_cstr(ds, "Can not enable features not supported by the datapath");
5268 } else if (!strcasecmp(value
, "false")) {
5269 *(bool *)field
->rt_ptr
= false;
5272 ds_put_cstr(ds
, "Boolean value expected");
5274 } else if (field
->type
== DPIF_SUPPORT_FIELD_size_t
) {
5276 if (str_to_int(value
, 10, &v
)) {
5278 if (v
<= *(size_t *)field
->bt_ptr
) {
5279 *(size_t *)field
->rt_ptr
= v
;
5282 ds_put_cstr(ds
, "Can not set value beyond the datapath capability");
5285 ds_put_format(ds
, "Negative number not expected");
5288 ds_put_cstr(ds
, "Integer number expected");
5293 shash_destroy_free_data(&all_fields
);
5298 dpif_show_backer(const struct dpif_backer
*backer
, struct ds
*ds
)
5300 const struct shash_node
**ofprotos
;
5301 struct dpif_dp_stats dp_stats
;
5302 struct shash ofproto_shash
;
5305 dpif_get_dp_stats(backer
->dpif
, &dp_stats
);
5306 ds_put_format(ds
, "%s: hit:%"PRIu64
" missed:%"PRIu64
"\n",
5307 dpif_name(backer
->dpif
), dp_stats
.n_hit
, dp_stats
.n_missed
);
5309 shash_init(&ofproto_shash
);
5310 ofprotos
= get_ofprotos(&ofproto_shash
);
5311 for (i
= 0; i
< shash_count(&ofproto_shash
); i
++) {
5312 struct ofproto_dpif
*ofproto
= ofprotos
[i
]->data
;
5313 const struct shash_node
**ports
;
5316 if (ofproto
->backer
!= backer
) {
5320 ds_put_format(ds
, "\t%s:\n", ofproto
->up
.name
);
5322 ports
= shash_sort(&ofproto
->up
.port_by_name
);
5323 for (j
= 0; j
< shash_count(&ofproto
->up
.port_by_name
); j
++) {
5324 const struct shash_node
*node
= ports
[j
];
5325 struct ofport
*ofport
= node
->data
;
5327 odp_port_t odp_port
;
5329 ds_put_format(ds
, "\t\t%s %u/", netdev_get_name(ofport
->netdev
),
5332 odp_port
= ofp_port_to_odp_port(ofproto
, ofport
->ofp_port
);
5333 if (odp_port
!= ODPP_NONE
) {
5334 ds_put_format(ds
, "%"PRIu32
":", odp_port
);
5336 ds_put_cstr(ds
, "none:");
5339 ds_put_format(ds
, " (%s", netdev_get_type(ofport
->netdev
));
5342 if (!netdev_get_config(ofport
->netdev
, &config
)) {
5343 const struct smap_node
**nodes
= smap_sort(&config
);
5344 for (size_t k
= 0; k
< smap_count(&config
); k
++) {
5345 ds_put_format(ds
, "%c %s=%s", k
? ',' : ':',
5346 nodes
[k
]->key
, nodes
[k
]->value
);
5350 smap_destroy(&config
);
5352 ds_put_char(ds
, ')');
5353 ds_put_char(ds
, '\n');
5357 shash_destroy(&ofproto_shash
);
5362 ofproto_unixctl_dpif_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
5363 const char *argv
[] OVS_UNUSED
, void *aux OVS_UNUSED
)
5365 struct ds ds
= DS_EMPTY_INITIALIZER
;
5366 const struct shash_node
**backers
;
5369 backers
= shash_sort(&all_dpif_backers
);
5370 for (i
= 0; i
< shash_count(&all_dpif_backers
); i
++) {
5371 dpif_show_backer(backers
[i
]->data
, &ds
);
5375 unixctl_command_reply(conn
, ds_cstr(&ds
));
5380 ofproto_unixctl_dpif_dump_flows(struct unixctl_conn
*conn
,
5381 int argc OVS_UNUSED
, const char *argv
[],
5382 void *aux OVS_UNUSED
)
5384 const struct ofproto_dpif
*ofproto
;
5386 struct ds ds
= DS_EMPTY_INITIALIZER
;
5388 struct dpif_flow_dump
*flow_dump
;
5389 struct dpif_flow_dump_thread
*flow_dump_thread
;
5393 ofproto
= ofproto_dpif_lookup_by_name(argv
[argc
- 1]);
5395 unixctl_command_reply_error(conn
, "no such bridge");
5399 bool verbosity
= false;
5401 bool set_names
= false;
5402 for (int i
= 1; i
< argc
- 1; i
++) {
5403 if (!strcmp(argv
[i
], "-m")) {
5405 } else if (!strcmp(argv
[i
], "--names")) {
5408 } else if (!strcmp(argv
[i
], "--no-names")) {
5417 struct hmap
*portno_names
= NULL
;
5419 portno_names
= xmalloc(sizeof *portno_names
);
5420 hmap_init(portno_names
);
5422 struct dpif_port dpif_port
;
5423 struct dpif_port_dump port_dump
;
5424 DPIF_PORT_FOR_EACH (&dpif_port
, &port_dump
, ofproto
->backer
->dpif
) {
5425 odp_portno_names_set(portno_names
, dpif_port
.port_no
,
5431 flow_dump
= dpif_flow_dump_create(ofproto
->backer
->dpif
, false, NULL
);
5432 flow_dump_thread
= dpif_flow_dump_thread_create(flow_dump
);
5433 while (dpif_flow_dump_next(flow_dump_thread
, &f
, 1)) {
5436 if (odp_flow_key_to_flow(f
.key
, f
.key_len
, &flow
) == ODP_FIT_ERROR
5437 || xlate_lookup_ofproto(ofproto
->backer
, &flow
, NULL
) != ofproto
) {
5442 odp_format_ufid(&f
.ufid
, &ds
);
5443 ds_put_cstr(&ds
, " ");
5445 odp_flow_format(f
.key
, f
.key_len
, f
.mask
, f
.mask_len
,
5446 portno_names
, &ds
, verbosity
);
5447 ds_put_cstr(&ds
, ", ");
5448 dpif_flow_stats_format(&f
.stats
, &ds
);
5449 ds_put_cstr(&ds
, ", actions:");
5450 format_odp_actions(&ds
, f
.actions
, f
.actions_len
, portno_names
);
5451 ds_put_char(&ds
, '\n');
5453 dpif_flow_dump_thread_destroy(flow_dump_thread
);
5454 error
= dpif_flow_dump_destroy(flow_dump
);
5458 ds_put_format(&ds
, "dpif/dump_flows failed: %s", ovs_strerror(errno
));
5459 unixctl_command_reply_error(conn
, ds_cstr(&ds
));
5461 unixctl_command_reply(conn
, ds_cstr(&ds
));
5464 odp_portno_names_destroy(portno_names
);
5465 hmap_destroy(portno_names
);
5472 ofproto_unixctl_dpif_show_dp_features(struct unixctl_conn
*conn
,
5473 int argc
, const char *argv
[],
5474 void *aux OVS_UNUSED
)
5476 struct ds ds
= DS_EMPTY_INITIALIZER
;
5477 const char *br
= argv
[argc
-1];
5478 struct ofproto_dpif
*ofproto
= ofproto_dpif_lookup_by_name(br
);
5481 unixctl_command_reply_error(conn
, "no such bridge");
5485 dpif_show_support(&ofproto
->backer
->bt_support
, &ds
);
5486 unixctl_command_reply(conn
, ds_cstr(&ds
));
5490 ofproto_unixctl_dpif_set_dp_features(struct unixctl_conn
*conn
,
5491 int argc
, const char *argv
[],
5492 void *aux OVS_UNUSED
)
5494 struct ds ds
= DS_EMPTY_INITIALIZER
;
5495 const char *br
= argv
[1];
5496 const char *name
, *value
;
5497 struct ofproto_dpif
*ofproto
= ofproto_dpif_lookup_by_name(br
);
5501 unixctl_command_reply_error(conn
, "no such bridge");
5505 name
= argc
> 2 ? argv
[2] : NULL
;
5506 value
= argc
> 3 ? argv
[3] : NULL
;
5507 changed
= dpif_set_support(&ofproto
->backer
->rt_support
,
5508 &ofproto
->backer
->bt_support
,
5511 xlate_set_support(ofproto
, &ofproto
->backer
->rt_support
);
5512 udpif_flush(ofproto
->backer
->udpif
);
5514 unixctl_command_reply(conn
, ds_cstr(&ds
));
static void
ofproto_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_mcast_snooping_flush, NULL);
    unixctl_command_register("mdb/show", "bridge", 1, 1,
                             ofproto_unixctl_mcast_snooping_show, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
                             NULL);
    unixctl_command_register("dpif/show-dp-features", "bridge", 1, 1,
                             ofproto_unixctl_dpif_show_dp_features, NULL);
    unixctl_command_register("dpif/dump-flows",
                             "[-m] [--names | --no-names] bridge", 1, INT_MAX,
                             ofproto_unixctl_dpif_dump_flows, NULL);
    unixctl_command_register("dpif/set-dp-features", "bridge", 1, 3,
                             ofproto_unixctl_dpif_set_dp_features, NULL);
}
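/* Example invocations of the commands registered above (illustrative, not
 * part of the original file; "br0" is a placeholder bridge name):
 *
 *     ovs-appctl fdb/show br0
 *     ovs-appctl mdb/flush br0
 *     ovs-appctl dpif/show
 *     ovs-appctl dpif/dump-flows -m --names br0
 *     ovs-appctl dpif/set-dp-features br0 ct_state false
 *
 * Each command dispatches to the unixctl handler named in its
 * unixctl_command_register() call. */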
5548 ofp_port_to_odp_port(const struct ofproto_dpif
*ofproto
, ofp_port_t ofp_port
)
5550 const struct ofport_dpif
*ofport
= ofp_port_to_ofport(ofproto
, ofp_port
);
5551 return ofport
? ofport
->odp_port
: ODPP_NONE
;
5554 struct ofport_dpif
*
5555 odp_port_to_ofport(const struct dpif_backer
*backer
, odp_port_t odp_port
)
5557 struct ofport_dpif
*port
;
5559 ovs_rwlock_rdlock(&backer
->odp_to_ofport_lock
);
5560 HMAP_FOR_EACH_IN_BUCKET (port
, odp_port_node
, hash_odp_port(odp_port
),
5561 &backer
->odp_to_ofport_map
) {
5562 if (port
->odp_port
== odp_port
) {
5563 ovs_rwlock_unlock(&backer
->odp_to_ofport_lock
);
5568 ovs_rwlock_unlock(&backer
->odp_to_ofport_lock
);
5573 odp_port_to_ofp_port(const struct ofproto_dpif
*ofproto
, odp_port_t odp_port
)
5575 struct ofport_dpif
*port
;
5577 port
= odp_port_to_ofport(ofproto
->backer
, odp_port
);
5578 if (port
&& &ofproto
->up
== port
->up
.ofproto
) {
5579 return port
->up
.ofp_port
;
5585 /* 'match' is non-const to allow for temporary modifications. Any changes are
5586 * restored before returning. */
5588 ofproto_dpif_add_internal_flow(struct ofproto_dpif
*ofproto
,
5589 struct match
*match
, int priority
,
5590 uint16_t idle_timeout
,
5591 const struct ofpbuf
*ofpacts
,
5592 struct rule
**rulep
)
5594 struct ofputil_flow_mod fm
;
5595 struct rule_dpif
*rule
;
5598 fm
= (struct ofputil_flow_mod
) {
5599 .buffer_id
= UINT32_MAX
,
5600 .priority
= priority
,
5601 .table_id
= TBL_INTERNAL
,
5602 .command
= OFPFC_ADD
,
5603 .idle_timeout
= idle_timeout
,
5604 .flags
= OFPUTIL_FF_HIDDEN_FIELDS
| OFPUTIL_FF_NO_READONLY
,
5605 .ofpacts
= ofpacts
->data
,
5606 .ofpacts_len
= ofpacts
->size
,
5608 minimatch_init(&fm
.match
, match
);
5609 error
= ofproto_flow_mod(&ofproto
->up
, &fm
);
5610 minimatch_destroy(&fm
.match
);
5613 VLOG_ERR_RL(&rl
, "failed to add internal flow (%s)",
5614 ofperr_to_string(error
));
5619 rule
= rule_dpif_lookup_in_table(ofproto
,
5620 ofproto_dpif_get_tables_version(ofproto
),
5621 TBL_INTERNAL
, &match
->flow
, &match
->wc
);
5631 ofproto_dpif_delete_internal_flow(struct ofproto_dpif
*ofproto
,
5632 struct match
*match
, int priority
)
5634 struct ofputil_flow_mod fm
;
5637 fm
= (struct ofputil_flow_mod
) {
5638 .buffer_id
= UINT32_MAX
,
5639 .priority
= priority
,
5640 .table_id
= TBL_INTERNAL
,
5641 .out_port
= OFPP_ANY
,
5642 .out_group
= OFPG_ANY
,
5643 .flags
= OFPUTIL_FF_HIDDEN_FIELDS
| OFPUTIL_FF_NO_READONLY
,
5644 .command
= OFPFC_DELETE_STRICT
,
5646 minimatch_init(&fm
.match
, match
);
5647 error
= ofproto_flow_mod(&ofproto
->up
, &fm
);
5648 minimatch_destroy(&fm
.match
);
5651 VLOG_ERR_RL(&rl
, "failed to delete internal flow (%s)",
5652 ofperr_to_string(error
));
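/* Illustrative usage sketch (not part of the original file): callers pair the
 * two helpers above, installing a hidden flow in TBL_INTERNAL and later
 * removing it with the same 'match' and 'priority':
 *
 *     struct rule *rule;
 *     ofproto_dpif_add_internal_flow(ofproto, &match, priority, 0,
 *                                    &ofpacts, &rule);
 *     ...
 *     ofproto_dpif_delete_internal_flow(ofproto, &match, priority);
 *
 * Both paths go through ofproto_flow_mod() with OFPUTIL_FF_HIDDEN_FIELDS |
 * OFPUTIL_FF_NO_READONLY, so the flows live in the hidden internal table
 * rather than among ordinary OpenFlow flows. */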
5660 meter_get_features(const struct ofproto
*ofproto_
,
5661 struct ofputil_meter_features
*features
)
5663 const struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
5665 dpif_meter_get_features(ofproto
->backer
->dpif
, features
);
5669 meter_set(struct ofproto
*ofproto_
, ofproto_meter_id
*meter_id
,
5670 struct ofputil_meter_config
*config
)
5672 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
5674 /* Provider ID unknown. Use backer to allocate a new DP meter */
5675 if (meter_id
->uint32
== UINT32_MAX
) {
5676 if (!ofproto
->backer
->meter_ids
) {
5677 return EFBIG
; /* Datapath does not support meter. */
5680 if(!id_pool_alloc_id(ofproto
->backer
->meter_ids
, &meter_id
->uint32
)) {
5681 return ENOMEM
; /* Can't allocate a DP meter. */
5685 switch (dpif_meter_set(ofproto
->backer
->dpif
, meter_id
, config
)) {
5688 case EFBIG
: /* meter_id out of range */
5689 case ENOMEM
: /* Cannot allocate meter */
5690 return OFPERR_OFPMMFC_OUT_OF_METERS
;
5691 case EBADF
: /* Unsupported flags */
5692 return OFPERR_OFPMMFC_BAD_FLAGS
;
5693 case EINVAL
: /* Too many bands */
5694 return OFPERR_OFPMMFC_OUT_OF_BANDS
;
5695 case ENODEV
: /* Unsupported band type */
5696 return OFPERR_OFPMMFC_BAD_BAND
;
5697 case EDOM
: /* Rate must be non-zero */
5698 return OFPERR_OFPMMFC_BAD_RATE
;
5700 return OFPERR_OFPMMFC_UNKNOWN
;
5705 meter_get(const struct ofproto
*ofproto_
, ofproto_meter_id meter_id
,
5706 struct ofputil_meter_stats
*stats
, uint16_t n_bands
)
5708 const struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
5710 if (!dpif_meter_get(ofproto
->backer
->dpif
, meter_id
, stats
, n_bands
)) {
5713 return OFPERR_OFPMMFC_UNKNOWN_METER
;
5716 struct free_meter_id_args
{
5717 struct ofproto_dpif
*ofproto
;
5718 ofproto_meter_id meter_id
;
5722 free_meter_id(struct free_meter_id_args
*args
)
5724 struct ofproto_dpif
*ofproto
= args
->ofproto
;
5726 dpif_meter_del(ofproto
->backer
->dpif
, args
->meter_id
, NULL
, 0);
5727 id_pool_free_id(ofproto
->backer
->meter_ids
, args
->meter_id
.uint32
);
5732 meter_del(struct ofproto
*ofproto_
, ofproto_meter_id meter_id
)
5734 struct free_meter_id_args
*arg
= xmalloc(sizeof *arg
);
5736 /* Before a meter can be deleted, Openflow spec requires all rules
5737 * referring to the meter to be (automatically) removed before the
5738 * meter is deleted. However, since vswitchd is multi-threaded,
5739 * those rules and their actions remain accessible by other threads,
5740 * especially by the handler and revalidator threads.
5741 * Postpone meter deletion after RCU grace period, so that ongoing
5742 * upcall translation or flow revalidation can complete. */
5743 arg
->ofproto
= ofproto_dpif_cast(ofproto_
);
5744 arg
->meter_id
= meter_id
;
5745 ovsrcu_postpone(free_meter_id
, arg
);
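/* Illustrative note (not part of the original file): ovsrcu_postpone(cb, arg)
 * defers cb(arg) until all threads have passed through an RCU quiescent
 * state, so free_meter_id() only deletes the datapath meter and returns its
 * id to the pool once no handler or revalidator thread can still be
 * executing translations that reference it. */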
5748 const struct ofproto_class ofproto_dpif_class
= {
5762 NULL
, /* get_memory_usage. */
5763 type_get_memory_usage
,
5783 port_is_lacp_current
,
5784 port_get_lacp_stats
,
5785 NULL
, /* rule_choose_table */
5789 NULL
, /* rule_delete */
5794 packet_xlate_revert
,
5812 aa_vlan_get_queue_size
,
5819 get_stp_port_status
,
5824 get_rstp_port_status
,
5831 is_mirror_output_bundle
,
5832 forward_bpdu_changed
,
5833 set_mac_table_config
,
5835 set_mcast_snooping_port
,
5840 group_alloc
, /* group_alloc */
5841 group_construct
, /* group_construct */
5842 group_destruct
, /* group_destruct */
5843 group_dealloc
, /* group_dealloc */
5844 NULL
, /* group_modify */
5845 group_get_stats
, /* group_get_stats */
5846 get_datapath_version
, /* get_datapath_version */
5848 ct_flush
, /* ct_flush */