/*
 * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "ofproto/ofproto-provider.h"
26 #include "byte-order.h"
31 #include "dynamic-string.h"
32 #include "fail-open.h"
36 #include "mac-learning.h"
37 #include "multipath.h"
44 #include "ofp-print.h"
45 #include "ofproto-dpif-sflow.h"
46 #include "poll-loop.h"
48 #include "unaligned.h"
50 #include "vlan-bitmap.h"
53 VLOG_DEFINE_THIS_MODULE(ofproto_dpif
);
55 COVERAGE_DEFINE(ofproto_dpif_ctlr_action
);
56 COVERAGE_DEFINE(ofproto_dpif_expired
);
57 COVERAGE_DEFINE(ofproto_dpif_no_packet_in
);
58 COVERAGE_DEFINE(ofproto_dpif_xlate
);
59 COVERAGE_DEFINE(facet_changed_rule
);
60 COVERAGE_DEFINE(facet_invalidated
);
61 COVERAGE_DEFINE(facet_revalidate
);
62 COVERAGE_DEFINE(facet_unexpected
);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 32
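/* Illustration: each "resubmit" action re-enters flow translation through
 * xlate_table_action(), which increments the translation context's 'recurse'
 * counter (see struct action_xlate_ctx below).  A table that resubmits back
 * into itself would otherwise loop forever; the limit cuts such a chain off
 * after 32 levels. */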
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255);
struct rule_dpif {
    struct rule up;

    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
};
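/* Example of the accounting split above: when a facet that matched this rule
 * is deleted, its 10 forwarded packets (say) are folded into 'packet_count'
 * here; while that facet was alive, the same 10 packets were visible only
 * through the facet and its subfacets.  Summing a rule's counters with its
 * live facets' counters therefore never double-counts a packet. */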
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *, uint8_t table);

static void flow_push_stats(const struct rule_dpif *, const struct flow *,
                            uint64_t packets, uint64_t bytes,
                            long long int used);

static uint32_t rule_calculate_tag(const struct flow *,
                                   const struct flow_wildcards *,
                                   uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
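/* The assertion above is what keeps the mirror bitmaps honest: mirror slot
 * 'idx' is represented as bit MIRROR_MASK_C(1) << idx of a mirror_mask_t
 * (see mirror_update_dups() and mirror_set() below), so a 32-bit mask is
 * exactly wide enough for MAX_MIRRORS == 32 slots. */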
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPPC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(struct ofproto_dpif *,
                                            uint16_t in_port, bool warn);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does
 * not have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute?  We
     * want to execute them if we are actually processing a packet, or if we
     * are accounting for packets that the datapath has processed, but not if
     * we are just revalidating. */
    bool may_learn;

    /* If nonnull, called just before executing a resubmit action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *);

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint16_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
                                    const union ofp_action *in, size_t n_in);
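/* Sketch of the calling convention implied by the declarations above (the
 * identifiers 'flow', 'packet', 'actions', and 'n_actions' are placeholders,
 * not code copied from a real caller):
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf *odp_actions;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, flow.vlan_tci, packet);
 *     odp_actions = xlate_actions(&ctx, actions, n_actions);
 *     ...examine ctx.tags, ctx.may_set_up_flow, ctx.has_normal, ...
 *     ofpbuf_delete(odp_actions);
 */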
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
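/* Concrete instance of the one-to-many case: with VLAN splinters in play,
 * the same userspace flow may be backed by one subfacet whose datapath key
 * carries a VLAN tag and another whose key does not, so a single facet owns
 * two subfacets differing only in 'initial_tci' (see struct subfacet). */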
struct facet {
    /* Owners. */
    struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
    struct list list_node;      /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;     /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count; /* Number of packets from last stats push. */
    uint64_t prev_byte_count;   /* Number of bytes from last stats push. */
    long long int prev_used;    /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;   /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool may_install;           /* Reassess actions for every packet? */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    tag_type tags;              /* Tags that would require revalidation. */
    mirror_mask_t mirrors;      /* Bitmap of dependent mirrors. */
};

static struct facet *facet_create(struct rule_dpif *, const struct flow *);
static void facet_remove(struct ofproto_dpif *, struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct ofproto_dpif *, struct facet *);

static bool execute_controller_action(struct ofproto_dpif *,
                                      const struct flow *,
                                      const struct nlattr *odp_actions,
                                      size_t actions_len,
                                      struct ofpbuf *packet);

static void facet_flush_stats(struct ofproto_dpif *, struct facet *);

static void facet_update_time(struct ofproto_dpif *, struct facet *,
                              long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_account(struct ofproto_dpif *, struct facet *);

static bool facet_is_controller_flow(struct facet *);

/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'facets' list. */
    struct facet *facet;        /* Owning facet. */

    /* Key.
     *
     * To save memory in the common case, 'key' is NULL if 'key_fitness' is
     * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
     * regenerate the ODP flow key from ->facet->flow. */
    enum odp_key_fitness key_fitness;
    struct nlattr *key;         /* Datapath flow key, if any. */
    size_t key_len;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet,
     * but may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    bool installed;             /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */
};

static struct subfacet *subfacet_create(struct ofproto_dpif *, struct facet *,
                                        enum odp_key_fitness,
                                        const struct nlattr *key,
                                        size_t key_len, ovs_be16 initial_tci);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key,
                                      size_t key_len);
static void subfacet_destroy(struct ofproto_dpif *, struct subfacet *);
static void subfacet_destroy__(struct ofproto_dpif *, struct subfacet *);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct ofproto_dpif *, struct subfacet *,
                                 long long int used);
static void subfacet_update_stats(struct ofproto_dpif *, struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct ofproto_dpif *, struct subfacet *,
                                  const struct ofpbuf *packet);
static int subfacet_install(struct ofproto_dpif *, struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct ofproto_dpif *, struct subfacet *);
struct ofport_dpif {
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs
     * when VLAN devices are not used.  When broken device drivers are no
     * longer in widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static uint16_t vsp_vlandev_to_realdev(const struct ofproto_dpif *,
                                       uint16_t vlandev, int *vid);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif *dpif;
    int max_ports;

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_bonded_bundles;

    /* Expiration. */
    struct timer next_expiration;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
                                        uint32_t odp_port);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);

#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct ofproto_dpif *);

static void send_netflow_active_timeouts(struct ofproto_dpif *);

static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t
compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
                     const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Factory functions. */

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    return dp_enumerate_names(type, names);
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

/* Basic life-cycle. */

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}
static int
construct(struct ofproto *ofproto_, int *n_tablesp)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *name = ofproto->up.name;
    int error;
    int i;

    error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
        return error;
    }

    ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
    ofproto->n_matches = 0;

    dpif_flow_flush(ofproto->dpif);
    dpif_recv_purge(ofproto->dpif);

    error = dpif_recv_set_mask(ofproto->dpif,
                               ((1u << DPIF_UC_MISS) |
                                (1u << DPIF_UC_ACTION)));
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
        dpif_close(ofproto->dpif);
        return error;
    }

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create();
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    timer_set_duration(&ofproto->next_expiration, 1000);

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }
    ofproto->need_revalidate = false;
    tag_set_init(&ofproto->revalidate_set);

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));

    *n_tablesp = N_TABLES;
    memset(&ofproto->stats, 0, sizeof ofproto->stats);
    return 0;
}
static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct classifier *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, table, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    dpif_close(ofproto->dpif);
}
static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    unsigned int work;

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple
     * batches because in some cases handling a packet can cause another
     * packet to be queued almost immediately as part of the return flow.
     * Both optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }
    return 0;
}
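/* Example of the batch arithmetic: with FLOW_MISS_MAX_BATCH == 50, a first
 * call to handle_upcalls() that handles 30 upcalls leaves room for a second
 * batch of at most 20; a batch that handles nothing (or fails) ends the loop
 * before the budget is spent. */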
static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }
    dpif_run(ofproto->dpif);

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (timer_expired(&ofproto->next_expiration)) {
        int delay = expire(ofproto);
        timer_set_duration(&ofproto->next_expiration, delay);
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->revalidate_set);

    /* Now revalidate if there's anything to do. */
    if (ofproto->need_revalidate
        || !tag_set_is_empty(&ofproto->revalidate_set)) {
        struct tag_set revalidate_set = ofproto->revalidate_set;
        bool revalidate_all = ofproto->need_revalidate;
        struct facet *facet, *next;

        /* Clear the revalidation flags. */
        tag_set_init(&ofproto->revalidate_set);
        ofproto->need_revalidate = false;

        HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
            if (revalidate_all
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(ofproto, facet);
            }
        }
    }

    return 0;
}

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->dpif);
    dpif_recv_wait(ofproto->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else {
        timer_wait(&ofproto->next_expiration);
    }
}

static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling
         * it individually since we are about to blow away all the facets
         * with dpif_flow_flush(). */
        struct subfacet *subfacet;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            subfacet->installed = false;
            subfacet->dp_packet_count = 0;
            subfacet->dp_byte_count = 0;
        }
        facet_remove(ofproto, facet);
    }
    dpif_flow_flush(ofproto->dpif);
}
static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, uint32_t *actions)
{
    *arp_match_ip = true;
    *actions = ((1u << OFPAT_OUTPUT) |
                (1u << OFPAT_SET_VLAN_VID) |
                (1u << OFPAT_SET_VLAN_PCP) |
                (1u << OFPAT_STRIP_VLAN) |
                (1u << OFPAT_SET_DL_SRC) |
                (1u << OFPAT_SET_DL_DST) |
                (1u << OFPAT_SET_NW_SRC) |
                (1u << OFPAT_SET_NW_DST) |
                (1u << OFPAT_SET_NW_TOS) |
                (1u << OFPAT_SET_TP_SRC) |
                (1u << OFPAT_SET_TP_DST) |
                (1u << OFPAT_ENQUEUE));
}

static void
get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->dpif, &s);
    put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
    put_32aligned_be64(&ots->matched_count,
                       htonll(s.n_hit + ofproto->n_matches));
}

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_);
    }

    return 0;
}
static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}

static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}

static void
port_reconfigured(struct ofport *port_, ovs_be32 old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    ovs_be32 changed = old_config ^ port->up.opp.config;

    if (changed & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP |
                        OFPPC_NO_FWD | OFPPC_NO_FLOOD)) {
        ofproto->need_revalidate = true;

        if (changed & htonl(OFPPC_NO_FLOOD) && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}

static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up);
            }
            ofproto->need_revalidate = true;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->need_revalidate = true;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->need_revalidate = true;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}

static int
get_cfm_fault(const struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
}

static int
get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
                     size_t *n_rmps)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
        return 0;
    } else {
        return -1;
    }
}

/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}
/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->need_revalidate = true;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}

static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}

static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        ovs_be32 of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
                     != stp_forward_in_state(state);

        ofproto->need_revalidate = true;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = (ofport->up.opp.state & htonl(~OFPPS_STP_MASK))
                   | htonl(state == STP_LISTENING ? OFPPS_STP_LISTEN
                           : state == STP_LEARNING ? OFPPS_STP_LEARN
                           : state == STP_FORWARDING ? OFPPS_STP_FORWARD
                           : state == STP_BLOCKING ? OFPPS_STP_BLOCK
                           : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}

static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}

static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }
    }
}

static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}

/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}

static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}
static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->need_revalidate = true;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->need_revalidate = true;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->need_revalidate = true;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
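/* Arithmetic note on set_queues(): the caller supplies a 6-bit DSCP value,
 * and "dscp << 2" positions it in the upper six bits of the IP TOS byte
 * (IP_DSCP_MASK keeps only those bits).  For example, DSCP 46 (EF) yields
 * the on-the-wire TOS byte 0xb8. */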
/* Bundles. */

/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->need_revalidate = true;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            tag_set_add(&o->revalidate_set, e->tag);
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}

static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->need_revalidate = true;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}

static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp,
                uint32_t bond_stable_id)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->need_revalidate = true;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        port->bundle->ofproto->need_revalidate = true;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    port->bond_stable_id = bond_stable_id;

    return true;
}

static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->need_revalidate = true;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}
static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    assert(s->n_slaves == 1 || s->bond != NULL);
    assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->need_revalidate = true;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL,
                             s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode. */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = (unsigned long *) s->trunks;
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = (unsigned long *) s->trunks;
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->need_revalidate = true;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->need_revalidate = true;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->bond_stable_id,
                                port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}

static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}
static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
                 lacp_negotiated(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
/* Mirrors. */

static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}

static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}

/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
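/* Worked example for mirror_update_dups(): if mirrors 1 and 3 share an
 * output (same 'out' bundle or same 'out_vlan'), the second pass sets bits 1
 * and 3 in both m1->dup_mirrors and m3->dup_mirrors, so that later
 * processing can recognize the two mirrors as duplicates of one another. */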
static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->need_revalidate = true;
    mac_learning_flush(ofproto->ml);
    mirror_update_dups(ofproto);

    return 0;
}
static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->need_revalidate = true;
    mac_learning_flush(ofproto->ml);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);
}

static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}

static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        ofproto->need_revalidate = true;
        mac_learning_flush(ofproto->ml);
    }
    return 0;
}

static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    /* Revalidate cached flows whenever forward_bpdu option changes. */
    ofproto->need_revalidate = true;
}
2144 static struct ofport_dpif
*
2145 get_ofp_port(struct ofproto_dpif
*ofproto
, uint16_t ofp_port
)
2147 struct ofport
*ofport
= ofproto_get_port(&ofproto
->up
, ofp_port
);
2148 return ofport
? ofport_dpif_cast(ofport
) : NULL
;
2151 static struct ofport_dpif
*
2152 get_odp_port(struct ofproto_dpif
*ofproto
, uint32_t odp_port
)
2154 return get_ofp_port(ofproto
, odp_port_to_ofp_port(odp_port
));
2158 ofproto_port_from_dpif_port(struct ofproto_port
*ofproto_port
,
2159 struct dpif_port
*dpif_port
)
2161 ofproto_port
->name
= dpif_port
->name
;
2162 ofproto_port
->type
= dpif_port
->type
;
2163 ofproto_port
->ofp_port
= odp_port_to_ofp_port(dpif_port
->port_no
);
static void
port_run(struct ofport_dpif *ofport)
{
    bool enable = netdev_get_carrier(ofport->up.netdev);

    if (ofport->cfm) {
        cfm_run(ofport->cfm);

        if (cfm_should_send_ccm(ofport->cfm)) {
            struct ofpbuf packet;

            ofpbuf_init(&packet, 0);
            cfm_compose_ccm(ofport->cfm, &packet, ofport->up.opp.hw_addr);
            send_packet(ofport, &packet);
            ofpbuf_uninit(&packet);
        }

        enable = enable && !cfm_get_fault(ofport->cfm)
            && cfm_get_opup(ofport->cfm);
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->need_revalidate = true;
        }
    }

    ofport->may_enable = enable;
}
static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}
static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
    }
    return error;
}
static int
port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    uint16_t odp_port;
    int error;

    error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
    if (!error) {
        *ofp_portp = odp_port_to_ofp_port(odp_port);
    }
    return error;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
    if (!error) {
        struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
        if (ofport) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}
static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport->odp_port == OVSP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
/* Account packets for LOCAL port. */
static void
ofproto_update_local_port_stats(const struct ofproto *ofproto_,
                                size_t tx_size, size_t rx_size)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (rx_size) {
        ofproto->stats.rx_packets++;
        ofproto->stats.rx_bytes += rx_size;
    }
    if (tx_size) {
        ofproto->stats.tx_packets++;
        ofproto->stats.tx_bytes += tx_size;
    }
}
struct port_dump_state {
    struct dpif_port_dump dump;
    bool done;
};

static int
port_dump_start(const struct ofproto *ofproto_, void **statep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state;

    *statep = state = xmalloc(sizeof *state);
    dpif_port_dump_start(&state->dump, ofproto->dpif);
    state->done = false;
    return 0;
}
static int
port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
               struct ofproto_port *port)
{
    struct port_dump_state *state = state_;
    struct dpif_port dpif_port;

    if (dpif_port_dump_next(&state->dump, &dpif_port)) {
        ofproto_port_from_dpif_port(port, &dpif_port);
        return 0;
    } else {
        int error = dpif_port_dump_done(&state->dump);

        state->done = true;
        return error ? error : EOF;
    }
}
static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (!state->done) {
        dpif_port_dump_done(&state->dump);
    }
    free(state);
    return 0;
}
static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    return dpif_port_poll(ofproto->dpif, devnamep);
}
static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->dpif);
}
static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    ovs_be16 initial_tci;
    struct list packets;
};

struct flow_miss_op {
    union dpif_op dpif_op;
    struct subfacet *subfacet;
};
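
/* In outline, handle_miss_upcalls() below uses these structures like so (a
 * sketch of the control flow, not additional code):
 *
 *     hmap_init(&todo);
 *     for each upcall {
 *         miss = flow_miss_create(&todo, &flow, ...);   // coalesces packets
 *         list_push_back(&miss->packets, ...);          //   with equal flows
 *     }
 *     for each miss in todo {
 *         handle_flow_miss(ofproto, miss, ops, &n_ops); // queues dpif ops
 *     }
 *     dpif_operate(ofproto->dpif, dpif_ops, n_ops);     // one batched call
 */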
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet;
    pin.in_port = flow->in_port;
    pin.reason = OFPR_NO_MATCH;
    pin.buffer_id = 0;          /* not yet known */
    pin.send_len = 0;           /* not used for flow table misses */
    connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow);
}
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_ACTION to each
 * OpenFlow controller as necessary according to their individual
 * configurations.
 *
 * 'send_len' should be the number of bytes of 'packet' to send to the
 * controller, as specified in the action that caused the packet to be sent. */
static void
send_packet_in_action(struct ofproto_dpif *ofproto, struct ofpbuf *packet,
                      uint64_t userdata, const struct flow *flow)
{
    struct ofputil_packet_in pin;
    struct user_action_cookie cookie;

    memcpy(&cookie, &userdata, sizeof(cookie));

    pin.packet = packet;
    pin.in_port = flow->in_port;
    pin.reason = OFPR_ACTION;
    pin.buffer_id = 0;          /* not yet known */
    pin.send_len = cookie.data;
    connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow);
}
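
/* Returns true if 'flow' is a control flow (802.1ag CFM, LACP, or STP) that
 * this switch handles internally, in which case 'packet', if nonnull, is
 * passed along to the appropriate module for processing. */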
static bool
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofpbuf *packet)
{
    struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);

    if (!ofport) {
        return false;
    }

    if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return true;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return true;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return true;
    }

    return false;
}
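
/* Returns the "struct flow_miss" in 'todo' for 'flow', creating one and
 * inserting it into 'todo' if none exists yet. */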
static struct flow_miss *
flow_miss_create(struct hmap *todo, const struct flow *flow,
                 enum odp_key_fitness key_fitness,
                 const struct nlattr *key, size_t key_len,
                 ovs_be16 initial_tci)
{
    uint32_t hash = flow_hash(flow, 0);
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    miss = xmalloc(sizeof *miss);
    hmap_insert(todo, &miss->hmap_node, hash);
    miss->flow = *flow;
    miss->key_fitness = key_fitness;
    miss->key = key;
    miss->key_len = key_len;
    miss->initial_tci = initial_tci;
    list_init(&miss->packets);

    return miss;
}
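
/* Handles 'miss', the batch of packets queued for 'miss->flow', sending
 * packet-ins where required and appending the dpif operations to be batched
 * (packet executions and a flow install) to 'ops', advancing '*n_ops' for
 * each operation added. */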
static void
handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
                 struct flow_miss_op *ops, size_t *n_ops)
{
    const struct flow *flow = &miss->flow;
    struct ofpbuf *packet, *next_packet;
    struct subfacet *subfacet;
    struct facet *facet;

    facet = facet_lookup_valid(ofproto, flow);
    if (!facet) {
        struct rule_dpif *rule;

        rule = rule_dpif_lookup(ofproto, flow, 0);
        if (!rule) {
            /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
            struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port);
            if (port) {
                if (port->up.opp.config & htonl(OFPPC_NO_PACKET_IN)) {
                    COVERAGE_INC(ofproto_dpif_no_packet_in);
                    /* XXX install 'drop' flow entry */
                    return;
                }
            } else {
                VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
                             flow->in_port);
            }

            LIST_FOR_EACH (packet, list_node, &miss->packets) {
                send_packet_in_miss(ofproto, packet, flow);
            }
            return;
        }

        facet = facet_create(rule, flow);
    }

    subfacet = subfacet_create(ofproto, facet,
                               miss->key_fitness, miss->key, miss->key_len,
                               miss->initial_tci);

    LIST_FOR_EACH_SAFE (packet, next_packet, list_node, &miss->packets) {
        struct dpif_flow_stats stats;

        list_remove(&packet->list_node);
        ofproto->n_matches++;

        if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
            /*
             * Extra-special case for fail-open mode.
             *
             * We are in fail-open mode and the packet matched the fail-open
             * rule, but we are connected to a controller too.  We should send
             * the packet up to the controller in the hope that it will try to
             * set up a flow and thereby allow us to exit fail-open.
             *
             * See the top-level comment in fail-open.c for more information.
             */
            send_packet_in_miss(ofproto, packet, flow);
        }

        if (!facet->may_install || !subfacet->actions) {
            subfacet_make_actions(ofproto, subfacet, packet);
        }

        /* Credit statistics to subfacet for this packet.  We must do this now
         * because execute_controller_action() below may destroy 'packet'. */
        dpif_flow_stats_extract(&facet->flow, packet, &stats);
        subfacet_update_stats(ofproto, subfacet, &stats);

        if (!execute_controller_action(ofproto, &facet->flow,
                                       subfacet->actions,
                                       subfacet->actions_len, packet)
            && subfacet->actions_len > 0) {
            struct flow_miss_op *op = &ops[(*n_ops)++];
            struct dpif_execute *execute = &op->dpif_op.execute;

            if (flow->vlan_tci != subfacet->initial_tci) {
                /* This packet was received on a VLAN splinter port.  We added
                 * a VLAN to the packet to make the packet resemble the flow,
                 * but the actions were composed assuming that the packet
                 * contained no VLAN.  So, we must remove the VLAN header from
                 * the packet before trying to execute the actions. */
                eth_pop_vlan(packet);
            }

            op->subfacet = subfacet;
            execute->type = DPIF_OP_EXECUTE;
            execute->key = miss->key;
            execute->key_len = miss->key_len;
            execute->actions
                = (facet->may_install
                   ? subfacet->actions
                   : xmemdup(subfacet->actions, subfacet->actions_len));
            execute->actions_len = subfacet->actions_len;
            execute->packet = packet;
        }
    }

    if (facet->may_install && subfacet->key_fitness != ODP_FIT_TOO_LITTLE) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.flow_put;

        op->subfacet = subfacet;
        put->type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        put->actions = subfacet->actions;
        put->actions_len = subfacet->actions_len;
        put->stats = NULL;
    }
}
/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
 * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
 * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
 * what a flow key should contain.
 *
 * This function also includes some logic to help make VLAN splinters
 * transparent to the rest of the upcall processing logic.  In particular, if
 * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
 * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
 * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
 *
 * Sets '*initial_tci' to the VLAN TCI with which the packet was really
 * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
 * (This differs from the value returned in flow->vlan_tci only for packets
 * received on VLAN splinters.) */
static enum odp_key_fitness
ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
                              const struct nlattr *key, size_t key_len,
                              struct flow *flow, ovs_be16 *initial_tci,
                              struct ofpbuf *packet)
{
    enum odp_key_fitness fitness;
    uint16_t realdev;
    int vid;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        return fitness;
    }
    *initial_tci = flow->vlan_tci;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (realdev) {
        /* Cause the flow to be processed as if it came in on the real device
         * with the VLAN device's VLAN ID. */
        flow->in_port = realdev;
        flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);

        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to an
             * OpenFlow controller properly, so that it looks correct for
             * sFlow, and so that flow_extract() will get the correct vlan_tci
             * if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of a
             * struct dpif_upcall (see the large comment on that structure
             * definition), so pushing data on 'packet' is in general not a
             * good idea since it could overwrite 'key' or free it as a side
             * effect.  However, it's OK in this special case because we know
             * that 'packet' is inside a Netlink attribute: pushing 4 bytes
             * will just overwrite the 4-byte "struct nlattr", which is fine
             * since we don't need that header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }

        /* Let the caller know that we can't reproduce 'key' from 'flow'. */
        if (fitness == ODP_FIT_PERFECT) {
            fitness = ODP_FIT_TOO_MUCH;
        }
    }

    return fitness;
}
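
/* Handles the 'n_upcalls' flow miss upcalls in 'upcalls', grouping packets
 * with equal flows together and issuing the resulting dpif operations as a
 * single batch. */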
static void
handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss, *next_miss;
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    union dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        enum odp_key_fitness fitness;
        struct flow_miss *miss;
        ovs_be16 initial_tci;
        struct flow flow;

        /* Obtain metadata and check userspace/kernel agreement on flow match,
         * then set 'flow''s header pointers. */
        fitness = ofproto_dpif_extract_flow_key(ofproto,
                                                upcall->key, upcall->key_len,
                                                &flow, &initial_tci,
                                                upcall->packet);
        if (fitness == ODP_FIT_ERROR) {
            ofpbuf_delete(upcall->packet);
            continue;
        }
        flow_extract(upcall->packet, flow.skb_priority, flow.tun_id,
                     flow.in_port, &flow);

        /* Handle 802.1ag, LACP, and STP specially. */
        if (process_special(ofproto, &flow, upcall->packet)) {
            ofproto_update_local_port_stats(&ofproto->up,
                                            0, upcall->packet->size);
            ofpbuf_delete(upcall->packet);
            ofproto->n_matches++;
            continue;
        }

        /* Add other packets to a to-do list. */
        miss = flow_miss_create(&todo, &flow, fitness,
                                upcall->key, upcall->key_len, initial_tci);
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &todo) {
        handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
        ofpbuf_list_delete(&miss->packets);
        hmap_remove(&todo, &miss->hmap_node);
        free(miss);
    }
    assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
    hmap_destroy(&todo);

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(ofproto->dpif, dpif_ops, n_ops);

    /* Free memory and update facets. */
    for (i = 0; i < n_ops; i++) {
        struct flow_miss_op *op = &flow_miss_ops[i];
        struct dpif_execute *execute;
        struct dpif_flow_put *put;

        switch (op->dpif_op.type) {
        case DPIF_OP_EXECUTE:
            execute = &op->dpif_op.execute;
            if (op->subfacet->actions != execute->actions) {
                free((struct nlattr *) execute->actions);
            }
            ofpbuf_delete((struct ofpbuf *) execute->packet);
            break;

        case DPIF_OP_FLOW_PUT:
            put = &op->dpif_op.flow_put;
            if (!put->error) {
                op->subfacet->installed = true;
            }
            break;
        }
    }
}
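
/* Handles an upcall triggered by a userspace datapath action, which is either
 * an sFlow sample or a send-to-controller action.  Takes ownership of
 * 'upcall->packet'. */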
static void
handle_userspace_upcall(struct ofproto_dpif *ofproto,
                        struct dpif_upcall *upcall)
{
    struct user_action_cookie cookie;
    enum odp_key_fitness fitness;
    ovs_be16 initial_tci;
    struct flow flow;

    memcpy(&cookie, &upcall->userdata, sizeof(cookie));

    fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
                                            upcall->key_len, &flow,
                                            &initial_tci, upcall->packet);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_delete(upcall->packet);
        return;
    }

    if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
        if (ofproto->sflow) {
            dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
                                &cookie);
        }
    } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
        COVERAGE_INC(ofproto_dpif_ctlr_action);
        send_packet_in_action(ofproto, upcall->packet, upcall->userdata,
                              &flow);
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
    }
    ofpbuf_delete(upcall->packet);
}
static int
handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    int n_misses;
    int i;

    assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (i = 0; i < max_batch; i++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        int error;

        error = dpif_recv(ofproto->dpif, upcall);
        if (error) {
            break;
        }

        switch (upcall->type) {
        case DPIF_UC_ACTION:
            handle_userspace_upcall(ofproto, upcall);
            break;

        case DPIF_UC_MISS:
            /* Handle it later. */
            n_misses++;
            break;

        case DPIF_N_UC_TYPES:
        default:
            VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
                         upcall->type);
            break;
        }
    }

    handle_miss_upcalls(ofproto, misses, n_misses);

    return i;
}
/* Flow expiration. */

static int subfacet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct ofproto_dpif *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct ofproto_dpif *ofproto)
{
    struct rule_dpif *rule, *next_rule;
    struct classifier *table;
    int dp_max_idle;

    /* Update stats for each flow in the datapath. */
    update_stats(ofproto);

    /* Expire subfacets that have been idle too long. */
    dp_max_idle = subfacet_max_idle(ofproto);
    expire_subfacets(ofproto, dp_max_idle);

    /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, table, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            rule_expire(rule);
        }
    }

    /* All outstanding data in existing flows has been accounted, so it's a
     * good time to do bond rebalancing. */
    if (ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            if (bundle->bond) {
                bond_rebalance(bundle->bond, &ofproto->revalidate_set);
            }
        }
    }

    return MIN(dp_max_idle, 1000);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics. */
static void
update_stats(struct ofproto_dpif *p)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, p->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct subfacet *subfacet;

        subfacet = subfacet_find(p, key, key_len);
        if (subfacet && subfacet->installed) {
            struct facet *facet = subfacet->facet;

            if (stats->n_packets >= subfacet->dp_packet_count) {
                uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
                facet->packet_count += extra;
            } else {
                VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
            }

            if (stats->n_bytes >= subfacet->dp_byte_count) {
                facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
            } else {
                VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
            }

            subfacet->dp_packet_count = stats->n_packets;
            subfacet->dp_byte_count = stats->n_bytes;

            subfacet_update_time(p, subfacet, stats->used);
            facet_account(p, facet);
            facet_push_stats(facet);
        } else {
            if (!VLOG_DROP_WARN(&rl)) {
                struct ds s;

                ds_init(&s);
                odp_flow_key_format(key, key_len, &s);
                VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
                ds_destroy(&s);
            }

            COVERAGE_INC(facet_unexpected);
            /* There's a flow in the datapath that we know nothing about, or a
             * flow that shouldn't be installed but was anyway.  Delete it. */
            dpif_flow_del(p->dpif, key, key_len, NULL);
        }
    }
    dpif_flow_dump_done(&dump);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistics into its rule. */
static int
subfacet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and to cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At this point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->subfacets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
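
/* A worked example of the cutoff computation above, assuming
 * TIME_UPDATE_INTERVAL <= 100 so that BUCKET_WIDTH == 100 ms and
 * N_BUCKETS == 50: with 10,000 installed subfacets and a
 * flow_eviction_threshold of 1,000, buckets are summed starting from the
 * most-recently-used end until the subtotal reaches
 * MAX(1000, 10000 / 100) == 1000 subfacets.  If that happens while scanning
 * bucket 2, 'bucket' has been advanced to 3 and the function returns
 * 3 * 100 == 300 ms as the expiration cutoff. */

/* Destroys each subfacet in 'ofproto' that has been idle for longer than
 * 'dp_max_idle' milliseconds. */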
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
    long long int cutoff = time_msec() - dp_max_idle;
    struct subfacet *subfacet, *next_subfacet;

    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        if (subfacet->used < cutoff) {
            subfacet_destroy(ofproto, subfacet);
        }
    }
}
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * deletes it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
               && now > rule->used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(ofproto, facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
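
/* Facets. */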
/* Creates and returns a new facet owned by 'rule', given a 'flow'.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * the ofproto's classifier table.
 *
 * The facet will initially have no subfacets.  The caller should create (at
 * least) one subfacet with subfacet_create(). */
static struct facet *
facet_create(struct rule_dpif *rule, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    list_init(&facet->subfacets);
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    return facet;
}
static void
facet_free(struct facet *facet)
{
    free(facet);
}
/* If the 'actions_len' bytes of actions in 'odp_actions' are just a single
 * OVS_ACTION_ATTR_USERSPACE action, executes it internally and returns true.
 * Otherwise, returns false without doing anything. */
static bool
execute_controller_action(struct ofproto_dpif *ofproto,
                          const struct flow *flow,
                          const struct nlattr *odp_actions, size_t actions_len,
                          struct ofpbuf *packet)
{
    if (actions_len
        && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE
        && NLA_ALIGN(odp_actions->nla_len) == actions_len) {
        /* As an optimization, avoid a round-trip from userspace to kernel to
         * userspace.  This also avoids possibly filling up kernel packet
         * buffers along the way.
         *
         * This optimization will not accidentally catch sFlow
         * OVS_ACTION_ATTR_USERSPACE actions, since those are encapsulated
         * inside OVS_ACTION_ATTR_SAMPLE. */
        const struct nlattr *nla;

        nla = nl_attr_find_nested(odp_actions, OVS_USERSPACE_ATTR_USERDATA);
        send_packet_in_action(ofproto, packet, nl_attr_get_u64(nla), flow);
        return true;
    } else {
        return false;
    }
}
/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
 * 'odp_actions' on 'packet'.
 *
 * Takes ownership of 'packet'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct nlattr *odp_actions, size_t actions_len,
                    struct ofpbuf *packet)
{
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    int error;

    if (execute_controller_action(ofproto, flow, odp_actions, actions_len,
                                  packet)) {
        ofpbuf_delete(packet);
        return true;
    }

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow);

    error = dpif_execute(ofproto->dpif, key.data, key.size,
                         odp_actions, actions_len, packet);

    ofpbuf_delete(packet);
    return !error;
}
/* Remove 'facet' from 'ofproto' and free up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via subfacet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct ofproto_dpif *ofproto, struct facet *facet)
{
    struct subfacet *subfacet, *next_subfacet;

    assert(!list_is_empty(&facet->subfacets));

    /* First uninstall all of the subfacets to get final statistics. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet_uninstall(ofproto, subfacet);
    }

    /* Flush the final stats to the rule.
     *
     * This might require us to have at least one subfacet around so that we
     * can use its actions for accounting in facet_account(), which is why we
     * have uninstalled but not yet destroyed the subfacets. */
    facet_flush_stats(ofproto, facet);

    /* Now we're really all done so destroy everything. */
    LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
                        &facet->subfacets) {
        subfacet_destroy__(ofproto, subfacet);
    }
    hmap_remove(&ofproto->facets, &facet->hmap_node);
    list_remove(&facet->list_node);
    facet_free(facet);
}
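
/* Accounts the bytes that 'facet' has accrued since the last call: feeds the
 * MAC learning table from flows with learn or NORMAL actions and, where
 * bonding is in use, credits the traffic to bond slaves via bond_account(). */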
static void
facet_account(struct ofproto_dpif *ofproto, struct facet *facet)
{
    uint64_t n_bytes;
    struct subfacet *subfacet;
    const struct nlattr *a;
    unsigned int left;
    ovs_be16 vlan_tci;

    if (facet->byte_count <= facet->accounted_bytes) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;
    facet->accounted_bytes = facet->byte_count;

    /* Feed information from the active flows back into the learning table to
     * ensure that table is always in sync with what is actually flowing
     * through the datapath. */
    if (facet->has_learn || facet->has_normal) {
        struct action_xlate_ctx ctx;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              facet->flow.vlan_tci, NULL);
        ctx.may_learn = true;
        ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
                                    facet->rule->up.n_actions));
    }

    if (!facet->has_normal || !ofproto->has_bonded_bundles) {
        return;
    }

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bond.)
     *
     * We use the actions from an arbitrary subfacet because they should all
     * be equally valid for our purpose. */
    subfacet = CONTAINER_OF(list_front(&facet->subfacets),
                            struct subfacet, list_node);
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left,
                             subfacet->actions, subfacet->actions_len) {
        const struct ovs_action_push_vlan *vlan;
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP_VLAN:
            vlan_tci = htons(0);
            break;

        case OVS_ACTION_ATTR_PUSH_VLAN:
            vlan = nl_attr_get(a);
            vlan_tci = vlan->vlan_tci;
            break;
        }
    }
}
/* Returns true if the only action for 'facet' is to send to the controller.
 * (We don't report NetFlow expiration messages for such facets because they
 * are just part of the control logic for the network, not real traffic). */
static bool
facet_is_controller_flow(struct facet *facet)
{
    return (facet
            && facet->rule->up.n_actions == 1
            && action_outputs_to_port(&facet->rule->up.actions[0],
                                      htons(OFPP_CONTROLLER)));
}
/* Folds all of 'facet''s statistics into its rule.  Also updates the
 * accounting ofhook and emits a NetFlow expiration if appropriate.  All of
 * 'facet''s statistics in the datapath should have been zeroed and folded into
 * its packet and byte counts before this function is called. */
static void
facet_flush_stats(struct ofproto_dpif *ofproto, struct facet *facet)
{
    struct subfacet *subfacet;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        assert(!subfacet->dp_byte_count);
        assert(!subfacet->dp_packet_count);
    }

    facet_push_stats(facet);
    facet_account(ofproto, facet);

    if (ofproto->netflow && !facet_is_controller_flow(facet)) {
        struct ofexpired expired;
        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }

    facet->rule->packet_count += facet->packet_count;
    facet->rule->byte_count += facet->byte_count;

    /* Reset counters to prevent double counting if 'facet' ever gets
     * reinstalled. */
    facet_reset_counters(facet);

    netflow_flow_clear(&facet->nf_flow);
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct facet *facet;

    HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
                             &ofproto->facets) {
        if (flow_equal(flow, &facet->flow)) {
            return facet;
        }
    }

    return NULL;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct facet *facet = facet_find(ofproto, flow);

    /* The facet we found might not be valid, since we could be in need of
     * revalidation.  If it is not valid, don't return it. */
    if (facet
        && (ofproto->need_revalidate
            || tag_set_intersects(&ofproto->revalidate_set, facet->tags))
        && !facet_revalidate(ofproto, facet)) {
        COVERAGE_INC(facet_invalidated);
        return NULL;
    }

    return facet;
}
/* Re-searches 'ofproto''s classifier for a rule matching 'facet':
 *
 *   - If the rule found is different from 'facet''s current rule, moves
 *     'facet' to the new rule and recompiles its actions.
 *
 *   - If the rule found is the same as 'facet''s current rule, leaves 'facet'
 *     where it is and recompiles its actions anyway.
 *
 *   - If there is none, destroys 'facet'.
 *
 * Returns true if 'facet' still exists, false if it has been destroyed. */
static bool
facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet)
{
    struct actions {
        struct nlattr *odp_actions;
        size_t actions_len;
    };
    struct actions *new_actions;

    struct action_xlate_ctx ctx;
    struct rule_dpif *new_rule;
    struct subfacet *subfacet;
    bool actions_changed;
    int i;

    COVERAGE_INC(facet_revalidate);

    /* Determine the new rule. */
    new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
    if (!new_rule) {
        /* No new rule, so delete the facet. */
        facet_remove(ofproto, facet);
        return false;
    }

    /* Calculate new datapath actions.
     *
     * We do not modify any 'facet' state yet, because we might need to, e.g.,
     * emit a NetFlow expiration and, if so, we need to have the old state
     * around to properly compose it. */

    /* If the datapath actions changed or the installability changed,
     * then we need to talk to the datapath. */
    i = 0;
    new_actions = NULL;
    memset(&ctx, 0, sizeof ctx);
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        struct ofpbuf *odp_actions;
        bool should_install;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              subfacet->initial_tci, NULL);
        odp_actions = xlate_actions(&ctx, new_rule->up.actions,
                                    new_rule->up.n_actions);
        actions_changed = (subfacet->actions_len != odp_actions->size
                           || memcmp(subfacet->actions, odp_actions->data,
                                     subfacet->actions_len));

        should_install = (ctx.may_set_up_flow
                          && subfacet->key_fitness != ODP_FIT_TOO_LITTLE);
        if (actions_changed || should_install != subfacet->installed) {
            if (should_install) {
                struct dpif_flow_stats stats;

                subfacet_install(ofproto, subfacet,
                                 odp_actions->data, odp_actions->size, &stats);
                subfacet_update_stats(ofproto, subfacet, &stats);
            } else {
                subfacet_uninstall(ofproto, subfacet);
            }

            if (!new_actions) {
                new_actions = xcalloc(list_size(&facet->subfacets),
                                      sizeof *new_actions);
            }
            new_actions[i].odp_actions = xmemdup(odp_actions->data,
                                                 odp_actions->size);
            new_actions[i].actions_len = odp_actions->size;
        }

        ofpbuf_delete(odp_actions);
        i++;
    }
    if (new_actions) {
        facet_flush_stats(ofproto, facet);
    }

    /* Update 'facet' now that we've taken care of all the old state. */
    facet->tags = ctx.tags;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->may_install = ctx.may_set_up_flow;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->mirrors = ctx.mirrors;
    if (new_actions) {
        i = 0;
        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (new_actions[i].odp_actions) {
                free(subfacet->actions);
                subfacet->actions = new_actions[i].odp_actions;
                subfacet->actions_len = new_actions[i].actions_len;
            }
            i++;
        }
        free(new_actions);
    }
    if (facet->rule != new_rule) {
        COVERAGE_INC(facet_changed_rule);
        list_remove(&facet->list_node);
        list_push_back(&new_rule->facets, &facet->list_node);
        facet->rule = new_rule;
        facet->used = new_rule->up.created;
        facet->prev_used = facet->used;
    }

    return true;
}
/* Updates 'facet''s used time.  Caller is responsible for calling
 * facet_push_stats() to update the flows which 'facet' resubmits into. */
static void
facet_update_time(struct ofproto_dpif *ofproto, struct facet *facet,
                  long long int used)
{
    if (used > facet->used) {
        facet->used = used;
        if (used > facet->rule->used) {
            facet->rule->used = used;
        }
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
    }
}
static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}
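
/* Pushes the packet, byte, and used-time deltas that 'facet' has accumulated
 * since the previous push to the rules it resubmits into and to the relevant
 * mirror counters. */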
static void
facet_push_stats(struct facet *facet)
{
    uint64_t new_packets, new_bytes;

    assert(facet->packet_count >= facet->prev_packet_count);
    assert(facet->byte_count >= facet->prev_byte_count);
    assert(facet->used >= facet->prev_used);

    new_packets = facet->packet_count - facet->prev_packet_count;
    new_bytes = facet->byte_count - facet->prev_byte_count;

    if (new_packets || new_bytes || facet->used > facet->prev_used) {
        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        flow_push_stats(facet->rule, &facet->flow,
                        new_packets, new_bytes, facet->used);

        update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
                            facet->mirrors, new_packets, new_bytes);
    }
}
struct ofproto_push {
    struct action_xlate_ctx ctx;
    uint64_t packets;
    uint64_t bytes;
    long long int used;
};

static void
push_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx);

    if (rule) {
        rule->packet_count += push->packets;
        rule->byte_count += push->bytes;
        rule->used = MAX(push->used, rule->used);
    }
}
/* Pushes flow statistics to the rules which 'flow' resubmits into given
 * 'rule''s actions and mirrors. */
static void
flow_push_stats(const struct rule_dpif *rule,
                const struct flow *flow, uint64_t packets, uint64_t bytes,
                long long int used)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct ofproto_push push;

    push.packets = packets;
    push.bytes = bytes;
    push.used = used;

    action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, NULL);
    push.ctx.resubmit_hook = push_resubmit;
    ofpbuf_delete(xlate_actions(&push.ctx,
                                rule->up.actions, rule->up.n_actions));
}
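
/* Subfacets. */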
static struct subfacet *
subfacet_find__(struct ofproto_dpif *ofproto,
                const struct nlattr *key, size_t key_len, uint32_t key_hash,
                const struct flow *flow)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &ofproto->subfacets) {
        if (subfacet->key
            ? (subfacet->key_len == key_len
               && !memcmp(key, subfacet->key, key_len))
            : flow_equal(flow, &subfacet->facet->flow)) {
            return subfacet;
        }
    }

    return NULL;
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
 * 'key_fitness', 'key', and 'key_len'.  Returns the existing subfacet if
 * there is one, otherwise creates and returns a new subfacet.
 *
 * If the returned subfacet is new, then subfacet->actions will be NULL, in
 * which case the caller must populate the actions with
 * subfacet_make_actions(). */
static struct subfacet *
subfacet_create(struct ofproto_dpif *ofproto, struct facet *facet,
                enum odp_key_fitness key_fitness,
                const struct nlattr *key, size_t key_len, ovs_be16 initial_tci)
{
    uint32_t key_hash = odp_flow_key_hash(key, key_len);
    struct subfacet *subfacet;

    subfacet = subfacet_find__(ofproto, key, key_len, key_hash, &facet->flow);
    if (subfacet) {
        if (subfacet->facet == facet) {
            return subfacet;
        }

        /* This shouldn't happen. */
        VLOG_ERR_RL(&rl, "subfacet with wrong facet");
        subfacet_destroy(ofproto, subfacet);
    }

    subfacet = xzalloc(sizeof *subfacet);
    hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->used = time_msec();
    subfacet->key_fitness = key_fitness;
    if (key_fitness != ODP_FIT_PERFECT) {
        subfacet->key = xmemdup(key, key_len);
        subfacet->key_len = key_len;
    }
    subfacet->installed = false;
    subfacet->initial_tci = initial_tci;

    return subfacet;
}
/* Searches 'ofproto' for a subfacet with the given 'key', 'key_len', and
 * 'flow'.  Returns the subfacet if one exists, otherwise NULL. */
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
              const struct nlattr *key, size_t key_len)
{
    uint32_t key_hash = odp_flow_key_hash(key, key_len);
    enum odp_key_fitness fitness;
    struct flow flow;

    fitness = odp_flow_key_to_flow(key, key_len, &flow);
    if (fitness == ODP_FIT_ERROR) {
        return NULL;
    }

    return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
}
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct ofproto_dpif *ofproto, struct subfacet *subfacet)
{
    subfacet_uninstall(ofproto, subfacet);
    hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    free(subfacet->actions);
    free(subfacet);
}
/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct ofproto_dpif *ofproto, struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(ofproto, facet);
    } else {
        subfacet_destroy__(ofproto, subfacet);
    }
}
/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
 * that can be used to refer to 'subfacet'.  The caller must provide 'keybuf'
 * for use as temporary storage. */
static void
subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
                 struct ofpbuf *key)
{
    if (!subfacet->key) {
        ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
        odp_flow_key_from_flow(key, &subfacet->facet->flow);
    } else {
        ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
    }
}
/* Composes the datapath actions for 'subfacet' based on its rule's actions. */
static void
subfacet_make_actions(struct ofproto_dpif *p, struct subfacet *subfacet,
                      const struct ofpbuf *packet)
{
    struct facet *facet = subfacet->facet;
    const struct rule_dpif *rule = facet->rule;
    struct ofpbuf *odp_actions;
    struct action_xlate_ctx ctx;

    action_xlate_ctx_init(&ctx, p, &facet->flow, subfacet->initial_tci,
                          packet);
    odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
    facet->tags = ctx.tags;
    facet->may_install = ctx.may_set_up_flow;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->mirrors = ctx.mirrors;

    if (subfacet->actions_len != odp_actions->size
        || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
        free(subfacet->actions);
        subfacet->actions_len = odp_actions->size;
        subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
    }

    ofpbuf_delete(odp_actions);
}
/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
 * in the datapath will be zeroed and 'stats' will be updated with traffic new
 * since 'subfacet' was last updated.
 *
 * Returns 0 if successful, otherwise a positive errno value. */
static int
subfacet_install(struct ofproto_dpif *ofproto, struct subfacet *subfacet,
                 const struct nlattr *actions, size_t actions_len,
                 struct dpif_flow_stats *stats)
{
    struct odputil_keybuf keybuf;
    enum dpif_flow_put_flags flags;
    struct ofpbuf key;
    int ret;

    flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
    if (stats) {
        flags |= DPIF_FP_ZERO_STATS;
    }

    subfacet_get_key(subfacet, &keybuf, &key);
    ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
                        actions, actions_len, stats);

    if (stats) {
        subfacet_reset_dp_stats(subfacet, stats);
    }

    return ret;
}
/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct ofproto_dpif *p, struct subfacet *subfacet)
{
    if (subfacet->installed) {
        struct odputil_keybuf keybuf;
        struct dpif_flow_stats stats;
        struct ofpbuf key;
        int error;

        subfacet_get_key(subfacet, &keybuf, &key);
        error = dpif_flow_del(p->dpif, key.data, key.size, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(p, subfacet, &stats);
        }
        subfacet->installed = false;
    } else {
        assert(subfacet->dp_packet_count == 0);
        assert(subfacet->dp_byte_count == 0);
    }
}
/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}
/* Updates 'subfacet''s used time.  The caller is responsible for calling
 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
static void
subfacet_update_time(struct ofproto_dpif *ofproto, struct subfacet *subfacet,
                     long long int used)
{
    if (used > subfacet->used) {
        subfacet->used = used;
        facet_update_time(ofproto, subfacet->facet, used);
    }
}
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct ofproto_dpif *ofproto, struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet_update_time(ofproto, subfacet, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet_push_stats(facet);
        netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
    }
}
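
/* Rules. */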
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
                 uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id];
    if (flow->nw_frag & FLOW_NW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
         * are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }

    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
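
/* Invalidates 'rule' and completes its pending ofoperation, deferring the
 * completion onto 'ofproto->completions' when completions are clogged. */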
static void
complete_operation(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    rule_invalidate(rule);
    if (clogged) {
        struct dpif_completion *c = xmalloc(sizeof *c);
        c->op = rule->up.pending;
        list_push_back(&ofproto->completions, &c->list_node);
    } else {
        ofoperation_complete(rule->up.pending, 0);
    }
}
static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xmalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}
static int
rule_construct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct rule_dpif *victim;
    uint8_t table_id;
    int error;

    error = validate_actions(rule->up.actions, rule->up.n_actions,
                             &rule->up.cr.flow, ofproto->max_ports);
    if (error) {
        return error;
    }

    rule->used = rule->up.created;
    rule->packet_count = 0;
    rule->byte_count = 0;

    victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
    if (victim && !list_is_empty(&victim->facets)) {
        struct facet *facet;

        rule->facets = victim->facets;
        list_moved(&rule->facets);
        LIST_FOR_EACH (facet, list_node, &rule->facets) {
            /* XXX: We're only clearing our local counters here.  It's possible
             * that quite a few packets are unaccounted for in the datapath
             * statistics.  These will be accounted to the new rule instead of
             * cleared as required.  This could be fixed by clearing out the
             * datapath statistics for this facet, but currently it doesn't
             * seem worth it. */
            facet_reset_counters(facet);
            facet->rule = rule;
        }
    } else {
        /* Must avoid list_moved() in this case. */
        list_init(&rule->facets);
    }

    table_id = rule->up.table_id;
    rule->tag = (victim ? victim->tag
                 : table_id == 0 ? 0
                 : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
                                      ofproto->tables[table_id].basis));

    complete_operation(rule);
    return 0;
}
static void
rule_destruct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet, *next_facet;

    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(ofproto, facet);
    }

    complete_operation(rule);
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;

    /* Add any statistics that are tracked by facets.  This includes
     * statistical data recently updated by ofproto_update_stats() as well as
     * stats for packets that were executed "by hand" via dpif_execute(). */
    LIST_FOR_EACH (facet, list_node, &rule->facets) {
        *packets += facet->packet_count;
        *bytes += facet->byte_count;
    }
}
static int
rule_execute(struct rule *rule_, const struct flow *flow,
             struct ofpbuf *packet)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;
    struct ofpbuf *odp_actions;
    size_t size;

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, packet);
    odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
    size = packet->size;
    if (execute_odp_actions(ofproto, flow, odp_actions->data,
                            odp_actions->size, packet)) {
        rule->used = time_msec();
        rule->packet_count++;
        rule->byte_count += size;
        flow_push_stats(rule, flow, 1, size, rule->used);
    }
    ofpbuf_delete(odp_actions);

    return 0;
}
static void
rule_modify_actions(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    int error;

    error = validate_actions(rule->up.actions, rule->up.n_actions,
                             &rule->up.cr.flow, ofproto->max_ports);
    if (error) {
        ofoperation_complete(rule->up.pending, error);
        return;
    }

    complete_operation(rule);
}
/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct ofpbuf key, odp_actions;
    struct odputil_keybuf keybuf;
    uint16_t odp_port;
    struct flow flow;
    int error;

    flow_extract((struct ofpbuf *) packet, 0, 0, 0, &flow);
    odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
                                      flow.vlan_tci);
    if (odp_port != ofport->odp_port) {
        eth_pop_vlan(packet);
        flow.vlan_tci = htons(0);
    }

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, &flow);

    ofpbuf_init(&odp_actions, 32);
    compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);

    nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
    error = dpif_execute(ofproto->dpif,
                         key.data, key.size,
                         odp_actions.data, odp_actions.size,
                         packet);
    ofpbuf_uninit(&odp_actions);

    if (error) {
        VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
                     ofproto->up.name, odp_port, strerror(error));
    }
    ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
    return error;
}
/* OpenFlow to datapath action translation. */

static void do_xlate_actions(const union ofp_action *in, size_t n_in,
                             struct action_xlate_ctx *ctx);
static void xlate_normal(struct action_xlate_ctx *);

static size_t
put_userspace_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     const struct user_action_cookie *cookie)
{
    uint32_t pid;

    pid = dpif_port_get_pid(ofproto->dpif,
                            ofp_port_to_odp_port(flow->in_port));

    return odp_put_userspace_action(pid, cookie, odp_actions);
}
/* Compose SAMPLE action for sFlow. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t port_ifindex;
    uint32_t probability;
    struct user_action_cookie cookie;
    size_t sample_offset, actions_offset;
    int cookie_offset, n_output;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    if (odp_port == OVSP_NONE) {
        port_ifindex = 0;
        n_output = 0;
    } else {
        port_ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow,
                                                      odp_port);
        n_output = 1;
    }

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    /* Number of packets out of UINT_MAX to sample. */
    probability = dpif_sflow_get_probability(ofproto->sflow);
    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);

    cookie.type = USER_ACTION_COOKIE_SFLOW;
    cookie.data = port_ifindex;
    cookie.n_output = n_output;
    cookie.vlan_tci = 0;
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}
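
/* The sample action composed above nests one netlink attribute inside
 * another, roughly:
 *
 *     OVS_ACTION_ATTR_SAMPLE
 *         OVS_SAMPLE_ATTR_PROBABILITY   (packets out of UINT_MAX to sample)
 *         OVS_SAMPLE_ATTR_ACTIONS
 *             OVS_ACTION_ATTR_USERSPACE (pid plus the sFlow cookie)
 *
 * The returned offset locates the cookie within 'odp_actions' so that
 * fix_sflow_action() can patch it up once the output ports are known. */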
/* SAMPLE action must be first action in any given list of actions.
 * At this point we do not have all information required to build it. So try to
 * build sample action as complete as possible. */
static void
add_sflow_action(struct action_xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   ctx->odp_actions,
                                                   &ctx->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}
/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
 * USERSPACE action's user-cookie which is required for sflow. */
static void
fix_sflow_action(struct action_xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    struct user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                       sizeof(*cookie));
    assert(cookie != NULL);
    assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    if (ctx->sflow_n_outputs) {
        cookie->data = dpif_sflow_odp_port_to_ifindex(ctx->ofproto->sflow,
                                                      ctx->sflow_odp_port);
    }
    if (ctx->sflow_n_outputs >= 255) {
        cookie->n_output = 255;
    } else {
        cookie->n_output = ctx->sflow_n_outputs;
    }
    cookie->vlan_tci = base->vlan_tci;
}
static void
compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
    ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
    uint8_t flow_nw_tos = ctx->flow.nw_tos;
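
    /* 'ctx->flow' is scratch state shared by the whole translation, so the
     * VLAN TCI and ToS are saved above and restored at the end: any rewrite
     * made for this one output (VLAN splinters, QoS DSCP marking) must not
     * leak into the actions composed for subsequent outputs. */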
    uint16_t out_port;

    if (ofport) {
        struct priority_to_dscp *pdscp;

        if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)
            || (check_stp && !stp_forward_in_state(ofport->stp_state))) {
            return;
        }

        pdscp = get_priority(ofport, ctx->flow.skb_priority);
        if (pdscp) {
            ctx->flow.nw_tos &= ~IP_DSCP_MASK;
            ctx->flow.nw_tos |= pdscp->dscp;
        }
    } else {
        /* We may not have an ofport record for this port, but it doesn't hurt
         * to allow forwarding to it anyhow.  Maybe such a port will appear
         * later and we're pre-populating the flow table. */
    }

    out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
                                      ctx->flow.vlan_tci);
    if (out_port != odp_port) {
        ctx->flow.vlan_tci = htons(0);
    }
    commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->nf_output_iface = ofp_port;
    ctx->flow.vlan_tci = flow_vlan_tci;
    ctx->flow.nw_tos = flow_nw_tos;
}
static void
compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}
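
/* Resubmits the current flow to OpenFlow table 'table_id', temporarily
 * substituting 'in_port' as the input port for the lookup, and translates the
 * actions of whatever rule matches there.  Recursion is capped at
 * MAX_RESUBMIT_RECURSION levels so that a cycle of resubmit actions cannot
 * translate forever. */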
static void
xlate_table_action(struct action_xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct ofproto_dpif *ofproto = ctx->ofproto;
        struct rule_dpif *rule;
        uint16_t old_in_port;
        uint8_t old_table_id;

        old_table_id = ctx->table_id;
        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        old_in_port = ctx->flow.in_port;
        ctx->flow.in_port = in_port;
        rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id);

        /* Tag the flow. */
        if (table_id > 0 && table_id < N_TABLES) {
            struct table_dpif *table = &ofproto->tables[table_id];
            if (table->other_table) {
                ctx->tags |= (rule
                              ? rule->tag
                              : rule_calculate_tag(&ctx->flow,
                                                   &table->other_table->wc,
                                                   table->basis));
            }
        }

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->flow.in_port = old_in_port;

        if (ctx->resubmit_hook) {
            ctx->resubmit_hook(ctx, rule);
        }

        if (rule) {
            ctx->recurse++;
            do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
    }
}
static void
xlate_resubmit_table(struct action_xlate_ctx *ctx,
                     const struct nx_action_resubmit *nar)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = (nar->in_port == htons(OFPP_IN_PORT)
               ? ctx->flow.in_port
               : ntohs(nar->in_port));
    table_id = nar->table == 255 ? ctx->table_id : nar->table;

    xlate_table_action(ctx, in_port, table_id);
}
static void
flood_packets(struct action_xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.opp.config & htonl(OFPPC_NO_FLOOD))) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->nf_output_iface = NF_OUT_FLOOD;
}
static void
compose_controller_action(struct action_xlate_ctx *ctx, int len)
{
    struct user_action_cookie cookie;

    commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
    cookie.type = USER_ACTION_COOKIE_CONTROLLER;
    cookie.data = len;
    cookie.n_output = 0;
    cookie.vlan_tci = 0;
    put_userspace_action(ctx->ofproto, ctx->odp_actions, &ctx->flow, &cookie);
}
static void
xlate_output_action__(struct action_xlate_ctx *ctx,
                      uint16_t port, uint16_t max_len)
{
    uint16_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        compose_controller_action(ctx, max_len);
        break;
    case OFPP_LOCAL:
        compose_output_action(ctx, OFPP_LOCAL);
        break;
    case OFPP_NONE:
        break;
    default:
        if (port != ctx->flow.in_port) {
            compose_output_action(ctx, port);
        }
        break;
    }
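
    /* Update the NetFlow output interface: a single output port is reported
     * as-is, flooding is reported as NF_OUT_FLOOD, and any other combination
     * of two or more output ports collapses to NF_OUT_MULTI. */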
    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
                        const struct nx_action_output_reg *naor)
{
    uint64_t ofp_port;

    ofp_port = nxm_read_field_bits(naor->src, naor->ofs_nbits, &ctx->flow);

    if (ofp_port <= UINT16_MAX) {
        xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len));
    }
}
static void
xlate_output_action(struct action_xlate_ctx *ctx,
                    const struct ofp_action_output *oao)
{
    xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
}
static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
                     const struct ofp_action_enqueue *oae)
{
    uint16_t ofp_port;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
                                   &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action__(ctx, ntohs(oae->port), 0);
        return;
    }

    /* Figure out datapath output port. */
    ofp_port = ntohs(oae->port);
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->flow.in_port;
    } else if (ofp_port == ctx->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->flow.skb_priority;
    ctx->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_set_queue_action(struct action_xlate_ctx *ctx,
                       const struct nx_action_set_queue *nasq)
{
    uint32_t priority;
    int error;

    error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
                                   &priority);
    if (error) {
        /* Couldn't translate queue to a priority, so ignore.  A warning
         * has already been logged. */
        return;
    }

    ctx->flow.skb_priority = priority;
}
struct xlate_reg_state {
    ovs_be16 vlan_tci;
    ovs_be64 tun_id;
};
static void
xlate_autopath(struct action_xlate_ctx *ctx,
               const struct nx_action_autopath *naa)
{
    uint16_t ofp_port = ntohl(naa->id);
    struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);

    if (!port || !port->bundle) {
        ofp_port = OFPP_NONE;
    } else if (port->bundle->bond) {
        /* Autopath does not support VLAN hashing. */
        struct ofport_dpif *slave = bond_choose_output_slave(
            port->bundle->bond, &ctx->flow, 0, &ctx->tags);
        if (slave) {
            ofp_port = slave->up.ofp_port;
        }
    }
    autopath_execute(naa, &ctx->flow, ofp_port);
}
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_learn_action(struct action_xlate_ctx *ctx,
                   const struct nx_action_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    int error;

    learn_execute(learn, &ctx->flow, &fm);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        char *msg = ofputil_error_to_string(error);
        VLOG_WARN("learning action failed to modify flow table (%s)", msg);
        free(msg);
    }

    free(fm.actions);
}
static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
    if (port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                               ? htonl(OFPPC_NO_RECV_STP)
                               : htonl(OFPPC_NO_RECV))) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}
static void
do_xlate_actions(const union ofp_action *in, size_t n_in,
                 struct action_xlate_ctx *ctx)
{
    const struct ofport_dpif *port;
    const union ofp_action *ia;
    size_t left;

    port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
    if (port && !may_receive(port, ctx)) {
        /* Drop this flow. */
        return;
    }

    OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
        const struct ofp_action_dl_addr *oada;
        const struct nx_action_resubmit *nar;
        const struct nx_action_set_tunnel *nast;
        const struct nx_action_set_queue *nasq;
        const struct nx_action_multipath *nam;
        const struct nx_action_autopath *naa;
        const struct nx_action_bundle *nab;
        const struct nx_action_output_reg *naor;
        enum ofputil_action_code code;
        ovs_be64 tun_id;

        if (ctx->exit) {
            break;
        }

        code = ofputil_decode_action_unsafe(ia);
        switch (code) {
        case OFPUTIL_OFPAT_OUTPUT:
            xlate_output_action(ctx, &ia->output);
            break;

        case OFPUTIL_OFPAT_SET_VLAN_VID:
            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
            break;

        case OFPUTIL_OFPAT_SET_VLAN_PCP:
            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->flow.vlan_tci |= htons(
                (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
            break;

        case OFPUTIL_OFPAT_STRIP_VLAN:
            ctx->flow.vlan_tci = htons(0);
            break;

        case OFPUTIL_OFPAT_SET_DL_SRC:
            oada = ((struct ofp_action_dl_addr *) ia);
            memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
            break;

        case OFPUTIL_OFPAT_SET_DL_DST:
            oada = ((struct ofp_action_dl_addr *) ia);
            memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
            break;

        case OFPUTIL_OFPAT_SET_NW_SRC:
            ctx->flow.nw_src = ia->nw_addr.nw_addr;
            break;

        case OFPUTIL_OFPAT_SET_NW_DST:
            ctx->flow.nw_dst = ia->nw_addr.nw_addr;
            break;

        case OFPUTIL_OFPAT_SET_NW_TOS:
            ctx->flow.nw_tos &= ~IP_DSCP_MASK;
            ctx->flow.nw_tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
            break;

        case OFPUTIL_OFPAT_SET_TP_SRC:
            ctx->flow.tp_src = ia->tp_port.tp_port;
            break;

        case OFPUTIL_OFPAT_SET_TP_DST:
            ctx->flow.tp_dst = ia->tp_port.tp_port;
            break;

        case OFPUTIL_OFPAT_ENQUEUE:
            xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
            break;

        case OFPUTIL_NXAST_RESUBMIT:
            nar = (const struct nx_action_resubmit *) ia;
            xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id);
            break;

        case OFPUTIL_NXAST_RESUBMIT_TABLE:
            xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia);
            break;

        case OFPUTIL_NXAST_SET_TUNNEL:
            nast = (const struct nx_action_set_tunnel *) ia;
            tun_id = htonll(ntohl(nast->tun_id));
            ctx->flow.tun_id = tun_id;
            break;

        case OFPUTIL_NXAST_SET_QUEUE:
            nasq = (const struct nx_action_set_queue *) ia;
            xlate_set_queue_action(ctx, nasq);
            break;

        case OFPUTIL_NXAST_POP_QUEUE:
            ctx->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPUTIL_NXAST_REG_MOVE:
            nxm_execute_reg_move((const struct nx_action_reg_move *) ia,
                                 &ctx->flow);
            break;

        case OFPUTIL_NXAST_REG_LOAD:
            nxm_execute_reg_load((const struct nx_action_reg_load *) ia,
                                 &ctx->flow);
            break;

        case OFPUTIL_NXAST_NOTE:
            /* Nothing to do. */
            break;

        case OFPUTIL_NXAST_SET_TUNNEL64:
            tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id;
            ctx->flow.tun_id = tun_id;
            break;

        case OFPUTIL_NXAST_MULTIPATH:
            nam = (const struct nx_action_multipath *) ia;
            multipath_execute(nam, &ctx->flow);
            break;

        case OFPUTIL_NXAST_AUTOPATH:
            naa = (const struct nx_action_autopath *) ia;
            xlate_autopath(ctx, naa);
            break;

        case OFPUTIL_NXAST_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            nab = (const struct nx_action_bundle *) ia;
            xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow,
                                                      slave_enabled_cb,
                                                      ctx->ofproto), 0);
            break;

        case OFPUTIL_NXAST_BUNDLE_LOAD:
            ctx->ofproto->has_bundle_action = true;
            nab = (const struct nx_action_bundle *) ia;
            bundle_execute_load(nab, &ctx->flow, slave_enabled_cb,
                                ctx->ofproto);
            break;

        case OFPUTIL_NXAST_OUTPUT_REG:
            naor = (const struct nx_action_output_reg *) ia;
            xlate_output_reg_action(ctx, naor);
            break;

        case OFPUTIL_NXAST_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
            }
            break;

        case OFPUTIL_NXAST_EXIT:
            ctx->exit = true;
            break;
        }
    }

    /* We've let OFPP_NORMAL and the learning action look at the packet,
     * so drop it now if forwarding is disabled. */
    if (port && !stp_forward_in_state(port->stp_state)) {
        ofpbuf_clear(ctx->odp_actions);
        add_sflow_action(ctx);
    }
}
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      ovs_be16 initial_tci, const struct ofpbuf *packet)
{
    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    ctx->base_flow = ctx->flow;
    ctx->base_flow.tun_id = 0;
    ctx->base_flow.vlan_tci = initial_tci;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->resubmit_hook = NULL;
}
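
/* Translates the 'n_in' OpenFlow actions in 'in' into datapath actions for
 * the flow in 'ctx', returning a freshly allocated buffer that the caller
 * must eventually free with ofpbuf_delete(). */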
static struct ofpbuf *
xlate_actions(struct action_xlate_ctx *ctx,
              const union ofp_action *in, size_t n_in)
{
    struct flow orig_flow = ctx->flow;

    COVERAGE_INC(ofproto_dpif_xlate);

    ctx->odp_actions = ofpbuf_new(512);
    ofpbuf_reserve(ctx->odp_actions, NL_A_U32_SIZE);
    ctx->tags = 0;
    ctx->may_set_up_flow = true;
    ctx->has_learn = false;
    ctx->has_normal = false;
    ctx->nf_output_iface = NF_OUT_DROP;
    ctx->mirrors = 0;
    ctx->recurse = 0;
    ctx->orig_skb_priority = ctx->flow.skb_priority;
    ctx->table_id = 0;
    ctx->exit = false;

    if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx->ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
            ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return ctx->odp_actions;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;
        }
    }

    if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
        ctx->may_set_up_flow = false;
        return ctx->odp_actions;
    } else {
        add_sflow_action(ctx);
        do_xlate_actions(in, n_in, ctx);

        if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
                                     ctx->odp_actions->data,
                                     ctx->odp_actions->size)) {
            ctx->may_set_up_flow = false;
            if (ctx->packet
                && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
                                       ctx->packet)) {
                compose_output_action(ctx, OFPP_LOCAL);
            }
        }
        add_mirror_actions(ctx, &orig_flow);
        fix_sflow_action(ctx);
    }

    return ctx->odp_actions;
}
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
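
/* Worked example: on an access port configured with VLAN 10, an untagged
 * packet (vid 0) is assigned to VLAN 10 by input_vid_to_vlan(), and
 * output_vlan_to_vid() maps VLAN 10 back to vid 0, so the packet leaves an
 * access port untagged again.  Sent out a trunk port instead, the same packet
 * would carry an explicit 802.1Q tag with VID 10. */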
static void
output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
                                        vid, &ctx->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->flow.vlan_tci = old_tci;
}
static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}
static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
/* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored
 * to a VLAN.  In general most packets may be mirrored but we want to drop
 * protocols that may confuse switches. */
static bool
eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN])
{
    /* If you change this function's behavior, please update corresponding
     * documentation in vswitch.xml at the same time. */
    if (dst[0] != 0x01) {
        /* All the currently banned MACs happen to start with 01, so this is a
         * quick way to eliminate most of the good ones. */
        return true;
    }

    if (eth_addr_is_reserved(dst)) {
        /* Drop STP, IEEE pause frames, and other reserved protocols
         * (01-80-c2-00-00-0x). */
        return false;
    }

    if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) {
        /* Cisco OUI. */
        if ((dst[3] & 0xfe) == 0xcc &&
            (dst[4] & 0xfe) == 0xcc &&
            (dst[5] & 0xfe) == 0xcc) {
            /* Drop the following protocols plus others following the same
               pattern:

               CDP, VTP, DTP, PAgP  (01-00-0c-cc-cc-cc)
               Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd)
               STP Uplink Fast      (01-00-0c-cd-cd-cd) */
            return false;
        }

        if (!(dst[3] | dst[4] | dst[5])) {
            /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */
            return false;
        }
    }

    return true;
}
static void
add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->packet != NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */
    NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
                      ctx->odp_actions->size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors &= mirrors - 1;
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (eth_dst_may_rspan(orig_flow->dl_dst)
                   && vlan != m->out_vlan) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}
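
/* Credits 'packets' and 'bytes' to every mirror in the 'mirrors' bitmask.
 * The loop below peels off one mirror per iteration: 'mirrors &= mirrors - 1'
 * clears the lowest-order set bit (e.g. 0x6 -> 0x4 -> 0x0), and
 * mirror_mask_ffs() yields that bit's 1-based index into the mirrors array. */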
static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    if (!mirrors) {
        return;
    }

    for (; mirrors; mirrors &= mirrors - 1) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
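
/* For example, a broadcast ARP request whose sender and target protocol
 * addresses are both 192.168.0.7 (nw_src == nw_dst) is gratuitous; an
 * ordinary broadcast ARP request asking about some other host is not. */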
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}
static struct ofbundle *
lookup_input_bundle(struct ofproto_dpif *ofproto, uint16_t in_port, bool warn)
{
    struct ofport_dpif *ofport;

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }

    return NULL;
}
/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
              struct ofport_dpif *in_port, uint16_t vlan, tag_type *tags)
{
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                return false;
            }
            break;
        }
    }

    return true;
}
static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->has_normal = true;

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
                                    ctx->packet != NULL);
    if (!in_bundle) {
        return;
    }

    /* We know 'in_port' exists unless it is "ofpp_none_bundle",
     * since lookup_input_bundle() succeeded. */
    in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);

    /* Drop malformed frames. */
    if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port &&
        !is_admissible(ctx->ofproto, &ctx->flow, in_port, vlan, &ctx->tags)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            output_normal(ctx, mac->port.p, vlan);
        }
    } else {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->nf_output_iface = NF_OUT_FLOOD;
    }
}
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static uint32_t
rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
                   uint32_t secret)
{
    if (flow_wildcards_is_catchall(wc)) {
        return 0;
    } else {
        struct flow tag_flow = *flow;
        flow_zero_wildcards(&tag_flow, wc);
        return tag_create_deterministic(flow_hash(&tag_flow, secret));
    }
}
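
/* For example, a table populated only by MAC learning might contain rules
 * that all wildcard everything except the VLAN and Ethernet destination, plus
 * one catch-all rule.  Each such rule, and each facet that resubmits into the
 * table, then gets a tag derived from just its (VLAN, dl_dst) pair, so a MAC
 * learning change invalidates only the facets whose tag matches the modified
 * rule's. */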
/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct classifier *cls = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&cls->tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &cls->tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->need_revalidate = true;
    }
}
/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->revalidate_set, rule->tag);
        } else {
            ofproto->need_revalidate = true;
        }
    }
}
static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->need_revalidate = true;
        return true;
    } else {
        return false;
    }
}
static int
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const union ofp_action *ofp_actions, size_t n_ofp_actions)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    if (flow->in_port >= ofproto->max_ports && flow->in_port < OFPP_MAX) {
        return ofp_mkerr_nicira(OFPET_BAD_REQUEST, NXBRC_BAD_IN_PORT);
    }

    error = validate_actions(ofp_actions, n_ofp_actions, flow,
                             ofproto->max_ports);
    if (!error) {
        struct odputil_keybuf keybuf;
        struct action_xlate_ctx ctx;
        struct ofpbuf *odp_actions;
        struct ofpbuf key;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&key, flow);

        action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, packet);
        odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
        dpif_execute(ofproto->dpif, key.data, key.size,
                     odp_actions->data, odp_actions->size, packet);
        ofpbuf_delete(odp_actions);
    }
    return error;
}
static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}
static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
}
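
/* If the NetFlow active timeout has expired for 'facet', reports its
 * statistics to the NetFlow collector.  Re-installing each installed subfacet
 * below pulls the accumulated datapath flow statistics (which the datapath
 * zeroes on re-installation) into the facet before it is reported. */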
static void
send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
{
    if (!facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct subfacet *subfacet;
        struct ofexpired expired;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (subfacet->installed) {
                struct dpif_flow_stats stats;

                subfacet_install(ofproto, subfacet, subfacet->actions,
                                 subfacet->actions_len, &stats);
                subfacet_update_stats(ofproto, subfacet, &stats);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}
static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct facet *facet;

    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        send_active_timeout(ofproto, facet);
    }
}
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc OVS_UNUSED,
                          const char *argv[], void *aux OVS_UNUSED)
{
    const struct ofproto_dpif *ofproto;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "no such bridge");
        return;
    }
    mac_learning_flush(ofproto->ml);

    unixctl_command_reply(conn, 200, "table successfully flushed");
}
static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
    }
    unixctl_command_reply(conn, 200, ds_cstr(&ds));
    ds_destroy(&ds);
}
struct ofproto_trace {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};
static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
    ds_put_char(result, '\n');
}
static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}
static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}
static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}
static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    const char *dpname = argv[1];
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    struct rule_dpif *rule;
    ovs_be16 initial_tci;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
                              "for help)");
        goto exit;
    }
    if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
        /* ofproto/trace dpname flow [-generate] */
        const char *flow_s = argv[2];
        const char *generate_s = argv[3];
        int error;

        /* Convert string to datapath key. */
        ofpbuf_init(&odp_key, 0);
        error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
        if (error) {
            unixctl_command_reply(conn, 501, "Bad flow syntax");
            goto exit;
        }

        /* Convert odp_key to flow. */
        error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
                                              odp_key.size, &flow,
                                              &initial_tci, NULL);
        if (error == ODP_FIT_ERROR) {
            unixctl_command_reply(conn, 501, "Invalid flow");
            goto exit;
        }

        /* Generate a packet, if requested. */
        if (generate_s) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (argc == 6) {
        /* ofproto/trace dpname priority tun_id in_port packet */
        const char *priority_s = argv[2];
        const char *tun_id_s = argv[3];
        const char *in_port_s = argv[4];
        const char *packet_s = argv[5];
        uint16_t in_port = ofp_port_to_odp_port(atoi(in_port_s));
        ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
        uint32_t priority = atoi(priority_s);
        const char *msg;

        msg = eth_from_hex(packet_s, &packet);
        if (msg) {
            unixctl_command_reply(conn, 501, msg);
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, priority, tun_id, in_port, &flow);
        initial_tci = flow.vlan_tci;
    } else {
        unixctl_command_reply(conn, 501, "Bad command syntax");
        goto exit;
    }

    ds_put_cstr(&result, "Flow: ");
    flow_format(&result, &flow);
    ds_put_char(&result, '\n');

    rule = rule_dpif_lookup(ofproto, &flow, 0);
    trace_format_rule(&result, 0, 0, rule);
    if (rule) {
        struct ofproto_trace trace;
        struct ofpbuf *odp_actions;

        trace.result = &result;
        trace.flow = flow;
        action_xlate_ctx_init(&trace.ctx, ofproto, &flow, initial_tci, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        odp_actions = xlate_actions(&trace.ctx,
                                    rule->up.actions, rule->up.n_actions);

        ds_put_char(&result, '\n');
        trace_format_flow(&result, 0, "Final flow", &trace);
        ds_put_cstr(&result, "Datapath actions: ");
        format_odp_actions(&result, odp_actions->data, odp_actions->size);
        ofpbuf_delete(odp_actions);

        if (!trace.ctx.may_set_up_flow) {
            if (packet) {
                ds_put_cstr(&result, "\nThis flow is not cachable.");
            } else {
                ds_put_cstr(&result, "\nThe datapath actions are incomplete--"
                            "for complete actions, please supply a packet.");
            }
        }
    }

    unixctl_command_reply(conn, 200, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}
static void
ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, 200, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, 200, NULL);
}
static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "bridge {tun_id in_port packet | odp_flow [-generate]}",
        2, 4, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "bridge", 1, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
}
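
/* Example invocations of the commands registered above, assuming a bridge
 * named br0:
 *
 *     ovs-appctl fdb/show br0
 *     ovs-appctl fdb/flush br0
 *     ovs-appctl ofproto/trace br0 <odp_flow> -generate
 */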
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */

static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->need_revalidate = true;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}
static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}
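
/* For example, if "eth0.10" is configured as a VLAN splinter for VLAN 10 on
 * "eth0", vsp_realdev_to_vlandev() maps eth0's datapath port plus a vlan_tci
 * carrying VID 10 to eth0.10's datapath port; the caller then strips the
 * 802.1Q header and sends the packet out the VLAN device untagged. */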
static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}
static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
const struct ofproto_class ofproto_dpif_class = {
    enumerate_types,
    enumerate_names,
    del,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    run_fast,
    wait,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_cfm,
    get_cfm_fault,
    get_cfm_remote_mpids,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set,
    mirror_get_stats,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_realdev,
};