/*
 * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 32
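
/* Illustrative sketch (not part of the original source): a translation step
 * can honor MAX_RESUBMIT_RECURSION with a check like the hypothetical helper
 * below; the real guard lives in xlate_table_action(). */
#if 0
static bool
xlate_resubmit_ok(const struct action_xlate_ctx *ctx)
{
    /* Stop resubmitting once the recursion budget is spent; the caller is
     * then expected to set ctx->max_resubmit_trigger and give up. */
    return ctx->recurse < MAX_RESUBMIT_RECURSION;
}
#endif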
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
};

static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            uint8_t table_id);

static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static void flow_push_stats(struct rule_dpif *, const struct flow *,
                            const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct flow_wildcards *,
                                   uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
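
/* Illustrative sketch (hypothetical helper, not in the original source):
 * mirror membership is one bit per mirror index within a mirror_mask_t, so
 * adding and testing mirrors are plain bit operations, e.g.: */
#if 0
static mirror_mask_t
mirror_bit(const struct ofmirror *m)
{
    return MIRROR_MASK_C(1) << m->idx;  /* Valid because idx < MAX_MIRRORS. */
}
#endif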
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};
static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);
struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};
static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK,
};
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */
    bool may_learn;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing a
     * resubmit or OFPP_TABLE action.  In addition, disables logging of traces
     * when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct action_xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    const struct dpif_flow_stats *resubmit_stats;

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint16_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
    struct flow orig_flow;      /* Copy of original flow. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
                          const struct ofpact *ofpacts, size_t ofpacts_len,
                          struct ofpbuf *odp_actions);
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
                                           const struct ofpact *ofpacts,
                                           size_t ofpacts_len);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct action_xlate_ctx *ctx, const char *s);
/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath. */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};

static const char *subfacet_path_to_string(enum subfacet_path);
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */

    /* Key.
     *
     * To save memory in the common case, 'key' is NULL if 'key_fitness' is
     * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
     * regenerate the ODP flow key from ->facet->flow. */
    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet, but
     * may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    enum slow_path_reason slow; /* 0 if fast path may be used. */
    enum subfacet_path path;    /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */
};
static struct subfacet *subfacet_create(struct facet *, enum odp_key_fitness,
                                        const struct nlattr *key,
                                        size_t key_len, ovs_be16 initial_tci);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
                             struct ofpbuf *key);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet,
                                  struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *, enum slow_path_reason);
static void subfacet_uninstall(struct subfacet *);

static enum subfacet_path subfacet_want_path(enum slow_path_reason);
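
/* Illustrative sketch: subfacet_want_path() (defined later in this file)
 * reduces to choosing the slow path whenever any slow_path_reason bit is
 * set.  A minimal equivalent, assuming that behavior: */
#if 0
static enum subfacet_path
subfacet_want_path_sketch(enum slow_path_reason slow)
{
    return slow ? SF_SLOW_PATH : SF_FAST_PATH;
}
#endif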
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
    struct list list_node;      /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;     /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     ofproto_send_packet().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count; /* Number of packets from last stats push. */
    uint64_t prev_byte_count;   /* Number of bytes from last stats push. */
    long long int prev_used;    /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;   /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;          /* TCP flags seen for this 'rule'. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;              /* Tags that would require revalidation. */
    mirror_mask_t mirrors;      /* Bitmap of dependent mirrors. */

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.) */
    struct subfacet one_subfacet;
};
static struct facet *facet_create(struct rule_dpif *,
                                  const struct flow *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static void facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);

static bool facet_is_controller_flow(struct facet *);
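
/* Illustrative sketch (hypothetical helper, not in the original source):
 * since every subfacet links back into its facet's 'subfacets' list,
 * per-facet datapath totals can be accumulated by walking that list: */
#if 0
static uint64_t
facet_dp_packet_count_sketch(const struct facet *facet)
{
    const struct subfacet *subfacet;
    uint64_t n = 0;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        n += subfacet->dp_packet_count;
    }
    return n;
}
#endif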
struct ofport_dpif {
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};
/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};
static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
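
/* Illustrative sketch (hypothetical, not in the original source): the VLAN
 * splinter maps give O(1) translation in both directions, one keyed by the
 * (realdev, vid) pair and the other by the vlandev port.  The actual hash
 * function used by the implementation may differ. */
#if 0
static uint32_t
vsp_realdev_vid_hash_sketch(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}
#endif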
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    if (!ofport) {
        return NULL;
    }
    assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofport, struct ofport_dpif, up);
}
static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};
/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};
/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,        /* Switch configuration changed. */
    REV_STP,                    /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,           /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,             /* Flow table changed. */
    REV_INCONSISTENCY           /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif *dpif;
    int max_ports;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Expiration. */
    struct timer next_expiration;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;
    struct governor *governor;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];
    enum revalidate_reason need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);
static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, ovs_be16 initial_tci,
                          struct ds *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);

/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);
/* Flow expiration. */
static int expire(struct ofproto_dpif *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);
/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Factory functions. */

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    return dp_enumerate_names(type, names);
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}
/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}
static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *name = ofproto->up.name;
    int error;
    int i;

    error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
        return error;
    }

    ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
    ofproto->n_matches = 0;

    dpif_flow_flush(ofproto->dpif);
    dpif_recv_purge(ofproto->dpif);

    error = dpif_recv_set(ofproto->dpif, true);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
        dpif_close(ofproto->dpif);
        return error;
    }

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    timer_set_duration(&ofproto->next_expiration, 1000);

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);
    ofproto->governor = NULL;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }
    ofproto->need_revalidate = 0;
    tag_set_init(&ofproto->revalidate_set);

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    return error;
}
static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    cls_rule_init_catchall(&fm.cr, 0);
    cls_rule_set_reg(&fm.cr, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.buffer_id = 0;
    fm.out_port = 0;
    fm.flags = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    *rulep = rule_dpif_lookup__(ofproto, &fm.cr.flow, TBL_INTERNAL);
    assert(*rulep != NULL);

    return 0;
}
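
/* Note: internal flows are distinguished from one another by matching
 * register 0 against a small integer id (see cls_rule_set_reg() above), so
 * several hidden rules can coexist in TBL_INTERNAL without overlapping. */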
static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    return error;
}
static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}
static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);
    governor_destroy(ofproto->governor);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    dpif_close(ofproto->dpif);
}
static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    unsigned int work;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run_fast(ofport);
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple batches
     * because in some cases handling a packet can cause another packet to be
     * queued almost immediately as part of the return flow.  Both
     * optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }
    return 0;
}
static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }
    dpif_run(ofproto->dpif);

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (timer_expired(&ofproto->next_expiration)) {
        int delay = expire(ofproto);
        timer_set_duration(&ofproto->next_expiration, delay);
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->revalidate_set);

    /* Now revalidate if there's anything to do. */
    if (ofproto->need_revalidate
        || !tag_set_is_empty(&ofproto->revalidate_set)) {
        struct tag_set revalidate_set = ofproto->revalidate_set;
        bool revalidate_all = ofproto->need_revalidate;
        struct facet *facet;

        switch (ofproto->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        /* Clear the revalidation flags. */
        tag_set_init(&ofproto->revalidate_set);
        ofproto->need_revalidate = 0;

        HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
            if (revalidate_all
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(facet);
            }
        }
    }

    /* Check the consistency of a random facet, to aid debugging. */
    if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
        struct facet *facet;

        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->need_revalidate = REV_INCONSISTENCY;
            }
        }
    }

    if (ofproto->governor) {
        size_t n_subfacets;

        governor_run(ofproto->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
            && governor_is_idle(ofproto->governor)) {
            governor_destroy(ofproto->governor);
            ofproto->governor = NULL;
        }
    }

    return 0;
}
static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->dpif);
    dpif_recv_wait(ofproto->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else {
        timer_wait(&ofproto->next_expiration);
    }
    if (ofproto->governor) {
        governor_wait(ofproto->governor);
    }
}
static void
get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    simap_increase(usage, "facets", hmap_count(&ofproto->facets));
    simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
}
static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling it
         * individually since we are about to blow away all the facets with
         * dpif_flow_flush(). */
        struct subfacet *subfacet;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            subfacet->path = SF_NOT_INSTALLED;
            subfacet->dp_packet_count = 0;
            subfacet->dp_byte_count = 0;
        }
        facet_remove(facet);
    }
    dpif_flow_flush(ofproto->dpif);
}
static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |
                OFPUTIL_A_ENQUEUE);
}
static void
get_tables(struct ofproto *ofproto_, struct ofp10_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->dpif, &s);
    put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
    put_32aligned_be64(&ots->matched_count,
                       htonll(s.n_hit + ofproto->n_matches));
}
static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}
static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = REV_RECONFIGURE;
    port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_);
    }

    return 0;
}
static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = REV_RECONFIGURE;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}
static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}
static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}
static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up);
            }
            ofproto->need_revalidate = REV_RECONFIGURE;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->need_revalidate = REV_RECONFIGURE;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}
*ofport_
)
1313 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1315 return ofport
->cfm
? cfm_get_fault(ofport
->cfm
) : -1;
1319 get_cfm_opup(const struct ofport
*ofport_
)
1321 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1323 return ofport
->cfm
? cfm_get_opup(ofport
->cfm
) : -1;
1327 get_cfm_remote_mpids(const struct ofport
*ofport_
, const uint64_t **rmps
,
1330 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1333 cfm_get_remote_mpids(ofport
->cfm
, rmps
, n_rmps
);
1341 get_cfm_health(const struct ofport
*ofport_
)
1343 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1345 return ofport
->cfm
? cfm_get_health(ofport
->cfm
) : -1;
/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}
/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}
static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}
static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
                     != stp_forward_in_state(state);

        ofproto->need_revalidate = REV_STP;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}
static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}
static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
    }
}
static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}
static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}
static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            tag_set_add(&o->revalidate_set, e->tag);
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}
/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}
static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}
static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}
static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp,
                uint32_t bond_stable_id)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        port->bundle->ofproto->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    port->bond_stable_id = bond_stable_id;

    return true;
}
static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}
1859 bundle_set(struct ofproto
*ofproto_
, void *aux
,
1860 const struct ofproto_bundle_settings
*s
)
1862 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1863 bool need_flush
= false;
1864 struct ofport_dpif
*port
;
1865 struct ofbundle
*bundle
;
1866 unsigned long *trunks
;
1872 bundle_destroy(bundle_lookup(ofproto
, aux
));
1876 assert(s
->n_slaves
== 1 || s
->bond
!= NULL
);
1877 assert((s
->lacp
!= NULL
) == (s
->lacp_slaves
!= NULL
));
1879 bundle
= bundle_lookup(ofproto
, aux
);
1881 bundle
= xmalloc(sizeof *bundle
);
1883 bundle
->ofproto
= ofproto
;
1884 hmap_insert(&ofproto
->bundles
, &bundle
->hmap_node
,
1885 hash_pointer(aux
, 0));
1887 bundle
->name
= NULL
;
1889 list_init(&bundle
->ports
);
1890 bundle
->vlan_mode
= PORT_VLAN_TRUNK
;
1892 bundle
->trunks
= NULL
;
1893 bundle
->use_priority_tags
= s
->use_priority_tags
;
1894 bundle
->lacp
= NULL
;
1895 bundle
->bond
= NULL
;
1897 bundle
->floodable
= true;
1899 bundle
->src_mirrors
= 0;
1900 bundle
->dst_mirrors
= 0;
1901 bundle
->mirror_out
= 0;
1904 if (!bundle
->name
|| strcmp(s
->name
, bundle
->name
)) {
1906 bundle
->name
= xstrdup(s
->name
);
1911 if (!bundle
->lacp
) {
1912 ofproto
->need_revalidate
= REV_RECONFIGURE
;
1913 bundle
->lacp
= lacp_create();
1915 lacp_configure(bundle
->lacp
, s
->lacp
);
1917 lacp_destroy(bundle
->lacp
);
1918 bundle
->lacp
= NULL
;
1921 /* Update set of ports. */
1923 for (i
= 0; i
< s
->n_slaves
; i
++) {
1924 if (!bundle_add_port(bundle
, s
->slaves
[i
],
1925 s
->lacp
? &s
->lacp_slaves
[i
] : NULL
,
1926 s
->bond_stable_ids
? s
->bond_stable_ids
[i
] : 0)) {
1930 if (!ok
|| list_size(&bundle
->ports
) != s
->n_slaves
) {
1931 struct ofport_dpif
*next_port
;
1933 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
1934 for (i
= 0; i
< s
->n_slaves
; i
++) {
1935 if (s
->slaves
[i
] == port
->up
.ofp_port
) {
1940 bundle_del_port(port
);
1944 assert(list_size(&bundle
->ports
) <= s
->n_slaves
);
1946 if (list_is_empty(&bundle
->ports
)) {
1947 bundle_destroy(bundle
);
1951 /* Set VLAN tagging mode */
1952 if (s
->vlan_mode
!= bundle
->vlan_mode
1953 || s
->use_priority_tags
!= bundle
->use_priority_tags
) {
1954 bundle
->vlan_mode
= s
->vlan_mode
;
1955 bundle
->use_priority_tags
= s
->use_priority_tags
;
1960 vlan
= (s
->vlan_mode
== PORT_VLAN_TRUNK
? -1
1961 : s
->vlan
>= 0 && s
->vlan
<= 4095 ? s
->vlan
1963 if (vlan
!= bundle
->vlan
) {
1964 bundle
->vlan
= vlan
;
1968 /* Get trunked VLANs. */
1969 switch (s
->vlan_mode
) {
1970 case PORT_VLAN_ACCESS
:
1974 case PORT_VLAN_TRUNK
:
1975 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
1978 case PORT_VLAN_NATIVE_UNTAGGED
:
1979 case PORT_VLAN_NATIVE_TAGGED
:
1980 if (vlan
!= 0 && (!s
->trunks
1981 || !bitmap_is_set(s
->trunks
, vlan
)
1982 || bitmap_is_set(s
->trunks
, 0))) {
1983 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
1985 trunks
= bitmap_clone(s
->trunks
, 4096);
1987 trunks
= bitmap_allocate1(4096);
1989 bitmap_set1(trunks
, vlan
);
1990 bitmap_set0(trunks
, 0);
1992 trunks
= CONST_CAST(unsigned long *, s
->trunks
);
1999 if (!vlan_bitmap_equal(trunks
, bundle
->trunks
)) {
2000 free(bundle
->trunks
);
2001 if (trunks
== s
->trunks
) {
2002 bundle
->trunks
= vlan_bitmap_clone(trunks
);
2004 bundle
->trunks
= trunks
;
2009 if (trunks
!= s
->trunks
) {
2014 if (!list_is_short(&bundle
->ports
)) {
2015 bundle
->ofproto
->has_bonded_bundles
= true;
2017 if (bond_reconfigure(bundle
->bond
, s
->bond
)) {
2018 ofproto
->need_revalidate
= REV_RECONFIGURE
;
2021 bundle
->bond
= bond_create(s
->bond
);
2022 ofproto
->need_revalidate
= REV_RECONFIGURE
;
2025 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
2026 bond_slave_register(bundle
->bond
, port
, port
->bond_stable_id
,
2030 bond_destroy(bundle
->bond
);
2031 bundle
->bond
= NULL
;
2034 /* If we changed something that would affect MAC learning, un-learn
2035 * everything on this port and force flow revalidation. */
2037 bundle_flush_macs(bundle
, false);
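/* Worked example of the trunk computation above (illustrative, not from the
 * original source): a PORT_VLAN_NATIVE_TAGGED bundle with vlan == 10 and
 * s->trunks == {20, 30} fails the bitmap_is_set(s->trunks, 10) test, so the
 * bitmap is cloned and adjusted with bitmap_set1(trunks, 10) and
 * bitmap_set0(trunks, 0), giving an effective trunk set of {10, 20, 30}:
 * the native VLAN is always trunked and VLAN 0 never is. */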
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}
static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}
static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}
static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}
/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
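/* Illustrative example (not from the original source): if mirrors 0 and 2
 * share the same 'out' bundle and 'out_vlan' while mirrors 1 and 3 match
 * nothing, the loops above leave m0->dup_mirrors == m2->dup_mirrors == 0b0101
 * and m1->dup_mirrors == 0b0010, m3->dup_mirrors == 0b1000, so flow
 * translation can emit one mirrored copy per duplicate set instead of one
 * copy per mirror. */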
static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan) {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}
static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}
static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}
static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
    }
    return 0;
}
static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}
static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->need_revalidate = REV_RECONFIGURE;
}
static void
set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
}
static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
}
static void
ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
}
static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}
static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);
    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}
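/* Example of how 'enable' composes above (illustrative): a port whose carrier
 * is up but whose CFM session reports a fault, or whose LACP partner has not
 * selected it, ends with enable == false, so bundle_run() will tell the bond
 * not to use this slave even though the link is physically up. */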
static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}
static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
    }
    return error;
}
static int
port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    uint16_t odp_port = UINT16_MAX;
    int error;

    error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
    if (!error) {
        *ofp_portp = odp_port_to_ofp_port(odp_port);
    }
    return error;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
    if (!error) {
        struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
        if (ofport) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}
static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport->odp_port == OVSP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
/* Account packets for LOCAL port. */
static void
ofproto_update_local_port_stats(const struct ofproto *ofproto_,
                                size_t tx_size, size_t rx_size)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (rx_size) {
        ofproto->stats.rx_packets++;
        ofproto->stats.rx_bytes += rx_size;
    }
    if (tx_size) {
        ofproto->stats.tx_packets++;
        ofproto->stats.tx_bytes += tx_size;
    }
}
struct port_dump_state {
    struct dpif_port_dump dump;
    bool done;
};

static int
port_dump_start(const struct ofproto *ofproto_, void **statep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state;

    *statep = state = xmalloc(sizeof *state);
    dpif_port_dump_start(&state->dump, ofproto->dpif);
    state->done = false;
    return 0;
}

static int
port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
               struct ofproto_port *port)
{
    struct port_dump_state *state = state_;
    struct dpif_port dpif_port;

    if (dpif_port_dump_next(&state->dump, &dpif_port)) {
        ofproto_port_from_dpif_port(port, &dpif_port);
        return 0;
    } else {
        int error = dpif_port_dump_done(&state->dump);

        state->done = true;
        return error ? error : EOF;
    }
}

static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (!state->done) {
        dpif_port_dump_done(&state->dump);
    }
    free(state);
    return 0;
}
static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    return dpif_port_poll(ofproto->dpif, devnamep);
}

static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    ovs_be16 initial_tci;
    struct list packets;
    enum dpif_upcall_type upcall_type;
};

struct flow_miss_op {
    struct dpif_op dpif_op;
    struct subfacet *subfacet;  /* Subfacet  */
    void *garbage;              /* Pointer to pass to free(), NULL if none. */
    uint64_t stub[1024 / 8];    /* Temporary buffer. */
};
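/* Sizing note (an illustration of the batching arithmetic): each upcall can
 * contribute at most one DPIF_OP_EXECUTE op for its packet, and each distinct
 * flow can contribute at most one DPIF_OP_FLOW_PUT op, so a full batch of
 * FLOW_MISS_MAX_BATCH upcalls never needs more than FLOW_MISS_MAX_BATCH * 2
 * ops.  That is why handle_miss_upcalls() declares:
 *
 *     struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
 */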
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.table_id = 0;
    pin.cookie = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    /* Registers aren't meaningful on a miss. */
    memset(pin.fmd.reg_masks, 0, sizeof pin.fmd.reg_masks);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofpbuf *packet)
{
    struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);

    if (!ofport) {
        return 0;
    }

    if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return SLOW_LACP;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}
/* Partially initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.vlan_tci != miss->initial_tci) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->subfacet = NULL;
    op->garbage = NULL;
    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}
/* Helper for handle_flow_miss_without_facet() and
 * handle_flow_miss_with_facet(). */
static void
handle_flow_miss_common(struct rule_dpif *rule,
                        struct ofpbuf *packet, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    ofproto->n_matches++;

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in_miss(ofproto, packet, flow);
    }
}
/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
 * installing a datapath flow.  The answer is usually "yes" (a return value of
 * true).  However, for short flows the cost of bookkeeping is much higher than
 * the benefits, so when the datapath holds a large number of flows we impose
 * some heuristics to decide which flows are likely to be worth tracking. */
static bool
flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
                            struct flow_miss *miss, uint32_t hash)
{
    if (!ofproto->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
            return true;
        }

        ofproto->governor = governor_create(ofproto->up.name);
    }

    return governor_should_install_flow(ofproto->governor, hash,
                                        list_size(&miss->packets));
}
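/* Illustrative arithmetic for the heuristic above (example numbers): with
 * flow_eviction_threshold == 1000, facets are created unconditionally while
 * the datapath holds at most 500 subfacets (n_subfacets * 2 <= threshold).
 * Past that point a governor is created, and only flows that it judges busy
 * enough (based on 'hash' and the number of queued packets) get a facet; the
 * rest are processed packet by packet without installing a datapath flow. */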
/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
 * or creating any datapath flow.  May add an "execute" operation to 'ops' and
 * increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct flow_miss *miss,
                               struct rule_dpif *rule,
                               struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;
    struct ofpbuf *packet;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        COVERAGE_INC(facet_suppress);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);

        dpif_flow_stats_extract(&miss->flow, packet, &stats);
        rule_credit_stats(rule, &stats);

        action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
                              rule, 0, packet);
        ctx.resubmit_stats = &stats;
        xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        if (odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = odp_actions.data;
            execute->actions_len = odp_actions.size;
            op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }
}
/* Handles 'miss', which matches 'facet'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
                            struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path want_path;
    struct subfacet *subfacet;
    struct ofpbuf *packet;

    subfacet = subfacet_create(facet,
                               miss->key_fitness, miss->key, miss->key_len,
                               miss->initial_tci);

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        handle_flow_miss_common(facet->rule, packet, &miss->flow);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
        if (!subfacet->actions || subfacet->slow) {
            subfacet_make_actions(subfacet, packet, &odp_actions);
        }

        dpif_flow_stats_extract(&facet->flow, packet, &stats);
        subfacet_update_stats(subfacet, &stats);

        if (subfacet->actions_len) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            op->subfacet = subfacet;
            if (!subfacet->slow) {
                execute->actions = subfacet->actions;
                execute->actions_len = subfacet->actions_len;
                ofpbuf_uninit(&odp_actions);
            } else {
                execute->actions = odp_actions.data;
                execute->actions_len = odp_actions.size;
                op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
            }

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }

    want_path = subfacet_want_path(subfacet->slow);
    if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.u.flow_put;

        op->subfacet = subfacet;
        op->garbage = NULL;
        op->dpif_op.type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        if (want_path == SF_FAST_PATH) {
            put->actions = subfacet->actions;
            put->actions_len = subfacet->actions_len;
        } else {
            compose_slow_path(ofproto, &facet->flow, subfacet->slow,
                              op->stub, sizeof op->stub,
                              &put->actions, &put->actions_len);
        }
        put->stats = NULL;
    }
}
/* Handles flow miss 'miss' on 'ofproto'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
                 struct flow_miss_op *ops, size_t *n_ops)
{
    struct facet *facet;
    uint32_t hash;

    /* The caller must ensure that miss->hmap_node.hash contains
     * flow_hash(miss->flow, 0). */
    hash = miss->hmap_node.hash;

    facet = facet_lookup_valid(ofproto, &miss->flow, hash);
    if (!facet) {
        struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);

        if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
            handle_flow_miss_without_facet(miss, rule, ops, n_ops);
            return;
        }

        facet = facet_create(rule, &miss->flow, hash);
    }
    handle_flow_miss_with_facet(miss, facet, ops, n_ops);
}
/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
 * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
 * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
 * what a flow key should contain.
 *
 * This function also includes some logic to help make VLAN splinters
 * transparent to the rest of the upcall processing logic.  In particular, if
 * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
 * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
 * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
 *
 * Sets '*initial_tci' to the VLAN TCI with which the packet was really
 * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
 * (This differs from the value returned in flow->vlan_tci only for packets
 * received on VLAN splinters.) */
static enum odp_key_fitness
ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
                              const struct nlattr *key, size_t key_len,
                              struct flow *flow, ovs_be16 *initial_tci,
                              struct ofpbuf *packet)
{
    enum odp_key_fitness fitness;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        return fitness;
    }
    *initial_tci = flow->vlan_tci;

    if (vsp_adjust_flow(ofproto, flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to an
             * OpenFlow controller properly, so that it looks correct for
             * sFlow, and so that flow_extract() will get the correct vlan_tci
             * if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of a
             * struct dpif_upcall (see the large comment on that structure
             * definition), so pushing data on 'packet' is in general not a
             * good idea since it could overwrite 'key' or free it as a side
             * effect.  However, it's OK in this special case because we know
             * that 'packet' is inside a Netlink attribute: pushing 4 bytes
             * will just overwrite the 4-byte "struct nlattr", which is fine
             * since we don't need that header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }

        /* Let the caller know that we can't reproduce 'key' from 'flow'. */
        if (fitness == ODP_FIT_PERFECT) {
            fitness = ODP_FIT_TOO_MUCH;
        }
    }

    return fitness;
}
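/* Illustrative VLAN-splinter scenario (hypothetical port numbers): a packet
 * arrives untagged on splinter port 7, which stands in for VLAN 10 on real
 * port 3.  vsp_adjust_flow() rewrites flow->in_port to 3 and flow->vlan_tci
 * to VLAN 10, while *initial_tci keeps the TCI actually received (zero).
 * init_flow_miss_execute_op() later compares the two values to decide whether
 * eth_pop_vlan() must undo the eth_push_vlan() performed above. */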
static void
handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss;
    struct flow_miss misses[FLOW_MISS_MAX_BATCH];
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    int n_misses;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    n_misses = 0;
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        struct flow_miss *miss = &misses[n_misses];
        struct flow_miss *existing_miss;
        uint32_t hash;

        /* Obtain metadata and check userspace/kernel agreement on flow match,
         * then set 'flow''s header pointers. */
        miss->key_fitness = ofproto_dpif_extract_flow_key(
            ofproto, upcall->key, upcall->key_len,
            &miss->flow, &miss->initial_tci, upcall->packet);
        if (miss->key_fitness == ODP_FIT_ERROR) {
            continue;
        }
        flow_extract(upcall->packet, miss->flow.skb_priority,
                     miss->flow.tun_id, miss->flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&todo, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&todo, &miss->hmap_node, hash);
            miss->key = upcall->key;
            miss->key_len = upcall->key_len;
            miss->upcall_type = upcall->type;
            list_init(&miss->packets);

            n_misses++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &todo) {
        handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
    }
    assert(n_ops <= ARRAY_SIZE(flow_miss_ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(ofproto->dpif, dpif_ops, n_ops);

    /* Free memory and update facets. */
    for (i = 0; i < n_ops; i++) {
        struct flow_miss_op *op = &flow_miss_ops[i];

        switch (op->dpif_op.type) {
        case DPIF_OP_EXECUTE:
            break;

        case DPIF_OP_FLOW_PUT:
            if (!op->dpif_op.error) {
                op->subfacet->path = subfacet_want_path(op->subfacet->slow);
            }
            break;

        case DPIF_OP_FLOW_DEL:
            NOT_REACHED();
        }

        free(op->garbage);
    }
    hmap_destroy(&todo);
}
static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
    union user_action_cookie cookie;

    /* First look at the upcall type. */
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    memcpy(&cookie, &upcall->userdata, sizeof(cookie));
    switch (cookie.type) {
    case USER_ACTION_COOKIE_SFLOW:
        return SFLOW_UPCALL;

    case USER_ACTION_COOKIE_SLOW_PATH:
        return MISS_UPCALL;

    case USER_ACTION_COOKIE_UNSPEC:
    default:
        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
        return BAD_UPCALL;
    }
}
static void
handle_sflow_upcall(struct ofproto_dpif *ofproto,
                    const struct dpif_upcall *upcall)
{
    union user_action_cookie cookie;
    enum odp_key_fitness fitness;
    ovs_be16 initial_tci;
    struct flow flow;

    fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
                                            upcall->key_len, &flow,
                                            &initial_tci, upcall->packet);
    if (fitness == ODP_FIT_ERROR) {
        return;
    }

    memcpy(&cookie, &upcall->userdata, sizeof(cookie));
    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
}
static int
handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
    uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
    int n_processed;
    int n_misses;
    int i;

    assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (n_processed = 0; n_processed < max_batch; n_processed++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        struct ofpbuf *buf = &miss_bufs[n_misses];
        int error;

        ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
                        sizeof miss_buf_stubs[n_misses]);
        error = dpif_recv(ofproto->dpif, upcall, buf);
        if (error) {
            ofpbuf_uninit(buf);
            break;
        }

        switch (classify_upcall(upcall)) {
        case MISS_UPCALL:
            /* Handle it later. */
            n_misses++;
            break;

        case SFLOW_UPCALL:
            if (ofproto->sflow) {
                handle_sflow_upcall(ofproto, upcall);
            }
            ofpbuf_uninit(buf);
            break;

        case BAD_UPCALL:
            ofpbuf_uninit(buf);
            break;
        }
    }

    /* Handle deferred MISS_UPCALL processing. */
    handle_miss_upcalls(ofproto, misses, n_misses);
    for (i = 0; i < n_misses; i++) {
        ofpbuf_uninit(&miss_bufs[i]);
    }

    return n_processed;
}
/* Flow expiration. */

static int subfacet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct ofproto_dpif *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);

/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct ofproto_dpif *ofproto)
{
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int dp_max_idle;

    /* Update stats for each flow in the datapath. */
    update_stats(ofproto);

    /* Expire subfacets that have been idle too long. */
    dp_max_idle = subfacet_max_idle(ofproto);
    expire_subfacets(ofproto, dp_max_idle);

    /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            rule_expire(rule);
        }
    }

    /* All outstanding data in existing flows has been accounted, so it's a
     * good time to do bond rebalancing. */
    if (ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            if (bundle->bond) {
                bond_rebalance(bundle->bond, &ofproto->revalidate_set);
            }
        }
    }

    return MIN(dp_max_idle, 1000);
}
/* Updates flow table statistics given that the datapath just reported 'stats'
 * as 'subfacet''s statistics. */
static void
update_subfacet_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;

    if (stats->n_packets >= subfacet->dp_packet_count) {
        uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
        facet->packet_count += extra;
    } else {
        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
    }

    if (stats->n_bytes >= subfacet->dp_byte_count) {
        facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
    }

    subfacet->dp_packet_count = stats->n_packets;
    subfacet->dp_byte_count = stats->n_bytes;

    facet->tcp_flags |= stats->tcp_flags;

    subfacet_update_time(subfacet, stats->used);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_learn(facet);
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }
    facet_push_stats(facet);
}
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
 * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
static void
delete_unexpected_flow(struct dpif *dpif,
                       const struct nlattr *key, size_t key_len)
{
    if (!VLOG_DROP_WARN(&rl)) {
        struct ds s;

        ds_init(&s);
        odp_flow_key_format(key, key_len, &s);
        VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
        ds_destroy(&s);
    }

    COVERAGE_INC(facet_unexpected);
    dpif_flow_del(dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics. */
static void
update_stats(struct ofproto_dpif *p)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, p->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct subfacet *subfacet;

        subfacet = subfacet_find(p, key, key_len);
        switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
        case SF_FAST_PATH:
            update_subfacet_stats(subfacet, stats);
            break;

        case SF_SLOW_PATH:
            /* Stats are updated per-packet. */
            break;

        case SF_NOT_INSTALLED:
        default:
            delete_unexpected_flow(p->dpif, key, key_len);
            break;
        }
    }
    dpif_flow_dump_done(&dump);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistic into its rule. */
static int
subfacet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and to cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At this point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->subfacets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
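/* Worked example of the histogram cutoff (illustrative numbers): suppose
 * BUCKET_WIDTH works out to 100 ms (so N_BUCKETS == 50), the datapath holds
 * 10,000 subfacets, and flow_eviction_threshold == 1000.  The loop keeps
 * accumulating buckets until subtotal >= MAX(1000, 10000 / 100) == 1000; if
 * the first three buckets hold 400, 450, and 300 subfacets, it stops after
 * the third, and the function returns 3 * 100 == 300 ms as the idle cutoff. */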
enum { EXPIRE_MAX_BATCH = 50 };

static void
expire_batch(struct ofproto_dpif *ofproto, struct subfacet **subfacets, int n)
{
    struct odputil_keybuf keybufs[EXPIRE_MAX_BATCH];
    struct dpif_op ops[EXPIRE_MAX_BATCH];
    struct dpif_op *opsp[EXPIRE_MAX_BATCH];
    struct ofpbuf keys[EXPIRE_MAX_BATCH];
    struct dpif_flow_stats stats[EXPIRE_MAX_BATCH];
    int i;

    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
        ops[i].u.flow_del.key = keys[i].data;
        ops[i].u.flow_del.key_len = keys[i].size;
        ops[i].u.flow_del.stats = &stats[i];
        opsp[i] = &ops[i];
    }

    dpif_operate(ofproto->dpif, opsp, n);
    for (i = 0; i < n; i++) {
        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
        subfacets[i]->path = SF_NOT_INSTALLED;
        subfacet_destroy(subfacets[i]);
    }
}
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
    /* Cutoff time for most flows. */
    long long int normal_cutoff = time_msec() - dp_max_idle;

    /* We really want to keep flows for special protocols around, so use a more
     * conservative cutoff. */
    long long int special_cutoff = time_msec() - 10000;

    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[EXPIRE_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        long long int cutoff;

        cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
                  ? special_cutoff
                  : normal_cutoff);
        if (subfacet->used < cutoff) {
            if (subfacet->path != SF_NOT_INSTALLED) {
                batch[n_batch++] = subfacet;
                if (n_batch >= EXPIRE_MAX_BATCH) {
                    expire_batch(ofproto, batch, n_batch);
                    n_batch = 0;
                }
            } else {
                subfacet_destroy(subfacet);
            }
        }
    }

    if (n_batch > 0) {
        expire_batch(ofproto, batch, n_batch);
    }
}
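/* Example of the two cutoffs (illustrative): with dp_max_idle == 300 ms, an
 * ordinary subfacet last used 400 ms ago is past normal_cutoff and expires,
 * but a CFM, LACP, or STP subfacet with the same 'used' timestamp survives,
 * because special_cutoff only expires it after 10 s of idleness. */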
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    if (rule->up.pending) {
        /* We'll have to expire it later. */
        return;
    }

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout
               && now > rule->up.used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
/* Creates and returns a new facet owned by 'rule', given a 'flow'.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * the ofproto's classifier table.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The facet will initially have no subfacets.  The caller should create (at
 * least) one subfacet with subfacet_create(). */
static struct facet *
facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    list_init(&facet->subfacets);
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    return facet;
}

static void
facet_free(struct facet *facet)
{
    free(facet);
}
/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
 * 'odp_actions' on 'packet', a packet belonging to 'flow'.
 *
 * Takes ownership of 'packet'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct nlattr *odp_actions, size_t actions_len,
                    struct ofpbuf *packet)
{
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    int error;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow);

    error = dpif_execute(ofproto->dpif, key.data, key.size,
                         odp_actions, actions_len, packet);

    ofpbuf_delete(packet);
    return !error;
}
/* Remove 'facet' from 'ofproto' and free up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via subfacet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet, *next_subfacet;

    assert(!list_is_empty(&facet->subfacets));

    /* First uninstall all of the subfacets to get final statistics. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet_uninstall(subfacet);
    }

    /* Flush the final stats to the rule.
     *
     * This might require us to have at least one subfacet around so that we
     * can use its actions for accounting in facet_account(), which is why we
     * have uninstalled but not yet destroyed the subfacets. */
    facet_flush_stats(facet);

    /* Now we're really all done so destroy everything. */
    LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
                        &facet->subfacets) {
        subfacet_destroy__(subfacet);
    }
    hmap_remove(&ofproto->facets, &facet->hmap_node);
    list_remove(&facet->list_node);
    facet_free(facet);
}
/* Feed information from 'facet' back into the learning table to keep it in
 * sync with what is actually flowing through the datapath. */
static void
facet_learn(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct action_xlate_ctx ctx;

    if (!facet->has_learn
        && !facet->has_normal
        && (!facet->has_fin_timeout
            || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
        return;
    }

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                          facet->flow.vlan_tci,
                          facet->rule, facet->tcp_flags, NULL);
    ctx.may_learn = true;
    xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
                                   facet->rule->up.ofpacts_len);
}
static void
facet_account(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;
    const struct nlattr *a;
    unsigned int left;
    ovs_be16 vlan_tci;
    uint64_t n_bytes;

    if (!facet->has_normal || !ofproto->has_bonded_bundles) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bond slave.)
     *
     * We use the actions from an arbitrary subfacet because they should all
     * be equally valid for our purpose. */
    subfacet = CONTAINER_OF(list_front(&facet->subfacets),
                            struct subfacet, list_node);
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left,
                             subfacet->actions, subfacet->actions_len) {
        const struct ovs_action_push_vlan *vlan;
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP_VLAN:
            vlan_tci = htons(0);
            break;

        case OVS_ACTION_ATTR_PUSH_VLAN:
            vlan = nl_attr_get(a);
            vlan_tci = vlan->vlan_tci;
            break;
        }
    }
}
/* Returns true if the only action for 'facet' is to send to the controller.
 * (We don't report NetFlow expiration messages for such facets because they
 * are just part of the control logic for the network, not real traffic). */
static bool
facet_is_controller_flow(struct facet *facet)
{
    if (facet) {
        const struct rule *rule = &facet->rule->up;
        const struct ofpact *ofpacts = rule->ofpacts;
        size_t ofpacts_len = rule->ofpacts_len;

        if (ofpacts->type == OFPACT_CONTROLLER &&
            ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
            return true;
        }
    }
    return false;
}
/* Folds all of 'facet''s statistics into its rule.  Also updates the
 * accounting ofhook and emits a NetFlow expiration if appropriate.  All of
 * 'facet''s statistics in the datapath should have been zeroed and folded into
 * its packet and byte counts before this function is called. */
static void
facet_flush_stats(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        assert(!subfacet->dp_byte_count);
        assert(!subfacet->dp_packet_count);
    }

    facet_push_stats(facet);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }

    if (ofproto->netflow && !facet_is_controller_flow(facet)) {
        struct ofexpired expired;
        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }

    facet->rule->packet_count += facet->packet_count;
    facet->rule->byte_count += facet->byte_count;

    /* Reset counters to prevent double counting if 'facet' ever gets
     * reinstalled. */
    facet_reset_counters(facet);

    netflow_flow_clear(&facet->nf_flow);
    facet->tcp_flags = 0;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto,
           const struct flow *flow, uint32_t hash)
{
    struct facet *facet;

    HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
        if (flow_equal(flow, &facet->flow)) {
            return facet;
        }
    }

    return NULL;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint32_t hash)
{
    struct facet *facet;

    facet = facet_find(ofproto, flow, hash);
    if (facet
        && (ofproto->need_revalidate
            || tag_set_intersects(&ofproto->revalidate_set, facet->tags))) {
        facet_revalidate(facet);
    }

    return facet;
}
static const char *
subfacet_path_to_string(enum subfacet_path path)
{
    switch (path) {
    case SF_NOT_INSTALLED:
        return "not installed";
    case SF_FAST_PATH:
        return "in fast path";
    case SF_SLOW_PATH:
        return "in slow path";
    default:
        return "<error>";
    }
}

/* Returns the path in which a subfacet should be installed if its 'slow'
 * member has the specified value. */
static enum subfacet_path
subfacet_want_path(enum slow_path_reason slow)
{
    return slow ? SF_SLOW_PATH : SF_FAST_PATH;
}
/* Returns true if 'subfacet' needs to have its datapath flow updated,
 * supposing that its actions have been recalculated as 'want_actions' and that
 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
static bool
subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
                        const struct ofpbuf *want_actions)
{
    enum subfacet_path want_path = subfacet_want_path(slow);
    return (want_path != subfacet->path
            || (want_path == SF_FAST_PATH
                && (subfacet->actions_len != want_actions->size
                    || memcmp(subfacet->actions, want_actions->data,
                              subfacet->actions_len))));
}
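/* Summary of the cases above (for illustration): a subfacet whose wanted path
 * differs from its installed path always needs an update; one staying in the
 * fast path needs an update only if the recalculated actions differ
 * byte-for-byte from the installed ones; one staying in the slow path never
 * does, since slow-path flows are handled per-packet in userspace anyway. */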
static bool
facet_check_consistency(struct facet *facet)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);

    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    struct rule_dpif *rule;
    struct subfacet *subfacet;
    bool may_log = false;
    bool ok;

    /* Check the rule for consistency. */
    rule = rule_dpif_lookup(ofproto, &facet->flow);
    ok = rule == facet->rule;
    if (!ok) {
        may_log = !VLOG_DROP_WARN(&rl);
        if (may_log) {
            struct ds s;

            ds_init(&s);
            flow_format(&s, &facet->flow);
            ds_put_format(&s, ": facet associated with wrong rule (was "
                          "table=%"PRIu8",", facet->rule->up.table_id);
            cls_rule_format(&facet->rule->up.cr, &s);
            ds_put_format(&s, ") (should have been table=%"PRIu8",",
                          rule->up.table_id);
            cls_rule_format(&rule->up.cr, &s);
            ds_put_char(&s, ')');

            VLOG_WARN("%s", ds_cstr(&s));
            ds_destroy(&s);
        }
    }

    /* Check the datapath actions for consistency. */
    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        enum subfacet_path want_path;
        struct odputil_keybuf keybuf;
        struct action_xlate_ctx ctx;
        struct ofpbuf key;
        struct ds s;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              subfacet->initial_tci, rule, 0, NULL);
        xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        if (subfacet->path == SF_NOT_INSTALLED) {
            /* This only happens if the datapath reported an error when we
             * tried to install the flow.  Don't flag another error here. */
            continue;
        }

        want_path = subfacet_want_path(subfacet->slow);
        if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
            /* The actions for slow-path flows may legitimately vary from one
             * packet to the next.  We're done. */
            continue;
        }

        if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
            continue;
        }

        /* Inconsistency! */
        if (ok) {
            may_log = !VLOG_DROP_WARN(&rl);
            ok = false;
        }
        if (!may_log) {
            /* Rate-limited, skip reporting. */
            continue;
        }

        ds_init(&s);
        subfacet_get_key(subfacet, &keybuf, &key);
        odp_flow_key_format(key.data, key.size, &s);

        ds_put_cstr(&s, ": inconsistency in subfacet");
        if (want_path != subfacet->path) {
            enum odp_key_fitness fitness = subfacet->key_fitness;

            ds_put_format(&s, " (%s, fitness=%s)",
                          subfacet_path_to_string(subfacet->path),
                          odp_key_fitness_to_string(fitness));
            ds_put_format(&s, " (should have been %s)",
                          subfacet_path_to_string(want_path));
        } else if (want_path == SF_FAST_PATH) {
            ds_put_cstr(&s, " (actions were: ");
            format_odp_actions(&s, subfacet->actions,
                               subfacet->actions_len);
            ds_put_cstr(&s, ") (correct actions: ");
            format_odp_actions(&s, odp_actions.data, odp_actions.size);
            ds_put_char(&s, ')');
        } else {
            ds_put_cstr(&s, " (actions: ");
            format_odp_actions(&s, subfacet->actions,
                               subfacet->actions_len);
            ds_put_char(&s, ')');
        }
        VLOG_WARN("%s", ds_cstr(&s));
        ds_destroy(&s);
    }
    ofpbuf_uninit(&odp_actions);

    return ok;
}
/* Re-searches the classifier for 'facet':
 *
 *   - If the rule found is different from 'facet''s current rule, moves
 *     'facet' to the new rule and recompiles its actions.
 *
 *   - If the rule found is the same as 'facet''s current rule, leaves 'facet'
 *     where it is and recompiles its actions anyway. */
static void
facet_revalidate(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct actions {
        struct nlattr *odp_actions;
        size_t actions_len;
    };
    struct actions *new_actions;

    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    struct rule_dpif *new_rule;
    struct subfacet *subfacet;
    int i;

    COVERAGE_INC(facet_revalidate);

    new_rule = rule_dpif_lookup(ofproto, &facet->flow);

    /* Calculate new datapath actions.
     *
     * We do not modify any 'facet' state yet, because we might need to, e.g.,
     * emit a NetFlow expiration and, if so, we need to have the old state
     * around to properly compose it. */

    /* If the datapath actions changed or the installability changed,
     * then we need to talk to the datapath. */
    i = 0;
    new_actions = NULL;
    memset(&ctx, 0, sizeof ctx);
    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        enum slow_path_reason slow;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              subfacet->initial_tci, new_rule, 0, NULL);
        xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
                      &odp_actions);

        slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
        if (subfacet_should_install(subfacet, slow, &odp_actions)) {
            struct dpif_flow_stats stats;

            subfacet_install(subfacet,
                             odp_actions.data, odp_actions.size, &stats, slow);
            subfacet_update_stats(subfacet, &stats);

            if (!new_actions) {
                new_actions = xcalloc(list_size(&facet->subfacets),
                                      sizeof *new_actions);
            }
            new_actions[i].odp_actions = xmemdup(odp_actions.data,
                                                 odp_actions.size);
            new_actions[i].actions_len = odp_actions.size;
        }

        i++;
    }
    ofpbuf_uninit(&odp_actions);

    if (new_actions) {
        facet_flush_stats(facet);
    }

    /* Update 'facet' now that we've taken care of all the old state. */
    facet->tags = ctx.tags;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->mirrors = ctx.mirrors;

    i = 0;
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;

        if (new_actions && new_actions[i].odp_actions) {
            free(subfacet->actions);
            subfacet->actions = new_actions[i].odp_actions;
            subfacet->actions_len = new_actions[i].actions_len;
        }
        i++;
    }
    free(new_actions);

    if (facet->rule != new_rule) {
        COVERAGE_INC(facet_changed_rule);
        list_remove(&facet->list_node);
        list_push_back(&new_rule->facets, &facet->list_node);
        facet->rule = new_rule;
        facet->used = new_rule->up.created;
        facet->prev_used = facet->used;
    }
}
/* Updates 'facet''s used time.  Caller is responsible for calling
 * facet_push_stats() to update the flows which 'facet' resubmits into. */
static void
facet_update_time(struct facet *facet, long long int used)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    if (used > facet->used) {
        facet->used = used;
        ofproto_rule_update_used(&facet->rule->up, used);
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
    }
}

static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}
static void
facet_push_stats(struct facet *facet)
{
    struct dpif_flow_stats stats;

    assert(facet->packet_count >= facet->prev_packet_count);
    assert(facet->byte_count >= facet->prev_byte_count);
    assert(facet->used >= facet->prev_used);

    stats.n_packets = facet->packet_count - facet->prev_packet_count;
    stats.n_bytes = facet->byte_count - facet->prev_byte_count;
    stats.used = facet->used;
    stats.tcp_flags = 0;

    if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        flow_push_stats(facet->rule, &facet->flow, &stats);

        update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
                            facet->mirrors, stats.n_packets, stats.n_bytes);
    }
}
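
/* Illustrative example of the delta accounting above: facet counters are
 * cumulative, so each push forwards only the increment since the previous
 * push.  If facet->packet_count is 10 and facet->prev_packet_count is 7,
 * stats.n_packets is 3; prev_packet_count then advances to 10, so the same
 * packets are never credited twice. */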
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
    rule->packet_count += stats->n_packets;
    rule->byte_count += stats->n_bytes;
    ofproto_rule_update_used(&rule->up, stats->used);
}
/* Pushes flow statistics to the rules which 'flow' resubmits into given
 * 'rule''s actions and mirrors. */
static void
flow_push_stats(struct rule_dpif *rule,
                const struct flow *flow, const struct dpif_flow_stats *stats)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;

    ofproto_rule_update_used(&rule->up, stats->used);

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
                          0, NULL);
    ctx.resubmit_stats = stats;
    xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
                                   rule->up.ofpacts_len);
}
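
/* How the push propagates (summary of the calls above): because
 * ctx.resubmit_stats is set, every rule visited during this translation is
 * credited with 'stats' as well -- xlate_table_action() calls
 * rule_credit_stats(rule, ctx->resubmit_stats) for each resubmitted rule. */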
static struct subfacet *
subfacet_find__(struct ofproto_dpif *ofproto,
                const struct nlattr *key, size_t key_len, uint32_t key_hash,
                const struct flow *flow)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &ofproto->subfacets) {
        if (subfacet->key
            ? (subfacet->key_len == key_len
               && !memcmp(key, subfacet->key, key_len))
            : flow_equal(flow, &subfacet->facet->flow)) {
            return subfacet;
        }
    }

    return NULL;
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
 * 'key_fitness', 'key', and 'key_len'.  Returns the existing subfacet if
 * there is one, otherwise creates and returns a new subfacet.
 *
 * If the returned subfacet is new, then subfacet->actions will be NULL, in
 * which case the caller must populate the actions with
 * subfacet_make_actions(). */
static struct subfacet *
subfacet_create(struct facet *facet, enum odp_key_fitness key_fitness,
                const struct nlattr *key, size_t key_len, ovs_be16 initial_tci)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    uint32_t key_hash = odp_flow_key_hash(key, key_len);
    struct subfacet *subfacet;

    if (list_is_empty(&facet->subfacets)) {
        subfacet = &facet->one_subfacet;

        /* This subfacet should conceptually be created, and have its first
         * packet pass through, at the same time that its facet was created.
         * If we called time_msec() here, then the subfacet could look
         * (occasionally) as though it was used some time after the facet was
         * used.  That can make a one-packet flow look like it has a nonzero
         * duration, which looks odd in e.g. NetFlow statistics. */
        subfacet->used = facet->used;
    } else {
        subfacet = subfacet_find__(ofproto, key, key_len, key_hash,
                                   &facet->flow);
        if (subfacet) {
            if (subfacet->facet == facet) {
                return subfacet;
            }

            /* This shouldn't happen. */
            VLOG_ERR_RL(&rl, "subfacet with wrong facet");
            subfacet_destroy(subfacet);
        }

        subfacet = xmalloc(sizeof *subfacet);
        subfacet->used = time_msec();
    }

    hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->key_fitness = key_fitness;
    if (key_fitness != ODP_FIT_PERFECT) {
        subfacet->key = xmemdup(key, key_len);
        subfacet->key_len = key_len;
    } else {
        subfacet->key = NULL;
        subfacet->key_len = 0;
    }
    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
    subfacet->actions_len = 0;
    subfacet->actions = NULL;
    subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
                      ? SLOW_MATCH
                      : 0);
    subfacet->path = SF_NOT_INSTALLED;
    subfacet->initial_tci = initial_tci;

    return subfacet;
}
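
/* Minimal usage sketch (illustrative only; the real caller is the flow-miss
 * handling code, which is outside this excerpt):
 *
 *     struct subfacet *subfacet;
 *     uint64_t stub[1024 / 8];
 *     struct ofpbuf odp_actions;
 *
 *     subfacet = subfacet_create(facet, fitness, key, key_len, initial_tci);
 *     if (!subfacet->actions) {
 *         ofpbuf_use_stub(&odp_actions, stub, sizeof stub);
 *         subfacet_make_actions(subfacet, packet, &odp_actions);
 *         ofpbuf_uninit(&odp_actions);
 *     }
 */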
/* Searches 'ofproto' for a subfacet with the given 'key', 'key_len', and
 * 'flow'.  Returns the subfacet if one exists, otherwise NULL. */
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
              const struct nlattr *key, size_t key_len)
{
    uint32_t key_hash = odp_flow_key_hash(key, key_len);
    enum odp_key_fitness fitness;
    struct flow flow;

    fitness = odp_flow_key_to_flow(key, key_len, &flow);
    if (fitness == ODP_FIT_ERROR) {
        return NULL;
    }

    return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
}
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    subfacet_uninstall(subfacet);
    hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    free(subfacet->actions);
    if (subfacet != &facet->one_subfacet) {
        free(subfacet);
    }
}

/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(facet);
    } else {
        subfacet_destroy__(subfacet);
    }
}
/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
 * that can be used to refer to 'subfacet'.  The caller must provide 'keybuf'
 * for use as temporary storage. */
static void
subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
                 struct ofpbuf *key)
{
    if (!subfacet->key) {
        ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
        odp_flow_key_from_flow(key, &subfacet->facet->flow);
    } else {
        ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
    }
}
/* Composes the datapath actions for 'subfacet' based on its rule's actions.
 * Translates the actions into 'odp_actions', which the caller must have
 * initialized and is responsible for uninitializing. */
static void
subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
                      struct ofpbuf *odp_actions)
{
    struct facet *facet = subfacet->facet;
    struct rule_dpif *rule = facet->rule;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    struct action_xlate_ctx ctx;

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
                          rule, 0, packet);
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
    facet->tags = ctx.tags;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->mirrors = ctx.mirrors;

    subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
    if (subfacet->actions_len != odp_actions->size
        || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
        free(subfacet->actions);
        subfacet->actions_len = odp_actions->size;
        subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
    }
}
/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
 * in the datapath will be zeroed and 'stats' will be updated with traffic new
 * since 'subfacet' was last updated.
 *
 * Returns 0 if successful, otherwise a positive errno value. */
static int
subfacet_install(struct subfacet *subfacet,
                 const struct nlattr *actions, size_t actions_len,
                 struct dpif_flow_stats *stats,
                 enum slow_path_reason slow)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path path = subfacet_want_path(slow);
    uint64_t slow_path_stub[128 / 8];
    struct odputil_keybuf keybuf;
    enum dpif_flow_put_flags flags;
    struct ofpbuf key;
    int ret;

    flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
    if (stats) {
        flags |= DPIF_FP_ZERO_STATS;
    }

    if (path == SF_SLOW_PATH) {
        compose_slow_path(ofproto, &facet->flow, slow,
                          slow_path_stub, sizeof slow_path_stub,
                          &actions, &actions_len);
    }

    subfacet_get_key(subfacet, &keybuf, &key);
    ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
                        actions, actions_len, stats);

    if (stats) {
        subfacet_reset_dp_stats(subfacet, stats);
    }

    if (!ret) {
        subfacet->path = path;
    }
    return ret;
}
static int
subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
{
    return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
                            stats, subfacet->slow);
}

/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
{
    if (subfacet->path != SF_NOT_INSTALLED) {
        struct rule_dpif *rule = subfacet->facet->rule;
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
        struct odputil_keybuf keybuf;
        struct dpif_flow_stats stats;
        struct ofpbuf key;
        int error;

        subfacet_get_key(subfacet, &keybuf, &key);
        error = dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(subfacet, &stats);
        }
        subfacet->path = SF_NOT_INSTALLED;
    } else {
        assert(subfacet->dp_packet_count == 0);
        assert(subfacet->dp_byte_count == 0);
    }
}
/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}

/* Updates 'subfacet''s used time.  The caller is responsible for calling
 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
static void
subfacet_update_time(struct subfacet *subfacet, long long int used)
{
    if (used > subfacet->used) {
        subfacet->used = used;
        facet_update_time(subfacet->facet, used);
    }
}
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet_update_time(subfacet, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet->tcp_flags |= stats->tcp_flags;
        facet_push_stats(facet);
        netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
    }
}
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct ofport_dpif *port;
    struct rule_dpif *rule;

    rule = rule_dpif_lookup__(ofproto, flow, 0);
    if (rule) {
        return rule;
    }

    port = get_ofp_port(ofproto, flow->in_port);
    if (!port) {
        VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
        return ofproto->miss_rule;
    }

    if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
        return ofproto->no_packet_in_rule;
    }
    return ofproto->miss_rule;
}

static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id].cls;
    if (flow->nw_frag & FLOW_NW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
         * are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }

    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
static void
complete_operation(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    rule_invalidate(rule);
    if (clogged) {
        struct dpif_completion *c = xmalloc(sizeof *c);
        c->op = rule->up.pending;
        list_push_back(&ofproto->completions, &c->list_node);
    } else {
        ofoperation_complete(rule->up.pending, 0);
    }
}

static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xmalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}
static enum ofperr
rule_construct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct rule_dpif *victim;
    uint8_t table_id;
    enum ofperr error;

    error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
                          &rule->up.cr.flow, ofproto->max_ports);
    if (error) {
        return error;
    }

    rule->packet_count = 0;
    rule->byte_count = 0;

    victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
    if (victim && !list_is_empty(&victim->facets)) {
        struct facet *facet;

        rule->facets = victim->facets;
        list_moved(&rule->facets);
        LIST_FOR_EACH (facet, list_node, &rule->facets) {
            /* XXX: We're only clearing our local counters here.  It's possible
             * that quite a few packets are unaccounted for in the datapath
             * statistics.  These will be accounted to the new rule instead of
             * cleared as required.  This could be fixed by clearing out the
             * datapath statistics for this facet, but currently it doesn't
             * seem worth it. */
            facet_reset_counters(facet);
            facet->rule = rule;
        }
    } else {
        /* Must avoid list_moved() in this case. */
        list_init(&rule->facets);
    }

    table_id = rule->up.table_id;
    rule->tag = (victim ? victim->tag
                 : table_id == 0 ? 0
                 : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
                                      ofproto->tables[table_id].basis));

    complete_operation(rule);
    return 0;
}
static void
rule_destruct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet, *next_facet;

    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(facet);
    }

    complete_operation(rule);
}

static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;

    /* Add any statistics that are tracked by facets.  This includes
     * statistical data recently updated by ofproto_update_stats() as well as
     * stats for packets that were executed "by hand" via dpif_execute(). */
    LIST_FOR_EACH (facet, list_node, &rule->facets) {
        *packets += facet->packet_count;
        *bytes += facet->byte_count;
    }
}
static enum ofperr
rule_execute(struct rule *rule_, const struct flow *flow,
             struct ofpbuf *packet)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    struct dpif_flow_stats stats;

    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    dpif_flow_stats_extract(flow, packet, &stats);
    rule_credit_stats(rule, &stats);

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
                          rule, stats.tcp_flags, packet);
    ctx.resubmit_stats = &stats;
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);

    execute_odp_actions(ofproto, flow, odp_actions.data,
                        odp_actions.size, packet);

    ofpbuf_uninit(&odp_actions);

    return 0;
}

static void
rule_modify_actions(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    enum ofperr error;

    error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
                          &rule->up.cr.flow, ofproto->max_ports);
    if (error) {
        ofoperation_complete(rule->up.pending, error);
        return;
    }

    complete_operation(rule);
}
/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct ofpbuf key, odp_actions;
    struct odputil_keybuf keybuf;
    uint16_t odp_port;
    struct flow flow;
    int error;

    flow_extract(packet, 0, 0, 0, &flow);
    odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
                                      flow.vlan_tci);
    if (odp_port != ofport->odp_port) {
        eth_pop_vlan(packet);
        flow.vlan_tci = htons(0);
    }

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, &flow);

    ofpbuf_init(&odp_actions, 32);
    compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);

    nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
    error = dpif_execute(ofproto->dpif,
                         key.data, key.size,
                         odp_actions.data, odp_actions.size,
                         packet);
    ofpbuf_uninit(&odp_actions);

    if (error) {
        VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
                     ofproto->up.name, odp_port, strerror(error));
    }
    ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
    return error;
}

/* OpenFlow to datapath action translation. */

static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
 * The action will state 'slow' as the reason that the action is in the slow
 * path.  (This is purely informational: it allows a human viewing "ovs-dpctl
 * dump-flows" output to see why a flow is in the slow path.)
 *
 * The 'stub_size' bytes in 'stub' will be used to store the action.
 * 'stub_size' must be large enough for the action.
 *
 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
 * respectively. */
static void
compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
                  enum slow_path_reason slow,
                  uint64_t *stub, size_t stub_size,
                  const struct nlattr **actionsp, size_t *actions_lenp)
{
    union user_action_cookie cookie;
    struct ofpbuf buf;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = slow;

    ofpbuf_use_stack(&buf, stub, stub_size);
    if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
        uint32_t pid = dpif_port_get_pid(ofproto->dpif, UINT16_MAX);
        odp_put_userspace_action(pid, &cookie, &buf);
    } else {
        put_userspace_action(ofproto, &buf, flow, &cookie);
    }
    *actionsp = buf.data;
    *actions_lenp = buf.size;
}
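
/* Usage sketch, mirroring the call in subfacet_install() above (illustrative
 * only):
 *
 *     uint64_t stub[128 / 8];
 *     const struct nlattr *actions;
 *     size_t actions_len;
 *
 *     compose_slow_path(ofproto, &facet->flow, slow,
 *                       stub, sizeof stub, &actions, &actions_len);
 *
 * The composed action is stored in 'stub', so 'stub' must remain in scope for
 * as long as 'actions' is in use. */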
static size_t
put_userspace_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     const union user_action_cookie *cookie)
{
    uint32_t pid;

    pid = dpif_port_get_pid(ofproto->dpif,
                            ofp_port_to_odp_port(flow->in_port));

    return odp_put_userspace_action(pid, cookie, odp_actions);
}

static void
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}
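
/* Worked examples of the encoding above: a packet dropped before any output
 * yields cookie->sflow.output == 0x40000000 | 256; a packet sent to a single
 * port with ifindex 7 yields 7; a packet sent to three ports yields
 * 0x80000000 | 3. */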
/* Compose SAMPLE action for sFlow. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;
    size_t sample_offset, actions_offset;
    int cookie_offset;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    /* Number of packets out of UINT_MAX to sample. */
    probability = dpif_sflow_get_probability(ofproto->sflow);
    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

/* SAMPLE action must be first action in any given list of actions.
 * At this point we do not have all information required to build it. So try to
 * build sample action as complete as possible. */
static void
add_sflow_action(struct action_xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   ctx->odp_actions,
                                                   &ctx->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
 * USERSPACE action's user-cookie which is required for sflow. */
static void
fix_sflow_action(struct action_xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                       sizeof(*cookie));
    assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
static void
compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
    ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
    uint8_t flow_nw_tos = ctx->flow.nw_tos;
    uint16_t out_port;

    if (ofport) {
        struct priority_to_dscp *pdscp;

        if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD
            || (check_stp && !stp_forward_in_state(ofport->stp_state))) {
            return;
        }

        pdscp = get_priority(ofport, ctx->flow.skb_priority);
        if (pdscp) {
            ctx->flow.nw_tos &= ~IP_DSCP_MASK;
            ctx->flow.nw_tos |= pdscp->dscp;
        }
    } else {
        /* We may not have an ofport record for this port, but it doesn't hurt
         * to allow forwarding to it anyhow.  Maybe such a port will appear
         * later and we're pre-populating the flow table. */
    }

    out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
                                      ctx->flow.vlan_tci);
    if (out_port != odp_port) {
        ctx->flow.vlan_tci = htons(0);
    }
    commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->nf_output_iface = ofp_port;
    ctx->flow.vlan_tci = flow_vlan_tci;
    ctx->flow.nw_tos = flow_nw_tos;
}

static void
compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}
static void
xlate_table_action(struct action_xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct ofproto_dpif *ofproto = ctx->ofproto;
        struct rule_dpif *rule;
        uint16_t old_in_port;
        uint8_t old_table_id;

        old_table_id = ctx->table_id;
        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        old_in_port = ctx->flow.in_port;
        ctx->flow.in_port = in_port;
        rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);

        /* Tag the flow. */
        if (table_id > 0 && table_id < N_TABLES) {
            struct table_dpif *table = &ofproto->tables[table_id];
            if (table->other_table) {
                ctx->tags |= (rule && rule->tag
                              ? rule->tag
                              : rule_calculate_tag(&ctx->flow,
                                                   &table->other_table->wc,
                                                   table->basis));
            }
        }

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->flow.in_port = old_in_port;

        if (ctx->resubmit_hook) {
            ctx->resubmit_hook(ctx, rule);
        }

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            if (ctx->resubmit_stats) {
                rule_credit_stats(rule, ctx->resubmit_stats);
            }

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}

static void
xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id);
}
static void
flood_packets(struct action_xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->nf_output_iface = NF_OUT_FLOOD;
}
static void
execute_controller_action(struct action_xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;

    ctx->slow |= SLOW_CONTROLLER;
    if (!ctx->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->packet);

    if (packet->l2 && packet->l3) {
        struct eth_header *eh;

        eth_pop_vlan(packet);
        eh = packet->l2;

        /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
         * LLC frame.  Calculating the Ethernet type of these frames is more
         * trouble than seems appropriate for a simple assertion. */
        assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
               || eh->eth_type == ctx->flow.dl_type);

        memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
        memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);

        if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
            eth_push_vlan(packet, ctx->flow.vlan_tci);
        }

        if (packet->l4) {
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
                                ctx->flow.nw_tos, ctx->flow.nw_ttl);
            }

            if (packet->l7) {
                if (ctx->flow.nw_proto == IPPROTO_TCP) {
                    packet_set_tcp_port(packet, ctx->flow.tp_src,
                                        ctx->flow.tp_dst);
                } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
                    packet_set_udp_port(packet, ctx->flow.tp_src,
                                        ctx->flow.tp_dst);
                }
            }
        }
    }

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
}
static bool
compose_dec_ttl(struct action_xlate_ctx *ctx)
{
    if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
        ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
        return false;
    }

    if (ctx->flow.nw_ttl > 1) {
        ctx->flow.nw_ttl--;
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}
static void
xlate_output_action(struct action_xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len)
{
    uint16_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->flow.in_port, 0);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->flow.in_port) {
            compose_output_action(ctx, port);
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
    if (port <= UINT16_MAX) {
        xlate_output_action(ctx, port, or->max_len);
    }
}
static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->flow.in_port;
    } else if (ofp_port == ctx->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->flow.skb_priority;
    ctx->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &skb_priority)) {
        ctx->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}
struct xlate_reg_state {
    ovs_be16 vlan_tci;
    ovs_be64 tun_id;
};

static void
xlate_autopath(struct action_xlate_ctx *ctx,
               const struct ofpact_autopath *ap)
{
    uint16_t ofp_port = ap->port;
    struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);

    if (!port || !port->bundle) {
        ofp_port = OFPP_NONE;
    } else if (port->bundle->bond) {
        /* Autopath does not support VLAN hashing. */
        struct ofport_dpif *slave = bond_choose_output_slave(
            port->bundle->bond, &ctx->flow, 0, &ctx->tags);
        if (slave) {
            ofp_port = slave->up.ofp_port;
        }
    }
    nxm_reg_load(&ap->dst, ofp_port, &ctx->flow);
}

static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_bundle_action(struct action_xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->flow);
    } else {
        xlate_output_action(ctx, port, 0);
    }
}

static void
xlate_learn_action(struct action_xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}
/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
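
/* Examples: with max == 10, a *timeout of 0 ("infinite") becomes 10 and a
 * *timeout of 30 becomes 10, while a *timeout of 5 is left alone; with
 * max == 0, *timeout is never modified. */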
static void
xlate_fin_timeout(struct action_xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}

static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct action_xlate_ctx *ctx)
{
    const struct ofport_dpif *port;
    bool was_evictable = true;
    const struct ofpact *a;

    port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
    if (port && !may_receive(port, ctx)) {
        /* Drop this flow. */
        return;
    }

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      OFPR_ACTION,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            ctx->flow.vlan_tci = htons(0);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DST:
            ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            break;

        case OFPACT_SET_L4_DST_PORT:
            ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            ctx->flow.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            ctx->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx)) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
            break;

        case OFPACT_AUTOPATH:
            xlate_autopath(ctx, ofpact_get_AUTOPATH(a));
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, ofpact_get_LEARN(a));
            }
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            ctx->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;
        }
    }

out:
    /* We've let OFPP_NORMAL and the learning action look at the packet,
     * so drop it now if forwarding is disabled. */
    if (port && !stp_forward_in_state(port->stp_state)) {
        ofpbuf_clear(ctx->odp_actions);
        add_sflow_action(ctx);
    }
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      ovs_be16 initial_tci, struct rule_dpif *rule,
                      uint8_t tcp_flags, const struct ofpbuf *packet)
{
    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    ctx->base_flow = ctx->flow;
    ctx->base_flow.tun_id = 0;
    ctx->base_flow.vlan_tci = initial_tci;
    ctx->rule = rule;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->tcp_flags = tcp_flags;
    ctx->resubmit_hook = NULL;
    ctx->report_hook = NULL;
    ctx->resubmit_stats = NULL;
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions in 'odp_actions', using 'ctx'. */
static void
xlate_actions(struct action_xlate_ctx *ctx,
              const struct ofpact *ofpacts, size_t ofpacts_len,
              struct ofpbuf *odp_actions)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    enum slow_path_reason special;

    COVERAGE_INC(ofproto_dpif_xlate);

    ofpbuf_clear(odp_actions);
    ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);

    ctx->odp_actions = odp_actions;
    ctx->tags = 0;
    ctx->slow = 0;
    ctx->has_learn = false;
    ctx->has_normal = false;
    ctx->has_fin_timeout = false;
    ctx->nf_output_iface = NF_OUT_DROP;
    ctx->mirrors = 0;
    ctx->recurse = 0;
    ctx->max_resubmit_trigger = false;
    ctx->orig_skb_priority = ctx->flow.skb_priority;
    ctx->table_id = 0;
    ctx->exit = false;

    if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles.
         *
         * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
         * believe that I wasn't using it without initializing it if I kept it
         * in a local variable. */
        ctx->orig_flow = ctx->flow;
    }

    if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx->ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
            ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
    if (special) {
        ctx->slow |= special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        ovs_be16 initial_tci = ctx->base_flow.vlan_tci;

        add_sflow_action(ctx);
        do_xlate_actions(ofpacts, ofpacts_len, ctx);

        if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
                              initial_tci, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
                                     ctx->odp_actions->data,
                                     ctx->odp_actions->size)) {
            ctx->slow |= SLOW_IN_BAND;
            if (ctx->packet
                && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
                                       ctx->packet)) {
                compose_output_action(ctx, OFPP_LOCAL);
            }
        }
        if (ctx->ofproto->has_mirrors) {
            add_mirror_actions(ctx, &ctx->orig_flow);
        }
        fix_sflow_action(ctx);
    }
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'ctx', and discards the datapath actions. */
static void
xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
                               const struct ofpact *ofpacts,
                               size_t ofpacts_len)
{
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
    ofpbuf_uninit(&odp_actions);
}

static void
xlate_report(struct action_xlate_ctx *ctx, const char *s)
{
    if (ctx->report_hook) {
        ctx->report_hook(ctx, s);
    }
}

/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
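
/* Worked examples: an access port on VLAN 10 maps every input to VLAN 10; a
 * trunk port maps VID 20 to VLAN 20; a native-VLAN port whose native VLAN is
 * 30 maps an untagged packet (vid 0) to VLAN 30 and VID 20 to VLAN 20. */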
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
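
/* Worked examples: an access port always yields VID 0 (output untagged); a
 * trunk or native-tagged port yields the VLAN itself; a native-untagged port
 * whose native VLAN is 30 yields VID 0 for VLAN 30 and the VLAN number for
 * anything else. */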
static void
output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
                                        vid, &ctx->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->flow.vlan_tci = old_tci;
}

static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
static void
add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->packet != NULL, NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */
    NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
                      ctx->odp_actions->size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors &= mirrors - 1;
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (vlan != m->out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}
static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    if (!mirrors) {
        return;
    }

    for (; mirrors; mirrors &= mirrors - 1) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}
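
/* Note on the loop above: "mirrors &= mirrors - 1" clears the lowest-order
 * set bit, so each configured mirror is visited exactly once.  For example,
 * mirrors == 0xa visits the mirrors at indexes 1 and 3 (as computed by
 * mirror_mask_ffs() - 1) and then terminates. */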
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
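
/* Example: a broadcast ARP request from 10.0.0.5 asking about 10.0.0.5 itself
 * (sender IP equal to target IP) is treated as gratuitous, as is any
 * broadcast ARP reply. */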
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
                    bool warn, struct ofport_dpif **in_ofportp)
{
    struct ofport_dpif *ofport;

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (in_ofportp) {
        *in_ofportp = ofport;
    }
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }
    return NULL;
}
/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
              uint16_t vlan)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    struct flow *flow = &ctx->flow;
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, &ctx->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            break;
        }
    }

    return true;
}
static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->has_normal = true;

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
                                    ctx->packet != NULL, &in_port);
    if (!in_bundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac->port.p, vlan);
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct ofbundle *bundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->nf_output_iface = NF_OUT_FLOOD;
    }
}
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every
 * rule has the same form (that is, the same wildcards), except that the table
 * is also allowed to have a single "catch-all" flow that matches all packets.
 * We optimize this case by tagging all of the facets that resubmit into the
 * table and invalidating the same tag whenever a flow changes in that table.
 * The end result is that we revalidate just the facets that need it (and
 * sometimes a few more, but not all of the facets or even all of the facets
 * that resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
                   uint32_t basis)
{
    if (flow_wildcards_is_catchall(wc)) {
        return 0;
    } else {
        struct flow tag_flow = *flow;
        flow_zero_wildcards(&tag_flow, wc);
        return tag_create_deterministic(flow_hash(&tag_flow, basis));
    }
}

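/* Illustrative sketch (not part of the original file): the revalidation
 * scheme relies on rule_calculate_tag() being a pure function of (flow,
 * wildcards, basis), so the tag computed when a facet resubmits into a table
 * necessarily equals the tag that rule_invalidate() later passes to
 * tag_set_add() for a changed rule in the same table.  Roughly:
 *
 *     tag_type t1 = rule_calculate_tag(&flow, &wc, basis);
 *     tag_type t2 = rule_calculate_tag(&flow, &wc, basis);
 *     assert(t1 == t2);        deterministic: same inputs, same tag
 */
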
/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct oftable *oftable = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&oftable->cls.tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->need_revalidate = REV_FLOW_TABLE;
    }
}

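/* Sketch (an assumption, not code copied from elsewhere in this file): the
 * consumer of the taggability data maintained above is the resubmit path in
 * flow translation.  When a translation resubmits into table 'table_id', it
 * can tag the resulting facet so that rule_invalidate() reaches it, roughly:
 *
 *     struct table_dpif *table = &ofproto->tables[table_id];
 *     if (table->other_table) {
 *         ctx->tags |= (rule && rule->tag
 *                       ? rule->tag
 *                       : rule_calculate_tag(&ctx->flow,
 *                                            &table->other_table->wc,
 *                                            table->basis));
 *     }
 */
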
/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->revalidate_set, rule->tag);
        } else {
            ofproto->need_revalidate = REV_FLOW_TABLE;
        }
    }
}

static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->need_revalidate = REV_RECONFIGURE;
        return true;
    } else {
        return false;
    }
}

static enum ofperr
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    enum ofperr error;

    if (flow->in_port >= ofproto->max_ports && flow->in_port < OFPP_MAX) {
        return OFPERR_NXBRC_BAD_IN_PORT;
    }

    error = ofpacts_check(ofpacts, ofpacts_len, flow, ofproto->max_ports);
    if (!error) {
        struct odputil_keybuf keybuf;
        struct dpif_flow_stats stats;

        struct ofpbuf key;

        struct action_xlate_ctx ctx;
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&key, flow);

        dpif_flow_stats_extract(flow, packet, &stats);

        action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
                              packet_get_tcp_flags(packet, flow), packet);
        ctx.resubmit_stats = &stats;

        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
        dpif_execute(ofproto->dpif, key.data, key.size,
                     odp_actions.data, odp_actions.size, packet);
        ofpbuf_uninit(&odp_actions);
    }
    return error;
}

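/* Illustrative sketch (an assumption, not part of the original file): a
 * caller could hand packet_out() a single output action built with the
 * ofp-actions helpers, along these lines (error handling omitted; assumes
 * the generated ofpact_put_OUTPUT() helper from ofp-actions.h):
 *
 *     uint64_t stub[64 / 8];
 *     struct ofpbuf ofpacts;
 *     struct ofpact_output *output;
 *
 *     ofpbuf_use_stub(&ofpacts, stub, sizeof stub);
 *     output = ofpact_put_OUTPUT(&ofpacts);
 *     output->port = OFPP_FLOOD;
 *     output->max_len = 0;
 *     packet_out(ofproto_, packet, &flow, ofpacts.data, ofpacts.size);
 */
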
static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
}

static void
send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
{
    if (!facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct subfacet *subfacet;
        struct ofexpired expired;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (subfacet->path == SF_FAST_PATH) {
                struct dpif_flow_stats stats;

                subfacet_reinstall(subfacet, &stats);
                subfacet_update_stats(subfacet, &stats);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}

static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct facet *facet;

    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        send_active_timeout(ofproto, facet);
    }
}

static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}

static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}

static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

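/* Illustrative note (not from the original source): given the header and
 * per-entry format strings above, "ovs-appctl fdb/show <bridge>" replies
 * with a table along these lines (the entries are made-up values):
 *
 *      port  VLAN  MAC                Age
 *         1     0  50:54:00:00:00:05    3
 *         2    10  50:54:00:00:00:07   17
 */
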
struct trace_ctx {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};

static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
    ds_put_char(result, '\n');
}

static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}

static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}

static void
trace_format_odp(struct ds *result, int level, const char *title,
                 struct trace_ctx *trace)
{
    struct ofpbuf *odp_actions = trace->ctx.odp_actions;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    format_odp_actions(result, odp_actions->data, odp_actions->size);
    ds_put_char(result, '\n');
}

static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}

static void
trace_report(struct action_xlate_ctx *ctx, const char *s)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char_multiple(result, '\t', ctx->recurse);
    ds_put_cstr(result, s);
    ds_put_char(result, '\n');
}

static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    const char *dpname = argv[1];
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    ovs_be16 initial_tci;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
                                    "for help)");
        goto exit;
    }
    if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
        /* ofproto/trace dpname flow [-generate] */
        const char *flow_s = argv[2];
        const char *generate_s = argv[3];

        /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
         * flow.  We guess which type it is based on whether 'flow_s' contains
         * an '(', since a datapath flow always contains '(') but an
         * OpenFlow-like flow should not (in fact it's allowed but I believe
         * that's not documented anywhere).
         *
         * An alternative would be to try to parse 'flow_s' both ways, but then
         * it would be tricky giving a sensible error message.  After all, do
         * you just say "syntax error" or do you present both error messages?
         * Both choices seem lousy. */
        if (strchr(flow_s, '(')) {
            int error;

            /* Convert string to datapath key. */
            ofpbuf_init(&odp_key, 0);
            error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
            if (error) {
                unixctl_command_reply_error(conn, "Bad flow syntax");
                goto exit;
            }

            /* Convert odp_key to flow. */
            error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
                                                  odp_key.size, &flow,
                                                  &initial_tci, NULL);
            if (error == ODP_FIT_ERROR) {
                unixctl_command_reply_error(conn, "Invalid flow");
                goto exit;
            }
        } else {
            char *error_s;

            error_s = parse_ofp_exact_flow(&flow, argv[2]);
            if (error_s) {
                unixctl_command_reply_error(conn, error_s);
                free(error_s);
                goto exit;
            }

            initial_tci = flow.vlan_tci;
            vsp_adjust_flow(ofproto, &flow);
        }

        /* Generate a packet, if requested. */
        if (generate_s) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (argc == 6) {
        /* ofproto/trace dpname priority tun_id in_port packet */
        const char *priority_s = argv[2];
        const char *tun_id_s = argv[3];
        const char *in_port_s = argv[4];
        const char *packet_s = argv[5];
        uint16_t in_port = ofp_port_to_odp_port(atoi(in_port_s));
        ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
        uint32_t priority = atoi(priority_s);
        char *msg;

        msg = eth_from_hex(packet_s, &packet);
        if (msg) {
            unixctl_command_reply_error(conn, msg);
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, priority, tun_id, in_port, &flow);
        initial_tci = flow.vlan_tci;
    } else {
        unixctl_command_reply_error(conn, "Bad command syntax");
        goto exit;
    }

    ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}

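/* Illustrative note (not from the original source): the two argument forms
 * accepted above correspond to invocations like the following, where the
 * bridge name and flow string are made-up values and the second form's
 * arguments are placeholders:
 *
 *     ovs-appctl ofproto/trace br0 "in_port(1)" -generate
 *     ovs-appctl ofproto/trace br0 <priority> <tun_id> <in_port> <packet-hex>
 */
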
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet, ovs_be16 initial_tci,
              struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow);

    trace_format_rule(ds, 0, 0, rule);
    if (rule == ofproto->miss_rule) {
        ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
    } else if (rule == ofproto->no_packet_in_rule) {
        ds_put_cstr(ds, "\nNo match, packets dropped because "
                    "OFPPC_NO_PACKET_IN is set on in_port.\n");
    }

    if (rule) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;

        struct trace_ctx trace;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
                              rule, tcp_flags, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        trace.ctx.report_hook = trace_report;
        xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);
        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, odp_actions.data, odp_actions.size);
        ofpbuf_uninit(&odp_actions);

        if (trace.ctx.slow) {
            enum slow_path_reason slow;

            ds_put_cstr(ds, "\nThis flow is handled by the userspace "
                        "slow path because it:");
            for (slow = trace.ctx.slow; slow; ) {
                enum slow_path_reason bit = rightmost_1bit(slow);

                switch (bit) {
                case SLOW_CFM:
                    ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
                    break;
                case SLOW_LACP:
                    ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
                    break;
                case SLOW_STP:
                    ds_put_cstr(ds, "\n\t- Consists of STP packets.");
                    break;
                case SLOW_IN_BAND:
                    ds_put_cstr(ds, "\n\t- Needs in-band special case "
                                "processing.");
                    if (!packet) {
                        ds_put_cstr(ds, "\n\t (The datapath actions are "
                                    "incomplete--for complete actions, "
                                    "please supply a packet.)");
                    }
                    break;
                case SLOW_CONTROLLER:
                    ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
                                "to the OpenFlow controller.");
                    break;
                case SLOW_MATCH:
                    ds_put_cstr(ds, "\n\t- Needs more specific matching "
                                "than the datapath supports.");
                    break;
                }

                slow &= ~bit;
            }

            if (trace.ctx.slow & ~SLOW_MATCH) {
                ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
                            "the special slow-path processing.");
            }
        }
    }
}

static void
ofproto_dpif_clog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}

/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct facet *facet;
    int errors;

    errors = 0;
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }
    if (errors) {
        ofproto->need_revalidate = REV_INCONSISTENCY;
    }

    if (errors) {
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}

static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "bridge {tun_id in_port packet | odp_flow [-generate]}",
        2, 5, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
}

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */

static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->need_revalidate = REV_RECONFIGURE;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}

static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}

/* Returns the ODP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
 * 'ofproto'.  For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
 * it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
 * function just returns its 'realdev_odp_port' argument. */
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}

static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}

/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}

/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}

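/* Illustrative note (not from the original source): for a packet received on
 * a hypothetical VLAN device eth0.9 (VID 9), the adjustment above rewrites
 * the flow as if the packet had arrived on eth0 carrying an 802.1Q tag:
 *
 *     flow->vlan_tci = htons((9 & VLAN_VID_MASK) | VLAN_CFI)
 *                    = htons(0x1009)
 *
 * since VLAN_VID_MASK is 0x0fff and VLAN_CFI is 0x1000. */
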
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}

static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}

const struct ofproto_class ofproto_dpif_class = {
    enumerate_types,
    enumerate_names,
    del,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    run_fast,
    wait,
    get_memory_usage,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_cfm,
    get_cfm_fault,
    get_cfm_opup,
    get_cfm_remote_mpids,
    get_cfm_health,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set,
    mirror_get_stats,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_idle_time,
    set_realdev,
};