/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64
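/* Illustrative sketch (not from the original source): a translation step
 * would check the recursion bound before resubmitting.  The helper name
 * below is hypothetical, for illustration only. */
#if 0
static bool
resubmit_depth_ok(const struct action_xlate_ctx *ctx)
{
    /* 'recurse' counts nested resubmits; refuse to go past the bound.  The
     * real code also stops logging traces once the bound is exceeded. */
    return ctx->recurse < MAX_RESUBMIT_RECURSION;
}
#endif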
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
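/* Sketch (assumption, not in the original excerpt): when a facet goes away,
 * its counters would be folded into the owning rule so that the totals above
 * keep the properties described.  Hypothetical helper, illustration only. */
#if 0
static void
fold_facet_stats_into_rule(struct rule_dpif *rule, const struct facet *facet)
{
    rule->packet_count += facet->packet_count;
    rule->byte_count += facet->byte_count;
}
#endif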
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}
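/* Usage sketch: CONTAINER_OF() recovers the enclosing struct from a pointer
 * to its embedded member, which is how every *_cast() helper in this file
 * downcasts from the generic ofproto layer.  Illustration only: */
#if 0
struct rule *base = ...;                        /* Generic ofproto rule. */
struct rule_dpif *rule = rule_dpif_cast(base);  /* Enclosing dpif rule. */
#endif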
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            uint8_t table_id);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static void flow_push_stats(struct rule_dpif *, const struct flow *,
                            const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
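/* Sketch (assumption): each mirror occupies one bit of a mirror_mask_t,
 * indexed by its slot in ofproto's "mirrors" array.  Illustration only: */
#if 0
mirror_mask_t mask = 0;
mask |= MIRROR_MASK_C(1) << mirror->idx;    /* Add a mirror to the set. */
bool member = (mask >> mirror->idx) & 1;    /* Test membership. */
#endif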
struct ofmirror {
    struct ofproto_dpif *ofproto;  /* Owning ofproto. */
    size_t idx;                    /* In ofproto's "mirrors" array. */
    void *aux;                     /* Key supplied by ofproto's client. */
    char *name;                    /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;             /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;             /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;          /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;          /* Output port or NULL. */
    int out_vlan;                  /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;     /* Bitmap of mirrors with the same output. */

    int64_t packet_count;          /* Number of packets sent. */
    int64_t byte_count;            /* Number of bytes sent. */
};
static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);
struct ofbundle {
    struct hmap_node hmap_node;    /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto;  /* Owning ofproto. */
    void *aux;                     /* Key supplied by ofproto's client. */
    char *name;                    /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;             /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    struct lacp *lacp;             /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;                /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;     /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;     /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;      /* Mirrors that output to this bundle. */
};
static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does
 * it have any 'port' structs, so care must be taken when dealing with
 * it. */
static struct ofbundle ofpp_none_bundle = {
    .vlan_mode = PORT_VLAN_TRUNK,
};
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */
    bool may_learn;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing a
     * resubmit or OFPP_TABLE action.  In addition, disables logging of traces
     * when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct action_xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    const struct dpif_flow_stats *resubmit_stats;

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
                          const struct ofpact *ofpacts, size_t ofpacts_len,
                          struct ofpbuf *odp_actions);
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
                                           const struct ofpact *ofpacts,
                                           size_t ofpacts_len);
static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct action_xlate_ctx *ctx, const char *s);
/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath.
 */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};

static const char *subfacet_path_to_string(enum subfacet_path);
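/* Sketch (assumption): the desired installation path would follow directly
 * from whether any slow-path reason is set, roughly: */
#if 0
static enum subfacet_path
subfacet_want_path(enum slow_path_reason slow)
{
    return slow ? SF_SLOW_PATH : SF_FAST_PATH;
}
#endif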
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */

    enum odp_key_fitness key_fitness;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet, but
     * may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    enum slow_path_reason slow; /* 0 if fast path may be used. */
    enum subfacet_path path;    /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */

    /* Datapath port the packet arrived on.  This is needed to remove
     * flows for ports that are no longer part of the bridge.  Since the
     * flow definition only has the OpenFlow port number and the port is
     * no longer part of the bridge, we can't determine the datapath port
     * number needed to delete the flow from the datapath. */
    uint32_t odp_in_port;
};
#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet,
                                  struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *, enum slow_path_reason);
static void subfacet_uninstall(struct subfacet *);

static enum subfacet_path subfacet_want_path(enum slow_path_reason);
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
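/* Sketch (assumption): code that expires or accounts a facet would walk its
 * subfacet list, e.g.: */
#if 0
struct subfacet *subfacet;
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
    /* Each subfacet is one datapath flow backing this facet. */
}
#endif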
struct facet {
    /* Owners. */
    struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
    struct list list_node;      /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;     /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool has_learn;              /* Actions include NXAST_LEARN? */
    bool has_normal;             /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;        /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;               /* Tags that would require revalidation. */
    mirror_mask_t mirrors;       /* Bitmap of dependent mirrors. */

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.) */
    struct subfacet one_subfacet;
};
static struct facet *facet_create(struct rule_dpif *,
                                  const struct flow *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static void facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);

static bool facet_is_controller_flow(struct facet *);
struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};
/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};
static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    ovs_assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}
static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};
/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};
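/* Sketch (assumption): a flow's tag mixes the bits the table's wildcard form
 * cares about with the per-table 'basis', so tags from different tables do
 * not collide systematically.  The field path below is hypothetical. */
#if 0
tag_type tag = rule_calculate_tag(&facet->flow, &rule->up.cr.match.mask,
                                  table->basis);
#endif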
/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows that have no associated ofproto; if they did, we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};
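/* Sketch (assumption): a drop flow is a datapath flow with an empty action
 * list, so matching packets die in the kernel instead of being re-upcalled.
 * Illustrative call only: */
#if 0
dpif_flow_put(backer->dpif, DPIF_FP_CREATE,
              drop_key->key, drop_key->key_len,
              NULL, 0, NULL);   /* No actions => drop. */
#endif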
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
};
/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */

    /* Statistics. */
    int n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;
    struct governor *governor;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;           /* Set of standard port names. */
    struct sset ghost_ports;     /* Ports with no datapath port. */
    struct sset port_poll_set;   /* Queued names for port_poll() reply. */
    int port_poll_errno;         /* Last errno for port_poll() reply. */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, ovs_be16 initial_tci,
                          struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);

/* Upcall handling. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of port names to the bridges that contain them. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}
static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}
/* Type functions. */

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}
static int
type_run(const char *type)
{
    struct dpif_backer *backer;
    char *devname;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    if (backer->need_revalidate
        || !tag_set_is_empty(&backer->revalidate_set)) {
        struct tag_set revalidate_set = backer->revalidate_set;
        bool need_revalidate = backer->need_revalidate;
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                const char *dp_port;

                if (!iter->tnl_port) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        uint32_t odp_port = UINT32_MAX;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port, odp_port);
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? node->data : OVSP_NONE;
                if (tnl_port_reconfigure(&iter->up, iter->odp_port,
                                         &iter->tnl_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, node->data);
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        if (backer->need_revalidate) {
            /* Clear the drop_keys in case we should now be accepting some
             * formerly dropped flows. */
            drop_key_clear(backer);
        }

        /* Clear the revalidation flags. */
        tag_set_init(&backer->revalidate_set);
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet, *next;

            if (ofproto->backer != backer) {
                continue;
            }

            HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
                if (need_revalidate
                    || tag_set_intersects(&revalidate_set, facet->tags)) {
                    facet_revalidate(facet);
                }
            }
        }
    }

    if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    /* Check for port changes in the dpif. */
    while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
        struct ofproto_dpif *ofproto;
        struct dpif_port port;

        /* Don't report on the datapath's device. */
        if (!strcmp(devname, dpif_base_name(backer->dpif))) {
            goto next;
        }

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
                goto next;
            }
        }

        ofproto = lookup_ofproto_dpif_by_port_name(devname);
        if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
            /* The port was removed.  If we know the datapath,
             * report it through poll_set().  If we don't, it may be
             * notifying us of a removal we initiated, so ignore it.
             * If there's a pending ENOBUFS, let it stand, since
             * everything will be reevaluated. */
            if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
                sset_add(&ofproto->port_poll_set, devname);
                ofproto->port_poll_errno = 0;
            }
        } else if (!ofproto) {
            /* The port was added, but we don't know with which
             * ofproto we should associate it.  Delete it. */
            dpif_port_del(backer->dpif, port.port_no);
        }
        dpif_port_destroy(&port);

    next:
        free(devname);
    }

    if (error != EAGAIN) {
        struct ofproto_dpif *ofproto;

        /* There was some sort of error, so propagate it to all
         * ofprotos that use this backer. */
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (ofproto->backer == backer) {
                sset_clear(&ofproto->port_poll_set);
                ofproto->port_poll_errno = error;
            }
        }
    }

    return 0;
}
static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;
    unsigned int work;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire them off all at once.  We do multiple batches
     * because in some cases handling a packet can cause another packet to be
     * queued almost immediately as part of the return flow.  Both
     * optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(backer, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}
static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    timer_wait(&backer->next_expiration);
}
/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}
static void
close_dpif_backer(struct dpif_backer *backer)
{
    struct shash_node *node;

    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    simap_destroy(&backer->tnl_backers);
    hmap_destroy(&backer->odp_to_ofport_map);
    node = shash_find(&all_dpif_backers, backer->type);
    free(backer->type);
    shash_delete(&all_dpif_backers, node);
    dpif_close(backer->dpif);

    free(backer);
}
/* Datapath port slated for removal from datapath. */
struct odp_garbage {
    struct list list_node;
    uint32_t odp_port;
};
static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct list garbage_list;
    struct odp_garbage *garbage, *next;
    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 strerror(error));
        free(backer);
        return error;
    }

    backer->type = xstrdup(type);
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    hmap_init(&backer->drop_keys);
    timer_set_duration(&backer->next_expiration, 1000);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    tag_set_init(&backer->revalidate_set);
    *backerp = backer;

    dpif_flow_flush(backer->dpif);

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        list_remove(&garbage->list_node);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    error = dpif_recv_set(backer->dpif, true);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, strerror(error));
        close_dpif_backer(backer);
        return error;
    }

    return error;
}
static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int max_ports;
    int error;
    int i;

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    max_ports = dpif_get_max_ports(ofproto->backer->dpif);
    ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));

    ofproto->n_matches = 0;

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);
    ofproto->governor = NULL;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    return error;
}
static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    match_init_catchall(&fm.match);
    match_set_reg(&fm.match, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
    ovs_assert(*rulep != NULL);

    return 0;
}
static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    return error;
}
static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}
static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);
    governor_destroy(ofproto->governor);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    close_dpif_backer(ofproto->backer);
}
static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run_fast(ofport);
    }

    return 0;
}
static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);

    /* Check the consistency of a random facet, to aid debugging. */
    if (!hmap_is_empty(&ofproto->facets)
        && !ofproto->backer->need_revalidate) {
        struct facet *facet;

        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->backer->revalidate_set,
                                facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->backer->need_revalidate = REV_INCONSISTENCY;
            }
        }
    }

    if (ofproto->governor) {
        size_t n_subfacets;

        governor_run(ofproto->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
            && governor_is_idle(ofproto->governor)) {
            governor_destroy(ofproto->governor);
            ofproto->governor = NULL;
        }
    }

    return 0;
}
static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->backer->dpif);
    dpif_recv_wait(ofproto->backer->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->backer->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    }
    if (ofproto->governor) {
        governor_wait(ofproto->governor);
    }
}
static void
get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    simap_increase(usage, "facets", hmap_count(&ofproto->facets));
    simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
}
static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        if (subfacet->path != SF_NOT_INSTALLED) {
            batch[n_batch++] = subfacet;
            if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                subfacet_destroy_batch(ofproto, batch, n_batch);
                n_batch = 0;
            }
        } else {
            subfacet_destroy(subfacet);
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(ofproto, batch, n_batch);
    }
}
static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |
                OFPUTIL_A_ENQUEUE);
}
static void
get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->backer->dpif, &s);

    ots->lookup_count = htonll(s.n_hit + s.n_missed);
    ots->matched_count = htonll(s.n_hit + ofproto->n_matches);
}
static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}
static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const struct netdev *netdev = port->up.netdev;
    struct dpif_port dpif_port;
    int error;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    port->tnl_port = NULL;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(netdev);

    if (netdev_vport_is_patch(netdev)) {
        /* XXX By bailing out here, we don't do required sFlow work. */
        port->odp_port = OVSP_NONE;
        return 0;
    }

    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    netdev_vport_get_dpif_port(netdev),
                                    &dpif_port);
    if (error) {
        return error;
    }

    port->odp_port = dpif_port.port_no;

    if (netdev_get_tunnel_config(netdev)) {
        port->tnl_port = tnl_port_add(&port->up, port->odp_port);
    } else {
        /* Sanity-check that a mapping doesn't already exist.  This
         * shouldn't happen for non-tunnel ports. */
        if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
            VLOG_ERR("port %s already has an OpenFlow port number",
                     dpif_port.name);
            dpif_port_destroy(&dpif_port);
            return EBUSY;
        }

        hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
                    hash_int(port->odp_port, 0));
    }
    dpif_port_destroy(&dpif_port);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
    }

    return 0;
}
static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
    const char *devname = netdev_get_name(port->up.netdev);

    if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destruction. */
        if (!port->tnl_port) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);
        }
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (port->odp_port != OVSP_NONE && !port->tnl_port) {
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
    }

    tnl_port_del(port->tnl_port);
    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}
static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}
static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}
static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
            }
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}
static int
get_cfm_status(const struct ofport *ofport_,
               struct ofproto_cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        status->faults = cfm_get_fault(ofport->cfm);
        status->remote_opstate = cfm_get_opup(ofport->cfm);
        status->health = cfm_get_health(ofport->cfm);
        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
        return 0;
    } else {
        return ENOENT;
    }
}
/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}
/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}
static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}
static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml,
                               &ofproto->backer->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
            != stp_forward_in_state(state);

        ofproto->backer->need_revalidate = REV_STP;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}
static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}
static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
        ofproto->stp_last_tick = now;

        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }
}
static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}
/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
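/* Worked example (a sketch, not original code): BPDUs are 802.3/LLC frames,
 * so the 16-bit field after the source MAC carries a payload length rather
 * than an Ethertype.  For a padded 60-byte frame carrying a 35-byte
 * configuration BPDU, ntohs(eth->eth_type) is 38 (35 bytes of BPDU plus 3
 * bytes of LLC), so the trimming above shrinks payload.size to
 * 38 + ETH_HEADER_LEN and drops the padding before the LLC header is
 * pulled off.  A hypothetical restatement of the computation: */
static inline size_t
llc_frame_payload_end(const struct eth_header *eth)
{
    return ntohs(eth->eth_type) + ETH_HEADER_LEN;
}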
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}
static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}
static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
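/* Sketch (not original code): set_queues() stores each configured DSCP
 * pre-shifted into the upper six bits of the IP TOS byte.  For example,
 * DSCP 46 (Expedited Forwarding) becomes (46 << 2) & IP_DSCP_MASK == 0xb8.
 * This hypothetical helper restates the computation in isolation. */
static inline uint8_t
dscp_to_tos_field(uint8_t dscp)
{
    return (dscp << 2) & IP_DSCP_MASK;
}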
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}
/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}
static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}
static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}
static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    return true;
}
static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}
static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    ovs_assert(s->n_slaves == 1 || s->bond != NULL);
    ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    ovs_assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = CONST_CAST(unsigned long *, s->trunks);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = CONST_CAST(unsigned long *, s->trunks);
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}
static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}
static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}
static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}
static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}
static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}
/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
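/* Worked example (a sketch, not original code): 'dup_mirrors' is a bitmask
 * over mirror indexes.  If mirrors 1 and 3 share an output destination
 * (same 'out' bundle and 'out_vlan'), mirror_update_dups() leaves both with
 * dup_mirrors == (MIRROR_MASK_C(1) << 1) | (MIRROR_MASK_C(1) << 3) == 0xa,
 * so flow translation can mark both mirrors satisfied after emitting a
 * single copy of a packet. */
static inline mirror_mask_t
example_shared_output_mask(void)
{
    return (MIRROR_MASK_C(1) << 1) | (MIRROR_MASK_C(1) << 3);
}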
static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml,
                       &ofproto->backer->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}
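/* Sketch (not original code): each bundle caches its mirror membership as
 * bitmasks, updated with the usual set/clear idiom used above.  A
 * hypothetical restatement: */
static inline mirror_mask_t
mirror_mask_update(mirror_mask_t mask, mirror_mask_t bit, bool is_member)
{
    return is_member ? mask | bit : mask & ~bit;
}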
static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}
static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}
static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    }
    return 0;
}
static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}
static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
static void
set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
                     size_t max_entries)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
    mac_learning_set_max_entries(ofproto->ml, max_entries);
}
static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}
static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
    return port && &ofproto->up == port->up.ofproto ? port : NULL;
}
static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
static struct ofport_dpif *
ofport_get_peer(const struct ofport_dpif *ofport_dpif)
{
    const struct ofproto_dpif *ofproto;
    const char *peer;

    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
    if (!peer) {
        return NULL;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct ofport *ofport;

        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
            return ofport_dpif_cast(ofport);
        }
    }
    return NULL;
}
static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}
static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);

    if (ofport->tnl_port
        && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
                                &ofport->tnl_port)) {
        ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
    }

    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}
static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}
static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    if (sset_contains(&ofproto->ghost_ports, devname)) {
        const char *type = netdev_get_type_from_name(devname);

        /* We may be called before ofproto->up.port_by_name is populated with
         * the appropriate ofport.  For this reason, we must get the name and
         * type from the netdev layer directly. */
        if (type) {
            const struct ofport *ofport;

            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
            ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
            ofproto_port->name = xstrdup(devname);
            ofproto_port->type = xstrdup(type);
            return 0;
        }
        return ENODEV;
    }

    if (!sset_contains(&ofproto->ports, devname)) {
        return ENODEV;
    }
    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
    }
    return error;
}
static int
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
    const char *devname = netdev_get_name(netdev);

    if (netdev_vport_is_patch(netdev)) {
        sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
        return 0;
    }

    if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        uint32_t port_no = UINT32_MAX;
        int error;

        error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
        if (error) {
            return error;
        }
        if (netdev_get_tunnel_config(netdev)) {
            simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
        }
    }

    if (netdev_get_tunnel_config(netdev)) {
        sset_add(&ofproto->ghost_ports, devname);
    } else {
        sset_add(&ofproto->ports, devname);
    }
    return 0;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    int error = 0;

    if (!ofport) {
        return 0;
    }

    sset_find_and_delete(&ofproto->ghost_ports,
                         netdev_get_name(ofport->up.netdev));
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    if (!ofport->tnl_port) {
        error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
        if (!error) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}
static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport_->ofp_port == OFPP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
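/* Sketch (not original code): netdev counters use UINT64_MAX to mean
 * "unavailable", which is why each adjustment above is guarded.  A
 * hypothetical helper capturing the same rule: */
static inline uint64_t
counter_add_if_available(uint64_t counter, uint64_t extra)
{
    return counter != UINT64_MAX ? counter + extra : UINT64_MAX;
}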
/* Account packets for LOCAL port. */
static void
ofproto_update_local_port_stats(const struct ofproto *ofproto_,
                                size_t tx_size, size_t rx_size)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (rx_size) {
        ofproto->stats.rx_packets++;
        ofproto->stats.rx_bytes += rx_size;
    }
    if (tx_size) {
        ofproto->stats.tx_packets++;
        ofproto->stats.tx_bytes += tx_size;
    }
}
struct port_dump_state {
    uint32_t bucket;
    uint32_t offset;
    bool ghost;

    struct ofproto_port port;
    bool has_port;
};
static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}
static int
port_dump_next(const struct ofproto *ofproto_, void *state_,
               struct ofproto_port *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state = state_;
    const struct sset *sset;
    struct sset_node *node;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
        state->has_port = false;
    }
    sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
    while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
        int error;

        error = port_query_by_name(ofproto_, node->name, &state->port);
        if (!error) {
            *port = state->port;
            state->has_port = true;
            return 0;
        } else if (error != ENODEV) {
            return error;
        }
    }

    if (!state->ghost) {
        state->ghost = true;
        state->bucket = 0;
        state->offset = 0;
        return port_dump_next(ofproto_, state_, port);
    }

    return EOF;
}
static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
    }
    free(state);
    return 0;
}
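/* Usage sketch (hypothetical caller, not original code): port_dump_start(),
 * port_dump_next(), and port_dump_done() form a resumable iterator over
 * first the regular ports and then the ghost ports:
 *
 *     void *state;
 *     struct ofproto_port port;
 *
 *     port_dump_start(ofproto, &state);
 *     while (!port_dump_next(ofproto, state, &port)) {
 *         ...use 'port'; the iterator retains ownership of its fields...
 *     }
 *     port_dump_done(ofproto, state);
 *
 * port_dump_next() returns 0 while ports remain and EOF at the end, so the
 * loop above stops on the first nonzero return. */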
static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->port_poll_errno) {
        int error = ofproto->port_poll_errno;
        ofproto->port_poll_errno = 0;
        return error;
    }

    if (sset_is_empty(&ofproto->port_poll_set)) {
        return EAGAIN;
    }

    *devnamep = sset_pop(&ofproto->port_poll_set);
    return 0;
}
static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}
static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct ofproto_dpif *ofproto;

    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    ovs_be16 initial_tci;
    struct list packets;
    enum dpif_upcall_type upcall_type;
    uint32_t odp_in_port;
};
struct flow_miss_op {
    struct dpif_op dpif_op;
    void *garbage;              /* Pointer to pass to free(), NULL if none. */
    uint64_t stub[1024 / 8];    /* Temporary buffer. */
};
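/* Sizing sketch (not original code): each miss can contribute one "execute"
 * op per packet plus at most one "flow put" op per miss, and there are never
 * more misses than packets, so a batch of n packets never needs more than
 * 2 * n ops.  That is the bound assumed by the flow_miss_ops[] array in
 * handle_miss_upcalls() below.  A hypothetical restatement: */
static inline size_t
flow_miss_op_bound(size_t n_packets)
{
    /* At most one execute per packet and one flow put per miss,
     * with misses <= packets. */
    return n_packets * 2;
}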
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.table_id = 0;
    pin.cookie = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
    if (!ofport) {
        return 0;
    } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return SLOW_LACP;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}
/* Partially initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.vlan_tci != miss->initial_tci) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->garbage = NULL;
    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}
/* Helper for handle_flow_miss_without_facet() and
 * handle_flow_miss_with_facet(). */
static void
handle_flow_miss_common(struct rule_dpif *rule,
                        struct ofpbuf *packet, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    ofproto->n_matches++;

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in_miss(ofproto, packet, flow);
    }
}
/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
 * installing a datapath flow.  The answer is usually "yes" (a return value of
 * true).  However, for short flows the cost of bookkeeping is much higher than
 * the benefits, so when the datapath holds a large number of flows we impose
 * some heuristics to decide which flows are likely to be worth tracking. */
static bool
flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
                            struct flow_miss *miss, uint32_t hash)
{
    if (!ofproto->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
            return true;
        }

        ofproto->governor = governor_create(ofproto->up.name);
    }

    return governor_should_install_flow(ofproto->governor, hash,
                                        list_size(&miss->packets));
}
/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
 * or creating any datapath flow.  May add an "execute" operation to 'ops' and
 * increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct flow_miss *miss,
                               struct rule_dpif *rule,
                               struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    long long int now = time_msec();
    struct action_xlate_ctx ctx;
    struct ofpbuf *packet;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        COVERAGE_INC(facet_suppress);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);

        dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
        rule_credit_stats(rule, &stats);

        action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
                              rule, 0, packet);
        ctx.resubmit_stats = &stats;
        xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        if (odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = odp_actions.data;
            execute->actions_len = odp_actions.size;
            op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }
}
/* Handles 'miss', which matches 'facet'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op.
 *
 * All of the packets in 'miss' are considered to have arrived at time 'now'.
 * This is really important only for new facets: if we just called time_msec()
 * here, then the new subfacet or its packets could look (occasionally) as
 * though it was used some time after the facet was used.  That can make a
 * one-packet flow look like it has a nonzero duration, which looks odd in
 * e.g. NetFlow statistics. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
                            long long int now,
                            struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path want_path;
    struct subfacet *subfacet;
    struct ofpbuf *packet;

    subfacet = subfacet_create(facet, miss, now);

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];
        struct dpif_flow_stats stats;
        struct ofpbuf odp_actions;

        handle_flow_miss_common(facet->rule, packet, &miss->flow);

        ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
        if (!subfacet->actions || subfacet->slow) {
            subfacet_make_actions(subfacet, packet, &odp_actions);
        }

        dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
        subfacet_update_stats(subfacet, &stats);

        if (subfacet->actions_len) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            if (!subfacet->slow) {
                execute->actions = subfacet->actions;
                execute->actions_len = subfacet->actions_len;
                ofpbuf_uninit(&odp_actions);
            } else {
                execute->actions = odp_actions.data;
                execute->actions_len = odp_actions.size;
                op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
            }

            (*n_ops)++;
        } else {
            ofpbuf_uninit(&odp_actions);
        }
    }

    want_path = subfacet_want_path(subfacet->slow);
    if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.u.flow_put;

        subfacet->path = want_path;

        op->garbage = NULL;
        op->dpif_op.type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        if (want_path == SF_FAST_PATH) {
            put->actions = subfacet->actions;
            put->actions_len = subfacet->actions_len;
        } else {
            compose_slow_path(ofproto, &facet->flow, subfacet->slow,
                              op->stub, sizeof op->stub,
                              &put->actions, &put->actions_len);
        }
        put->stats = NULL;
    }
}
/* Handles flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
                 size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct facet *facet;
    long long int now;
    uint32_t hash;

    /* The caller must ensure that miss->hmap_node.hash contains
     * flow_hash(miss->flow, 0). */
    hash = miss->hmap_node.hash;

    facet = facet_lookup_valid(ofproto, &miss->flow, hash);
    if (!facet) {
        struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);

        if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
            handle_flow_miss_without_facet(miss, rule, ops, n_ops);
            return;
        }

        facet = facet_create(rule, &miss->flow, hash);
        now = facet->used;
    } else {
        now = time_msec();
    }
    handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
}
static struct drop_key *
drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
                size_t key_len)
{
    struct drop_key *drop_key;

    HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
                             &backer->drop_keys) {
        if (drop_key->key_len == key_len
            && !memcmp(drop_key->key, key, key_len)) {
            return drop_key;
        }
    }
    return NULL;
}
static void
drop_key_clear(struct dpif_backer *backer)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
    struct drop_key *drop_key, *next;

    HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
        int error;

        error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
                              NULL);
        if (error && !VLOG_DROP_WARN(&rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;
            odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
            VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
                      ds_cstr(&ds));
            ds_destroy(&ds);
        }

        hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
        free(drop_key->key);
        free(drop_key);
    }
}
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Optionally, if nonnull, sets '*initial_tci' to the VLAN TCI with which the
 * packet was really received, that is, the actual VLAN TCI extracted by
 * odp_flow_key_to_flow().  (This differs from the value returned in
 * flow->vlan_tci only for packets received on VLAN splinters.)
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
static int
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
                const struct nlattr *key, size_t key_len,
                struct flow *flow, enum odp_key_fitness *fitnessp,
                struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
                ovs_be16 *initial_tci)
{
    const struct ofport_dpif *port;
    enum odp_key_fitness fitness;
    int error = ENODEV;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (initial_tci) {
        *initial_tci = flow->vlan_tci;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port;
    }

    if (tnl_port_should_receive(flow)) {
        const struct ofport *ofport = tnl_port_receive(flow);
        if (!ofport) {
            flow->in_port = OFPP_NONE;
            goto exit;
        }
        port = ofport_dpif_cast(ofport);

        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;

        /* XXX: Since the tunnel module is not scoped per backer, it's
         * theoretically possible that we'll receive an ofport belonging to an
         * entirely different datapath.  In practice, this can't happen because
         * no platform has two separate datapaths which each support
         * tunneling. */
        ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
    } else {
        port = odp_port_to_ofport(backer, flow->in_port);
        if (!port) {
            flow->in_port = OFPP_NONE;
            goto exit;
        }
    }
    error = 0;

    flow->in_port = port->up.ofp_port;
    if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of
             * a struct dpif_upcall (see the large comment on that
             * structure definition), so pushing data on 'packet' is in
             * general not a good idea since it could overwrite 'key' or
             * free it as a side effect.  However, it's OK in this special
             * case because we know that 'packet' is inside a Netlink
             * attribute: pushing 4 bytes will just overwrite the 4-byte
             * "struct nlattr", which is fine since we don't need that
             * header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }

        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
    }

    if (ofproto) {
        *ofproto = ofproto_dpif_cast(port->up.ofproto);
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    return error;
}
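/* Usage sketch (hypothetical caller, not original code): a typical
 * ofproto_receive() call, mirroring handle_miss_upcalls() below:
 *
 *     struct ofproto_dpif *ofproto;
 *     uint32_t odp_in_port;
 *     ovs_be16 initial_tci;
 *     struct flow flow;
 *     int error;
 *
 *     error = ofproto_receive(backer, packet, key, key_len, &flow,
 *                             NULL, &ofproto, &odp_in_port, &initial_tci);
 *     if (!error) {
 *         ...'flow' is now expressed in OpenFlow port numbers and
 *         'ofproto' is the bridge on which 'packet' arrived...
 *     }
 */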
static void
handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss;
    struct flow_miss misses[FLOW_MISS_MAX_BATCH];
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    int n_misses;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    n_misses = 0;
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        struct flow_miss *miss = &misses[n_misses];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        uint32_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = ofproto_receive(backer, upcall->packet, upcall->key,
                                upcall->key_len, &flow, &miss->key_fitness,
                                &ofproto, &odp_in_port, &miss->initial_tci);
        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on port for which we couldn't associate
             * an ofproto.  This can happen if a port is removed while
             * traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the kernel. */
            VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
                         flow.in_port);

            drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
            if (!drop_key) {
                drop_key = xmalloc(sizeof *drop_key);
                drop_key->key = xmemdup(upcall->key, upcall->key_len);
                drop_key->key_len = upcall->key_len;

                hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
                            hash_bytes(drop_key->key, drop_key->key_len, 0));
                dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
                              drop_key->key, drop_key->key_len, NULL, 0, NULL);
            }
            continue;
        }
        if (error) {
            continue;
        }
        flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
                     &flow.tunnel, flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&todo, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = upcall->key;
            miss->key_len = upcall->key_len;
            miss->upcall_type = upcall->type;
            miss->odp_in_port = odp_in_port;
            list_init(&miss->packets);

            n_misses++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &todo) {
        handle_flow_miss(miss, flow_miss_ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(backer->dpif, dpif_ops, n_ops);

    /* Free memory. */
    for (i = 0; i < n_ops; i++) {
        free(flow_miss_ops[i].garbage);
    }
    hmap_destroy(&todo);
}
static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
    union user_action_cookie cookie;

    /* First look at the upcall type. */
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     nl_attr_get_size(upcall->userdata));
        return BAD_UPCALL;
    }
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
    switch (cookie.type) {
    case USER_ACTION_COOKIE_SFLOW:
        return SFLOW_UPCALL;

    case USER_ACTION_COOKIE_SLOW_PATH:
        return MISS_UPCALL;

    case USER_ACTION_COOKIE_UNSPEC:
    default:
        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64,
                     nl_attr_get_u64(upcall->userdata));
        return BAD_UPCALL;
    }
}
static void
handle_sflow_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;
    uint32_t odp_in_port;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, &odp_in_port, NULL)
        || !ofproto->sflow) {
        return;
    }

    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
                        odp_in_port, &cookie);
}
static int
handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
    uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
    int n_processed;
    int n_misses;
    int i;

    ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (n_processed = 0; n_processed < max_batch; n_processed++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        struct ofpbuf *buf = &miss_bufs[n_misses];
        int error;

        ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
                        sizeof miss_buf_stubs[n_misses]);
        error = dpif_recv(backer->dpif, upcall, buf);
        if (error) {
            ofpbuf_uninit(buf);
            break;
        }

        switch (classify_upcall(upcall)) {
        case MISS_UPCALL:
            /* Handle it later. */
            n_misses++;
            break;

        case SFLOW_UPCALL:
            handle_sflow_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case BAD_UPCALL:
            ofpbuf_uninit(buf);
            break;
        }
    }

    /* Handle deferred MISS_UPCALL processing. */
    handle_miss_upcalls(backer, misses, n_misses);
    for (i = 0; i < n_misses; i++) {
        ofpbuf_uninit(&miss_bufs[i]);
    }

    return n_processed;
}
/* Flow expiration. */

static int subfacet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    int max_idle = INT32_MAX;

    /* Periodically clear out the drop keys in an effort to keep them
     * relatively few. */
    drop_key_clear(backer);

    /* Update stats for each flow in the backer. */
    update_stats(backer);

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct rule *rule, *next_rule;
        int dp_max_idle;

        if (ofproto->backer != backer) {
            continue;
        }

        /* Expire subfacets that have been idle too long. */
        dp_max_idle = subfacet_max_idle(ofproto);
        expire_subfacets(ofproto, dp_max_idle);

        max_idle = MIN(max_idle, dp_max_idle);

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule));
        }

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (bundle->bond) {
                    bond_rebalance(bundle->bond, &backer->revalidate_set);
                }
            }
        }
    }

    return MIN(max_idle, 1000);
}
/* Updates flow table statistics given that the datapath just reported 'stats'
 * as 'subfacet''s statistics. */
static void
update_subfacet_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;

    if (stats->n_packets >= subfacet->dp_packet_count) {
        uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
        facet->packet_count += extra;
    } else {
        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
    }

    if (stats->n_bytes >= subfacet->dp_byte_count) {
        facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
    }

    subfacet->dp_packet_count = stats->n_packets;
    subfacet->dp_byte_count = stats->n_bytes;

    facet->tcp_flags |= stats->tcp_flags;

    subfacet_update_time(subfacet, stats->used);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_learn(facet);
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }
    facet_push_stats(facet);
}
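/* Sketch (not original code): datapath counters are cumulative, so the facet
 * is credited with the delta since the previous reading; a reading that went
 * backward indicates a reset and is only warned about, as above.  A
 * hypothetical restatement of the delta rule: */
static inline uint64_t
cumulative_counter_delta(uint64_t current, uint64_t previous)
{
    return current >= previous ? current - previous : 0;
}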
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
 * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
static void
delete_unexpected_flow(struct ofproto_dpif *ofproto,
                       const struct nlattr *key, size_t key_len)
{
    if (!VLOG_DROP_WARN(&rl)) {
        struct ds s;

        ds_init(&s);
        odp_flow_key_format(key, key_len, &s);
        VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    COVERAGE_INC(facet_unexpected);
    dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics. */
static void
update_stats(struct dpif_backer *backer)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, backer->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct flow flow;
        struct subfacet *subfacet;
        struct ofproto_dpif *ofproto;
        struct ofport_dpif *ofport;
        uint32_t key_hash;

        if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
                            NULL, NULL)) {
            continue;
        }

        ofport = get_ofp_port(ofproto, flow.in_port);
        if (ofport && ofport->tnl_port) {
            netdev_vport_inc_rx(ofport->up.netdev, stats);
        }

        key_hash = odp_flow_key_hash(key, key_len);
        subfacet = subfacet_find(ofproto, key, key_len, key_hash);
        switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
        case SF_FAST_PATH:
            update_subfacet_stats(subfacet, stats);
            break;

        case SF_SLOW_PATH:
            /* Stats are updated per-packet. */
            break;

        case SF_NOT_INSTALLED:
        default:
            delete_unexpected_flow(ofproto, key, key_len);
            break;
        }
    }
    dpif_flow_dump_done(&dump);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistics into its rule. */
static int
subfacet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At that point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->subfacets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
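/* A worked example of the cutoff computation above (illustrative only,
 * assuming TIME_UPDATE_INTERVAL is 100 ms, so BUCKET_WIDTH == 100 and
 * N_BUCKETS == 50): with 10,000 installed subfacets and a
 * flow_eviction_threshold of 1,000, the loop accumulates buckets starting
 * from the most recently used until subtotal reaches
 * MAX(1000, 10000 / 100) == 1000.  If buckets 0..2 hold 400, 350, and 300
 * subfacets, the loop stops with bucket == 3 and the function returns
 * 3 * 100 == 300 ms as the idle cutoff. */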
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
    /* Cutoff time for most flows. */
    long long int normal_cutoff = time_msec() - dp_max_idle;

    /* We really want to keep flows for special protocols around, so use a more
     * conservative cutoff. */
    long long int special_cutoff = time_msec() - 10000;

    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->subfacets) {
        long long int cutoff;

        cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
                  ? special_cutoff
                  : normal_cutoff);
        if (subfacet->used < cutoff) {
            if (subfacet->path != SF_NOT_INSTALLED) {
                batch[n_batch++] = subfacet;
                if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                    subfacet_destroy_batch(ofproto, batch, n_batch);
                    n_batch = 0;
                }
            } else {
                subfacet_destroy(subfacet);
            }
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(ofproto, batch, n_batch);
    }
}
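/* For example (illustrative only), if subfacet_max_idle() returned 300 ms,
 * an ordinary subfacet last used 500 ms ago falls below normal_cutoff and is
 * queued for deletion, while a subfacet whose 'slow' flags include SLOW_CFM,
 * SLOW_LACP, or SLOW_STP survives until it has been idle for a full 10
 * seconds. */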
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * deletes it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    if (rule->up.pending) {
        /* We'll have to expire it later. */
        return;
    }

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout
               && now > rule->up.used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
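/* Note the units above: 'hard_timeout' and 'idle_timeout' are configured in
 * seconds, while 'modified', 'used', and time_msec() are in milliseconds,
 * hence the "* 1000".  E.g. a hard_timeout of 10 expires the rule once
 * time_msec() exceeds rule->up.modified + 10,000. */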
/* Creates and returns a new facet owned by 'rule', given a 'flow'.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * the ofproto's classifier table.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The facet will initially have no subfacets.  The caller should create (at
 * least) one subfacet with subfacet_create(). */
static struct facet *
facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    list_init(&facet->subfacets);
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    return facet;
}

static void
facet_free(struct facet *facet)
{
    free(facet);
}
/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
 * 'odp_actions' on 'packet', which arrived on the input port given by
 * 'flow'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct nlattr *odp_actions, size_t actions_len,
                    struct ofpbuf *packet)
{
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    int error;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow,
                           ofp_port_to_odp_port(ofproto, flow->in_port));

    error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
                         odp_actions, actions_len, packet);
    return !error;
}
/* Removes 'facet' from 'ofproto' and frees up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via subfacet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet, *next_subfacet;

    ovs_assert(!list_is_empty(&facet->subfacets));

    /* First uninstall all of the subfacets to get final statistics. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet_uninstall(subfacet);
    }

    /* Flush the final stats to the rule.
     *
     * This might require us to have at least one subfacet around so that we
     * can use its actions for accounting in facet_account(), which is why we
     * have uninstalled but not yet destroyed the subfacets. */
    facet_flush_stats(facet);

    /* Now we're really all done so destroy everything. */
    LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
                        &facet->subfacets) {
        subfacet_destroy__(subfacet);
    }
    hmap_remove(&ofproto->facets, &facet->hmap_node);
    list_remove(&facet->list_node);
    facet_free(facet);
}
/* Feeds information from 'facet' back into the learning table to keep it in
 * sync with what is actually flowing through the datapath. */
static void
facet_learn(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct action_xlate_ctx ctx;

    if (!facet->has_learn
        && !facet->has_normal
        && (!facet->has_fin_timeout
            || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
        return;
    }

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                          facet->flow.vlan_tci,
                          facet->rule, facet->tcp_flags, NULL);
    ctx.may_learn = true;
    xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
                                   facet->rule->up.ofpacts_len);
}
static void
facet_account(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;
    const struct nlattr *a;
    unsigned int left;
    ovs_be16 vlan_tci;
    uint64_t n_bytes;

    if (!facet->has_normal || !ofproto->has_bonded_bundles) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bond.)
     *
     * We use the actions from an arbitrary subfacet because they should all
     * be equally valid for our purpose. */
    subfacet = CONTAINER_OF(list_front(&facet->subfacets),
                            struct subfacet, list_node);
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left,
                             subfacet->actions, subfacet->actions_len) {
        const struct ovs_action_push_vlan *vlan;
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP_VLAN:
            vlan_tci = htons(0);
            break;

        case OVS_ACTION_ATTR_PUSH_VLAN:
            vlan = nl_attr_get(a);
            vlan_tci = vlan->vlan_tci;
            break;
        }
    }
}
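/* For example (illustrative only), given the datapath actions
 * "pop_vlan,output:1,push_vlan(vid=17),output:2" for a flow that arrived
 * with VLAN 99, the loop above credits the bytes for output:1 to VID 0
 * (the pop zeroed vlan_tci) and the bytes for output:2 to VID 17, matching
 * what bond_choose_output_slave() saw during translation. */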
/* Returns true if the only action for 'facet' is to send to the controller.
 * (We don't report NetFlow expiration messages for such facets because they
 * are just part of the control logic for the network, not real traffic.) */
static bool
facet_is_controller_flow(struct facet *facet)
{
    if (facet) {
        const struct rule *rule = &facet->rule->up;
        const struct ofpact *ofpacts = rule->ofpacts;
        size_t ofpacts_len = rule->ofpacts_len;

        if (ofpacts_len > 0 &&
            ofpacts->type == OFPACT_CONTROLLER &&
            ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
            return true;
        }
    }
    return false;
}
/* Folds all of 'facet''s statistics into its rule.  Also updates the
 * accounting ofhook and emits a NetFlow expiration if appropriate.  All of
 * 'facet''s statistics in the datapath should have been zeroed and folded into
 * its packet and byte counts before this function is called. */
static void
facet_flush_stats(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        ovs_assert(!subfacet->dp_byte_count);
        ovs_assert(!subfacet->dp_packet_count);
    }

    facet_push_stats(facet);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }

    if (ofproto->netflow && !facet_is_controller_flow(facet)) {
        struct ofexpired expired;
        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }

    facet->rule->packet_count += facet->packet_count;
    facet->rule->byte_count += facet->byte_count;

    /* Reset counters to prevent double counting if 'facet' ever gets
     * reinstalled. */
    facet_reset_counters(facet);

    netflow_flow_clear(&facet->nf_flow);
    facet->tcp_flags = 0;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto,
           const struct flow *flow, uint32_t hash)
{
    struct facet *facet;

    HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
        if (flow_equal(flow, &facet->flow)) {
            return facet;
        }
    }

    return NULL;
}

/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * 'hash' must be the return value of flow_hash(flow, 0).
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint32_t hash)
{
    struct facet *facet;

    facet = facet_find(ofproto, flow, hash);
    if (facet
        && (ofproto->backer->need_revalidate
            || tag_set_intersects(&ofproto->backer->revalidate_set,
                                  facet->tags))) {
        facet_revalidate(facet);

        /* facet_revalidate() may have destroyed 'facet'. */
        facet = facet_find(ofproto, flow, hash);
    }

    return facet;
}
static const char *
subfacet_path_to_string(enum subfacet_path path)
{
    switch (path) {
    case SF_NOT_INSTALLED:
        return "not installed";
    case SF_FAST_PATH:
        return "in fast path";
    case SF_SLOW_PATH:
        return "in slow path";
    default:
        return "<error>";
    }
}

/* Returns the path in which a subfacet should be installed if its 'slow'
 * member has the specified value. */
static enum subfacet_path
subfacet_want_path(enum slow_path_reason slow)
{
    return slow ? SF_SLOW_PATH : SF_FAST_PATH;
}
/* Returns true if 'subfacet' needs to have its datapath flow updated,
 * supposing that its actions have been recalculated as 'want_actions' and that
 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
static bool
subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
                        const struct ofpbuf *want_actions)
{
    enum subfacet_path want_path = subfacet_want_path(slow);
    return (want_path != subfacet->path
            || (want_path == SF_FAST_PATH
                && (subfacet->actions_len != want_actions->size
                    || memcmp(subfacet->actions, want_actions->data,
                              subfacet->actions_len))));
}
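/* In other words, a datapath update is needed when the subfacet is on the
 * wrong path entirely (fast vs. slow), or when it is on the fast path but
 * its installed actions differ byte-for-byte from the newly translated
 * ones.  Matching slow-path subfacets are left alone because their actions
 * are regenerated per packet anyway. */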
static bool
facet_check_consistency(struct facet *facet)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);

    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    struct rule_dpif *rule;
    struct subfacet *subfacet;
    bool may_log = false;
    bool ok;

    /* Check the rule for consistency. */
    rule = rule_dpif_lookup(ofproto, &facet->flow);
    ok = rule == facet->rule;
    if (!ok) {
        may_log = !VLOG_DROP_WARN(&rl);
        if (may_log) {
            struct ds s;

            ds_init(&s);
            flow_format(&s, &facet->flow);
            ds_put_format(&s, ": facet associated with wrong rule (was "
                          "table=%"PRIu8",", facet->rule->up.table_id);
            cls_rule_format(&facet->rule->up.cr, &s);
            ds_put_format(&s, ") (should have been table=%"PRIu8",",
                          rule->up.table_id);
            cls_rule_format(&rule->up.cr, &s);
            ds_put_char(&s, ')');

            VLOG_WARN("%s", ds_cstr(&s));
            ds_destroy(&s);
        }
    }

    /* Check the datapath actions for consistency. */
    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        enum subfacet_path want_path;
        struct action_xlate_ctx ctx;
        struct ds s;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              subfacet->initial_tci, rule, 0, NULL);
        xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        if (subfacet->path == SF_NOT_INSTALLED) {
            /* This only happens if the datapath reported an error when we
             * tried to install the flow.  Don't flag another error here. */
            continue;
        }

        want_path = subfacet_want_path(subfacet->slow);
        if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
            /* The actions for slow-path flows may legitimately vary from one
             * packet to the next.  We're done. */
            continue;
        }

        if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
            continue;
        }

        /* Inconsistency! */
        if (ok) {
            may_log = !VLOG_DROP_WARN(&rl);
            ok = false;
        }
        if (!may_log) {
            /* Rate-limited, skip reporting. */
            continue;
        }

        ds_init(&s);
        odp_flow_key_format(subfacet->key, subfacet->key_len, &s);

        ds_put_cstr(&s, ": inconsistency in subfacet");
        if (want_path != subfacet->path) {
            enum odp_key_fitness fitness = subfacet->key_fitness;

            ds_put_format(&s, " (%s, fitness=%s)",
                          subfacet_path_to_string(subfacet->path),
                          odp_key_fitness_to_string(fitness));
            ds_put_format(&s, " (should have been %s)",
                          subfacet_path_to_string(want_path));
        } else if (want_path == SF_FAST_PATH) {
            ds_put_cstr(&s, " (actions were: ");
            format_odp_actions(&s, subfacet->actions,
                               subfacet->actions_len);
            ds_put_cstr(&s, ") (correct actions: ");
            format_odp_actions(&s, odp_actions.data, odp_actions.size);
            ds_put_char(&s, ')');
        } else {
            ds_put_cstr(&s, " (actions: ");
            format_odp_actions(&s, subfacet->actions,
                               subfacet->actions_len);
            ds_put_char(&s, ')');
        }
        VLOG_WARN("%s", ds_cstr(&s));
        ds_destroy(&s);
    }
    ofpbuf_uninit(&odp_actions);

    return ok;
}
/* Re-searches the classifier for 'facet':
 *
 *   - If the rule found is different from 'facet''s current rule, moves
 *     'facet' to the new rule and recompiles its actions.
 *
 *   - If the rule found is the same as 'facet''s current rule, leaves 'facet'
 *     where it is and recompiles its actions anyway.
 *
 *   - If any of 'facet''s subfacets correspond to a new flow according to
 *     ofproto_receive(), 'facet' is removed. */
static void
facet_revalidate(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct actions {
        struct nlattr *odp_actions;
        size_t actions_len;
    };
    struct actions *new_actions;

    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    struct rule_dpif *new_rule;
    struct subfacet *subfacet;
    int i;

    COVERAGE_INC(facet_revalidate);

    /* Check that child subfacets still correspond to this facet.  Tunnel
     * configuration changes could cause a subfacet's OpenFlow in_port to
     * change. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        struct ofproto_dpif *recv_ofproto;
        struct flow recv_flow;
        int error;

        error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
                                subfacet->key_len, &recv_flow, NULL,
                                &recv_ofproto, NULL, NULL);
        if (error
            || recv_ofproto != ofproto
            || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
            facet_remove(facet);
            return;
        }
    }

    new_rule = rule_dpif_lookup(ofproto, &facet->flow);

    /* Calculate new datapath actions.
     *
     * We do not modify any 'facet' state yet, because we might need to, e.g.,
     * emit a NetFlow expiration and, if so, we need to have the old state
     * around to properly compose it. */

    /* If the datapath actions changed or the installability changed,
     * then we need to talk to the datapath. */
    i = 0;
    new_actions = NULL;
    memset(&ctx, 0, sizeof ctx);
    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        enum slow_path_reason slow;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
                              subfacet->initial_tci, new_rule, 0, NULL);
        xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
                      &odp_actions);

        slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
        if (subfacet_should_install(subfacet, slow, &odp_actions)) {
            struct dpif_flow_stats stats;

            subfacet_install(subfacet,
                             odp_actions.data, odp_actions.size, &stats, slow);
            subfacet_update_stats(subfacet, &stats);

            if (!new_actions) {
                new_actions = xcalloc(list_size(&facet->subfacets),
                                      sizeof *new_actions);
            }
            new_actions[i].odp_actions = xmemdup(odp_actions.data,
                                                 odp_actions.size);
            new_actions[i].actions_len = odp_actions.size;
        }

        i++;
    }
    ofpbuf_uninit(&odp_actions);

    if (new_actions) {
        facet_flush_stats(facet);
    }

    /* Update 'facet' now that we've taken care of all the old state. */
    facet->tags = ctx.tags;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->mirrors = ctx.mirrors;

    i = 0;
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;

        if (new_actions && new_actions[i].odp_actions) {
            free(subfacet->actions);
            subfacet->actions = new_actions[i].odp_actions;
            subfacet->actions_len = new_actions[i].actions_len;
        }
        i++;
    }
    free(new_actions);

    if (facet->rule != new_rule) {
        COVERAGE_INC(facet_changed_rule);
        list_remove(&facet->list_node);
        list_push_back(&new_rule->facets, &facet->list_node);
        facet->rule = new_rule;
        facet->used = new_rule->up.created;
        facet->prev_used = facet->used;
    }
}
/* Updates 'facet''s used time.  Caller is responsible for calling
 * facet_push_stats() to update the flows which 'facet' resubmits into. */
static void
facet_update_time(struct facet *facet, long long int used)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    if (used > facet->used) {
        facet->used = used;
        ofproto_rule_update_used(&facet->rule->up, used);
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
    }
}
static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}
static void
facet_push_stats(struct facet *facet)
{
    struct dpif_flow_stats stats;

    ovs_assert(facet->packet_count >= facet->prev_packet_count);
    ovs_assert(facet->byte_count >= facet->prev_byte_count);
    ovs_assert(facet->used >= facet->prev_used);

    stats.n_packets = facet->packet_count - facet->prev_packet_count;
    stats.n_bytes = facet->byte_count - facet->prev_byte_count;
    stats.used = facet->used;
    stats.tcp_flags = 0;

    if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        flow_push_stats(facet->rule, &facet->flow, &stats);

        update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
                            facet->mirrors, stats.n_packets, stats.n_bytes);
    }
}
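/* The 'prev_*' members make this push incremental: only the delta since the
 * previous call is forwarded.  E.g. (illustrative only) if packet_count went
 * from 10 to 25 between calls, flow_push_stats() is credited with
 * n_packets == 15, and prev_packet_count is advanced to 25 so the same
 * packets are never pushed twice. */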
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
    rule->packet_count += stats->n_packets;
    rule->byte_count += stats->n_bytes;
    ofproto_rule_update_used(&rule->up, stats->used);
}
/* Pushes flow statistics to the rules which 'flow' resubmits into given
 * 'rule''s actions and mirrors. */
static void
flow_push_stats(struct rule_dpif *rule,
                const struct flow *flow, const struct dpif_flow_stats *stats)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct action_xlate_ctx ctx;

    ofproto_rule_update_used(&rule->up, stats->used);

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
                          0, NULL);
    ctx.resubmit_stats = stats;
    xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
                                   rule->up.ofpacts_len);
}
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
              const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &ofproto->subfacets) {
        if (subfacet->key_len == key_len
            && !memcmp(key, subfacet->key, key_len)) {
            return subfacet;
        }
    }

    return NULL;
}
/* Searches 'facet' (within 'ofproto') for a subfacet matching the
 * 'key_fitness', 'key', and 'key_len' members of 'miss'.  Returns the
 * existing subfacet if there is one, otherwise creates and returns a
 * new subfacet.
 *
 * If the returned subfacet is new, then subfacet->actions will be NULL, in
 * which case the caller must populate the actions with
 * subfacet_make_actions(). */
static struct subfacet *
subfacet_create(struct facet *facet, struct flow_miss *miss,
                long long int now)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum odp_key_fitness key_fitness = miss->key_fitness;
    const struct nlattr *key = miss->key;
    size_t key_len = miss->key_len;
    uint32_t key_hash;
    struct subfacet *subfacet;

    key_hash = odp_flow_key_hash(key, key_len);

    if (list_is_empty(&facet->subfacets)) {
        subfacet = &facet->one_subfacet;
    } else {
        subfacet = subfacet_find(ofproto, key, key_len, key_hash);
        if (subfacet) {
            if (subfacet->facet == facet) {
                return subfacet;
            }

            /* This shouldn't happen. */
            VLOG_ERR_RL(&rl, "subfacet with wrong facet");
            subfacet_destroy(subfacet);
        }

        subfacet = xmalloc(sizeof *subfacet);
    }

    hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->key_fitness = key_fitness;
    subfacet->key = xmemdup(key, key_len);
    subfacet->key_len = key_len;
    subfacet->used = now;
    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
    subfacet->actions_len = 0;
    subfacet->actions = NULL;
    subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
                      ? SLOW_MATCH
                      : 0);
    subfacet->path = SF_NOT_INSTALLED;
    subfacet->initial_tci = miss->initial_tci;
    subfacet->odp_in_port = miss->odp_in_port;

    return subfacet;
}
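/* Note the 'one_subfacet' optimization above: the common case of a facet
 * with a single subfacet reuses storage embedded in the facet itself, so no
 * separate allocation is needed; subfacet_destroy__() below only calls
 * free() for subfacets that were allocated separately. */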
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    subfacet_uninstall(subfacet);
    hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    free(subfacet->actions);
    if (subfacet != &facet->one_subfacet) {
        free(subfacet);
    }
}

/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(facet);
    } else {
        subfacet_destroy__(subfacet);
    }
}
static void
subfacet_destroy_batch(struct ofproto_dpif *ofproto,
                       struct subfacet **subfacets, int n)
{
    struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
    int i;

    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        ops[i].u.flow_del.key = subfacets[i]->key;
        ops[i].u.flow_del.key_len = subfacets[i]->key_len;
        ops[i].u.flow_del.stats = &stats[i];
        opsp[i] = &ops[i];
    }

    dpif_operate(ofproto->backer->dpif, opsp, n);
    for (i = 0; i < n; i++) {
        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
        subfacets[i]->path = SF_NOT_INSTALLED;
        subfacet_destroy(subfacets[i]);
    }
}
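/* Batching the deletions lets dpif_operate() push up to
 * SUBFACET_DESTROY_MAX_BATCH flow-del operations to the datapath in a
 * single round trip, instead of one dpif call per expired subfacet. */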
/* Composes the datapath actions for 'subfacet' based on its rule's actions.
 * Translates the actions into 'odp_actions', which the caller must have
 * initialized and is responsible for uninitializing. */
static void
subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
                      struct ofpbuf *odp_actions)
{
    struct facet *facet = subfacet->facet;
    struct rule_dpif *rule = facet->rule;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    struct action_xlate_ctx ctx;

    action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
                          rule, 0, packet);
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
    facet->tags = ctx.tags;
    facet->has_learn = ctx.has_learn;
    facet->has_normal = ctx.has_normal;
    facet->has_fin_timeout = ctx.has_fin_timeout;
    facet->nf_flow.output_iface = ctx.nf_output_iface;
    facet->mirrors = ctx.mirrors;

    subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
    if (subfacet->actions_len != odp_actions->size
        || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
        free(subfacet->actions);
        subfacet->actions_len = odp_actions->size;
        subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
    }
}
/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
 * in the datapath will be zeroed and 'stats' will be updated with traffic new
 * since 'subfacet' was last updated.
 *
 * Returns 0 if successful, otherwise a positive errno value. */
static int
subfacet_install(struct subfacet *subfacet,
                 const struct nlattr *actions, size_t actions_len,
                 struct dpif_flow_stats *stats,
                 enum slow_path_reason slow)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path path = subfacet_want_path(slow);
    uint64_t slow_path_stub[128 / 8];
    enum dpif_flow_put_flags flags;
    int ret;

    flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
    if (stats) {
        flags |= DPIF_FP_ZERO_STATS;
    }

    if (path == SF_SLOW_PATH) {
        compose_slow_path(ofproto, &facet->flow, slow,
                          slow_path_stub, sizeof slow_path_stub,
                          &actions, &actions_len);
    }

    ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
                        subfacet->key_len, actions, actions_len, stats);

    if (stats) {
        subfacet_reset_dp_stats(subfacet, stats);
    }

    if (!ret) {
        subfacet->path = path;
    }
    return ret;
}
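/* When the subfacet belongs in the slow path, the caller's actions are
 * discarded and replaced, via compose_slow_path(), with a single userspace
 * action, so every matching packet comes back up to userspace instead of
 * being forwarded by the datapath. */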
static int
subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
{
    return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
                            stats, subfacet->slow);
}
/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
{
    if (subfacet->path != SF_NOT_INSTALLED) {
        struct rule_dpif *rule = subfacet->facet->rule;
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
        struct dpif_flow_stats stats;
        int error;

        error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
                              subfacet->key_len, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(subfacet, &stats);
        }
        subfacet->path = SF_NOT_INSTALLED;
    } else {
        ovs_assert(subfacet->dp_packet_count == 0);
        ovs_assert(subfacet->dp_byte_count == 0);
    }
}
/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}
/* Updates 'subfacet''s used time.  The caller is responsible for calling
 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
static void
subfacet_update_time(struct subfacet *subfacet, long long int used)
{
    if (used > subfacet->used) {
        subfacet->used = used;
        facet_update_time(subfacet->facet, used);
    }
}
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet_update_time(subfacet, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet->tcp_flags |= stats->tcp_flags;
        facet_push_stats(facet);
        netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
    }
}
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct rule_dpif *rule;

    rule = rule_dpif_lookup__(ofproto, flow, 0);
    if (rule) {
        return rule;
    }

    return rule_dpif_miss_rule(ofproto, flow);
}

static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
                   uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id].cls;
    if (flow->nw_frag & FLOW_NW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
         * are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }
    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}

static struct rule_dpif *
rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct ofport_dpif *port;

    port = get_ofp_port(ofproto, flow->in_port);
    if (!port) {
        VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
        return ofproto->miss_rule;
    }

    if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
        return ofproto->no_packet_in_rule;
    }
    return ofproto->miss_rule;
}
static void
complete_operation(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    rule_invalidate(rule);
    if (clogged) {
        struct dpif_completion *c = xmalloc(sizeof *c);
        c->op = rule->up.pending;
        list_push_back(&ofproto->completions, &c->list_node);
    } else {
        ofoperation_complete(rule->up.pending, 0);
    }
}
static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xmalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}
static enum ofperr
rule_construct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct rule_dpif *victim;
    uint8_t table_id;

    rule->packet_count = 0;
    rule->byte_count = 0;

    victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
    if (victim && !list_is_empty(&victim->facets)) {
        struct facet *facet;

        rule->facets = victim->facets;
        list_moved(&rule->facets);
        LIST_FOR_EACH (facet, list_node, &rule->facets) {
            /* XXX: We're only clearing our local counters here.  It's possible
             * that quite a few packets are unaccounted for in the datapath
             * statistics.  These will be accounted to the new rule instead of
             * cleared as required.  This could be fixed by clearing out the
             * datapath statistics for this facet, but currently it doesn't
             * seem worth it. */
            facet_reset_counters(facet);
            facet->rule = rule;
        }
    } else {
        /* Must avoid list_moved() in this case. */
        list_init(&rule->facets);
    }

    table_id = rule->up.table_id;
    if (victim) {
        rule->tag = victim->tag;
    } else if (table_id == 0) {
        rule->tag = 0;
    } else {
        struct flow flow;

        miniflow_expand(&rule->up.cr.match.flow, &flow);
        rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
                                       ofproto->tables[table_id].basis);
    }

    complete_operation(rule);
    return 0;
}
static void
rule_destruct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet, *next_facet;

    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(facet);
    }

    complete_operation(rule);
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;

    /* Add any statistics that are tracked by facets.  This includes
     * statistical data recently updated by ofproto_update_stats() as well as
     * stats for packets that were executed "by hand" via dpif_execute(). */
    LIST_FOR_EACH (facet, list_node, &rule->facets) {
        *packets += facet->packet_count;
        *bytes += facet->byte_count;
    }
}
static void
rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
                  struct ofpbuf *packet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    struct dpif_flow_stats stats;

    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
    rule_credit_stats(rule, &stats);

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
                          rule, stats.tcp_flags, packet);
    ctx.resubmit_stats = &stats;
    xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);

    execute_odp_actions(ofproto, flow, odp_actions.data,
                        odp_actions.size, packet);

    ofpbuf_uninit(&odp_actions);
}
static enum ofperr
rule_execute(struct rule *rule, const struct flow *flow,
             struct ofpbuf *packet)
{
    rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
    ofpbuf_delete(packet);
    return 0;
}
static void
rule_modify_actions(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    complete_operation(rule);
}
/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf key, odp_actions;
    struct odputil_keybuf keybuf;
    uint32_t odp_port;
    struct flow flow;
    int error;

    flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofproto_dpif *peer_ofproto;
        struct dpif_flow_stats stats;
        struct ofport_dpif *peer;
        struct rule_dpif *rule;

        peer = ofport_get_peer(ofport);
        if (!peer) {
            return ENODEV;
        }

        dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
        netdev_vport_inc_tx(ofport->up.netdev, &stats);
        netdev_vport_inc_rx(peer->up.netdev, &stats);

        flow.in_port = peer->up.ofp_port;
        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        rule = rule_dpif_lookup(peer_ofproto, &flow);
        rule_dpif_execute(rule, &flow, packet);
        return 0;
    }

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);

    if (ofport->tnl_port) {
        struct dpif_flow_stats stats;

        odp_port = tnl_port_send(ofport->tnl_port, &flow);
        if (odp_port == OVSP_NONE) {
            return ENODEV;
        }

        dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
        netdev_vport_inc_tx(ofport->up.netdev, &stats);
        odp_put_tunnel_action(&flow.tunnel, &odp_actions);
        odp_put_skb_mark_action(flow.skb_mark, &odp_actions);
    } else {
        odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
                                          flow.vlan_tci);
        if (odp_port != ofport->odp_port) {
            eth_pop_vlan(packet);
            flow.vlan_tci = htons(0);
        }
    }

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, &flow,
                           ofp_port_to_odp_port(ofproto, flow.in_port));

    compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);

    nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
    error = dpif_execute(ofproto->backer->dpif,
                         key.data, key.size,
                         odp_actions.data, odp_actions.size,
                         packet);
    ofpbuf_uninit(&odp_actions);

    if (error) {
        VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
                     ofproto->up.name, odp_port, strerror(error));
    }
    ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
    return error;
}
/* OpenFlow to datapath action translation. */

static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
 * The action will state 'slow' as the reason that the action is in the slow
 * path.  (This is purely informational: it allows a human viewing "ovs-dpctl
 * dump-flows" output to see why a flow is in the slow path.)
 *
 * The 'stub_size' bytes in 'stub' will be used to store the action.
 * 'stub_size' must be large enough for the action.
 *
 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
 * respectively. */
static void
compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
                  enum slow_path_reason slow,
                  uint64_t *stub, size_t stub_size,
                  const struct nlattr **actionsp, size_t *actions_lenp)
{
    union user_action_cookie cookie;
    struct ofpbuf buf;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = slow;

    ofpbuf_use_stack(&buf, stub, stub_size);
    if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
        uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
        odp_put_userspace_action(pid, &cookie, sizeof cookie, &buf);
    } else {
        put_userspace_action(ofproto, &buf, flow, &cookie);
    }
    *actionsp = buf.data;
    *actions_lenp = buf.size;
}
static size_t
put_userspace_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     const union user_action_cookie *cookie)
{
    uint32_t pid;

    pid = dpif_port_get_pid(ofproto->backer->dpif,
                            ofp_port_to_odp_port(ofproto, flow->in_port));

    return odp_put_userspace_action(pid, cookie, sizeof *cookie, odp_actions);
}
static void
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}
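/* Examples of the resulting encoding (illustrative only): a packet output to
 * a single port whose ifindex is 7 yields cookie->sflow.output == 7; a
 * packet flooded to 3 ports yields 0x80000003; a dropped packet yields
 * 0x40000100 (0x40000000 | 256). */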
/* Composes a SAMPLE action for sFlow. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;
    size_t sample_offset, actions_offset;
    int cookie_offset;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    /* Number of packets out of UINT_MAX to sample. */
    probability = dpif_sflow_get_probability(ofproto->sflow);
    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}
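/* The resulting nested layout is roughly (illustrative only):
 *
 *     sample(sample=P%, actions(userspace(pid=..., sFlow cookie)))
 *
 * where P is derived from the configured sampling probability.  The returned
 * cookie offset lets fix_sflow_action() patch the cookie once the real
 * output port and VLAN are known. */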
/* The SAMPLE action must be the first action in any given list of actions.
 * At this point we do not yet have all the information required to build it,
 * so we build the sample action as completely as possible. */
static void
add_sflow_action(struct action_xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   ctx->odp_actions,
                                                   &ctx->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}
/* Fixes the SAMPLE action according to data collected while composing ODP
 * actions: we need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS
 * attribute, i.e. the nested USERSPACE action's user-cookie, which is
 * required for sFlow. */
static void
fix_sflow_action(struct action_xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                       sizeof(*cookie));
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
static void
compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
    ovs_be64 flow_tun_id = ctx->flow.tunnel.tun_id;
    uint8_t flow_nw_tos = ctx->flow.nw_tos;
    struct priority_to_dscp *pdscp;
    uint32_t out_port, odp_port;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 19);

    if (!ofport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofport_dpif *peer = ofport_get_peer(ofport);
        struct flow old_flow = ctx->flow;
        const struct ofproto_dpif *peer_ofproto;
        enum slow_path_reason special;
        struct ofport_dpif *in_port;

        if (!peer) {
            xlate_report(ctx, "Nonexistent patch port peer");
            return;
        }

        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        if (peer_ofproto->backer != ctx->ofproto->backer) {
            xlate_report(ctx, "Patch port peer on a different datapath");
            return;
        }

        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
        ctx->flow.in_port = peer->up.ofp_port;
        ctx->flow.metadata = htonll(0);
        memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
        memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);

        in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
        special = process_special(ctx->ofproto, &ctx->flow, in_port,
                                  ctx->packet);
        if (special) {
            ctx->slow |= special;
        } else if (!in_port || may_receive(in_port, ctx)) {
            if (!in_port || stp_forward_in_state(in_port->stp_state)) {
                xlate_table_action(ctx, ctx->flow.in_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->odp_actions->size;
                xlate_table_action(ctx, ctx->flow.in_port, 0, true);
                ctx->base_flow = old_base_flow;
                ctx->odp_actions->size = old_size;
            }
        }

        ctx->flow = old_flow;
        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ctx->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
            netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
        }

        return;
    }

    pdscp = get_priority(ofport, ctx->flow.skb_priority);
    if (pdscp) {
        ctx->flow.nw_tos &= ~IP_DSCP_MASK;
        ctx->flow.nw_tos |= pdscp->dscp;
    }

    odp_port = ofp_port_to_odp_port(ctx->ofproto, ofp_port);
    if (ofport->tnl_port) {
        odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
        if (odp_port == OVSP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }

        if (ctx->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
                                 ctx->odp_actions);
    } else {
        out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
                                          ctx->flow.vlan_tci);
        if (out_port != odp_port) {
            ctx->flow.vlan_tci = htons(0);
        }
    }
    commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->nf_output_iface = ofp_port;
    ctx->flow.tunnel.tun_id = flow_tun_id;
    ctx->flow.vlan_tci = flow_vlan_tci;
 out:
    ctx->flow.nw_tos = flow_nw_tos;
}
static void
compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}
static void
xlate_table_action(struct action_xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct ofproto_dpif *ofproto = ctx->ofproto;
        struct rule_dpif *rule;
        uint16_t old_in_port;
        uint8_t old_table_id;

        old_table_id = ctx->table_id;
        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        old_in_port = ctx->flow.in_port;
        ctx->flow.in_port = in_port;
        rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);

        /* Tag the flow. */
        if (table_id > 0 && table_id < N_TABLES) {
            struct table_dpif *table = &ofproto->tables[table_id];
            if (table->other_table) {
                ctx->tags |= (rule && rule->tag
                              ? rule->tag
                              : rule_calculate_tag(&ctx->flow,
                                                   &table->other_table->mask,
                                                   table->basis));
            }
        }

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->flow.in_port = old_in_port;

        if (ctx->resubmit_hook) {
            ctx->resubmit_hook(ctx, rule);
        }

        if (rule == NULL && may_packet_in) {
            /* XXX
             * check if table configuration flags
             * OFPTC_TABLE_MISS_CONTROLLER, default.
             * OFPTC_TABLE_MISS_CONTINUE,
             * OFPTC_TABLE_MISS_DROP
             * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? */
            rule = rule_dpif_miss_rule(ofproto, &ctx->flow);
        }

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            if (ctx->resubmit_stats) {
                rule_credit_stats(rule, ctx->resubmit_stats);
            }

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}
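/* MAX_RESUBMIT_RECURSION bounds chains of resubmits so that a flow table
 * that resubmits in a cycle (e.g. a table resubmitting to itself) cannot
 * recurse forever; translation is cut off with a rate-limited error and
 * 'max_resubmit_trigger' is set instead. */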
static void
xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}
static void
flood_packets(struct action_xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->nf_output_iface = NF_OUT_FLOOD;
}
static void
execute_controller_action(struct action_xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;

    ctx->slow |= SLOW_CONTROLLER;
    if (!ctx->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->packet);

    if (packet->l2 && packet->l3) {
        struct eth_header *eh;
        uint16_t mpls_depth;

        eth_pop_vlan(packet);
        eh = packet->l2;

        memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
        memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);

        if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
            eth_push_vlan(packet, ctx->flow.vlan_tci);
        }

        mpls_depth = eth_mpls_depth(packet);

        if (mpls_depth < ctx->flow.mpls_depth) {
            push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
        } else if (mpls_depth > ctx->flow.mpls_depth) {
            pop_mpls(packet, ctx->flow.dl_type);
        } else if (mpls_depth) {
            set_mpls_lse(packet, ctx->flow.mpls_lse);
        }

        if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
            packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
                            ctx->flow.nw_tos, ctx->flow.nw_ttl);
        }

        if (ctx->flow.nw_proto == IPPROTO_TCP) {
            packet_set_tcp_port(packet, ctx->flow.tp_src,
                                ctx->flow.tp_dst);
        } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
            packet_set_udp_port(packet, ctx->flow.tp_src,
                                ctx->flow.tp_dst);
        }
    }

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
}
static void
execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(eth_type));

    if (ctx->base_flow.mpls_depth) {
        ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
        ctx->flow.mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
        ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
        ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        ctx->flow.encap_dl_type = ctx->flow.dl_type;
        ctx->flow.mpls_depth = 1;
    }
    ctx->flow.dl_type = eth_type;
}
static void
execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(ctx->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    if (ctx->flow.mpls_depth) {
        ctx->flow.mpls_depth--;
        ctx->flow.mpls_lse = htonl(0);
        if (!ctx->flow.mpls_depth) {
            ctx->flow.dl_type = eth_type;
            ctx->flow.encap_dl_type = htons(0);
        }
    }
}
static bool
compose_dec_ttl(struct action_xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
        ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
        return false;
    }

    if (ctx->flow.nw_ttl > 1) {
        ctx->flow.nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}
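/* For example (illustrative only), a dec_ttl action carrying controller IDs
 * 1 and 3 applied to a packet with nw_ttl == 1 sends an OFPR_INVALID_TTL
 * packet-in to each of those controllers and returns true so the caller
 * stops processing; with nw_ttl > 1 the TTL is simply decremented and
 * translation continues. */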
static bool
execute_set_mpls_ttl_action(struct action_xlate_ctx *ctx, uint8_t ttl)
{
    if (!eth_type_mpls(ctx->flow.dl_type)) {
        return true;
    }

    set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
    return false;
}

static bool
execute_dec_mpls_ttl_action(struct action_xlate_ctx *ctx)
{
    uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);

    if (!eth_type_mpls(ctx->flow.dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}
static void
xlate_output_action(struct action_xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len, bool may_packet_in)
{
    uint16_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->flow.in_port, 0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->flow.in_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
    if (port <= UINT16_MAX) {
        xlate_output_action(ctx, port, or->max_len, false);
    }
}
static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                   queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->flow.in_port;
    } else if (ofp_port == ctx->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->flow.skb_priority;
    ctx->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = ofp_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                queue_id, &skb_priority)) {
        ctx->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}
struct xlate_reg_state {
    ovs_be16 vlan_tci;
    ovs_be64 tun_id;
};
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_bundle_action(struct action_xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb,
                          ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}
static void
xlate_learn_action(struct action_xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}
/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
static void
xlate_fin_timeout(struct action_xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}
static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct action_xlate_ctx *ctx)
{
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            ctx->flow.vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            ctx->flow.vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DST:
            ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            break;

        case OFPACT_SET_L4_DST_PORT:
            ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            ctx->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
            break;

        case OFPACT_PUSH_MPLS:
            execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (execute_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (execute_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, ofpact_get_LEARN(a));
            }
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            ctx->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time. */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            ctx->flow.metadata &= ~metadata->mask;
            ctx->flow.metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_GOTO_TABLE: {
            /* XXX remove recursion */
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            ovs_assert(ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->flow.in_port, ogt->table_id, true);
            break;
        }
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}
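/* Worked example (illustrative) of the vlan_tci bit twiddling in the
 * OFPACT_SET_VLAN_VID case above, using the standard 802.1Q layout
 * (VLAN_VID_MASK 0x0fff, VLAN_PCP_MASK 0xe000, VLAN_CFI 0x1000): setting
 * VID 10 on an untagged flow yields vlan_tci == htons(0x1000 | 10), i.e.
 * the CFI bit records that a tag is present while the low 12 bits carry
 * the VID; a later OFPACT_STRIP_VLAN resets the whole field to htons(0). */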
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      ovs_be16 initial_tci, struct rule_dpif *rule,
                      uint8_t tcp_flags, const struct ofpbuf *packet)
{
    ovs_be64 initial_tun_id = flow->tunnel.tun_id;

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel 'flow' is largely cleared when transitioning between
     *   the input and output stages since it does not make sense to output
     *   a packet with the exact headers that it was received with (i.e.
     *   the destination IP is us).  The one exception is the tun_id, which
     *   is preserved to allow use in later resubmit lookups and loads into
     *   registers.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
    ctx->base_flow = ctx->flow;
    ctx->base_flow.vlan_tci = initial_tci;
    ctx->flow.tunnel.tun_id = initial_tun_id;
    ctx->rule = rule;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->tcp_flags = tcp_flags;
    ctx->resubmit_hook = NULL;
    ctx->report_hook = NULL;
    ctx->resubmit_stats = NULL;
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions in 'odp_actions', using 'ctx'. */
static void
xlate_actions(struct action_xlate_ctx *ctx,
              const struct ofpact *ofpacts, size_t ofpacts_len,
              struct ofpbuf *odp_actions)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    enum slow_path_reason special;
    struct ofport_dpif *in_port;
    struct flow orig_flow;

    COVERAGE_INC(ofproto_dpif_xlate);

    ofpbuf_clear(odp_actions);
    ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);

    ctx->odp_actions = odp_actions;
    ctx->tags = 0;
    ctx->slow = 0;
    ctx->has_learn = false;
    ctx->has_normal = false;
    ctx->has_fin_timeout = false;
    ctx->nf_output_iface = NF_OUT_DROP;
    ctx->mirrors = 0;
    ctx->recurse = 0;
    ctx->max_resubmit_trigger = false;
    ctx->orig_skb_priority = ctx->flow.skb_priority;
    ctx->table_id = 0;
    ctx->exit = false;

    if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = ctx->flow;
    }

    if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx->ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
            ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
    special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
    if (special) {
        ctx->slow |= special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
        uint32_t local_odp_port;

        add_sflow_action(ctx);

        if (!in_port || may_receive(in_port, ctx)) {
            do_xlate_actions(ofpacts, ofpacts_len, ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ofpbuf_clear(ctx->odp_actions);
                add_sflow_action(ctx);
            }
        }

        if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
                              initial_tci, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
        if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
                                     local_odp_port,
                                     ctx->odp_actions->data,
                                     ctx->odp_actions->size)) {
            ctx->slow |= SLOW_IN_BAND;
            if (ctx->packet
                && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
                                       ctx->packet)) {
                compose_output_action(ctx, OFPP_LOCAL);
            }
        }
        if (ctx->ofproto->has_mirrors) {
            add_mirror_actions(ctx, &orig_flow);
        }
        fix_sflow_action(ctx);
    }
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'ctx', and discards the datapath actions. */
static void
xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
                               const struct ofpact *ofpacts,
                               size_t ofpacts_len)
{
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
    ofpbuf_uninit(&odp_actions);
}
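/* The stub-buffer idiom used above is worth calling out: ofpbuf_use_stub()
 * starts the ofpbuf in caller-provided storage, so the common case does no
 * heap allocation, and the buffer migrates to malloc'd memory only if it
 * outgrows the stub -- which is why the matching ofpbuf_uninit() is still
 * required.  A minimal sketch of the same pattern:
 *
 *   uint64_t stub[256 / 8];
 *   struct ofpbuf b;
 *
 *   ofpbuf_use_stub(&b, stub, sizeof stub);
 *   ... append data to 'b' ...
 *   ofpbuf_uninit(&b);          // frees heap storage, if any was needed
 */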
static void
xlate_report(struct action_xlate_ctx *ctx, const char *s)
{
    if (ctx->report_hook) {
        ctx->report_hook(ctx, s);
    }
}
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
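/* Example (illustrative): for a native-tagged or native-untagged bundle with
 * native VLAN 7, input_vid_to_vlan(bundle, 0) returns 7 (untagged packets
 * join the native VLAN) while input_vid_to_vlan(bundle, 42) returns 42; for
 * an access bundle the result is always the configured VLAN. */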
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
static void
output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
                                        vid, &ctx->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->flow.vlan_tci = old_tci;
}
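/* Worked example (illustrative) of the tag composition above: outputting
 * VLAN 5 on a trunk while the flow carries PCP 3 gives, in host order,
 * tci = 0x0005 | 0x6000 | 0x1000 = 0x7005 -- the VID in the low 12 bits,
 * the PCP copied from the incoming tag, and CFI set to mark a real tag. */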
static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}

static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
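/* The mirror code below walks the set bits of a mirror_mask_t with
 * mirror_mask_ffs() and zero_rightmost_1bit().  A minimal sketch of the
 * same bit-walk (GCC's __builtin_ctz stands in here as an assumption, not
 * the library's implementation):
 *
 *   uint32_t m = mask;
 *   while (m) {
 *       int idx = __builtin_ctz(m);   // index of lowest set bit
 *       ... handle mirror 'idx' ...
 *       m &= m - 1;                   // clear that bit
 *   }
 */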
static void
add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->packet != NULL, NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */
    NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
                      ctx->odp_actions->size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (vlan != m->out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}
static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    if (!mirrors) {
        return;
    }

    for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
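/* Example (illustrative): a gratuitous ARP request announcing 10.0.0.5 is
 * broadcast with nw_src == nw_dst == 10.0.0.5, so the predicate above
 * accepts it; an ordinary ARP request asking about some other host has
 * nw_src != nw_dst and is rejected. */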
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->backer->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
                    bool warn, struct ofport_dpif **in_ofportp)
{
    struct ofport_dpif *ofport;

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (in_ofportp) {
        *in_ofportp = ofport;
    }
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }
    return NULL;
}
/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
              uint16_t vlan)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    struct flow *flow = &ctx->flow;
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, &ctx->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            break;
        }
    }

    return true;
}
static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->has_normal = true;

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
                                    ctx->packet != NULL, &in_port);
    if (!in_bundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac->port.p, vlan);
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct ofbundle *bundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->nf_output_iface = NF_OUT_FLOOD;
    }
}
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
                   uint32_t secret)
{
    if (minimask_is_catchall(mask)) {
        return 0;
    } else {
        uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
        return tag_create_deterministic(hash);
    }
}
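/* A minimal sketch (an assumption about the tag library, not its actual
 * code) of the idea behind tag_create_deterministic(): a tag is one bit of
 * a 32-bit word chosen from a hash, so tag sets are cheap ORs and
 * membership tests are approximate -- false positives cause only harmless
 * extra revalidation, and false negatives cannot occur.
 *
 *   typedef uint32_t example_tag_type;
 *
 *   static example_tag_type
 *   example_tag_from_hash(uint32_t hash)
 *   {
 *       return UINT32_C(1) << (hash % 32);   // one of 32 tag "buckets"
 *   }
 */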
/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct oftable *oftable = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&oftable->cls.tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->backer->need_revalidate = REV_FLOW_TABLE;
    }
}
/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->backer->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
        } else {
            ofproto->backer->need_revalidate = REV_FLOW_TABLE;
        }
    }
}
static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        return true;
    } else {
        return false;
    }
}
static enum ofperr
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct odputil_keybuf keybuf;
    struct dpif_flow_stats stats;
    struct ofpbuf key;
    struct action_xlate_ctx ctx;
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow,
                           ofp_port_to_odp_port(ofproto, flow->in_port));

    dpif_flow_stats_extract(flow, packet, time_msec(), &stats);

    action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
                          packet_get_tcp_flags(packet, flow), packet);
    ctx.resubmit_stats = &stats;

    ofpbuf_use_stub(&odp_actions,
                    odp_actions_stub, sizeof odp_actions_stub);
    xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
    dpif_execute(ofproto->backer->dpif, key.data, key.size,
                 odp_actions.data, odp_actions.size, packet);
    ofpbuf_uninit(&odp_actions);

    return 0;
}
static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
static void
send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
{
    if (!facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct subfacet *subfacet;
        struct ofexpired expired;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (subfacet->path == SF_FAST_PATH) {
                struct dpif_flow_stats stats;

                subfacet_reinstall(subfacet, &stats);
                subfacet_update_stats(subfacet, &stats);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}

static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct facet *facet;

    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        send_active_timeout(ofproto, facet);
    }
}
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}
static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
struct trace_ctx {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};

static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
    ds_put_char(result, '\n');
}
static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}
static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}
static void
trace_format_odp(struct ds *result, int level, const char *title,
                 struct trace_ctx *trace)
{
    struct ofpbuf *odp_actions = trace->ctx.odp_actions;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    format_odp_actions(result, odp_actions->data, odp_actions->size);
    ds_put_char(result, '\n');
}
static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}
static void
trace_report(struct action_xlate_ctx *ctx, const char *s)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char_multiple(result, '\t', ctx->recurse);
    ds_put_cstr(result, s);
    ds_put_char(result, '\n');
}
static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    const char *dpname = argv[1];
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    ovs_be16 initial_tci;
    struct ds result;
    struct flow flow;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
                                    "for help)");
        goto exit;
    }
    if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
        /* ofproto/trace dpname flow [-generate] */
        const char *flow_s = argv[2];
        const char *generate_s = argv[3];

        /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
         * flow.  We guess which type it is based on whether 'flow_s' contains
         * an '(', since a datapath flow always contains '(' but an
         * OpenFlow-like flow should not (in fact it's allowed but I believe
         * that's not documented anywhere).
         *
         * An alternative would be to try to parse 'flow_s' both ways, but then
         * it would be tricky giving a sensible error message.  After all, do
         * you just say "syntax error" or do you present both error messages?
         * Both choices seem lousy. */
        if (strchr(flow_s, '(')) {
            int error;

            /* Convert string to datapath key. */
            ofpbuf_init(&odp_key, 0);
            error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
            if (error) {
                unixctl_command_reply_error(conn, "Bad flow syntax");
                goto exit;
            }

            /* XXX: Since we allow the user to specify an ofproto, it's
             * possible they will specify a different ofproto than the one the
             * port actually belongs to.  Ideally we should simply remove the
             * ability to specify the ofproto. */
            if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
                                odp_key.size, &flow, NULL, NULL, NULL,
                                &initial_tci)) {
                unixctl_command_reply_error(conn, "Invalid flow");
                goto exit;
            }
        } else {
            char *error_s;

            error_s = parse_ofp_exact_flow(&flow, argv[2]);
            if (error_s) {
                unixctl_command_reply_error(conn, error_s);
                free(error_s);
                goto exit;
            }

            initial_tci = flow.vlan_tci;
        }

        /* Generate a packet, if requested. */
        if (generate_s) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (argc == 7) {
        /* ofproto/trace dpname priority tun_id in_port mark packet */
        const char *priority_s = argv[2];
        const char *tun_id_s = argv[3];
        const char *in_port_s = argv[4];
        const char *mark_s = argv[5];
        const char *packet_s = argv[6];
        uint32_t in_port = atoi(in_port_s);
        ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
        uint32_t priority = atoi(priority_s);
        uint32_t mark = atoi(mark_s);
        const char *msg;
        char *s;

        msg = eth_from_hex(packet_s, &packet);
        if (msg) {
            unixctl_command_reply_error(conn, msg);
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, priority, mark, NULL, in_port, &flow);
        flow.tunnel.tun_id = tun_id;
        initial_tci = flow.vlan_tci;
    } else {
        unixctl_command_reply_error(conn, "Bad command syntax");
        goto exit;
    }

    ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet, ovs_be16 initial_tci,
              struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow);

    trace_format_rule(ds, 0, 0, rule);
    if (rule == ofproto->miss_rule) {
        ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
    } else if (rule == ofproto->no_packet_in_rule) {
        ds_put_cstr(ds, "\nNo match, packets dropped because "
                    "OFPPC_NO_PACKET_IN is set on in_port.\n");
    }

    if (rule) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;

        struct trace_ctx trace;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
                              rule, tcp_flags, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        trace.ctx.report_hook = trace_report;
        xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
                      &odp_actions);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);
        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, odp_actions.data, odp_actions.size);
        ofpbuf_uninit(&odp_actions);

        if (trace.ctx.slow) {
            enum slow_path_reason slow;

            ds_put_cstr(ds, "\nThis flow is handled by the userspace "
                        "slow path because it:");
            for (slow = trace.ctx.slow; slow; ) {
                enum slow_path_reason bit = rightmost_1bit(slow);

                switch (bit) {
                case SLOW_CFM:
                    ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
                    break;
                case SLOW_LACP:
                    ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
                    break;
                case SLOW_STP:
                    ds_put_cstr(ds, "\n\t- Consists of STP packets.");
                    break;
                case SLOW_IN_BAND:
                    ds_put_cstr(ds, "\n\t- Needs in-band special case "
                                "processing.");
                    if (!packet) {
                        ds_put_cstr(ds, "\n\t (The datapath actions are "
                                    "incomplete--for complete actions, "
                                    "please supply a packet.)");
                    }
                    break;
                case SLOW_CONTROLLER:
                    ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
                                "to the OpenFlow controller.");
                    break;
                case SLOW_MATCH:
                    ds_put_cstr(ds, "\n\t- Needs more specific matching "
                                "than the datapath supports.");
                    break;
                }

                slow &= ~bit;
            }

            /* Test the original reason set here: 'slow' has been consumed by
             * the loop above. */
            if (trace.ctx.slow & ~SLOW_MATCH) {
                ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
                            "the special slow-path processing.");
            }
        }
    }
}
static void
ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}
/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct facet *facet;
    int errors;

    errors = 0;
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }
    if (errors) {
        ofproto->backer->need_revalidate = REV_INCONSISTENCY;
    }

    if (errors) {
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}

static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}
/* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
 * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
 * to destroy 'ofproto_shash' and free the returned value. */
static const struct shash_node **
get_ofprotos(struct shash *ofproto_shash)
{
    const struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
        shash_add_nocopy(ofproto_shash, name, ofproto);
    }

    return shash_sort(ofproto_shash);
}
static void
ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct shash ofproto_shash;
    const struct shash_node **sorted_ofprotos;
    int i;

    shash_init(&ofproto_shash);
    sorted_ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        const struct shash_node *node = sorted_ofprotos[i];
        ds_put_format(&ds, "%s\n", node->name);
    }

    shash_destroy(&ofproto_shash);
    free(sorted_ofprotos);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
{
    struct dpif_dp_stats s;
    const struct shash_node **ports;
    int i;

    dpif_get_dp_stats(ofproto->backer->dpif, &s);

    ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
                  dpif_name(ofproto->backer->dpif));
    /* xxx It would be better to show bridge-specific stats instead
     * xxx of dp ones. */
    ds_put_format(ds,
                  "\tlookups: hit:%"PRIu64" missed:%"PRIu64" lost:%"PRIu64"\n",
                  s.n_hit, s.n_missed, s.n_lost);
    ds_put_format(ds, "\tflows: %zu\n",
                  hmap_count(&ofproto->subfacets));

    ports = shash_sort(&ofproto->up.port_by_name);
    for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
        const struct shash_node *node = ports[i];
        struct ofport *ofport = node->data;
        const char *name = netdev_get_name(ofport->netdev);
        const char *type = netdev_get_type(ofport->netdev);
        uint32_t odp_port;

        ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);

        odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
        if (odp_port != OVSP_NONE) {
            ds_put_format(ds, "%"PRIu32":", odp_port);
        } else {
            ds_put_cstr(ds, "none:");
        }

        if (strcmp(type, "system")) {
            struct netdev *netdev;
            int error;

            ds_put_format(ds, " (%s", type);

            error = netdev_open(name, type, &netdev);
            if (!error) {
                struct smap config;

                smap_init(&config);
                error = netdev_get_config(netdev, &config);
                if (!error) {
                    const struct smap_node **nodes;
                    size_t i;

                    nodes = smap_sort(&config);
                    for (i = 0; i < smap_count(&config); i++) {
                        const struct smap_node *node = nodes[i];
                        ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
                                      node->key, node->value);
                    }
                    free(nodes);
                }
                smap_destroy(&config);

                netdev_close(netdev);
            }
            ds_put_char(ds, ')');
        }
        ds_put_char(ds, '\n');
    }
    free(ports);
}
static void
ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;

    if (argc > 1) {
        int i;

        for (i = 1; i < argc; i++) {
            ofproto = ofproto_dpif_lookup(argv[i]);
            if (!ofproto) {
                ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
                              "for help)", argv[i]);
                unixctl_command_reply_error(conn, ds_cstr(&ds));
                ds_destroy(&ds);
                return;
            }
            show_dp_format(ofproto, &ds);
        }
    } else {
        struct shash ofproto_shash;
        const struct shash_node **sorted_ofprotos;
        int i;

        shash_init(&ofproto_shash);
        sorted_ofprotos = get_ofprotos(&ofproto_shash);
        for (i = 0; i < shash_count(&ofproto_shash); i++) {
            const struct shash_node *node = sorted_ofprotos[i];
            show_dp_format(node->data, &ds);
        }

        shash_destroy(&ofproto_shash);
        free(sorted_ofprotos);
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED, const char *argv[],
                                void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    struct subfacet *subfacet;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    update_stats(ofproto->backer);

    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
        odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);

        ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
                      subfacet->dp_packet_count, subfacet->dp_byte_count);
        if (subfacet->used) {
            ds_put_format(&ds, "%.3fs",
                          (time_msec() - subfacet->used) / 1000.0);
        } else {
            ds_put_format(&ds, "never");
        }
        if (subfacet->facet->tcp_flags) {
            ds_put_cstr(&ds, ", flags:");
            packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
        }

        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
        ds_put_char(&ds, '\n');
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
                               int argc OVS_UNUSED, const char *argv[],
                               void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    flush(&ofproto->up);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
        2, 6, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
                             ofproto_unixctl_dpif_show, NULL);
    unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_dump_flows, NULL);
    unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_del_flows, NULL);
}
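/* Example (illustrative) of driving the commands registered above from a
 * shell through ovs-appctl; the argument syntax is exactly the usage string
 * given to unixctl_command_register() for each command:
 *
 *   ovs-appctl fdb/show br0
 *   ovs-appctl dpif/dump-flows br0
 *   ovs-appctl ofproto/trace br0 "in_port(1),..." -generate
 */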
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */

static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}
static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}

/* Returns the ODP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
 * 'ofproto'.  For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
 * it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
 * function just returns its 'realdev_odp_port' argument. */
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port;
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}
static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}
/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}
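/* Worked example (illustrative): given a splinter record with
 * realdev_ofp_port 1 ("eth0"), vid 9, and vlandev_ofp_port 5 ("eth0.9"),
 * vsp_vlandev_to_realdev(ofproto, 5, &vid) returns 1 and stores 9 in vid,
 * while vsp_realdev_to_vlandev(ofproto, <odp port of eth0>, htons(9))
 * returns the odp port of eth0.9. */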
/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}
static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
static uint32_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : OVSP_NONE;
}

static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
{
    struct ofport_dpif *port;

    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
                             hash_int(odp_port, 0),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }

    return NULL;
}

static uint16_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
const struct ofproto_class ofproto_dpif_class = {
    /* ... (earlier members not preserved in this excerpt) ... */
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    /* ... */
    rule_modify_actions,
    /* ... */
    get_stp_port_status,
    /* ... */
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,