/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);

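/* TBL_INTERNAL holds hidden rules such as the miss, no-packet-in, and
 * drop-frags rules that add_internal_flows() installs below; construct()
 * marks it OFTABLE_HIDDEN | OFTABLE_READONLY so that controllers can neither
 * see nor modify it. */
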
struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    tag_type tag;                /* Caches rule_calculate_tag() result. */

    struct list facets;          /* List of "struct facet"s. */
};

static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

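/* Sketch of the usual calling pattern (illustrative only; 'ofproto_rule' is
 * any "struct rule *" handed to us by the ofproto layer):
 *
 *     struct rule_dpif *rule = rule_dpif_cast(ofproto_rule);
 *     if (rule) {
 *         rule->packet_count++;
 *     }
 *
 * CONTAINER_OF() recovers the enclosing rule_dpif from its embedded 'up'
 * member, so the cast is valid only for rules created by this provider. */
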
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *,
                                          struct flow_wildcards *wc);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            struct flow_wildcards *wc,
                                            uint8_t table_id);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);

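/* A mirror_mask_t is a bitmap indexed by a mirror's 'idx'.  A hypothetical
 * membership test, shown only as a sketch, could look like:
 *
 *     if (mirrors & (MIRROR_MASK_C(1) << m->idx)) { ... }
 *
 * which is why the build assertion above requires the type to carry at least
 * MAX_MIRRORS bits. */
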
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct xlate_out {
    /* Wildcards relevant in translation.  Any fields that were used to
     * calculate the action must be set for caching and kernel
     * wildcarding to work.  For example, if the flow lookup involved
     * performing the "normal" action on IPv4 and ARP packets, 'wc'
     * would have the 'in_port' (always set), 'dl_type' (flow match),
     * 'vlan_tci' (normal action), and 'dl_dst' (normal action) fields
     * set. */
    struct flow_wildcards wc;

    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

    uint64_t odp_actions_stub[256 / 8];
    struct ofpbuf odp_actions;
};

struct xlate_in {
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed, but
     * not if we are just revalidating. */
    bool may_learn;

    /* The rule initiating translation or NULL. */
    struct rule_dpif *rule;

    /* The actions to translate.  If 'rule' is not NULL, these may be NULL. */
    const struct ofpact *ofpacts;
    size_t ofpacts_len;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing a
     * resubmit or OFPP_TABLE action.  In addition, disables logging of traces
     * when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    void (*resubmit_hook)(struct xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    const struct dpif_flow_stats *resubmit_stats;
};

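/* A minimal sketch of how a caller drives translation (the local variable
 * names and the trace callback are illustrative only):
 *
 *     struct xlate_in xin;
 *     struct xlate_out xout;
 *
 *     xlate_in_init(&xin, ofproto, &flow, rule, tcp_flags, packet);
 *     xin.resubmit_hook = my_trace_hook;   // optional, normally null
 *     xin.resubmit_stats = &push_stats;    // optional, normally null
 *     xlate_actions(&xin, &xout);
 *     ...use xout.odp_actions...
 *     xlate_out_uninit(&xout);
 */
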
/* Context used by xlate_actions() and its callees. */
struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct ofproto_dpif *ofproto;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
                          const struct flow *, struct rule_dpif *,
                          uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_out_uninit(struct xlate_out *);
static void xlate_actions(struct xlate_in *, struct xlate_out *);
static void xlate_actions_for_side_effects(struct xlate_in *);
static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *,
                                   const size_t cookie_size);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct xlate_ctx *ctx, const char *s);

static void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);

/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath. */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};

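/* As a reading aid (inferred from the comment above and the declarations
 * below): a subfacet starts out SF_NOT_INSTALLED, moves to SF_FAST_PATH or
 * SF_SLOW_PATH when subfacet_install() succeeds, and returns to
 * SF_NOT_INSTALLED via subfacet_uninstall() or on a datapath error. */
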
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */
    struct dpif_backer *backer; /* Owning backer. */

    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Time created. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    enum subfacet_path path;    /* Installed in datapath? */
};

#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct dpif_backer *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct dpif_backer *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static int subfacet_install(struct subfacet *,
                            const struct ofpbuf *odp_actions,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);

/* A unique, non-overlapping instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.
 * While the facet is created based on an exact-match flow, it is stored
 * within the ofproto based on the wildcards that could be expressed
 * based on the flow table and other configuration.  (See the 'wc'
 * description in "struct xlate_out" for more details.)
 *
 * Each subfacet tracks the datapath's idea of the flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and
 * Open vSwitch userspace agree on the definition of a flow key, there
 * is exactly one subfacet per facet.  If the dpif implementation
 * supports more-specific flow matching than userspace, however, a facet
 * can have more than one subfacet.  Examples include the dpif
 * implementation not supporting the same wildcards as userspace or some
 * distinction in flow that userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at
 * least one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;          /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;            /* Flow of the creating subfacet. */
    struct cls_rule cr;          /* In 'ofproto_dpif's facets classifier. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    struct xlate_out xout;

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.  However, 'one_subfacet' may not
     * always be valid, since it could have been removed after newer
     * subfacets were pushed onto the 'subfacets' list.) */
    struct subfacet one_subfacet;

    long long int learn_rl;      /* Rate limiter for facet_learn(). */
};

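/* Sketch of walking a facet's subfacets (illustrative only):
 *
 *     struct subfacet *subfacet;
 *
 *     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
 *         ...e.g. accumulate subfacet->dp_packet_count...
 *     }
 *
 * 'list_node' is the struct subfacet member that links it into this list,
 * per the declarations above. */
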
static struct facet *facet_create(const struct flow_miss *, struct rule_dpif *,
                                  struct xlate_out *,
                                  struct dpif_flow_stats *);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
static void push_all_stats(void);

static bool facet_is_controller_flow(struct facet *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the DSCP bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint16_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint16_t realdev_ofp_port,
                                       ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
static void run_fast_rl(void);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);

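/* Subsystems request revalidation by storing one of the reasons above in
 * the backer, e.g. (sketch):
 *
 *     ofproto->backer->need_revalidate = REV_RECONFIGURE;
 *
 * type_run() then bumps the matching coverage counter and revalidates every
 * facet that belongs to that backer. */
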
/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows which have no associated ofproto; if they did, we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};

struct avg_subfacet_rates {
    double add_rate;   /* Moving average of new flows created per minute. */
    double del_rate;   /* Moving average of flows deleted per minute. */
};

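/* These rates are maintained by update_moving_averages() (declared below).
 * The usual scheme, shown only as a sketch since the implementation falls
 * outside this excerpt and the smoothing constants here are illustrative, is
 * an exponential moving average refreshed once per minute:
 *
 *     rates->add_rate = 0.98 * rates->add_rate
 *                       + 0.02 * backer->subfacet_add_count;
 *
 * with a constant chosen per window (hourly, daily, lifetime). */
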
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
    bool recv_set_enable;          /* Enables or disables receiving packets. */

    struct hmap subfacets;
    struct governor *governor;

    /* Subfacet statistics.
     *
     * These keep track of the total number of subfacets added and deleted and
     * flow life span.  They are useful for computing the flow rates stats
     * exposed via "ovs-appctl dpif/show".  The goal is to learn about
     * traffic patterns in ways that we can use later to improve Open vSwitch
     * performance in new situations. */
    long long int created;           /* Time when it is created. */
    unsigned max_n_subfacet;         /* Maximum number of flows. */
    unsigned avg_n_subfacet;         /* Average number of flows. */
    long long int avg_subfacet_life; /* Average life span of subfacets. */

    /* The average number of subfacets... */
    struct avg_subfacet_rates hourly;   /* ...over the last hour. */
    struct avg_subfacet_rates daily;    /* ...over the last day. */
    struct avg_subfacet_rates lifetime; /* ...over the switch lifetime. */

    long long int last_minute;       /* Last time 'hourly' was updated. */

    /* Number of subfacets added or deleted since 'last_minute'. */
    unsigned subfacet_add_count;
    unsigned subfacet_del_count;

    /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
    unsigned long long int total_subfacet_add_count;
    unsigned long long int total_subfacet_del_count;
};

/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *odp_port_to_ofport(const struct dpif_backer *,
                                              uint32_t odp_port);
static void update_moving_averages(struct dpif_backer *backer);

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
    struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct dpif_ipfix *ipfix;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Facets. */
    struct classifier facets;   /* Contains 'struct facet's. */
    long long int consistency_rl;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;           /* Set of standard port names. */
    struct sset ghost_ports;     /* Ports with no datapath port. */
    struct sset port_poll_set;   /* Queued names for port_poll() reply. */
    int port_poll_errno;         /* Last errno for port_poll() reply. */

    /* Per ofproto's dpif stats. */
    uint64_t n_hit;
    uint64_t n_missed;
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *, const struct flow *,
                                  struct flow_wildcards *, int vlan,
                                  struct ofbundle *);

/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void compose_ipfix_action(const struct ofproto_dpif *,
                                 struct ofpbuf *odp_actions,
                                 const struct flow *);
static void add_mirror_actions(struct xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

static int
type_run(const char *type)
{
    static long long int push_timer = LLONG_MIN;
    struct dpif_backer *backer;
    char *devname;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    /* The most natural place to push facet statistics is when they're pulled
     * from the datapath.  However, when there are many flows in the datapath,
     * this expensive operation can occur so frequently that it reduces our
     * ability to quickly set up flows.  To reduce the cost, we push statistics
     * here instead. */
    if (time_msec() > push_timer) {
        push_timer = time_msec() + 2000;
        push_all_stats();
    }

    /* If vswitchd started with other_config:flow_restore_wait set as "true",
     * and the configuration has now changed to "false", enable receiving
     * packets from the datapath. */
    if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
        backer->recv_set_enable = true;

        error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
        if (error) {
            VLOG_ERR("Failed to enable receiving packets in dpif.");
            return error;
        }
        dpif_flow_flush(backer->dpif);
        backer->need_revalidate = REV_RECONFIGURE;
    }

    if (backer->need_revalidate
        || !tag_set_is_empty(&backer->revalidate_set)) {
        struct tag_set revalidate_set = backer->revalidate_set;
        bool need_revalidate = backer->need_revalidate;
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
                const char *dp_port;

                if (!iter->tnl_port) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
                                                     namebuf, sizeof namebuf);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        uint32_t odp_port = UINT32_MAX;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port, odp_port);
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? node->data : OVSP_NONE;
                if (tnl_port_reconfigure(&iter->up, iter->odp_port,
                                         &iter->tnl_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, node->data);
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        if (backer->need_revalidate) {
            /* Clear the drop_keys in case we should now be accepting some
             * formerly dropped flows. */
            drop_key_clear(backer);
        }

        /* Clear the revalidation flags. */
        tag_set_init(&backer->revalidate_set);
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet, *next;
            struct cls_cursor cursor;

            if (ofproto->backer != backer) {
                continue;
            }

            cls_cursor_init(&cursor, &ofproto->facets, NULL);
            CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
                if (need_revalidate
                    || tag_set_intersects(&revalidate_set, facet->xout.tags)) {
                    facet_revalidate(facet);
                    run_fast_rl();
                }
            }
        }
    }

    if (!backer->recv_set_enable) {
        /* Wake up before a max of 1000ms. */
        timer_set_duration(&backer->next_expiration, 1000);
    } else if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    /* Check for port changes in the dpif. */
    while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
        struct ofproto_dpif *ofproto;
        struct dpif_port port;

        /* Don't report on the datapath's device. */
        if (!strcmp(devname, dpif_base_name(backer->dpif))) {
            goto next;
        }

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
                goto next;
            }
        }

        ofproto = lookup_ofproto_dpif_by_port_name(devname);
        if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
            /* The port was removed.  If we know the datapath,
             * report it through poll_set().  If we don't, it may be
             * notifying us of a removal we initiated, so ignore it.
             * If there's a pending ENOBUFS, let it stand, since
             * everything will be reevaluated. */
            if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
                sset_add(&ofproto->port_poll_set, devname);
                ofproto->port_poll_errno = 0;
            }
        } else if (!ofproto) {
            /* The port was added, but we don't know with which
             * ofproto we should associate it.  Delete it. */
            dpif_port_del(backer->dpif, port.port_no);
        }
        dpif_port_destroy(&port);

    next:
        free(devname);
    }

    if (error != EAGAIN) {
        struct ofproto_dpif *ofproto;

        /* There was some sort of error, so propagate it to all
         * ofprotos that use this backer. */
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (ofproto->backer == backer) {
                sset_clear(&ofproto->port_poll_set);
                ofproto->port_poll_errno = error;
            }
        }
    }

->governor
) {
1087 governor_run(backer
->governor
);
1089 /* If the governor has shrunk to its minimum size and the number of
1090 * subfacets has dwindled, then drop the governor entirely.
1092 * For hysteresis, the number of subfacets to drop the governor is
1093 * smaller than the number needed to trigger its creation. */
1094 n_subfacets
= hmap_count(&backer
->subfacets
);
1095 if (n_subfacets
* 4 < flow_eviction_threshold
1096 && governor_is_idle(backer
->governor
)) {
1097 governor_destroy(backer
->governor
);
1098 backer
->governor
= NULL
;
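/* A worked example of the governor hysteresis above: with a
 * flow_eviction_threshold of, say, 1000, the governor is dropped only once
 * the backer is down to fewer than 250 subfacets (n_subfacets * 4 < 1000)
 * and reports itself idle, so it is not destroyed and recreated around the
 * same load level that triggered its creation. */
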
static int
dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
{
    unsigned int work;

    /* If recv_set_enable is false, we should not handle upcalls. */
    if (!backer->recv_set_enable) {
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple batches
     * because in some cases handling a packet can cause another packet to be
     * queued almost immediately as part of the return flow.  Both
     * optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < max_batch) {
        int retval = handle_upcalls(backer, max_batch - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}

static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH);
}

static void
run_fast_rl(void)
{
    static long long int port_rl = LLONG_MIN;
    static unsigned int backer_rl = 0;

    if (time_msec() >= port_rl) {
        struct ofproto_dpif *ofproto;
        struct ofport_dpif *ofport;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                port_run_fast(ofport);
            }
        }
        port_rl = time_msec() + 200;
    }

    /* XXX: We have to be careful not to do too much work in this function.
     * If we call dpif_backer_run_fast() too often, or with too large a batch,
     * performance improves significantly, but at a cost.  It's possible for
     * the number of flows in the datapath to increase without bound, and for
     * poll loops to take 10s of seconds.  The correct solution to this
     * problem, long term, is to separate flow miss handling into its own
     * thread so it isn't affected by revalidations and expirations.  Until
     * then, this is the best we can do. */
    if (++backer_rl >= 10) {
        struct shash_node *node;

        backer_rl = 0;
        SHASH_FOR_EACH (node, &all_dpif_backers) {
            dpif_backer_run_fast(node->data, 1);
        }
    }
}

)
1191 struct dpif_backer
*backer
;
1193 backer
= shash_find_data(&all_dpif_backers
, type
);
1195 /* This is not necessarily a problem, since backers are only
1196 * created on demand. */
1200 if (backer
->governor
) {
1201 governor_wait(backer
->governor
);
1204 timer_wait(&backer
->next_expiration
);
/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    struct shash_node *node;

    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    simap_destroy(&backer->tnl_backers);
    hmap_destroy(&backer->odp_to_ofport_map);
    node = shash_find(&all_dpif_backers, backer->type);
    free(backer->type);
    shash_delete(&all_dpif_backers, node);
    dpif_close(backer->dpif);

    ovs_assert(hmap_is_empty(&backer->subfacets));
    hmap_destroy(&backer->subfacets);
    governor_destroy(backer->governor);

    free(backer);
}

/* Datapath port slated for removal from datapath. */
struct odp_garbage {
    struct list list_node;
    uint32_t odp_port;
};

static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct list garbage_list;
    struct odp_garbage *garbage, *next;
    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 strerror(error));
        free(backer);
        return error;
    }

    backer->type = xstrdup(type);
    backer->governor = NULL;
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    hmap_init(&backer->drop_keys);
    hmap_init(&backer->subfacets);
    timer_set_duration(&backer->next_expiration, 1000);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    tag_set_init(&backer->revalidate_set);
    backer->recv_set_enable = !ofproto_get_flow_restore_wait();
    *backerp = backer;

    if (backer->recv_set_enable) {
        dpif_flow_flush(backer->dpif);
    }

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        list_remove(&garbage->list_node);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, strerror(error));
        close_dpif_backer(backer);
        return error;
    }

    backer->max_n_subfacet = 0;
    backer->created = time_msec();
    backer->last_minute = backer->created;
    memset(&backer->hourly, 0, sizeof backer->hourly);
    memset(&backer->daily, 0, sizeof backer->daily);
    memset(&backer->lifetime, 0, sizeof backer->lifetime);
    backer->subfacet_add_count = 0;
    backer->subfacet_del_count = 0;
    backer->total_subfacet_add_count = 0;
    backer->total_subfacet_del_count = 0;
    backer->avg_n_subfacet = 0;
    backer->avg_subfacet_life = 0;

    return error;
}

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int max_ports;
    int error;
    int i;

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    max_ports = dpif_get_max_ports(ofproto->backer->dpif);
    ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->ipfix = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    classifier_init(&ofproto->facets);
    ofproto->consistency_rl = LLONG_MIN;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    ofproto->n_hit = 0;
    ofproto->n_missed = 0;

    return error;
}

static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    match_init_catchall(&fm.match);
    fm.priority = 0;
    match_set_reg(&fm.match, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.buffer_id = 0;
    fm.out_port = 0;
    fm.flags = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, NULL, TBL_INTERNAL);
    ovs_assert(*rulep != NULL);

    return 0;
}

static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    if (error) {
        return error;
    }

    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->drop_frags_rule);
    return error;
}

static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    classifier_destroy(&ofproto->facets);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    close_dpif_backer(ofproto->backer);
}

static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;

    /* Do not perform any periodic activity required by 'ofproto' while
     * waiting for flow restore to complete. */
    if (ofproto_get_flow_restore_wait()) {
        return 0;
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run_fast(ofport);
    }

    return 0;
}

static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }

    /* Do not perform any periodic activity below required by 'ofproto' while
     * waiting for flow restore to complete. */
    if (ofproto_get_flow_restore_wait()) {
        return 0;
    }

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);

    /* Check the consistency of a random facet, to aid debugging. */
    if (time_msec() >= ofproto->consistency_rl
        && !classifier_is_empty(&ofproto->facets)
        && !ofproto->backer->need_revalidate) {
        struct cls_table *table;
        struct cls_rule *cr;
        struct facet *facet;

        ofproto->consistency_rl = time_msec() + 250;

        table = CONTAINER_OF(hmap_random_node(&ofproto->facets.tables),
                             struct cls_table, hmap_node);
        cr = CONTAINER_OF(hmap_random_node(&table->rules), struct cls_rule,
                          hmap_node);
        facet = CONTAINER_OF(cr, struct facet, cr);

        if (!tag_set_intersects(&ofproto->backer->revalidate_set,
                                facet->xout.tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->backer->need_revalidate = REV_INCONSISTENCY;
            }
        }
    }

    return 0;
}

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    if (ofproto_get_flow_restore_wait()) {
        return;
    }

    dpif_wait(ofproto->backer->dpif);
    dpif_recv_wait(ofproto->backer->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->backer->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    }
}

static void
get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
{
    const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct cls_cursor cursor;
    size_t n_subfacets = 0;
    struct facet *facet;

    simap_increase(usage, "facets", classifier_count(&ofproto->facets));

    cls_cursor_init(&cursor, &ofproto->facets, NULL);
    CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
        n_subfacets += list_size(&facet->subfacets);
    }
    simap_increase(usage, "subfacets", n_subfacets);
}

static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &ofproto->backer->subfacets) {
        if (ofproto_dpif_cast(subfacet->facet->rule->up.ofproto) != ofproto) {
            continue;
        }

        if (subfacet->path != SF_NOT_INSTALLED) {
            batch[n_batch++] = subfacet;
            if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                subfacet_destroy_batch(ofproto->backer, batch, n_batch);
                n_batch = 0;
            }
        } else {
            subfacet_destroy(subfacet);
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(ofproto->backer, batch, n_batch);
    }
}

static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |
                OFPUTIL_A_ENQUEUE);
}

static void
get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;
    uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
    uint64_t n_lookup;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->backer->dpif, &s);
    rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
    rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
    rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);

    n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
    ots->lookup_count = htonll(n_lookup);
    ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
}

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const struct netdev *netdev = port->up.netdev;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    struct dpif_port dpif_port;
    int error;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    port->bundle = NULL;
    port->cfm = NULL;
    port->bfd = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    port->tnl_port = NULL;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(netdev);

    if (netdev_vport_is_patch(netdev)) {
        /* By bailing out here, we don't submit the port to the sFlow module
         * to be considered for counter polling export.  This is correct
         * because the patch port represents an interface that sFlow considers
         * to be "internal" to the switch as a whole, and therefore not a
         * candidate for counter polling. */
        port->odp_port = OVSP_NONE;
        return 0;
    }

    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    netdev_vport_get_dpif_port(netdev, namebuf,
                                                               sizeof namebuf),
                                    &dpif_port);
    if (error) {
        return error;
    }

    port->odp_port = dpif_port.port_no;

    if (netdev_get_tunnel_config(netdev)) {
        port->tnl_port = tnl_port_add(&port->up, port->odp_port);
    } else {
        /* Sanity-check that a mapping doesn't already exist.  This
         * shouldn't happen for non-tunnel ports. */
        if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
            VLOG_ERR("port %s already has an OpenFlow port number",
                     dpif_port.name);
            dpif_port_destroy(&dpif_port);
            return EBUSY;
        }

        hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
                    hash_int(port->odp_port, 0));
    }
    dpif_port_destroy(&dpif_port);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
    }

    return 0;
}

static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *devname = netdev_get_name(port->up.netdev);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;

    dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
                                              sizeof namebuf);
    if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destruction. */
        if (!port->tnl_port) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);
        }
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (port->odp_port != OVSP_NONE && !port->tnl_port) {
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
    }

    tnl_port_del(port->tnl_port);
    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    set_bfd(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}


static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }

    if (port->cfm) {
        cfm_set_netdev(port->cfm, port->up.netdev);
    }
}

static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}
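
/* The sFlow and IPFIX callbacks below lazily create the per-bridge exporter
 * state the first time the feature is configured and destroy it again when
 * the feature is unconfigured. */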
static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
            }
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}

static int
set_ipfix(
    struct ofproto *ofproto_,
    const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
    const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
    size_t n_flow_exporters_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_ipfix *di = ofproto->ipfix;

    if (bridge_exporter_options || flow_exporters_options) {
        if (!di) {
            di = ofproto->ipfix = dpif_ipfix_create();
        }
        dpif_ipfix_set_options(
            di, bridge_exporter_options, flow_exporters_options,
            n_flow_exporters_options);
    } else {
        if (di) {
            dpif_ipfix_destroy(di);
            ofproto->ipfix = NULL;
        }
    }
    return 0;
}

static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->cfm = cfm_create(ofport->up.netdev);
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}

static bool
get_cfm_status(const struct ofport *ofport_,
               struct ofproto_cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        status->faults = cfm_get_fault(ofport->cfm);
        status->remote_opstate = cfm_get_opup(ofport->cfm);
        status->health = cfm_get_health(ofport->cfm);
        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
        return true;
    } else {
        return false;
    }
}

static int
set_bfd(struct ofport *ofport_, const struct smap *cfg)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct bfd *old;

    old = ofport->bfd;
    ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg);
    if (ofport->bfd != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    return 0;
}

static int
get_bfd_status(struct ofport *ofport_, struct smap *smap)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (ofport->bfd) {
        bfd_get_status(ofport->bfd, smap);
        error = 0;
    } else {
        error = ENOENT;
    }

    return error;
}

/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}

/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}

static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}
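
/* Propagates the state of 'ofport''s STP port into 'ofport' itself: flushes
 * MAC learning when the learning state changes, updates the port's bundle
 * when the forwarding state changes, and reflects the new state in the
 * OpenFlow port description. */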
static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml,
                               &ofproto->backer->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
                     != stp_forward_in_state(state);

        ofproto->backer->need_revalidate = REV_STP;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : OFPUTIL_PS_STP_BLOCK);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}

/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}

static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}

static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }
}

static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}

/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
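
/* Each ofport keeps a small hash map of "struct priority_to_dscp" nodes that
 * record which DSCP value to use for each datapath priority.  get_priority()
 * looks up one node; set_queues() further below rebuilds the whole map from
 * the current queue configuration. */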
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}

static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}

static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}

/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}

static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}

static bool
bundle_add_port(struct ofbundle *bundle, uint16_t ofp_port,
                struct lacp_slave_settings *lacp)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    return true;
}
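
/* Destroys 'bundle': detaches it from any mirrors that reference it, removes
 * each of its ports, flushes the MAC learning entries it contributed, and
 * frees its LACP and bond state. */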
static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}

static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    ovs_assert(s->n_slaves == 1 || s->bond != NULL);
    ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    ovs_assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = CONST_CAST(unsigned long *, s->trunks);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = CONST_CAST(unsigned long *, s->trunks);
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}

static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}
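
/* Callback that lacp_run() uses to transmit a LACP PDU on behalf of a slave:
 * it composes an Ethernet frame addressed to the LACP multicast address,
 * sourced from the slave's own hardware address. */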
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}

static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}

static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
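
/* Mirror helpers: mirror_scan() returns the index of a free slot in
 * ofproto->mirrors, or -1 if every one of the MAX_MIRRORS slots is in use;
 * mirror_lookup() finds an existing mirror by its client-provided 'aux'
 * handle. */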
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}

static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}

/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}

static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml,
                       &ofproto->backer->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}

static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}

static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    push_all_stats();

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}

static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    }
    return 0;
}

static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}

static void
set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
                     size_t max_entries)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
    mac_learning_set_max_entries(ofproto->ml, max_entries);
}
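
/* Helpers for translating between OpenFlow port numbers, datapath port
 * numbers, and the corresponding ofport_dpif structures. */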
static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
    return port && &ofproto->up == port->up.ofproto ? port : NULL;
}

static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}

static struct ofport_dpif *
ofport_get_peer(const struct ofport_dpif *ofport_dpif)
{
    const struct ofproto_dpif *ofproto;
    const char *peer;

    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
    if (!peer) {
        return NULL;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct ofport *ofport;

        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
            return ofport_dpif_cast(ofport);
        }
    }
    return NULL;
}

static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }

    if (ofport->bfd && bfd_should_send_packet(ofport->bfd)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        bfd_put_packet(ofport->bfd, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}
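
/* Periodic per-port processing: runs the CFM and BFD state machines, feeds
 * carrier changes into LACP, and recomputes 'may_enable', scheduling a
 * revalidation when the result changes and bundle actions are in use. */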
static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);

    if (ofport->tnl_port
        && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
                                &ofport->tnl_port)) {
        ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
    }

    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bfd) {
        bfd_run(ofport->bfd);
        enable = enable && bfd_forwarding(ofport->bfd);
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}

static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }

    if (ofport->bfd) {
        bfd_wait(ofport->bfd);
    }
}

static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    if (sset_contains(&ofproto->ghost_ports, devname)) {
        const char *type = netdev_get_type_from_name(devname);

        /* We may be called before ofproto->up.port_by_name is populated with
         * the appropriate ofport.  For this reason, we must get the name and
         * type from the netdev layer directly. */
        if (type) {
            const struct ofport *ofport;

            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
            ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
            ofproto_port->name = xstrdup(devname);
            ofproto_port->type = xstrdup(type);
            return 0;
        }
        return ENODEV;
    }

    if (!sset_contains(&ofproto->ports, devname)) {
        return ENODEV;
    }
    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
    }
    return error;
}

static int
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *devname = netdev_get_name(netdev);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;

    if (netdev_vport_is_patch(netdev)) {
        sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
        return 0;
    }

    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        uint32_t port_no = UINT32_MAX;
        int error;

        error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
        if (error) {
            return error;
        }
        if (netdev_get_tunnel_config(netdev)) {
            simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
        }
    }

    if (netdev_get_tunnel_config(netdev)) {
        sset_add(&ofproto->ghost_ports, devname);
    } else {
        sset_add(&ofproto->ports, devname);
    }
    return 0;
}

static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    int error = 0;

    if (!ofport) {
        return 0;
    }

    sset_find_and_delete(&ofproto->ghost_ports,
                         netdev_get_name(ofport->up.netdev));
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    if (!ofport->tnl_port) {
        error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
        if (!error) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}

static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport_->ofp_port == OFPP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
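
/* Port dumping is done in two passes over string sets rather than over the
 * ofport hash map: first ofproto->ports, then ofproto->ghost_ports, with
 * 'state->ghost' recording which pass is in progress. */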
struct port_dump_state {
    uint32_t bucket;
    uint32_t offset;
    bool ghost;

    struct ofproto_port port;
    bool has_port;
};

static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}

static int
port_dump_next(const struct ofproto *ofproto_, void *state_,
               struct ofproto_port *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state = state_;
    const struct sset *sset;
    struct sset_node *node;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
        state->has_port = false;
    }
    sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
    while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
        int error;

        error = port_query_by_name(ofproto_, node->name, &state->port);
        if (!error) {
            *port = state->port;
            state->has_port = true;
            return 0;
        } else if (error != ENODEV) {
            return error;
        }
    }

    if (!state->ghost) {
        state->ghost = true;
        state->bucket = 0;
        state->offset = 0;
        return port_dump_next(ofproto_, state_, port);
    }

    return EOF;
}

static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
    }
    free(state);
    return 0;
}

static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->port_poll_errno) {
        int error = ofproto->port_poll_errno;
        ofproto->port_poll_errno = 0;
        return error;
    }

    if (sset_is_empty(&ofproto->port_poll_set)) {
        return EAGAIN;
    }

    *devnamep = sset_pop(&ofproto->port_poll_set);
    return 0;
}

static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}

/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct ofproto_dpif *ofproto;

    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    struct list packets;
    enum dpif_upcall_type upcall_type;
};

struct flow_miss_op {
    struct dpif_op dpif_op;

    uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
    struct xlate_out xout;
    bool xout_garbage;           /* 'xout' needs to be uninitialized? */
};

/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.table_id = 0;
    pin.cookie = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
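
/* Checks whether 'flow' belongs to one of the link-monitoring protocols that
 * are terminated in userspace (CFM, BFD, LACP, STP).  If so, hands 'packet'
 * to the corresponding protocol module and returns the slow-path reason that
 * keeps such flows out of the fast path. */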
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
    if (!ofport) {
        return 0;
    } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (ofport->bfd && bfd_should_process_flow(flow)) {
        if (packet) {
            bfd_process_packet(ofport->bfd, flow, packet);
        }
        return SLOW_BFD;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return SLOW_LACP;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}

static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}

/* Partially Initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.in_port
        != vsp_realdev_to_vlandev(miss->ofproto, miss->flow.in_port,
                                  miss->flow.vlan_tci)) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->xout_garbage = false;
    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}
3667 * handle_flow_miss_with_facet(). */
3669 handle_flow_miss_common(struct rule_dpif
*rule
,
3670 struct ofpbuf
*packet
, const struct flow
*flow
)
3672 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3674 if (rule
->up
.cr
.priority
== FAIL_OPEN_PRIORITY
) {
3676 * Extra-special case for fail-open mode.
3678 * We are in fail-open mode and the packet matched the fail-open
3679 * rule, but we are connected to a controller too. We should send
3680 * the packet up to the controller in the hope that it will try to
3681 * set up a flow and thereby allow us to exit fail-open.
3683 * See the top-level comment in fail-open.c for more information.
3685 send_packet_in_miss(ofproto
, packet
, flow
);

/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss' masked by 'wc', is likely to be worth tracking in detail in userspace
 * and (usually) installing a datapath flow.  The answer is usually "yes" (a
 * return value of true).  However, for short flows the cost of bookkeeping is
 * much higher than the benefits, so when the datapath holds a large number of
 * flows we impose some heuristics to decide which flows are likely to be worth
 * tracking. */
static bool
flow_miss_should_make_facet(struct flow_miss *miss, struct flow_wildcards *wc)
{
    struct dpif_backer *backer = miss->ofproto->backer;
    uint32_t hash;

    if (!backer->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&backer->subfacets);
        if (n_subfacets * 2 <= flow_eviction_threshold) {
            return true;
        }

        backer->governor = governor_create();
    }

    hash = flow_hash_in_wildcards(&miss->flow, wc, 0);
    return governor_should_install_flow(backer->governor, hash,
                                        list_size(&miss->packets));
}

/* Handles 'miss' without creating a facet or subfacet or creating any datapath
 * flow.  'miss->flow' must have matched 'rule' and been xlated into 'xout'.
 * May add an "execute" operation to 'ops' and increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct rule_dpif *rule, struct xlate_out *xout,
                               struct flow_miss *miss,
                               struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofpbuf *packet;

    LIST_FOR_EACH (packet, list_node, &miss->packets) {

        COVERAGE_INC(facet_suppress);

        handle_flow_miss_common(rule, packet, &miss->flow);

        if (xout->slow) {
            struct xlate_in xin;

            xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }

        if (xout->odp_actions.size) {
            struct flow_miss_op *op = &ops[*n_ops];
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            xlate_out_copy(&op->xout, xout);
            execute->actions = op->xout.odp_actions.data;
            execute->actions_len = op->xout.odp_actions.size;
            op->xout_garbage = true;

            (*n_ops)++;
        }
    }
}

/* Handles 'miss', which matches 'facet'.  May add any required datapath
 * operations to 'ops', incrementing '*n_ops' for each new op.
 *
 * All of the packets in 'miss' are considered to have arrived at time 'now'.
 * This is really important only for new facets: if we just called time_msec()
 * here, then the new subfacet or its packets could look (occasionally) as
 * though it was used some time after the facet was used.  That can make a
 * one-packet flow look like it has a nonzero duration, which looks odd in
 * e.g. NetFlow statistics.
 *
 * If non-null, 'stats' will be folded into 'facet'. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
                            long long int now, struct dpif_flow_stats *stats,
                            struct flow_miss_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path want_path;
    struct subfacet *subfacet;
    struct ofpbuf *packet;

    subfacet = subfacet_create(facet, miss, now);
    want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
    if (stats) {
        subfacet_update_stats(subfacet, stats);
    }

    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        struct flow_miss_op *op = &ops[*n_ops];

        handle_flow_miss_common(facet->rule, packet, &miss->flow);

        if (want_path != SF_FAST_PATH) {
            struct xlate_in xin;

            xlate_in_init(&xin, ofproto, &facet->flow, facet->rule, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }

        if (facet->xout.odp_actions.size) {
            struct dpif_execute *execute = &op->dpif_op.u.execute;

            init_flow_miss_execute_op(miss, packet, op);
            execute->actions = facet->xout.odp_actions.data;
            execute->actions_len = facet->xout.odp_actions.size;

            (*n_ops)++;
        }
    }

    if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
        struct flow_miss_op *op = &ops[(*n_ops)++];
        struct dpif_flow_put *put = &op->dpif_op.u.flow_put;

        subfacet->path = want_path;

        op->xout_garbage = false;
        op->dpif_op.type = DPIF_OP_FLOW_PUT;
        put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
        put->key = miss->key;
        put->key_len = miss->key_len;
        if (want_path == SF_FAST_PATH) {
            put->actions = facet->xout.odp_actions.data;
            put->actions_len = facet->xout.odp_actions.size;
        } else {
            compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
                              op->slow_stub, sizeof op->slow_stub,
                              &put->actions, &put->actions_len);
        }
        put->stats = NULL;
    }
}

/* Handles flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
                 size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct dpif_flow_stats stats__;
    struct dpif_flow_stats *stats = &stats__;
    struct ofpbuf *packet;
    struct facet *facet;
    long long int now;

    now = time_msec();
    memset(stats, 0, sizeof *stats);
    stats->used = now;
    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        stats->tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
        stats->n_bytes += packet->size;
        stats->n_packets++;
    }

    facet = facet_lookup_valid(ofproto, &miss->flow);
    if (!facet) {
        struct flow_wildcards wc;
        struct rule_dpif *rule;
        struct xlate_out xout;
        struct xlate_in xin;

        flow_wildcards_init_catchall(&wc);
        rule = rule_dpif_lookup(ofproto, &miss->flow, &wc);
        rule_credit_stats(rule, stats);

        xlate_in_init(&xin, ofproto, &miss->flow, rule, stats->tcp_flags,
                      NULL);
        xin.resubmit_stats = stats;
        xin.may_learn = true;
        xlate_actions(&xin, &xout);
        flow_wildcards_or(&xout.wc, &xout.wc, &wc);

        /* There does not exist a bijection between 'struct flow' and datapath
         * flow keys with fitness ODP_FIT_TOO_LITTLE.  This breaks a
         * fundamental assumption used throughout the facet and subfacet
         * handling code.  Since we have to handle these misses in userspace
         * anyway, we simply skip facet creation, avoiding the problem
         * altogether. */
        if (miss->key_fitness == ODP_FIT_TOO_LITTLE
            || !flow_miss_should_make_facet(miss, &xout.wc)) {
            handle_flow_miss_without_facet(rule, &xout, miss, ops, n_ops);
            return;
        }

        facet = facet_create(miss, rule, &xout, stats);
        stats = NULL;
    }
    handle_flow_miss_with_facet(miss, facet, now, stats, ops, n_ops);
}

static struct drop_key *
drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
                size_t key_len)
{
    struct drop_key *drop_key;

    HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
                             &backer->drop_keys) {
        if (drop_key->key_len == key_len
            && !memcmp(drop_key->key, key, key_len)) {
            return drop_key;
        }
    }
    return NULL;
}

static void
drop_key_clear(struct dpif_backer *backer)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
    struct drop_key *drop_key, *next;

    HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
        int error;

        error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
                              NULL);
        if (error && !VLOG_DROP_WARN(&rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;
            odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
            VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
                      ds_cstr(&ds));
            ds_destroy(&ds);
        }

        hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
        free(drop_key->key);
        free(drop_key);
    }
}

/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
static int
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
                const struct nlattr *key, size_t key_len,
                struct flow *flow, enum odp_key_fitness *fitnessp,
                struct ofproto_dpif **ofproto, uint32_t *odp_in_port)
{
    const struct ofport_dpif *port;
    enum odp_key_fitness fitness;
    int error = ENODEV;

    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port;
    }

    port = (tnl_port_should_receive(flow)
            ? ofport_dpif_cast(tnl_port_receive(flow))
            : odp_port_to_ofport(backer, flow->in_port));
    flow->in_port = port ? port->up.ofp_port : OFPP_NONE;
    if (!port) {
        goto exit;
    }

    /* XXX: Since the tunnel module is not scoped per backer, for a tunnel port
     * it's theoretically possible that we'll receive an ofport belonging to an
     * entirely different datapath.  In practice, this can't happen because no
     * platform has two separate datapaths that each support tunneling. */
    ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);

    if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of
             * a struct dpif_upcall (see the large comment on that
             * structure definition), so pushing data on 'packet' is in
             * general not a good idea since it could overwrite 'key' or
             * free it as a side effect.  However, it's OK in this special
             * case because we know that 'packet' is inside a Netlink
             * attribute: pushing 4 bytes will just overwrite the 4-byte
             * "struct nlattr", which is fine since we don't need that
             * header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }
        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
    }
    error = 0;

    if (ofproto) {
        *ofproto = ofproto_dpif_cast(port->up.ofproto);
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    return error;
}
static void
handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
                    size_t n_upcalls)
{
    struct dpif_upcall *upcall;
    struct flow_miss *miss;
    struct flow_miss misses[FLOW_MISS_MAX_BATCH];
    struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
    struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
    struct hmap todo;
    int n_misses;
    size_t n_ops;
    size_t i;

    if (!n_upcalls) {
        return;
    }

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    hmap_init(&todo);
    n_misses = 0;
    for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
        struct flow_miss *miss = &misses[n_misses];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        uint32_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = ofproto_receive(backer, upcall->packet, upcall->key,
                                upcall->key_len, &flow, &miss->key_fitness,
                                &ofproto, &odp_in_port);
        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on datapath port for which we couldn't
             * associate an ofproto.  This can happen if a port is removed
             * while traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the kernel. */
            VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
                         "%"PRIu32, odp_in_port);

            drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
            if (!drop_key) {
                drop_key = xmalloc(sizeof *drop_key);
                drop_key->key = xmemdup(upcall->key, upcall->key_len);
                drop_key->key_len = upcall->key_len;

                hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
                            hash_bytes(drop_key->key, drop_key->key_len, 0));
                dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
                              drop_key->key, drop_key->key_len, NULL, 0, NULL);
            }
            continue;
        } else if (error) {
            continue;
        }

        ofproto->n_missed++;
        flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
                     &flow.tunnel, flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&todo, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = upcall->key;
            miss->key_len = upcall->key_len;
            miss->upcall_type = upcall->type;
            list_init(&miss->packets);

            n_misses++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &upcall->packet->list_node);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &todo) {
        handle_flow_miss(miss, flow_miss_ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        dpif_ops[i] = &flow_miss_ops[i].dpif_op;
    }
    dpif_operate(backer->dpif, dpif_ops, n_ops);

    for (i = 0; i < n_ops; i++) {
        if (flow_miss_ops[i].xout_garbage) {
            xlate_out_uninit(&flow_miss_ops[i].xout);
        }
    }
    hmap_destroy(&todo);
}
static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL, FLOW_SAMPLE_UPCALL,
              IPFIX_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
    size_t userdata_len;
    union user_action_cookie cookie;

    /* First look at the upcall type. */
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(upcall->userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), userdata_len);
    if (userdata_len == sizeof cookie.sflow
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == sizeof cookie.slow_path
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == sizeof cookie.flow_sample
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == sizeof cookie.ipfix
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %zu", cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}
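
/* Editorial sketch (not part of ofproto-dpif): why classify_upcall() checks
 * the cookie size *and* the type byte.  Several cookie variants share one
 * union, so a valid classification requires that the attribute length match
 * the variant that the type field claims.  The names below ('toy_cookie',
 * 'toy_classify') are hypothetical; memset()/memcpy() come from <string.h>,
 * which this file already includes. */
enum toy_cookie_type { TOY_SFLOW = 1, TOY_SLOW_PATH = 2 };
union toy_cookie {
    unsigned char type;                      /* First byte of every variant. */
    struct { unsigned char type; unsigned int output; } sflow;
    struct { unsigned char type; unsigned short reason; } slow_path;
};

static int
toy_classify(const void *data, size_t len)
{
    union toy_cookie cookie;

    if (len < sizeof cookie.type || len > sizeof cookie) {
        return -1;                           /* BAD_UPCALL analogue. */
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, data, len);              /* Copy only what was sent. */

    if (len == sizeof cookie.sflow && cookie.type == TOY_SFLOW) {
        return TOY_SFLOW;
    } else if (len == sizeof cookie.slow_path && cookie.type == TOY_SLOW_PATH) {
        return TOY_SLOW_PATH;
    }
    return -1;                               /* Size and type disagree. */
}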
static void
handle_sflow_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;
    uint32_t odp_in_port;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, &odp_in_port)
        || !ofproto->sflow) {
        return;
    }

    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.sflow);
    dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
                        odp_in_port, &cookie);
}
static void
handle_flow_sample_upcall(struct dpif_backer *backer,
                          const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    union user_action_cookie cookie;
    struct flow flow;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, NULL)
        || !ofproto->ipfix) {
        return;
    }

    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.flow_sample);

    /* The flow reflects exactly the contents of the packet.  Sample
     * the packet using it. */
    dpif_ipfix_flow_sample(ofproto->ipfix, upcall->packet, &flow,
                           cookie.flow_sample.collector_set_id,
                           cookie.flow_sample.probability,
                           cookie.flow_sample.obs_domain_id,
                           cookie.flow_sample.obs_point_id);
}
static void
handle_ipfix_upcall(struct dpif_backer *backer,
                    const struct dpif_upcall *upcall)
{
    struct ofproto_dpif *ofproto;
    struct flow flow;

    if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
                        &flow, NULL, &ofproto, NULL)
        || !ofproto->ipfix) {
        return;
    }

    /* The flow reflects exactly the contents of the packet.  Sample
     * the packet using it. */
    dpif_ipfix_bridge_sample(ofproto->ipfix, upcall->packet, &flow);
}
static int
handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
    struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
    struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
    uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
    int n_processed;
    int n_misses;
    int i;

    ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);

    n_misses = 0;
    for (n_processed = 0; n_processed < max_batch; n_processed++) {
        struct dpif_upcall *upcall = &misses[n_misses];
        struct ofpbuf *buf = &miss_bufs[n_misses];
        int error;

        ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
                        sizeof miss_buf_stubs[n_misses]);
        error = dpif_recv(backer->dpif, upcall, buf);
        if (error) {
            ofpbuf_uninit(buf);
            break;
        }

        switch (classify_upcall(upcall)) {
        case MISS_UPCALL:
            /* Handle it later. */
            n_misses++;
            break;

        case SFLOW_UPCALL:
            handle_sflow_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case FLOW_SAMPLE_UPCALL:
            handle_flow_sample_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case IPFIX_UPCALL:
            handle_ipfix_upcall(backer, upcall);
            ofpbuf_uninit(buf);
            break;

        case BAD_UPCALL:
            ofpbuf_uninit(buf);
            break;
        }
    }

    /* Handle deferred MISS_UPCALL processing. */
    handle_miss_upcalls(backer, misses, n_misses);
    for (i = 0; i < n_misses; i++) {
        ofpbuf_uninit(&miss_bufs[i]);
    }

    return n_processed;
}
/* Flow expiration. */

static int subfacet_max_idle(const struct dpif_backer *);
static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct dpif_backer *, int dp_max_idle);
/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    size_t n_subfacets;
    int max_idle;

    /* Periodically clear out the drop keys in an effort to keep them
     * relatively few. */
    drop_key_clear(backer);

    /* Update stats for each flow in the backer. */
    update_stats(backer);

    n_subfacets = hmap_count(&backer->subfacets);
    if (n_subfacets) {
        struct subfacet *subfacet;
        long long int total, now;

        total = 0;
        now = time_msec();
        HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
            total += now - subfacet->created;
        }
        backer->avg_subfacet_life += total / n_subfacets;
    }
    backer->avg_subfacet_life /= 2;

    backer->avg_n_subfacet += n_subfacets;
    backer->avg_n_subfacet /= 2;

    backer->max_n_subfacet = MAX(backer->max_n_subfacet, n_subfacets);

    max_idle = subfacet_max_idle(backer);
    expire_subfacets(backer, max_idle);

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct rule *rule, *next_rule;

        if (ofproto->backer != backer) {
            continue;
        }

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule));
        }

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (bundle->bond) {
                    bond_rebalance(bundle->bond, &backer->revalidate_set);
                }
            }
        }
    }

    return MIN(max_idle, 1000);
}
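
/* Editorial sketch (not part of ofproto-dpif): the smoothing used on the
 * subfacet gauges above.  Adding the new sample and halving the result is an
 * exponential moving average with alpha = 1/2: cheap, integer-only, and
 * heavily biased toward recent runs.  'toy_ema_update' is a hypothetical
 * name used only for illustration. */
static long long int
toy_ema_update(long long int avg, long long int sample)
{
    /* avg <- (avg + sample) / 2, as in the avg_n_subfacet updates above. */
    return (avg + sample) / 2;
}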
/* Updates flow table statistics given that the datapath just reported 'stats'
 * as 'subfacet''s statistics. */
static void
update_subfacet_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct dpif_flow_stats diff;

    diff.tcp_flags = stats->tcp_flags;
    diff.used = stats->used;

    if (stats->n_packets >= subfacet->dp_packet_count) {
        diff.n_packets = stats->n_packets - subfacet->dp_packet_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
        diff.n_packets = 0;
    }

    if (stats->n_bytes >= subfacet->dp_byte_count) {
        diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count;
    } else {
        VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
        diff.n_bytes = 0;
    }

    ofproto->n_hit += diff.n_packets;
    subfacet->dp_packet_count = stats->n_packets;
    subfacet->dp_byte_count = stats->n_bytes;
    subfacet_update_stats(subfacet, &diff);

    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }
}
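
/* Editorial sketch (not part of ofproto-dpif): the delta computation above.
 * The datapath reports absolute counters, so userspace keeps the last value
 * seen and credits only the difference; a counter that appears to run
 * backwards (e.g. after a datapath flow was replaced) is clamped to zero
 * rather than credited as a huge unsigned difference.  'toy_counter_delta'
 * is a hypothetical name. */
static unsigned long long
toy_counter_delta(unsigned long long current, unsigned long long *last_seen)
{
    unsigned long long diff = current >= *last_seen ? current - *last_seen : 0;

    *last_seen = current;       /* Remember the absolute value for next time. */
    return diff;                /* Only the new traffic is credited. */
}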
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
 * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
static void
delete_unexpected_flow(struct dpif_backer *backer,
                       const struct nlattr *key, size_t key_len)
{
    if (!VLOG_DROP_WARN(&rl)) {
        struct ds s;

        ds_init(&s);
        odp_flow_key_format(key, key_len, &s);
        VLOG_WARN("unexpected flow: %s", ds_cstr(&s));
        ds_destroy(&s);
    }

    COVERAGE_INC(facet_unexpected);
    dpif_flow_del(backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics.
 *
 * In addition, this function maintains per-ofproto flow hit counts.  The patch
 * port is not treated specially: for example, a packet that ingresses on br0
 * and is patched into br1 increases the hit count of br0 by 1 but does not
 * affect the hit or miss counts of br1. */
static void
update_stats(struct dpif_backer *backer)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, backer->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct subfacet *subfacet;
        uint32_t key_hash;

        key_hash = odp_flow_key_hash(key, key_len);
        subfacet = subfacet_find(backer, key, key_len, key_hash);
        switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
        case SF_FAST_PATH:
            update_subfacet_stats(subfacet, stats);
            break;

        case SF_SLOW_PATH:
            /* Stats are updated per-packet. */
            break;

        case SF_NOT_INSTALLED:
        default:
            delete_unexpected_flow(backer, key, key_len);
            break;
        }
    }
    dpif_flow_dump_done(&dump);

    update_moving_averages(backer);
}
/* Calculates and returns the number of milliseconds of idle time after which
 * subfacets should expire from the datapath.  When a subfacet expires, we fold
 * its statistics into its facet, and when a facet's last subfacet expires, we
 * fold its statistics into its rule. */
static int
subfacet_max_idle(const struct dpif_backer *backer)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of subfacets.
     * When this is the case we might as well keep statistics for all of them
     * in userspace and to cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of subfacets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of subfacets it is likely
     * that only a few of them are "heavy hitters" that consume a large amount
     * of bandwidth.  At this point, only heavy hitters are worth caching in
     * the kernel and maintaining in userspace; other subfacets we can
     * discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of subfacets (but at least
     * flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of subfacets is kept, so actually an
     * arbitrary number of subfacets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the subfacets, in addition to the
     * pass made by update_stats(), because the former function never looks at
     * uninstallable subfacets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct subfacet *subfacet;
    long long int now;
    int i;

    total = hmap_count(&backer->subfacets);
    if (total <= flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
        long long int idle = now - subfacet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s (msec:count)", ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
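
/* Editorial sketch (not part of ofproto-dpif): the histogram cutoff above,
 * reduced to its core.  Given per-bucket counts ordered from most- to
 * least-recently used, keep accumulating buckets until at least 'min_keep'
 * flows are retained; everything in later buckets is idle long enough to
 * expire.  'toy_idle_cutoff' and its parameters are hypothetical; it assumes
 * n_buckets >= 1, matching the do/while above. */
static int
toy_idle_cutoff(const int *buckets, int n_buckets, int bucket_width_ms,
                int min_keep)
{
    int subtotal = 0;
    int bucket = 0;

    do {
        subtotal += buckets[bucket++];      /* Always keep bucket 0. */
    } while (bucket < n_buckets && subtotal < min_keep);

    return bucket * bucket_width_ms;        /* Idle beyond this expires. */
}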
static void
expire_subfacets(struct dpif_backer *backer, int dp_max_idle)
{
    /* Cutoff time for most flows. */
    long long int normal_cutoff = time_msec() - dp_max_idle;

    /* We really want to keep flows for special protocols around, so use a more
     * conservative cutoff. */
    long long int special_cutoff = time_msec() - 10000;

    struct subfacet *subfacet, *next_subfacet;
    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    int n_batch;

    n_batch = 0;
    HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
                        &backer->subfacets) {
        long long int cutoff;

        cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
                                                | SLOW_STP)
                  ? special_cutoff
                  : normal_cutoff);
        if (subfacet->used < cutoff) {
            if (subfacet->path != SF_NOT_INSTALLED) {
                batch[n_batch++] = subfacet;
                if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
                    subfacet_destroy_batch(backer, batch, n_batch);
                    n_batch = 0;
                }
            } else {
                subfacet_destroy(subfacet);
            }
        }
    }

    if (n_batch > 0) {
        subfacet_destroy_batch(backer, batch, n_batch);
    }
}
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * deletes it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    if (rule->up.pending) {
        /* We'll have to expire it later. */
        return;
    }

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout
               && now > rule->up.used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
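
/* Editorial sketch (not part of ofproto-dpif): the two OpenFlow timeout
 * checks above.  A hard timeout is measured from the last modification, an
 * idle timeout from the last use, and both are configured in seconds while
 * the clock runs in milliseconds.  'toy_rule_expired' is a hypothetical
 * name; a zero timeout means "disabled", as in OpenFlow. */
static int
toy_rule_expired(long long int now_ms, long long int modified_ms,
                 long long int used_ms, int hard_s, int idle_s)
{
    if (hard_s && now_ms > modified_ms + hard_s * 1000LL) {
        return 1;               /* OFPRR_HARD_TIMEOUT analogue. */
    }
    if (idle_s && now_ms > used_ms + idle_s * 1000LL) {
        return 2;               /* OFPRR_IDLE_TIMEOUT analogue. */
    }
    return 0;                   /* Still live. */
}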
/* Creates and returns a new facet based on 'miss'.
 *
 * The caller must already have determined that no facet with an identical
 * 'miss->flow' exists in 'miss->ofproto'.
 *
 * 'rule' and 'xout' must have been created based on 'miss'.
 *
 * 'facet''s statistics are initialized based on 'stats'.
 *
 * The facet will initially have no subfacets.  The caller should create (at
 * least) one subfacet with subfacet_create(). */
static struct facet *
facet_create(const struct flow_miss *miss, struct rule_dpif *rule,
             struct xlate_out *xout, struct dpif_flow_stats *stats)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct facet *facet;
    struct match match;

    facet = xzalloc(sizeof *facet);
    facet->packet_count = facet->prev_packet_count = stats->n_packets;
    facet->byte_count = facet->prev_byte_count = stats->n_bytes;
    facet->tcp_flags = stats->tcp_flags;
    facet->used = stats->used;
    facet->flow = miss->flow;
    facet->learn_rl = time_msec() + 500;
    facet->rule = rule;

    list_push_back(&facet->rule->facets, &facet->list_node);
    list_init(&facet->subfacets);
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    xlate_out_copy(&facet->xout, xout);

    match_init(&match, &facet->flow, &facet->xout.wc);
    cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY);
    classifier_insert(&ofproto->facets, &facet->cr);

    facet->nf_flow.output_iface = facet->xout.nf_output_iface;

    return facet;
}

static void
facet_free(struct facet *facet)
{
    if (facet) {
        xlate_out_uninit(&facet->xout);
        free(facet);
    }
}
/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
 * 'odp_actions' on 'packet', whose headers are described by 'flow'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct nlattr *odp_actions, size_t actions_len,
                    struct ofpbuf *packet)
{
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    int error;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow,
                           ofp_port_to_odp_port(ofproto, flow->in_port));

    error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
                         odp_actions, actions_len, packet);
    return !error;
}
/* Remove 'facet' from its ofproto and free up the associated memory:
 *
 *   - If 'facet' was installed in the datapath, uninstalls it and updates its
 *     rule's statistics, via subfacet_uninstall().
 *
 *   - Removes 'facet' from its rule and from ofproto->facets.
 */
static void
facet_remove(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet, *next_subfacet;

    ovs_assert(!list_is_empty(&facet->subfacets));

    /* First uninstall all of the subfacets to get final statistics. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        subfacet_uninstall(subfacet);
    }

    /* Flush the final stats to the rule.
     *
     * This might require us to have at least one subfacet around so that we
     * can use its actions for accounting in facet_account(), which is why we
     * have uninstalled but not yet destroyed the subfacets. */
    facet_flush_stats(facet);

    /* Now we're really all done so destroy everything. */
    LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
                        &facet->subfacets) {
        subfacet_destroy__(subfacet);
    }
    classifier_remove(&ofproto->facets, &facet->cr);
    cls_rule_destroy(&facet->cr);
    list_remove(&facet->list_node);
    facet_free(facet);
}
/* Feed information from 'facet' back into the learning table to keep it in
 * sync with what is actually flowing through the datapath. */
static void
facet_learn(struct facet *facet)
{
    long long int now = time_msec();

    if (!facet->xout.has_fin_timeout && now < facet->learn_rl) {
        return;
    }

    facet->learn_rl = now + 500;

    if (!facet->xout.has_learn
        && !facet->xout.has_normal
        && (!facet->xout.has_fin_timeout
            || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
        return;
    }

    facet_push_stats(facet, true);
}
static void
facet_account(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    const struct nlattr *a;
    unsigned int left;
    ovs_be16 vlan_tci;
    uint64_t n_bytes;

    if (!facet->xout.has_normal || !ofproto->has_bonded_bundles) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bundle.)
     *
     * We use the actions from an arbitrary subfacet because they should all
     * be equally valid for our purpose. */
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
                             facet->xout.odp_actions.size) {
        const struct ovs_action_push_vlan *vlan;
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP_VLAN:
            vlan_tci = htons(0);
            break;

        case OVS_ACTION_ATTR_PUSH_VLAN:
            vlan = nl_attr_get(a);
            vlan_tci = vlan->vlan_tci;
            break;
        }
    }
}
/* Returns true if the only action for 'facet' is to send to the controller.
 * (We don't report NetFlow expiration messages for such facets because they
 * are just part of the control logic for the network, not real traffic.) */
static bool
facet_is_controller_flow(struct facet *facet)
{
    if (facet) {
        const struct rule *rule = &facet->rule->up;
        const struct ofpact *ofpacts = rule->ofpacts;
        size_t ofpacts_len = rule->ofpacts_len;

        if (ofpacts_len > 0 &&
            ofpacts->type == OFPACT_CONTROLLER &&
            ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
            return true;
        }
    }
    return false;
}
/* Folds all of 'facet''s statistics into its rule.  Also updates the
 * accounting ofhook and emits a NetFlow expiration if appropriate.  All of
 * 'facet''s statistics in the datapath should have been zeroed and folded into
 * its packet and byte counts before this function is called. */
static void
facet_flush_stats(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct subfacet *subfacet;

    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        ovs_assert(!subfacet->dp_byte_count);
        ovs_assert(!subfacet->dp_packet_count);
    }

    facet_push_stats(facet, false);
    if (facet->accounted_bytes < facet->byte_count) {
        facet_account(facet);
        facet->accounted_bytes = facet->byte_count;
    }

    if (ofproto->netflow && !facet_is_controller_flow(facet)) {
        struct ofexpired expired;
        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }

    /* Reset counters to prevent double counting if 'facet' ever gets
     * reinstalled. */
    facet_reset_counters(facet);

    netflow_flow_clear(&facet->nf_flow);
    facet->tcp_flags = 0;
}
/* Searches 'ofproto''s table of facets for one which would be responsible for
 * 'flow'.  Returns it if found, otherwise a null pointer.
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct cls_rule *cr = classifier_lookup(&ofproto->facets, flow, NULL);
    return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL;
}
/* Searches 'ofproto''s table of facets for one that covers 'flow'.  Returns
 * it if found, otherwise a null pointer.
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct facet *facet;

    facet = facet_find(ofproto, flow);
    if (facet
        && (ofproto->backer->need_revalidate
            || tag_set_intersects(&ofproto->backer->revalidate_set,
                                  facet->xout.tags))
        && !facet_revalidate(facet)) {
        return NULL;
    }

    return facet;
}
static bool
facet_check_consistency(struct facet *facet)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);

    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    struct xlate_out xout;
    struct xlate_in xin;

    struct rule_dpif *rule;
    bool ok;

    /* Check the rule for consistency. */
    rule = rule_dpif_lookup(ofproto, &facet->flow, NULL);
    if (rule != facet->rule) {
        if (!VLOG_DROP_WARN(&rl)) {
            struct ds s = DS_EMPTY_INITIALIZER;

            flow_format(&s, &facet->flow);
            ds_put_format(&s, ": facet associated with wrong rule (was "
                          "table=%"PRIu8",", facet->rule->up.table_id);
            cls_rule_format(&facet->rule->up.cr, &s);
            ds_put_format(&s, ") (should have been table=%"PRIu8",",
                          rule->up.table_id);
            cls_rule_format(&rule->up.cr, &s);
            ds_put_char(&s, ')');

            VLOG_WARN("%s", ds_cstr(&s));
            ds_destroy(&s);
        }
        return false;
    }

    /* Check the datapath actions for consistency. */
    xlate_in_init(&xin, ofproto, &facet->flow, rule, 0, NULL);
    xlate_actions(&xin, &xout);

    ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
        && facet->xout.slow == xout.slow;
    if (!ok && !VLOG_DROP_WARN(&rl)) {
        struct ds s = DS_EMPTY_INITIALIZER;

        flow_format(&s, &facet->flow);
        ds_put_cstr(&s, ": inconsistency in facet");

        if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
            ds_put_cstr(&s, " (actions were: ");
            format_odp_actions(&s, facet->xout.odp_actions.data,
                               facet->xout.odp_actions.size);
            ds_put_cstr(&s, ") (correct actions: ");
            format_odp_actions(&s, xout.odp_actions.data,
                               xout.odp_actions.size);
            ds_put_char(&s, ')');
        }

        if (facet->xout.slow != xout.slow) {
            ds_put_format(&s, " slow path incorrect. should be %d", xout.slow);
        }

        VLOG_WARN("%s", ds_cstr(&s));
        ds_destroy(&s);
    }
    xlate_out_uninit(&xout);

    return ok;
}
/* Re-searches the classifier for 'facet':
 *
 *   - If the rule found is different from 'facet''s current rule, moves
 *     'facet' to the new rule and recompiles its actions.
 *
 *   - If the rule found is the same as 'facet''s current rule, leaves 'facet'
 *     where it is and recompiles its actions anyway.
 *
 *   - If any of 'facet''s subfacets correspond to a new flow according to
 *     ofproto_receive(), 'facet' is removed.
 *
 * Returns true if 'facet' is still valid.  False if 'facet' was removed. */
static bool
facet_revalidate(struct facet *facet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    struct rule_dpif *new_rule;
    struct subfacet *subfacet;
    struct flow_wildcards wc;
    struct xlate_out xout;
    struct xlate_in xin;

    COVERAGE_INC(facet_revalidate);

    /* Check that child subfacets still correspond to this facet.  Tunnel
     * configuration changes could cause a subfacet's OpenFlow in_port to
     * change. */
    LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
        struct ofproto_dpif *recv_ofproto;
        struct flow recv_flow;
        int error;

        error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
                                subfacet->key_len, &recv_flow, NULL,
                                &recv_ofproto, NULL);
        if (error
            || recv_ofproto != ofproto
            || facet != facet_find(ofproto, &recv_flow)) {
            facet_remove(facet);
            return false;
        }
    }

    flow_wildcards_init_catchall(&wc);
    new_rule = rule_dpif_lookup(ofproto, &facet->flow, &wc);

    /* Calculate new datapath actions.
     *
     * We do not modify any 'facet' state yet, because we might need to, e.g.,
     * emit a NetFlow expiration and, if so, we need to have the old state
     * around to properly compose it. */
    xlate_in_init(&xin, ofproto, &facet->flow, new_rule, 0, NULL);
    xlate_actions(&xin, &xout);
    flow_wildcards_or(&xout.wc, &xout.wc, &wc);

    /* A facet's slow path reason should only change under dramatic
     * circumstances.  Rather than try to update everything, it's simpler to
     * remove the facet and start over.
     *
     * More importantly, if a facet's wildcards change, it will be relatively
     * difficult to figure out if its subfacets still belong to it, and if not
     * which facet they may belong to.  Again, to avoid the complexity, we
     * simply give up instead. */
    if (facet->xout.slow != xout.slow
        || memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) {
        facet_remove(facet);
        xlate_out_uninit(&xout);
        return false;
    }

    if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
        LIST_FOR_EACH(subfacet, list_node, &facet->subfacets) {
            if (subfacet->path == SF_FAST_PATH) {
                struct dpif_flow_stats stats;

                subfacet_install(subfacet, &xout.odp_actions, &stats);
                subfacet_update_stats(subfacet, &stats);
            }
        }

        facet_flush_stats(facet);

        ofpbuf_clear(&facet->xout.odp_actions);
        ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data,
                   xout.odp_actions.size);
    }

    /* Update 'facet' now that we've taken care of all the old state. */
    facet->xout.tags = xout.tags;
    facet->xout.slow = xout.slow;
    facet->xout.has_learn = xout.has_learn;
    facet->xout.has_normal = xout.has_normal;
    facet->xout.has_fin_timeout = xout.has_fin_timeout;
    facet->xout.nf_output_iface = xout.nf_output_iface;
    facet->xout.mirrors = xout.mirrors;
    facet->nf_flow.output_iface = facet->xout.nf_output_iface;

    if (facet->rule != new_rule) {
        COVERAGE_INC(facet_changed_rule);
        list_remove(&facet->list_node);
        list_push_back(&new_rule->facets, &facet->list_node);
        facet->rule = new_rule;
        facet->used = new_rule->up.created;
        facet->prev_used = facet->used;
    }

    xlate_out_uninit(&xout);
    return true;
}
static void
facet_reset_counters(struct facet *facet)
{
    facet->packet_count = 0;
    facet->byte_count = 0;
    facet->prev_packet_count = 0;
    facet->prev_byte_count = 0;
    facet->accounted_bytes = 0;
}
static void
facet_push_stats(struct facet *facet, bool may_learn)
{
    struct dpif_flow_stats stats;

    ovs_assert(facet->packet_count >= facet->prev_packet_count);
    ovs_assert(facet->byte_count >= facet->prev_byte_count);
    ovs_assert(facet->used >= facet->prev_used);

    stats.n_packets = facet->packet_count - facet->prev_packet_count;
    stats.n_bytes = facet->byte_count - facet->prev_byte_count;
    stats.used = facet->used;
    stats.tcp_flags = facet->tcp_flags;

    if (may_learn || stats.n_packets || facet->used > facet->prev_used) {
        struct ofproto_dpif *ofproto =
            ofproto_dpif_cast(facet->rule->up.ofproto);

        struct ofport_dpif *in_port;
        struct xlate_in xin;

        facet->prev_packet_count = facet->packet_count;
        facet->prev_byte_count = facet->byte_count;
        facet->prev_used = facet->used;

        in_port = get_ofp_port(ofproto, facet->flow.in_port);
        if (in_port && in_port->tnl_port) {
            netdev_vport_inc_rx(in_port->up.netdev, &stats);
        }

        rule_credit_stats(facet->rule, &stats);
        netflow_flow_update_time(ofproto->netflow, &facet->nf_flow,
                                 facet->used);
        netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
        update_mirror_stats(ofproto, facet->xout.mirrors, stats.n_packets,
                            stats.n_bytes);

        xlate_in_init(&xin, ofproto, &facet->flow, facet->rule,
                      stats.tcp_flags, NULL);
        xin.resubmit_stats = &stats;
        xin.may_learn = may_learn;
        xlate_actions_for_side_effects(&xin);
    }
}
static void
push_all_stats__(bool run_fast)
{
    static long long int rl = LLONG_MIN;
    struct ofproto_dpif *ofproto;

    if (time_msec() < rl) {
        return;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct cls_cursor cursor;
        struct facet *facet;

        cls_cursor_init(&cursor, &ofproto->facets, NULL);
        CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
            facet_push_stats(facet, false);
            if (run_fast) {
                run_fast_rl();
            }
        }
    }

    rl = time_msec() + 100;
}

static void
push_all_stats(void)
{
    push_all_stats__(true);
}
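
/* Editorial sketch (not part of ofproto-dpif): the self-rate-limiting
 * pattern above.  A static "earliest next run" timestamp makes an expensive
 * periodic job a no-op when it is invoked again too soon; starting it at the
 * most negative value guarantees the first call runs.  'toy_should_run' is a
 * hypothetical name and assumes a millisecond clock like time_msec(). */
static int
toy_should_run(long long int now_ms, long long int *next_run_ms,
               long long int interval_ms)
{
    if (now_ms < *next_run_ms) {
        return 0;                    /* Too soon; skip this invocation. */
    }
    *next_run_ms = now_ms + interval_ms;
    return 1;                        /* Run now, then back off again. */
}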
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
    rule->packet_count += stats->n_packets;
    rule->byte_count += stats->n_bytes;
    ofproto_rule_update_used(&rule->up, stats->used);
}
static struct subfacet *
subfacet_find(struct dpif_backer *backer, const struct nlattr *key,
              size_t key_len, uint32_t key_hash)
{
    struct subfacet *subfacet;

    HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
                             &backer->subfacets) {
        if (subfacet->key_len == key_len
            && !memcmp(key, subfacet->key, key_len)) {
            return subfacet;
        }
    }

    return NULL;
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
 * 'key_fitness', 'key', and 'key_len' members in 'miss'.  Returns the
 * existing subfacet if there is one, otherwise creates and returns a
 * new subfacet. */
static struct subfacet *
subfacet_create(struct facet *facet, struct flow_miss *miss,
                long long int now)
{
    struct dpif_backer *backer = miss->ofproto->backer;
    enum odp_key_fitness key_fitness = miss->key_fitness;
    const struct nlattr *key = miss->key;
    size_t key_len = miss->key_len;
    uint32_t key_hash;
    struct subfacet *subfacet;

    key_hash = odp_flow_key_hash(key, key_len);

    if (list_is_empty(&facet->subfacets)) {
        subfacet = &facet->one_subfacet;
    } else {
        subfacet = subfacet_find(backer, key, key_len, key_hash);
        if (subfacet) {
            if (subfacet->facet == facet) {
                return subfacet;
            }

            /* This shouldn't happen. */
            VLOG_ERR_RL(&rl, "subfacet with wrong facet");
            subfacet_destroy(subfacet);
        }

        subfacet = xmalloc(sizeof *subfacet);
    }

    hmap_insert(&backer->subfacets, &subfacet->hmap_node, key_hash);
    list_push_back(&facet->subfacets, &subfacet->list_node);
    subfacet->facet = facet;
    subfacet->key_fitness = key_fitness;
    subfacet->key = xmemdup(key, key_len);
    subfacet->key_len = key_len;
    subfacet->used = now;
    subfacet->created = now;
    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
    subfacet->path = SF_NOT_INSTALLED;
    subfacet->backer = backer;

    backer->subfacet_add_count++;
    return subfacet;
}
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
 * its facet within 'ofproto', and frees it. */
static void
subfacet_destroy__(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);

    /* Update ofproto stats before uninstalling the subfacet. */
    ofproto->backer->subfacet_del_count++;

    subfacet_uninstall(subfacet);
    hmap_remove(&subfacet->backer->subfacets, &subfacet->hmap_node);
    list_remove(&subfacet->list_node);
    free(subfacet->key);
    if (subfacet != &facet->one_subfacet) {
        free(subfacet);
    }
}
/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
 * last remaining subfacet in its facet destroys the facet too. */
static void
subfacet_destroy(struct subfacet *subfacet)
{
    struct facet *facet = subfacet->facet;

    if (list_is_singleton(&facet->subfacets)) {
        /* facet_remove() needs at least one subfacet (it will remove it). */
        facet_remove(facet);
    } else {
        subfacet_destroy__(subfacet);
    }
}
static void
subfacet_destroy_batch(struct dpif_backer *backer,
                       struct subfacet **subfacets, int n)
{
    struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
    struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
    int i;

    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        ops[i].u.flow_del.key = subfacets[i]->key;
        ops[i].u.flow_del.key_len = subfacets[i]->key_len;
        ops[i].u.flow_del.stats = &stats[i];
        opsp[i] = &ops[i];
    }

    dpif_operate(backer->dpif, opsp, n);
    for (i = 0; i < n; i++) {
        subfacet_reset_dp_stats(subfacets[i], &stats[i]);
        subfacets[i]->path = SF_NOT_INSTALLED;
        subfacet_destroy(subfacets[i]);
    }
}
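
/* Editorial sketch (not part of ofproto-dpif): the batching shape used
 * above.  Instead of one round trip per deletion, the caller fills an array
 * of operation descriptors plus a parallel array of pointers, dispatches the
 * whole set at once, and post-processes each result.  'toy_op',
 * 'toy_operate', and 'toy_destroy_batch' are hypothetical stand-ins for
 * struct dpif_op and dpif_operate(). */
struct toy_op {
    int key;                    /* What to delete. */
    int error;                  /* Filled in by the batch dispatcher. */
};

static void
toy_operate(struct toy_op **ops, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        ops[i]->error = 0;      /* One round trip would cover all 'n'. */
    }
}

static void
toy_destroy_batch(struct toy_op *ops, int n)
{
    struct toy_op *opsp[16];    /* Mirrors opsp[] above; requires n <= 16. */
    int i;

    for (i = 0; i < n; i++) {
        opsp[i] = &ops[i];
    }
    toy_operate(opsp, n);       /* Dispatch once, then examine each result. */
}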
/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
 * in the datapath will be zeroed and 'stats' will be updated with traffic new
 * since 'subfacet' was last updated.
 *
 * Returns 0 if successful, otherwise a positive errno value. */
static int
subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions,
                 struct dpif_flow_stats *stats)
{
    struct facet *facet = subfacet->facet;
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
    enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
    const struct nlattr *actions = odp_actions->data;
    size_t actions_len = odp_actions->size;

    uint64_t slow_path_stub[128 / 8];
    enum dpif_flow_put_flags flags;
    int ret;

    flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
    if (stats) {
        flags |= DPIF_FP_ZERO_STATS;
    }

    if (path == SF_SLOW_PATH) {
        compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
                          slow_path_stub, sizeof slow_path_stub,
                          &actions, &actions_len);
    }

    ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
                        subfacet->key_len, actions, actions_len, stats);

    if (stats) {
        subfacet_reset_dp_stats(subfacet, stats);
    }

    if (!ret) {
        subfacet->path = path;
    }
    return ret;
}
/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
{
    if (subfacet->path != SF_NOT_INSTALLED) {
        struct rule_dpif *rule = subfacet->facet->rule;
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
        struct dpif_flow_stats stats;
        int error;

        error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
                              subfacet->key_len, &stats);
        subfacet_reset_dp_stats(subfacet, &stats);
        if (!error) {
            subfacet_update_stats(subfacet, &stats);
        }
        subfacet->path = SF_NOT_INSTALLED;
    } else {
        ovs_assert(subfacet->dp_packet_count == 0);
        ovs_assert(subfacet->dp_byte_count == 0);
    }
}
/* Resets 'subfacet''s datapath statistics counters.  This should be called
 * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
 * non-null, it should contain the statistics returned by dpif when 'subfacet'
 * was reset in the datapath.  'stats' will be modified to include only
 * statistics new since 'subfacet' was last updated. */
static void
subfacet_reset_dp_stats(struct subfacet *subfacet,
                        struct dpif_flow_stats *stats)
{
    if (stats
        && subfacet->dp_packet_count <= stats->n_packets
        && subfacet->dp_byte_count <= stats->n_bytes) {
        stats->n_packets -= subfacet->dp_packet_count;
        stats->n_bytes -= subfacet->dp_byte_count;
    }

    subfacet->dp_packet_count = 0;
    subfacet->dp_byte_count = 0;
}
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
 *
 * Because of the meaning of a subfacet's counters, it only makes sense to do
 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
 * represents a packet that was sent by hand or if it represents statistics
 * that have been cleared out of the datapath. */
static void
subfacet_update_stats(struct subfacet *subfacet,
                      const struct dpif_flow_stats *stats)
{
    if (stats->n_packets || stats->used > subfacet->used) {
        struct facet *facet = subfacet->facet;

        subfacet->used = MAX(subfacet->used, stats->used);
        facet->used = MAX(facet->used, stats->used);
        facet->packet_count += stats->n_packets;
        facet->byte_count += stats->n_bytes;
        facet->tcp_flags |= stats->tcp_flags;
    }
}
/* Lookup 'flow' in 'ofproto''s classifier.  If 'wc' is non-null, sets
 * the fields that were relevant as part of the lookup. */
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
                 struct flow_wildcards *wc)
{
    struct rule_dpif *rule;

    rule = rule_dpif_lookup__(ofproto, flow, wc, 0);
    if (rule) {
        return rule;
    }

    return rule_dpif_miss_rule(ofproto, flow);
}
static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
                   struct flow_wildcards *wc, uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;
    bool frag;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id].cls;
    frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
    if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* We must pretend that transport ports are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
    } else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
        cls_rule = &ofproto->drop_frags_rule->up.cr;
        if (wc) {
            flow_wildcards_init_exact(wc);
        }
    } else {
        cls_rule = classifier_lookup(cls, flow, wc);
    }
    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
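
/* Editorial sketch (not part of ofproto-dpif): the OFPC_FRAG_NORMAL trick
 * above.  For IP fragments the transport ports may not exist (or may belong
 * to the first fragment only), so the lookup is done on a copy of the flow
 * with both ports forced to zero; the caller's flow is never mutated.
 * 'toy_flow' and 'toy_frag_key' are hypothetical. */
struct toy_flow {
    int is_fragment;
    unsigned short tp_src, tp_dst;
};

static struct toy_flow
toy_frag_key(const struct toy_flow *flow)
{
    struct toy_flow key = *flow;        /* Copy; the original stays intact. */

    if (key.is_fragment) {
        key.tp_src = 0;                 /* Pretend ports are unavailable. */
        key.tp_dst = 0;
    }
    return key;                         /* Look this up instead of 'flow'. */
}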
static struct rule_dpif *
rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct ofport_dpif *port;

    port = get_ofp_port(ofproto, flow->in_port);
    if (!port) {
        VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
        return ofproto->miss_rule;
    }

    if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
        return ofproto->no_packet_in_rule;
    }
    return ofproto->miss_rule;
}
static void
complete_operation(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    rule_invalidate(rule);
    if (clogged) {
        struct dpif_completion *c = xmalloc(sizeof *c);
        c->op = rule->up.pending;
        list_push_back(&ofproto->completions, &c->list_node);
    } else {
        ofoperation_complete(rule->up.pending, 0);
    }
}
static struct rule *
rule_alloc(void)
{
    struct rule_dpif *rule = xmalloc(sizeof *rule);
    return &rule->up;
}

static void
rule_dealloc(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    free(rule);
}
static enum ofperr
rule_construct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct rule_dpif *victim;
    uint8_t table_id;

    rule->packet_count = 0;
    rule->byte_count = 0;

    victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
    if (victim && !list_is_empty(&victim->facets)) {
        struct facet *facet;

        rule->facets = victim->facets;
        list_moved(&rule->facets);
        LIST_FOR_EACH (facet, list_node, &rule->facets) {
            /* XXX: We're only clearing our local counters here.  It's possible
             * that quite a few packets are unaccounted for in the datapath
             * statistics.  These will be accounted to the new rule instead of
             * cleared as required.  This could be fixed by clearing out the
             * datapath statistics for this facet, but currently it doesn't
             * seem worth it. */
            facet_reset_counters(facet);
            facet->rule = rule;
        }
    } else {
        /* Must avoid list_moved() in this case. */
        list_init(&rule->facets);
    }

    table_id = rule->up.table_id;
    if (victim) {
        rule->tag = victim->tag;
    } else if (table_id == 0) {
        rule->tag = 0;
    } else {
        struct flow flow;

        miniflow_expand(&rule->up.cr.match.flow, &flow);
        rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
                                       ofproto->tables[table_id].basis);
    }

    complete_operation(rule);
    return 0;
}
static void
rule_destruct(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);
    struct facet *facet, *next_facet;

    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_revalidate(facet);
    }

    complete_operation(rule);
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    /* push_all_stats() can handle flow misses which, when using the learn
     * action, can cause rules to be added and deleted.  This can corrupt our
     * caller's datastructures which assume that rule_get_stats() doesn't have
     * an impact on the flow table.  To be safe, we disable miss handling. */
    push_all_stats__(false);

    /* Start from historical data for 'rule' itself that are no longer tracked
     * in facets.  This counts, for example, facets that have expired. */
    *packets = rule->packet_count;
    *bytes = rule->byte_count;
}
static void
rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
                  struct ofpbuf *packet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct dpif_flow_stats stats;
    struct xlate_out xout;
    struct xlate_in xin;

    dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
    rule_credit_stats(rule, &stats);

    xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
    xin.resubmit_stats = &stats;
    xlate_actions(&xin, &xout);

    execute_odp_actions(ofproto, flow, xout.odp_actions.data,
                        xout.odp_actions.size, packet);

    xlate_out_uninit(&xout);
}
static enum ofperr
rule_execute(struct rule *rule, const struct flow *flow,
             struct ofpbuf *packet)
{
    rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
    ofpbuf_delete(packet);
    return 0;
}
static void
rule_modify_actions(struct rule *rule_)
{
    struct rule_dpif *rule = rule_dpif_cast(rule_);

    complete_operation(rule);
}
/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf key, odp_actions;
    struct dpif_flow_stats stats;
    struct odputil_keybuf keybuf;
    struct ofpact_output output;
    struct xlate_out xout;
    struct xlate_in xin;
    struct flow flow;
    int error;

    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);

    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
    flow_extract(packet, 0, 0, NULL, OFPP_NONE, &flow);
    odp_flow_key_from_flow(&key, &flow, ofp_port_to_odp_port(ofproto,
                                                             OFPP_LOCAL));
    dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    output.port = ofport->up.ofp_port;
    output.max_len = 0;

    xlate_in_init(&xin, ofproto, &flow, NULL, 0, packet);
    xin.ofpacts_len = sizeof output;
    xin.ofpacts = &output.ofpact;
    xin.resubmit_stats = &stats;
    xlate_actions(&xin, &xout);

    error = dpif_execute(ofproto->backer->dpif,
                         key.data, key.size,
                         xout.odp_actions.data, xout.odp_actions.size,
                         packet);
    xlate_out_uninit(&xout);

    if (error) {
        VLOG_WARN_RL(&rl, "%s: failed to send packet on port %s (%s)",
                     ofproto->up.name, netdev_get_name(ofport->up.netdev),
                     strerror(error));
    }

    ofproto->stats.tx_packets++;
    ofproto->stats.tx_bytes += packet->size;
    return error;
}
/* OpenFlow to datapath action translation. */

static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
 * The action will state 'slow' as the reason that the action is in the slow
 * path.  (This is purely informational: it allows a human viewing "ovs-dpctl
 * dump-flows" output to see why a flow is in the slow path.)
 *
 * The 'stub_size' bytes in 'stub' will be used to store the action.
 * 'stub_size' must be large enough for the action.
 *
 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
 * respectively. */
static void
compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
                  enum slow_path_reason slow,
                  uint64_t *stub, size_t stub_size,
                  const struct nlattr **actionsp, size_t *actions_lenp)
{
    union user_action_cookie cookie;
    struct ofpbuf buf;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = slow;

    ofpbuf_use_stack(&buf, stub, stub_size);
    if (slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)) {
        uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
        odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf);
    } else {
        put_userspace_action(ofproto, &buf, flow, &cookie,
                             sizeof cookie.slow_path);
    }
    *actionsp = buf.data;
    *actions_lenp = buf.size;
}
static size_t
put_userspace_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     const union user_action_cookie *cookie,
                     const size_t cookie_size)
{
    uint32_t pid;

    pid = dpif_port_get_pid(ofproto->backer->dpif,
                            ofp_port_to_odp_port(ofproto, flow->in_port));

    return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
}
/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet.
 */
static size_t
compose_sample_action(const struct ofproto_dpif *ofproto,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    int cookie_offset;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie,
                                         cookie_size);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}
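
/* Editorial sketch (not part of ofproto-dpif): the nested-attribute pattern
 * behind the nl_msg_start_nested()/nl_msg_end_nested() pairs above.  A
 * nested attribute's length is unknown until its contents have been
 * serialized, so the composer records the header offset, emits the payload,
 * and patches the length in afterwards.  'toy_start_nested' and
 * 'toy_end_nested' are hypothetical; the 2-byte little-endian length header
 * is a simplifying assumption, not the real Netlink layout. */
static size_t
toy_start_nested(size_t *used)
{
    size_t offset = *used;

    *used += 2;                 /* Reserve a 2-byte length placeholder. */
    return offset;
}

static void
toy_end_nested(unsigned char *buf, size_t *used, size_t offset)
{
    size_t payload_len = *used - offset - 2;

    buf[offset] = payload_len & 0xff;              /* Patch the placeholder */
    buf[offset + 1] = (payload_len >> 8) & 0xff;   /* now the size is known. */
}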
static void
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}
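
/* Editorial sketch (not part of ofproto-dpif): the sFlow output encoding
 * chosen above, isolated.  sFlow v5 reserves 0x40000000|reason for drops and
 * 0x80000000|count for multiple output ports; a single known port is encoded
 * as its bare ifindex.  'toy_sflow_output' is a hypothetical name. */
static unsigned int
toy_sflow_output(unsigned int n_outputs, int ifindex)
{
    if (n_outputs == 0) {
        return 0x40000000 | 256;        /* Dropped for unknown reason. */
    } else if (n_outputs == 1 && ifindex) {
        return (unsigned int) ifindex;  /* Exactly one known port. */
    } else {
        return 0x80000000 | n_outputs;  /* Multiple (or unknown) ports. */
    }
}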
/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(ofproto->sflow);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(ofproto, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}
static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}
static void
compose_ipfix_cookie(union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
}
/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->ipfix || flow->in_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(ofproto, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}
/* SAMPLE action for sFlow must be first action in any given list of
 * actions.  At this point we do not have all information required to
 * build it, so try to build the sample action as complete as possible. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}
/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, eventually after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}
/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
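
/* Editorial sketch (not part of ofproto-dpif): the placeholder-then-patch
 * pattern above.  The sFlow cookie must be emitted before the VLAN and
 * output information it describes is known, so its offset is remembered at
 * composition time and the final values are written back into the serialized
 * buffer afterwards.  'toy_sflow_cookie', 'toy_patch_cookie', and the layout
 * are hypothetical; memcpy() comes from <string.h>. */
struct toy_sflow_cookie {
    unsigned short vlan_tci;
    unsigned int output;
};

static void
toy_patch_cookie(unsigned char *actions, size_t cookie_offset,
                 unsigned short vlan_tci, unsigned int output)
{
    struct toy_sflow_cookie cookie;

    cookie.vlan_tci = vlan_tci;
    cookie.output = output;

    /* Overwrite the placeholder emitted earlier at 'cookie_offset'. */
    memcpy(actions + cookie_offset, &cookie, sizeof cookie);
}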
static void
compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    ovs_be16 flow_vlan_tci;
    uint32_t flow_skb_mark;
    uint8_t flow_nw_tos;
    struct priority_to_dscp *pdscp;
    uint32_t out_port, odp_port;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);

    if (!ofport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofport_dpif *peer = ofport_get_peer(ofport);
        struct flow old_flow = ctx->xin->flow;
        const struct ofproto_dpif *peer_ofproto;
        enum slow_path_reason special;
        struct ofport_dpif *in_port;

        if (!peer) {
            xlate_report(ctx, "Nonexistent patch port peer");
            return;
        }

        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        if (peer_ofproto->backer != ctx->ofproto->backer) {
            xlate_report(ctx, "Patch port peer on a different datapath");
            return;
        }

        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
        ctx->xin->flow.in_port = peer->up.ofp_port;
        ctx->xin->flow.metadata = htonll(0);
        memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel);
        memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs);

        in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port);
        special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow = special;
        } else if (!in_port || may_receive(in_port, ctx)) {
            if (!in_port || stp_forward_in_state(in_port->stp_state)) {
                xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions.size = old_size;
            }
        }

        ctx->xin->flow = old_flow;
        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);
        }

        return;
    }

    flow_vlan_tci = ctx->xin->flow.vlan_tci;
    flow_skb_mark = ctx->xin->flow.skb_mark;
    flow_nw_tos = ctx->xin->flow.nw_tos;

    pdscp = get_priority(ofport, ctx->xin->flow.skb_priority);
    if (pdscp) {
        ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
        ctx->xin->flow.nw_tos |= pdscp->dscp;
    }

    if (ofport->tnl_port) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are. */
        struct flow_tnl flow_tnl = ctx->xin->flow.tunnel;
        odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow);
        if (odp_port == OVSP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
    } else {
        uint16_t vlandev_port;

        odp_port = ofport->odp_port;
        vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
                                              ctx->xin->flow.vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = odp_port;
        } else {
            out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
            ctx->xin->flow.vlan_tci = htons(0);
        }
        ctx->xin->flow.skb_mark &= ~IPSEC_MARK;
    }
    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);
    nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->xout->nf_output_iface = ofp_port;

 out:
    /* Restore flow */
    ctx->xin->flow.vlan_tci = flow_vlan_tci;
    ctx->xin->flow.skb_mark = flow_skb_mark;
    ctx->xin->flow.nw_tos = flow_nw_tos;
}
static void
compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}
static void
tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    uint8_t table_id = ctx->table_id;

    if (table_id > 0 && table_id < N_TABLES) {
        struct table_dpif *table = &ofproto->tables[table_id];
        if (table->other_table) {
            ctx->xout->tags |= (rule && rule->tag
                                ? rule->tag
                                : rule_calculate_tag(&ctx->xin->flow,
                                                     &table->other_table->mask,
                                                     table->basis));
        }
    }
}
/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
{
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx, rule);
    }
    if (rule == NULL && may_packet_in) {
        /* XXX
         * check if table configuration flags
         * OFPTC_TABLE_MISS_CONTROLLER, default.
         * OFPTC_TABLE_MISS_CONTINUE,
         * OFPTC_TABLE_MISS_DROP
         * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? */
        rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
    }
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
    }

    return rule;
}
static void
xlate_table_action(struct xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        uint16_t old_in_port = ctx->xin->flow.in_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port = in_port;
        rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow,
                                  &ctx->xout->wc, table_id);

        tag_the_flow(ctx, rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}
static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}
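
/* Illustrative sketch, not part of the original source: the defaulting rules
 * above mean that a resubmit whose in_port is OFPP_IN_PORT and whose table_id
 * is 255 re-enters the current table with the flow's own input port; naming
 * either field overrides just that field.  This helper is hypothetical and
 * only demonstrates the call. */
static void OVS_UNUSED
xlate_resubmit_current_table_example(struct xlate_ctx *ctx)
{
    struct ofpact_resubmit resubmit;

    memset(&resubmit, 0, sizeof resubmit);
    resubmit.in_port = OFPP_IN_PORT; /* Defaults to ctx->xin->flow.in_port. */
    resubmit.table_id = 255;         /* Defaults to ctx->table_id. */
    xlate_ofpact_resubmit(ctx, &resubmit);
}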
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->xin->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;
    struct flow key;

    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
    ctx->xout->slow = SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->xin->packet);

    key.skb_priority = 0;
    key.skb_mark = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->xin->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
}
static void
execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(eth_type));

    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
           sizeof ctx->xout->wc.masks.mpls_lse);
    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
           sizeof ctx->xout->wc.masks.mpls_depth);

    if (ctx->base_flow.mpls_depth) {
        ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
        ctx->xin->flow.mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2;
        ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40;
        ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        ctx->xin->flow.mpls_depth = 1;
    }
    ctx->xin->flow.dl_type = eth_type;
}
static void
execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
           sizeof ctx->xout->wc.masks.mpls_lse);
    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
           sizeof ctx->xout->wc.masks.mpls_depth);

    if (ctx->xin->flow.mpls_depth) {
        ctx->xin->flow.mpls_depth--;
        ctx->xin->flow.mpls_lse = htonl(0);
        if (!ctx->xin->flow.mpls_depth) {
            ctx->xin->flow.dl_type = eth_type;
        }
    }
}
static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) &&
        ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) {
        return false;
    }

    if (ctx->xin->flow.nw_ttl > 1) {
        ctx->xin->flow.nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}
static bool
execute_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
        return true;
    }

    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
    return false;
}
static bool
execute_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse);

    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}
static void
xlate_output_action(struct xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len, bool may_packet_in)
{
    uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, port, or->max_len, false);
    }
}
static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                   queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port;
    } else if (ofp_port == ctx->xin->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                queue_id, &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}
static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}
static void
xlate_learn_action(struct xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ctx->xout->has_learn = true;

    learn_mask(learn, &ctx->xout->wc);

    if (!ctx->xin->may_learn) {
        return;
    }

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}
/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
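
/* A minimal illustrative sketch (not part of the original source) of the
 * reduce_timeout() contract above: zero means "infinite" on either side, so
 * only a nonzero 'max' smaller than the current (or infinite) timeout has
 * any effect. */
static void OVS_UNUSED
reduce_timeout_example(void)
{
    uint16_t timeout = 0;          /* "Infinite". */

    reduce_timeout(60, &timeout);  /* timeout is now 60. */
    reduce_timeout(0, &timeout);   /* No change: a 'max' of 0 is infinite. */
    reduce_timeout(90, &timeout);  /* No change: 60 is already <= 90. */
    reduce_timeout(10, &timeout);  /* timeout is now 10. */
}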
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}
static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions,
                          &ctx->xin->flow,
                          probability, &cookie, sizeof cookie.flow_sample);
}
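
/* Illustrative note, not part of the original source: the 16-to-32-bit
 * scaling above is exact because (p << 16) | p == p * 0x10001, and
 * UINT16_MAX * 0x10001 == UINT32_MAX, so p/UINT16_MAX and the scaled
 * value/UINT32_MAX denote the same sampling fraction. */
static void OVS_UNUSED
sample_probability_scaling_example(void)
{
    uint16_t p16 = 0x8000;                       /* ~50% of UINT16_MAX. */
    uint32_t p32 = ((uint32_t) p16 << 16) | p16; /* 0x80008000. */

    ovs_assert(p32 == (uint32_t) p16 * 0x10001);
    ovs_assert((((uint32_t) UINT16_MAX << 16) | UINT16_MAX) == UINT32_MAX);
}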
static bool
may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
                                              eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}
static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " but is not ECN capable");
            return false;
        } else {
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
        }
    }

    return true;
}
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }

 do_xlate_actions_again:
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->xin->flow.vlan_tci |=
                (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                 | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->xin->flow.vlan_tci |=
                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
                      | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            ctx->xin->flow.vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            ctx->xin->flow.vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            if (is_ip_any(&ctx->xin->flow)) {
                ctx->xin->flow.tp_src =
                    htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            if (is_ip_any(&ctx->xin->flow)) {
                ctx->xin->flow.tp_dst =
                    htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            ctx->xin->flow.tunnel.tun_id =
                htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            memset(&ctx->xout->wc.masks.skb_priority, 0xff,
                   sizeof ctx->xout->wc.masks.skb_priority);
            ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow,
                                 &ctx->xout->wc);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow);
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
                                   &ctx->xout->wc, &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow,
                                  &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (execute_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (execute_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow,
                              &ctx->xout->wc);
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time. */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            ctx->xin->flow.metadata &= ~metadata->mask;
            ctx->xin->flow.metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_GOTO_TABLE: {
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            struct rule_dpif *rule;

            ovs_assert(ctx->table_id < ogt->table_id);

            ctx->table_id = ogt->table_id;

            /* Look up a flow from the new table. */
            rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow,
                                      &ctx->xout->wc, ctx->table_id);

            tag_the_flow(ctx, rule);

            rule = ctx_rule_hooks(ctx, rule, true);

            if (rule) {
                if (ctx->rule) {
                    ctx->rule->up.evictable = was_evictable;
                }
                ctx->rule = rule;
                was_evictable = rule->up.evictable;
                rule->up.evictable = false;

                /* Tail recursion removal. */
                ofpacts = rule->up.ofpacts;
                ofpacts_len = rule->up.ofpacts_len;
                goto do_xlate_actions_again;
            }
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}
static void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, struct rule_dpif *rule,
              uint8_t tcp_flags, const struct ofpbuf *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
}
static void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout) {
        ofpbuf_uninit(&xout->odp_actions);
    }
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at
 * 'ofpacts' into datapath actions in 'xout->odp_actions', using 'xin'. */
static void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct ofport_dpif *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;

    COVERAGE_INC(ofproto_dpif_xlate);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;

    ctx.ofproto = xin->ofproto;
    ctx.rule = xin->rule;

    ctx.base_flow = ctx.xin->flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;

    flow_wildcards_init_catchall(&ctx.xout->wc);
    memset(&ctx.xout->wc.masks.in_port, 0xff,
           sizeof ctx.xout->wc.masks.in_port);

    if (tnl_port_should_receive(&ctx.xin->flow)) {
        memset(&ctx.xout->wc.masks.tunnel, 0xff,
               sizeof ctx.xout->wc.masks.tunnel);
    }

    /* Disable most wildcarding for NetFlow. */
    if (xin->ofproto->netflow) {
        memset(&ctx.xout->wc.masks.dl_src, 0xff,
               sizeof ctx.xout->wc.masks.dl_src);
        memset(&ctx.xout->wc.masks.dl_dst, 0xff,
               sizeof ctx.xout->wc.masks.dl_dst);
        memset(&ctx.xout->wc.masks.dl_type, 0xff,
               sizeof ctx.xout->wc.masks.dl_type);
        memset(&ctx.xout->wc.masks.vlan_tci, 0xff,
               sizeof ctx.xout->wc.masks.vlan_tci);
        memset(&ctx.xout->wc.masks.nw_proto, 0xff,
               sizeof ctx.xout->wc.masks.nw_proto);
        memset(&ctx.xout->wc.masks.nw_src, 0xff,
               sizeof ctx.xout->wc.masks.nw_src);
        memset(&ctx.xout->wc.masks.nw_dst, 0xff,
               sizeof ctx.xout->wc.masks.nw_dst);
        memset(&ctx.xout->wc.masks.tp_src, 0xff,
               sizeof ctx.xout->wc.masks.tp_src);
        memset(&ctx.xout->wc.masks.tp_dst, 0xff,
               sizeof ctx.xout->wc.masks.tp_dst);
    }

    ctx.xout->tags = 0;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;

    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
                    sizeof ctx.xout->odp_actions_stub);
    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);

    ctx.recurse = 0;
    ctx.max_resubmit_trigger = false;
    ctx.orig_skb_priority = ctx.xin->flow.skb_priority;
    ctx.table_id = 0;
    ctx.exit = false;

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (xin->rule) {
        ofpacts = xin->rule->up.ofpacts;
        ofpacts_len = xin->rule->up.ofpacts_len;
    } else {
        NOT_REACHED();
    }

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = ctx.xin->flow;
    }

    if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx.ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0);
            ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port);
    special = process_special(ctx.ofproto, &ctx.xin->flow, in_port,
                              ctx.xin->packet);
    if (special) {
        ctx.xout->slow = special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        size_t sample_actions_len;
        uint32_t local_odp_port;

        if (ctx.xin->flow.in_port
            != vsp_realdev_to_vlandev(ctx.ofproto, ctx.xin->flow.in_port,
                                      ctx.xin->flow.vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ctx.xout->odp_actions.size;

        if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ctx.xout->odp_actions.size = sample_actions_len;
            }
        }

        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
        if (!connmgr_must_output_local(ctx.ofproto->up.connmgr,
                                       &ctx.xin->flow, local_odp_port,
                                       ctx.xout->odp_actions.data,
                                       ctx.xout->odp_actions.size)) {
            compose_output_action(&ctx, OFPP_LOCAL);
        }
        if (ctx.ofproto->has_mirrors) {
            add_mirror_actions(&ctx, &orig_flow);
        }
        fix_sflow_action(&ctx);
    }

    ofpbuf_uninit(&ctx.stack);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    memset(&ctx.xout->wc.masks.metadata, 0,
           sizeof ctx.xout->wc.masks.metadata);
    memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at
 * 'ofpacts' into datapath actions and discards the datapath actions. */
static void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}
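
/* Illustrative sketch, not part of the original source: the typical
 * translate-then-discard calling sequence for the xlate API above, much as
 * packet_out() later in this file does.  The caller is assumed to have set
 * up 'ofproto', 'flow', 'packet', and 'stats'. */
static void OVS_UNUSED
xlate_usage_example(struct ofproto_dpif *ofproto, const struct flow *flow,
                    const struct ofpbuf *packet,
                    const struct dpif_flow_stats *stats)
{
    struct xlate_out xout;
    struct xlate_in xin;

    xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, packet);
    xin.resubmit_stats = stats;
    xlate_actions(&xin, &xout);  /* Fills xout.odp_actions, xout.wc, etc. */
    /* ... use xout.odp_actions.data / xout.odp_actions.size here ... */
    xlate_out_uninit(&xout);     /* Frees any heap-allocated action buffer. */
}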
static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (ctx->xin->report_hook) {
        ctx->xin->report_hook(ctx, s);
    }
}
static void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->tags = src->tags;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
               src->odp_actions.size);
}
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received
 * as part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
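
/* Illustrative sketch, not part of the original source: how the two mappings
 * above compose for a hypothetical native-untagged bundle whose native VLAN
 * is 10.  An untagged packet (vid 0) is assigned VLAN 10 on ingress and is
 * emitted untagged again on egress from the same bundle; a packet tagged with
 * VLAN 20 keeps its tag in both directions. */
static void OVS_UNUSED
vlan_mapping_example(const struct ofbundle *native_untagged_bundle_10)
{
    /* Assumes vlan_mode == PORT_VLAN_NATIVE_UNTAGGED and vlan == 10. */
    ovs_assert(input_vid_to_vlan(native_untagged_bundle_10, 0) == 10);
    ovs_assert(input_vid_to_vlan(native_untagged_bundle_10, 20) == 20);
    ovs_assert(output_vlan_to_vid(native_untagged_bundle_10, 10) == 0);
    ovs_assert(output_vlan_to_vid(native_untagged_bundle_10, 20) == 20);
}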
static void
output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
                                        &ctx->xout->wc, vid,
                                        &ctx->xout->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->xin->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->xin->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->xin->flow.vlan_tci = old_tci;
}
static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}
/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}
static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->xin->packet != NULL, NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */
    NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
                      ctx->xout->odp_actions.size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (m->vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->xout->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (vlan != m->out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}
static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    if (!mirrors) {
        return;
    }

    for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}
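
/* Illustrative sketch, not part of the original source: the rightmost-bit
 * iteration idiom used above.  Each pass handles the lowest set bit of the
 * mask and then clears it with zero_rightmost_1bit(), so a mask of 0xa
 * (binary 1010) visits mirror indexes 1 and 3 (mirror_mask_ffs() is
 * 1-based). */
static void OVS_UNUSED
mirror_mask_iteration_example(void)
{
    mirror_mask_t mirrors = MIRROR_MASK_C(0xa); /* Mirrors 1 and 3. */

    for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
        int idx = mirror_mask_ffs(mirrors) - 1; /* 1 on pass one, 3 next. */

        /* An ofmirror would be looked up at ofproto->mirrors[idx]. */
        (void) idx;
    }
}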
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}
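
/* Illustrative sketch, not part of the original source: a gratuitous ARP
 * request as classified above is a broadcast ARP whose sender and target
 * protocol addresses match (flow->nw_src == flow->nw_dst for ARP flows).
 * The address below is hypothetical. */
static void OVS_UNUSED
is_gratuitous_arp_example(struct flow_wildcards *wc)
{
    struct flow flow;

    memset(&flow, 0, sizeof flow);
    flow.dl_type = htons(ETH_TYPE_ARP);
    memset(flow.dl_dst, 0xff, ETH_ADDR_LEN);       /* Broadcast. */
    flow.nw_proto = ARP_OP_REQUEST;
    flow.nw_src = flow.nw_dst = htonl(0xc0a80101); /* 192.168.1.1 claims itself. */

    ovs_assert(is_gratuitous_arp(&flow, wc));
}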
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->backer->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
                    bool warn, struct ofport_dpif **in_ofportp)
{
    struct ofport_dpif *ofport;

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (in_ofportp) {
        *in_ofportp = ofport;
    }
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }
    return NULL;
}
/* Determines whether packets in 'flow' within 'ofproto' should be forwarded
 * or dropped.  Returns true if they may be forwarded, false if they should
 * be dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case. */
static bool
is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
              uint16_t vlan)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    struct flow *flow = &ctx->xin->flow;
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, &ctx->xout->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            break;
        }
    }

    return true;
}
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    /* Check the dl_type, since we may check for gratuitous ARP. */
    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);

    memset(&ctx->xout->wc.masks.dl_src, 0xff,
           sizeof ctx->xout->wc.masks.dl_src);
    memset(&ctx->xout->wc.masks.dl_dst, 0xff,
           sizeof ctx->xout->wc.masks.dl_dst);
    memset(&ctx->xout->wc.masks.vlan_tci, 0xff,
           sizeof ctx->xout->wc.masks.vlan_tci);

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
                                    ctx->xin->packet != NULL, &in_port);
    if (!in_bundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc,
                              vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan,
                              &ctx->xout->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac->port.p, vlan);
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct ofbundle *bundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
}
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
 * into an OpenFlow table with the given 'basis'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
                   uint32_t basis)
{
    if (minimask_is_catchall(mask)) {
        return 0;
    } else {
        uint32_t hash = flow_hash_in_minimask(flow, mask, basis);
        return tag_create_deterministic(hash);
    }
}
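
/* Illustrative sketch, not part of the original source: the tagging scheme
 * described above.  A facet that resubmits into a "uniform" table records
 * rule_calculate_tag() of its flow under that table's mask (tag_the_flow());
 * rule_invalidate() later computes the same deterministic tag for a changed
 * rule, so tag_set_add() wakes up exactly the facets whose lookups could be
 * affected. */
static void OVS_UNUSED
rule_tagging_example(const struct flow *flow, const struct minimask *mask,
                     uint32_t basis)
{
    tag_type lookup_tag = rule_calculate_tag(flow, mask, basis);
    tag_type change_tag = rule_calculate_tag(flow, mask, basis);

    /* The same flow, mask, and basis always yield the same tag. */
    ovs_assert(lookup_tag == change_tag);
}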
/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct oftable *oftable = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&oftable->cls.tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->backer->need_revalidate = REV_FLOW_TABLE;
    }
}
/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->backer->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
        } else {
            ofproto->backer->need_revalidate = REV_FLOW_TABLE;
        }
    }
}
static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        return true;
    } else {
        return false;
    }
}
static enum ofperr
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct odputil_keybuf keybuf;
    struct dpif_flow_stats stats;
    struct xlate_out xout;
    struct xlate_in xin;
    struct ofpbuf key;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&key, flow,
                           ofp_port_to_odp_port(ofproto, flow->in_port));

    dpif_flow_stats_extract(flow, packet, time_msec(), &stats);

    xlate_in_init(&xin, ofproto, flow, NULL, stats.tcp_flags, packet);
    xin.resubmit_stats = &stats;
    xin.ofpacts_len = ofpacts_len;
    xin.ofpacts = ofpacts;

    xlate_actions(&xin, &xout);
    dpif_execute(ofproto->backer->dpif, key.data, key.size,
                 xout.odp_actions.data, xout.odp_actions.size, packet);
    xlate_out_uninit(&xout);

    return 0;
}
static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}
static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
static void
send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
{
    if (!facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct subfacet *subfacet;
        struct ofexpired expired;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (subfacet->path == SF_FAST_PATH) {
                struct dpif_flow_stats stats;

                subfacet_install(subfacet, &facet->xout.odp_actions,
                                 &stats);
                subfacet_update_stats(subfacet, &stats);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}
static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct cls_cursor cursor;
    struct facet *facet;

    cls_cursor_init(&cursor, &ofproto->facets, NULL);
    CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
        send_active_timeout(ofproto, facet);
    }
}
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            mac_learning_flush(ofproto->ml,
                               &ofproto->backer->revalidate_set);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}
static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
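
/* Illustrative note, not part of the original source: the command above
 * prints one line per learned MAC entry in the format built just above,
 * e.g. (values hypothetical):
 *
 *      port  VLAN  MAC                Age
 *         1     0  50:54:00:00:00:05    2
 *         2    10  50:54:00:00:00:07   13
 */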
struct trace_ctx {
    struct xlate_out xout;
    struct xlate_in xin;
    struct flow flow;
    struct ds *result;
};
static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
    ds_put_char(result, '\n');
}
static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->xin.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->xin.flow);
        trace->flow = trace->xin.flow;
    }
    ds_put_char(result, '\n');
}
static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}
static void
trace_format_odp(struct ds *result, int level, const char *title,
                 struct trace_ctx *trace)
{
    struct ofpbuf *odp_actions = &trace->xout.odp_actions;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    format_odp_actions(result, odp_actions->data, odp_actions->size);
    ds_put_char(result, '\n');
}
static void
trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}
static void
trace_report(struct xlate_ctx *ctx, const char *s)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
    struct ds *result = trace->result;

    ds_put_char_multiple(result, '\t', ctx->recurse);
    ds_put_cstr(result, s);
    ds_put_char(result, '\n');
}

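/* The "ofproto/trace" command implemented below accepts either a bridge name
 * plus an OpenFlow flow, or a datapath name plus an ODP flow key, optionally
 * followed by "-generate" or a hex packet.  A leading "ovs-" prefix on the
 * datapath name is stripped before lookup.  Hypothetical invocations (the
 * bridge name "br0" and the flows shown are illustrative only):
 *
 *     ovs-appctl ofproto/trace br0 in_port=1 -generate
 *     ovs-appctl ofproto/trace ovs-system "in_port(1),eth_type(0x0806)"
 */
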
static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    const struct dpif_backer *backer;
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    backer = NULL;
    ds_init(&result);
    ofpbuf_init(&odp_key, 0);

    /* Handle "-generate" or a hex string as the last argument. */
    if (!strcmp(argv[argc - 1], "-generate")) {
        packet = ofpbuf_new(0);
        argc--;
    } else {
        const char *error = eth_from_hex(argv[argc - 1], &packet);
        if (!error) {
            argc--;
        } else if (argc == 4) {
            /* The 3-argument form must end in "-generate" or a hex string. */
            unixctl_command_reply_error(conn, error);
            goto exit;
        }
    }

    /* Parse the flow and determine whether a datapath or bridge is specified.
     * If odp_flow_key_from_string() returns 0, the flow is an odp_flow.  If
     * parse_ofp_exact_flow() returns 0, the flow is a br_flow. */
    if (!odp_flow_key_from_string(argv[argc - 1], NULL, &odp_key)) {
        /* If the odp_flow is the second argument, the datapath name is the
         * first argument. */
        if (argc == 3) {
            const char *dp_type;
            if (!strncmp(argv[1], "ovs-", 4)) {
                dp_type = argv[1] + 4;
            } else {
                dp_type = argv[1];
            }
            backer = shash_find_data(&all_dpif_backers, dp_type);
            if (!backer) {
                unixctl_command_reply_error(conn, "Cannot find datapath "
                                            "of this name");
                goto exit;
            }
        } else {
            /* No datapath name specified, so there should be only one
             * datapath. */
            struct shash_node *node;
            if (shash_count(&all_dpif_backers) != 1) {
                unixctl_command_reply_error(conn, "Must specify datapath "
                                            "name, there is more than one "
                                            "type of datapath");
                goto exit;
            }
            node = shash_first(&all_dpif_backers);
            backer = node->data;
        }

        /* Extract the ofproto_dpif object from the datapath flow via
         * ofproto_receive(). */
        if (ofproto_receive(backer, NULL, odp_key.data,
                            odp_key.size, &flow, NULL, &ofproto, NULL)) {
            unixctl_command_reply_error(conn, "Invalid datapath flow");
            goto exit;
        }
        ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
    } else if (!parse_ofp_exact_flow(&flow, argv[argc - 1])) {
        if (argc != 3) {
            unixctl_command_reply_error(conn, "Must specify bridge name");
            goto exit;
        }

        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown bridge name");
            goto exit;
        }
    } else {
        unixctl_command_reply_error(conn, "Bad flow syntax");
        goto exit;
    }

    /* Generate a packet, if requested. */
    if (packet) {
        if (!packet->size) {
            flow_compose(packet, &flow);
        } else {
            ds_put_cstr(&result, "Packet: ");
            s = ofp_packet_to_string(packet->data, packet->size);
            ds_put_cstr(&result, s);
            free(s);

            /* Use the metadata from the flow and the packet argument to
             * reconstruct the flow. */
            flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL,
                         flow.in_port, &flow);
        }
    }

    ofproto_trace(ofproto, &flow, packet, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}

static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet, struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow, NULL);

    trace_format_rule(ds, 0, 0, rule);
    if (rule == ofproto->miss_rule) {
        ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
    } else if (rule == ofproto->no_packet_in_rule) {
        ds_put_cstr(ds, "\nNo match, packets dropped because "
                    "OFPPC_NO_PACKET_IN is set on in_port.\n");
    } else if (rule == ofproto->drop_frags_rule) {
        ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
                    "and the fragment handling mode is \"drop\".\n");
    }

    if (rule) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;
        struct trace_ctx trace;
        struct match match;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        xlate_in_init(&trace.xin, ofproto, flow, rule, tcp_flags, packet);
        trace.xin.resubmit_hook = trace_resubmit;
        trace.xin.report_hook = trace_report;

        xlate_actions(&trace.xin, &trace.xout);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);

        match_init(&match, flow, &trace.xout.wc);
        ds_put_cstr(ds, "Relevant fields: ");
        match_format(&match, ds, OFP_DEFAULT_PRIORITY);
        ds_put_char(ds, '\n');

        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, trace.xout.odp_actions.data,
                           trace.xout.odp_actions.size);

        if (trace.xout.slow) {
            ds_put_cstr(ds, "\nThis flow is handled by the userspace "
                        "slow path because it:");
            switch (trace.xout.slow) {
            case SLOW_CFM:
                ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
                break;
            case SLOW_LACP:
                ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
                break;
            case SLOW_STP:
                ds_put_cstr(ds, "\n\t- Consists of STP packets.");
                break;
            case SLOW_BFD:
                ds_put_cstr(ds, "\n\t- Consists of BFD packets.");
                break;
            case SLOW_CONTROLLER:
                ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
                            "to the OpenFlow controller.");
                break;
            default:
                break;
            }
        }

        xlate_out_uninit(&trace.xout);
    }
}

static void
ofproto_dpif_clog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}

/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct cls_cursor cursor;
    struct facet *facet;
    int errors;

    errors = 0;
    cls_cursor_init(&cursor, &ofproto->facets, NULL);
    CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }
    if (errors) {
        ofproto->backer->need_revalidate = REV_INCONSISTENCY;
    }

    if (errors) {
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}

static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

/* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
 * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
 * to destroy 'ofproto_shash' and free the returned value. */
static const struct shash_node **
get_ofprotos(struct shash *ofproto_shash)
{
    const struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
        shash_add_nocopy(ofproto_shash, name, ofproto);
    }

    return shash_sort(ofproto_shash);
}

static void
ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct shash ofproto_shash;
    const struct shash_node **sorted_ofprotos;
    size_t i;

    shash_init(&ofproto_shash);
    sorted_ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        const struct shash_node *node = sorted_ofprotos[i];
        ds_put_format(&ds, "%s\n", node->name);
    }

    shash_destroy(&ofproto_shash);
    free(sorted_ofprotos);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
show_dp_rates(struct ds *ds, const char *heading,
              const struct avg_subfacet_rates *rates)
{
    ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
                  heading, rates->add_rate, rates->del_rate);
}

static void
dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
{
    const struct shash_node **ofprotos;
    struct ofproto_dpif *ofproto;
    struct shash ofproto_shash;
    uint64_t n_hit, n_missed;
    long long int minutes;
    size_t i;

    n_hit = n_missed = 0;
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            n_missed += ofproto->n_missed;
            n_hit += ofproto->n_hit;
        }
    }

    ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
                  dpif_name(backer->dpif), n_hit, n_missed);
    ds_put_format(ds, "\tflows: cur: %zu, avg: %u, max: %u,"
                  " life span: %lldms\n", hmap_count(&backer->subfacets),
                  backer->avg_n_subfacet, backer->max_n_subfacet,
                  backer->avg_subfacet_life);

    minutes = (time_msec() - backer->created) / (1000 * 60);
    if (minutes >= 60) {
        show_dp_rates(ds, "\thourly avg:", &backer->hourly);
    }
    if (minutes >= 60 * 24) {
        show_dp_rates(ds, "\tdaily avg:", &backer->daily);
    }
    show_dp_rates(ds, "\toverall avg:", &backer->lifetime);

    shash_init(&ofproto_shash);
    ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        struct ofproto_dpif *ofproto = ofprotos[i]->data;
        const struct shash_node **ports;
        size_t j;

        if (ofproto->backer != backer) {
            continue;
        }

        ds_put_format(ds, "\t%s: hit:%"PRIu64" missed:%"PRIu64"\n",
                      ofproto->up.name, ofproto->n_hit, ofproto->n_missed);

        ports = shash_sort(&ofproto->up.port_by_name);
        for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
            const struct shash_node *node = ports[j];
            struct ofport *ofport = node->data;
            struct smap config;
            uint32_t odp_port;

            ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
                          ofport->ofp_port);

            odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
            if (odp_port != OVSP_NONE) {
                ds_put_format(ds, "%"PRIu32":", odp_port);
            } else {
                ds_put_cstr(ds, "none:");
            }

            ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));

            smap_init(&config);
            if (!netdev_get_config(ofport->netdev, &config)) {
                const struct smap_node **nodes;
                size_t i;

                nodes = smap_sort(&config);
                for (i = 0; i < smap_count(&config); i++) {
                    const struct smap_node *node = nodes[i];
                    ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
                                  node->key, node->value);
                }
                free(nodes);
            }
            smap_destroy(&config);

            ds_put_char(ds, ')');
            ds_put_char(ds, '\n');
        }
        free(ports);
    }
    shash_destroy(&ofproto_shash);
    free(ofprotos);
}

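/* Illustrative sketch (not captured output) of the "dpif/show" report that
 * dpif_show_backer() assembles from the format strings above; all names and
 * counters are hypothetical:
 *
 *     system@ovs-system: hit:42 missed:7
 *         flows: cur: 3, avg: 2, max: 8, life span: 1250ms
 *         overall avg: add rate: 0.500/min, del rate: 0.400/min
 *         br0: hit:42 missed:7
 *             br0 65534/1: (internal)
 *             eth0 1/2: (system)
 */
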
static void
ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                          const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct shash_node **backers;
    size_t i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        dpif_show_backer(backers[i]->data, &ds);
    }
    free(backers);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

/* Dump the megaflow (facet) cache.  This is useful to check the
 * correctness of flow wildcarding, since the same mechanism is used for
 * both xlate caching and kernel wildcarding.
 *
 * It's important to note that in the output the flow description uses
 * OpenFlow (OFP) ports, but the actions use datapath (ODP) ports.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn,
                                    int argc OVS_UNUSED, const char *argv[],
                                    void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    long long int now = time_msec();
    struct cls_cursor cursor;
    struct facet *facet;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    cls_cursor_init(&cursor, &ofproto->facets, NULL);
    CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
        cls_rule_format(&facet->cr, &ds);
        ds_put_cstr(&ds, ", ");
        ds_put_format(&ds, "n_subfacets:%zu, ", list_size(&facet->subfacets));
        ds_put_format(&ds, "used:%.3fs, ", (now - facet->used) / 1000.0);
        ds_put_cstr(&ds, "Datapath actions: ");
        format_odp_actions(&ds, facet->xout.odp_actions.data,
                           facet->xout.odp_actions.size);
        ds_put_cstr(&ds, "\n");
    }

    ds_chomp(&ds, '\n');
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED, const char *argv[],
                                void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    struct subfacet *subfacet;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    update_stats(ofproto->backer);

    HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->backer->subfacets) {
        struct facet *facet = subfacet->facet;

        if (ofproto_dpif_cast(facet->rule->up.ofproto) != ofproto) {
            continue;
        }

        odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);

        ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
                      subfacet->dp_packet_count, subfacet->dp_byte_count);
        if (subfacet->used) {
            ds_put_format(&ds, "%.3fs",
                          (time_msec() - subfacet->used) / 1000.0);
        } else {
            ds_put_format(&ds, "never");
        }
        if (subfacet->facet->tcp_flags) {
            ds_put_cstr(&ds, ", flags:");
            packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
        }

        ds_put_cstr(&ds, ", actions:");
        if (facet->xout.slow) {
            uint64_t slow_path_stub[128 / 8];
            const struct nlattr *actions;
            size_t actions_len;

            compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
                              slow_path_stub, sizeof slow_path_stub,
                              &actions, &actions_len);
            format_odp_actions(&ds, actions, actions_len);
        } else {
            format_odp_actions(&ds, facet->xout.odp_actions.data,
                               facet->xout.odp_actions.size);
        }
        ds_put_char(&ds, '\n');
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
                               int argc OVS_UNUSED, const char *argv[],
                               void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    flush(&ofproto->up);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "[dp_name]|bridge odp_flow|br_flow [-generate|packet]",
        1, 3, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
                             NULL);
    unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_dump_flows, NULL);
    unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_del_flows, NULL);
    unixctl_command_register("dpif/dump-megaflows", "bridge", 1, 1,
                             ofproto_unixctl_dpif_dump_megaflows, NULL);
}

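/* Each command registered above is reachable through ovs-appctl, for
 * example (the bridge name "br0" is illustrative):
 *
 *     ovs-appctl fdb/show br0
 *     ovs-appctl dpif/show
 *     ovs-appctl dpif/dump-flows br0
 *     ovs-appctl dpif/dump-megaflows br0
 */
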
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */

static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}

static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}

/* Returns the OFP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
 * 'struct ofport_dpif'.  For example, given 'realdev_ofp_port' of eth0 and
 * 'vlan_tci' 9, it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
 * function just returns its 'realdev_ofp_port' argument. */
static uint16_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint16_t realdev_ofp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return vsp->vlandev_ofp_port;
            }
        }
    }
    return realdev_ofp_port;
}

static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}

/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}

/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}

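/* Worked example of the TCI computed above, assuming the usual 802.1Q
 * constants (VLAN_VID_MASK == 0x0fff, VLAN_CFI == 0x1000): for a packet
 * received on eth0.9, 'vid' is 9, so 'flow->vlan_tci' becomes
 * htons((9 & 0x0fff) | 0x1000) == htons(0x1009), i.e. VID 9 with the CFI bit
 * set to mark the tag as present. */
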
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}

static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}

static uint32_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : OVSP_NONE;
}

static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
{
    struct ofport_dpif *port;

    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
                             hash_int(odp_port, 0),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            return port;
        }
    }

    return NULL;
}

static uint16_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}

/* Compute exponentially weighted moving average, adding 'new' as the newest,
 * most heavily weighted element.  'base' designates the rate of decay: after
 * 'base' further updates, 'new''s weight in the EWMA decays to about 1/e
 * (about 0.37). */
static void
exp_mavg(double *avg, int base, double new)
{
    *avg = (*avg * (base - 1) + new) / base;
}

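/* Worked example for exp_mavg(), using hypothetical values: with 'base' of
 * 60, each update scales the old average by 59/60, so after 60 further
 * updates a sample's weight is (59/60)^60 ~= 0.37 ~= 1/e, matching the
 * comment above.
 *
 *     double rate = 0.0;
 *     exp_mavg(&rate, 60, 120.0);  // rate == (0.0 * 59 + 120.0) / 60 == 2.0
 *     exp_mavg(&rate, 60, 120.0);  // rate == (2.0 * 59 + 120.0) / 60 ~= 3.97
 */
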
static void
update_moving_averages(struct dpif_backer *backer)
{
    const int min_ms = 60 * 1000; /* milliseconds in one minute. */
    long long int minutes = (time_msec() - backer->created) / min_ms;

    if (minutes > 0) {
        backer->lifetime.add_rate = (double) backer->total_subfacet_add_count
            / minutes;
        backer->lifetime.del_rate = (double) backer->total_subfacet_del_count
            / minutes;
    } else {
        backer->lifetime.add_rate = 0.0;
        backer->lifetime.del_rate = 0.0;
    }

    /* Update hourly averages on the minute boundaries. */
    if (time_msec() - backer->last_minute >= min_ms) {
        exp_mavg(&backer->hourly.add_rate, 60, backer->subfacet_add_count);
        exp_mavg(&backer->hourly.del_rate, 60, backer->subfacet_del_count);

        /* Update daily averages on the hour boundaries. */
        if ((backer->last_minute - backer->created) / min_ms % 60 == 59) {
            exp_mavg(&backer->daily.add_rate, 24, backer->hourly.add_rate);
            exp_mavg(&backer->daily.del_rate, 24, backer->hourly.del_rate);
        }

        backer->total_subfacet_add_count += backer->subfacet_add_count;
        backer->total_subfacet_del_count += backer->subfacet_del_count;
        backer->subfacet_add_count = 0;
        backer->subfacet_del_count = 0;
        backer->last_minute += min_ms;
    }
}

const struct ofproto_class ofproto_dpif_class = {
    /* ... */
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    /* ... */
    rule_modify_actions,
    /* ... */
    get_stp_port_status,
    /* ... */
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,