/*
 * Copyright (c) 2009, 2010, 2011 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "multipath.h"
#include "ofp-print.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_no_packet_in);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_invalidated);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 32
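/* A minimal illustrative sketch (an assumption, not code from this file) of
 * how this limit might be enforced when translating a resubmit action,
 * assuming the translation context's 'recurse' counter declared in struct
 * action_xlate_ctx below:
 *
 *     if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
 *         ctx->recurse++;
 *         ...translate the flow against the target table...
 *         ctx->recurse--;
 *     } else {
 *         VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
 *                     MAX_RESUBMIT_RECURSION);
 *     }
 */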
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255);
    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packet or bytes that can be obtained from any facet's
     *     packet_count or byte_count member or that can be obtained from the
     *     datapath by, e.g., dpif_flow_get() for any facet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *, uint8_t table);
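/* A minimal illustrative sketch (an assumption, not code from this file) of
 * how a rule's total statistics could be reported given the accounting rules
 * described above: the rule's own counters exclude anything covered by a
 * live facet, so a report adds each facet's counters back in:
 *
 *     uint64_t packets = rule->packet_count;
 *     uint64_t bytes = rule->byte_count;
 *     struct facet *facet;
 *
 *     LIST_FOR_EACH (facet, list_node, &rule->facets) {
 *         packets += facet->packet_count;
 *         bytes += facet->byte_count;
 *     }
 */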
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto;   /* Owning ofproto. */
    size_t idx;                     /* In ofproto's "mirrors" array. */
    void *aux;                      /* Key supplied by ofproto's client. */
    char *name;                     /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;              /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;              /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;           /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (mutually exclusive). */
    struct ofbundle *out;           /* Output port or NULL. */
    int out_vlan;                   /* Output VLAN or -1. */
};
static void mirror_destroy(struct ofmirror *);
/* A group of one or more OpenFlow ports. */
#define OFBUNDLE_FLOOD ((struct ofbundle *) 1)
struct ofbundle {
    struct ofproto_dpif *ofproto;   /* Owning ofproto. */
    struct hmap_node hmap_node;     /* In struct ofproto's "bundles" hmap. */
    void *aux;                      /* Key supplied by ofproto's client. */
    char *name;                     /* Identifier for log messages. */

    struct list ports;              /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode;  /* VLAN mode */
    int vlan;                       /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;          /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                     * NULL if all VLANs are trunked. */
    struct lacp *lacp;              /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;              /* Nonnull iff more than one port. */

    bool floodable;                 /* True if no port has OFPPC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;      /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;      /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;       /* Mirrors that output to this bundle. */
};
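/* A minimal illustrative sketch (an assumption, not code from this file) of
 * how the per-bundle mirror masks above relate to a mirror's slot in the
 * owning ofproto's "mirrors" array: each mirror contributes one bit, derived
 * from its index, that is set or cleared in the bundles it selects:
 *
 *     mirror_mask_t mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
 *
 *     bundle->src_mirrors |= mirror_bit;     (mirror packets received here)
 *     bundle->dst_mirrors &= ~mirror_bit;    (stop mirroring packets sent here)
 */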
static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute?  We
     * want to execute them if we are actually processing a packet, or if we
     * are accounting for packets that the datapath has processed, but not if
     * we are just revalidating. */

    /* If nonnull, called just before executing a resubmit action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *);

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    uint32_t priority;          /* Current flow priority. 0 if none. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t base_priority;     /* Priority at the last commit. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint16_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
                                    const union ofp_action *in, size_t n_in);
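/* A minimal illustrative sketch (an assumption, not code from this file) of
 * the intended calling pattern for the two declarations above, assuming the
 * caller holds a rule whose base 'struct rule' carries 'actions' and
 * 'n_actions', plus an optional packet:
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf *odp_actions;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, packet);
 *     odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
 *
 *     ...inspect ctx.tags, ctx.may_set_up_flow, ctx.has_learn,
 *        ctx.nf_output_iface, and the datapath actions in 'odp_actions'...
 *
 *     ofpbuf_delete(odp_actions);
 */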
/* An exact-match instantiation of an OpenFlow flow. */
struct facet {
    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when its statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    uint64_t dp_packet_count;    /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;      /* Last known byte count in the datapath. */

    uint64_t rs_packet_count;    /* Packets pushed to resubmit children. */
    uint64_t rs_byte_count;      /* Bytes pushed to resubmit children. */
    long long int rs_used;       /* Used time pushed to resubmit children. */

    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */

    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */
    struct flow flow;            /* Exact-match flow. */
    bool installed;              /* Installed in datapath? */
    bool may_install;            /* True ordinarily; false if actions must
                                  * be reassessed for every packet. */
    bool has_learn;              /* Actions include NXAST_LEARN? */
    bool has_normal;             /* Actions output to OFPP_NORMAL? */
    size_t actions_len;          /* Number of bytes in actions[]. */
    struct nlattr *actions;      /* Datapath actions. */
    tag_type tags;               /* Tags. */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
};
static struct facet *facet_create(struct rule_dpif *, const struct flow *);
static void facet_remove(struct ofproto_dpif *, struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct ofproto_dpif *, struct facet *);

static bool execute_controller_action(struct ofproto_dpif *,
                                      const struct flow *,
                                      const struct nlattr *odp_actions,
                                      size_t actions_len,
                                      struct ofpbuf *packet);
static void facet_execute(struct ofproto_dpif *, struct facet *,
                          struct ofpbuf *packet);

static int facet_put__(struct ofproto_dpif *, struct facet *,
                       const struct nlattr *actions, size_t actions_len,
                       struct dpif_flow_stats *);
static void facet_install(struct ofproto_dpif *, struct facet *,
static void facet_uninstall(struct ofproto_dpif *, struct facet *);
static void facet_flush_stats(struct ofproto_dpif *, struct facet *);

static void facet_make_actions(struct ofproto_dpif *, struct facet *,
                               const struct ofpbuf *packet);
static void facet_update_time(struct ofproto_dpif *, struct facet *,
static void facet_update_stats(struct ofproto_dpif *, struct facet *,
                               const struct dpif_flow_stats *);
static void facet_reset_counters(struct facet *);
static void facet_reset_dp_stats(struct facet *, struct dpif_flow_stats *);
static void facet_push_stats(struct facet *);
static void facet_account(struct ofproto_dpif *, struct facet *);

static bool facet_is_controller_flow(struct facet *);

static void flow_push_stats(const struct rule_dpif *,
                            struct flow *, uint64_t packets, uint64_t bytes,

static uint32_t rule_calculate_tag(const struct flow *,
                                   const struct flow_wildcards *,
static void rule_invalidate(const struct rule_dpif *);
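/* A minimal illustrative sketch (an assumption, not code from this file) of
 * the statistics flow implied by the facet counters above: deltas learned
 * from the datapath are folded into packet_count/byte_count, and anything
 * not yet pushed onward (tracked by the rs_* members) is forwarded to the
 * rules reached via resubmit, assuming flow_push_stats() takes a final
 * 'used' timestamp argument:
 *
 *     uint64_t new_packets = facet->packet_count - facet->rs_packet_count;
 *     uint64_t new_bytes = facet->byte_count - facet->rs_byte_count;
 *
 *     if (new_packets || new_bytes || facet->used > facet->rs_used) {
 *         facet->rs_packet_count = facet->packet_count;
 *         facet->rs_byte_count = facet->byte_count;
 *         facet->rs_used = facet->used;
 *         flow_push_stats(facet->rule, &facet->flow,
 *                         new_packets, new_bytes, facet->used);
 *     }
 */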
struct ofport_dpif {
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */

    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;
};
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}
static void port_run(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};
/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};
struct ofproto_dpif {
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_bonded_bundles;

    struct timer next_expiration;

    struct table_dpif tables[N_TABLES];
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */

    long long int stp_last_tick;
};
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */

static void ofproto_dpif_unixctl_init(void);
static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
                                        uint32_t odp_port);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
static bool is_admissible(struct ofproto_dpif *, const struct flow *,
                          bool have_packet, tag_type *, int *vlanp,
                          struct ofbundle **in_bundlep);
#define FLOW_MISS_MAX_BATCH 50

static void handle_upcall(struct ofproto_dpif *, struct dpif_upcall *);
static void handle_miss_upcalls(struct ofproto_dpif *,
                                struct dpif_upcall *, size_t n);
/* Flow expiration. */
static int expire(struct ofproto_dpif *);

static int send_packet(struct ofproto_dpif *, uint32_t odp_port,
                       const struct ofpbuf *packet);

compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
                     const struct flow *, uint32_t odp_port);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
435 /* Factory functions. */
438 enumerate_types(struct sset
*types
)
440 dp_enumerate_types(types
);
444 enumerate_names(const char *type
, struct sset
*names
)
446 return dp_enumerate_names(type
, names
);
450 del(const char *type
, const char *name
)
455 error
= dpif_open(name
, type
, &dpif
);
457 error
= dpif_delete(dpif
);
463 /* Basic life-cycle. */
465 static struct ofproto
*
468 struct ofproto_dpif
*ofproto
= xmalloc(sizeof *ofproto
);
473 dealloc(struct ofproto
*ofproto_
)
475 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
480 construct(struct ofproto
*ofproto_
, int *n_tablesp
)
482 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
483 const char *name
= ofproto
->up
.name
;
487 error
= dpif_create_and_open(name
, ofproto
->up
.type
, &ofproto
->dpif
);
489 VLOG_ERR("failed to open datapath %s: %s", name
, strerror(error
));
493 ofproto
->max_ports
= dpif_get_max_ports(ofproto
->dpif
);
494 ofproto
->n_matches
= 0;
496 dpif_flow_flush(ofproto
->dpif
);
497 dpif_recv_purge(ofproto
->dpif
);
499 error
= dpif_recv_set_mask(ofproto
->dpif
,
500 ((1u << DPIF_UC_MISS
) |
501 (1u << DPIF_UC_ACTION
)));
503 VLOG_ERR("failed to listen on datapath %s: %s", name
, strerror(error
));
504 dpif_close(ofproto
->dpif
);
508 ofproto
->netflow
= NULL
;
509 ofproto
->sflow
= NULL
;
511 hmap_init(&ofproto
->bundles
);
512 ofproto
->ml
= mac_learning_create();
513 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
514 ofproto
->mirrors
[i
] = NULL
;
516 ofproto
->has_bonded_bundles
= false;
518 timer_set_duration(&ofproto
->next_expiration
, 1000);
520 hmap_init(&ofproto
->facets
);
522 for (i
= 0; i
< N_TABLES
; i
++) {
523 struct table_dpif
*table
= &ofproto
->tables
[i
];
525 table
->catchall_table
= NULL
;
526 table
->other_table
= NULL
;
527 table
->basis
= random_uint32();
529 ofproto
->need_revalidate
= false;
530 tag_set_init(&ofproto
->revalidate_set
);
532 list_init(&ofproto
->completions
);
534 ofproto_dpif_unixctl_init();
536 ofproto
->has_bundle_action
= false;
538 *n_tablesp
= N_TABLES
;
543 complete_operations(struct ofproto_dpif
*ofproto
)
545 struct dpif_completion
*c
, *next
;
547 LIST_FOR_EACH_SAFE (c
, next
, list_node
, &ofproto
->completions
) {
548 ofoperation_complete(c
->op
, 0);
549 list_remove(&c
->list_node
);
555 destruct(struct ofproto
*ofproto_
)
557 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
558 struct rule_dpif
*rule
, *next_rule
;
559 struct classifier
*table
;
562 complete_operations(ofproto
);
564 OFPROTO_FOR_EACH_TABLE (table
, &ofproto
->up
) {
565 struct cls_cursor cursor
;
567 cls_cursor_init(&cursor
, table
, NULL
);
568 CLS_CURSOR_FOR_EACH_SAFE (rule
, next_rule
, up
.cr
, &cursor
) {
569 ofproto_rule_destroy(&rule
->up
);
573 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
574 mirror_destroy(ofproto
->mirrors
[i
]);
577 netflow_destroy(ofproto
->netflow
);
578 dpif_sflow_destroy(ofproto
->sflow
);
579 hmap_destroy(&ofproto
->bundles
);
580 mac_learning_destroy(ofproto
->ml
);
582 hmap_destroy(&ofproto
->facets
);
584 dpif_close(ofproto
->dpif
);
588 run(struct ofproto
*ofproto_
)
590 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
591 struct dpif_upcall misses
[FLOW_MISS_MAX_BATCH
];
592 struct ofport_dpif
*ofport
;
593 struct ofbundle
*bundle
;
598 complete_operations(ofproto
);
600 dpif_run(ofproto
->dpif
);
603 for (i
= 0; i
< FLOW_MISS_MAX_BATCH
; i
++) {
604 struct dpif_upcall
*upcall
= &misses
[n_misses
];
607 error
= dpif_recv(ofproto
->dpif
, upcall
);
609 if (error
== ENODEV
&& n_misses
== 0) {
615 if (upcall
->type
== DPIF_UC_MISS
) {
616 /* Handle it later. */
619 handle_upcall(ofproto
, upcall
);
623 handle_miss_upcalls(ofproto
, misses
, n_misses
);
625 if (timer_expired(&ofproto
->next_expiration
)) {
626 int delay
= expire(ofproto
);
627 timer_set_duration(&ofproto
->next_expiration
, delay
);
630 if (ofproto
->netflow
) {
631 netflow_run(ofproto
->netflow
);
633 if (ofproto
->sflow
) {
634 dpif_sflow_run(ofproto
->sflow
);
637 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
640 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
645 mac_learning_run(ofproto
->ml
, &ofproto
->revalidate_set
);
647 /* Now revalidate if there's anything to do. */
648 if (ofproto
->need_revalidate
649 || !tag_set_is_empty(&ofproto
->revalidate_set
)) {
650 struct tag_set revalidate_set
= ofproto
->revalidate_set
;
651 bool revalidate_all
= ofproto
->need_revalidate
;
652 struct facet
*facet
, *next
;
654 /* Clear the revalidation flags. */
655 tag_set_init(&ofproto
->revalidate_set
);
656 ofproto
->need_revalidate
= false;
658 HMAP_FOR_EACH_SAFE (facet
, next
, hmap_node
, &ofproto
->facets
) {
660 || tag_set_intersects(&revalidate_set
, facet
->tags
)) {
661 facet_revalidate(ofproto
, facet
);
670 wait(struct ofproto
*ofproto_
)
672 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
673 struct ofport_dpif
*ofport
;
674 struct ofbundle
*bundle
;
676 if (!clogged
&& !list_is_empty(&ofproto
->completions
)) {
677 poll_immediate_wake();
680 dpif_wait(ofproto
->dpif
);
681 dpif_recv_wait(ofproto
->dpif
);
682 if (ofproto
->sflow
) {
683 dpif_sflow_wait(ofproto
->sflow
);
685 if (!tag_set_is_empty(&ofproto
->revalidate_set
)) {
686 poll_immediate_wake();
688 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
691 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
694 mac_learning_wait(ofproto
->ml
);
696 if (ofproto
->need_revalidate
) {
697 /* Shouldn't happen, but if it does just go around again. */
698 VLOG_DBG_RL(&rl
, "need revalidate in ofproto_wait_cb()");
699 poll_immediate_wake();
701 timer_wait(&ofproto
->next_expiration
);
706 flush(struct ofproto
*ofproto_
)
708 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
709 struct facet
*facet
, *next_facet
;
711 HMAP_FOR_EACH_SAFE (facet
, next_facet
, hmap_node
, &ofproto
->facets
) {
712 /* Mark the facet as not installed so that facet_remove() doesn't
713 * bother trying to uninstall it. There is no point in uninstalling it
714 * individually since we are about to blow away all the facets with
715 * dpif_flow_flush(). */
716 facet
->installed
= false;
717 facet
->dp_packet_count
= 0;
718 facet
->dp_byte_count
= 0;
719 facet_remove(ofproto
, facet
);
721 dpif_flow_flush(ofproto
->dpif
);
725 get_features(struct ofproto
*ofproto_ OVS_UNUSED
,
726 bool *arp_match_ip
, uint32_t *actions
)
728 *arp_match_ip
= true;
729 *actions
= ((1u << OFPAT_OUTPUT
) |
730 (1u << OFPAT_SET_VLAN_VID
) |
731 (1u << OFPAT_SET_VLAN_PCP
) |
732 (1u << OFPAT_STRIP_VLAN
) |
733 (1u << OFPAT_SET_DL_SRC
) |
734 (1u << OFPAT_SET_DL_DST
) |
735 (1u << OFPAT_SET_NW_SRC
) |
736 (1u << OFPAT_SET_NW_DST
) |
737 (1u << OFPAT_SET_NW_TOS
) |
738 (1u << OFPAT_SET_TP_SRC
) |
739 (1u << OFPAT_SET_TP_DST
) |
740 (1u << OFPAT_ENQUEUE
));
744 get_tables(struct ofproto
*ofproto_
, struct ofp_table_stats
*ots
)
746 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
747 struct dpif_dp_stats s
;
749 strcpy(ots
->name
, "classifier");
751 dpif_get_dp_stats(ofproto
->dpif
, &s
);
752 put_32aligned_be64(&ots
->lookup_count
, htonll(s
.n_hit
+ s
.n_missed
));
753 put_32aligned_be64(&ots
->matched_count
,
754 htonll(s
.n_hit
+ ofproto
->n_matches
));
758 set_netflow(struct ofproto
*ofproto_
,
759 const struct netflow_options
*netflow_options
)
761 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
763 if (netflow_options
) {
764 if (!ofproto
->netflow
) {
765 ofproto
->netflow
= netflow_create();
767 return netflow_set_options(ofproto
->netflow
, netflow_options
);
769 netflow_destroy(ofproto
->netflow
);
770 ofproto
->netflow
= NULL
;
775 static struct ofport
*
778 struct ofport_dpif
*port
= xmalloc(sizeof *port
);
783 port_dealloc(struct ofport
*port_
)
785 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
790 port_construct(struct ofport
*port_
)
792 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
793 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
795 ofproto
->need_revalidate
= true;
796 port
->odp_port
= ofp_port_to_odp_port(port
->up
.ofp_port
);
799 port
->tag
= tag_create_random();
800 port
->may_enable
= true;
801 port
->stp_port
= NULL
;
802 port
->stp_state
= STP_DISABLED
;
804 if (ofproto
->sflow
) {
805 dpif_sflow_add_port(ofproto
->sflow
, port
->odp_port
,
806 netdev_get_name(port
->up
.netdev
));
813 port_destruct(struct ofport
*port_
)
815 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
816 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
818 ofproto
->need_revalidate
= true;
819 bundle_remove(port_
);
820 set_cfm(port_
, NULL
);
821 if (ofproto
->sflow
) {
822 dpif_sflow_del_port(ofproto
->sflow
, port
->odp_port
);
827 port_modified(struct ofport
*port_
)
829 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
831 if (port
->bundle
&& port
->bundle
->bond
) {
832 bond_slave_set_netdev(port
->bundle
->bond
, port
, port
->up
.netdev
);
837 port_reconfigured(struct ofport
*port_
, ovs_be32 old_config
)
839 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
840 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(port
->up
.ofproto
);
841 ovs_be32 changed
= old_config
^ port
->up
.opp
.config
;
843 if (changed
& htonl(OFPPC_NO_RECV
| OFPPC_NO_RECV_STP
|
844 OFPPC_NO_FWD
| OFPPC_NO_FLOOD
)) {
845 ofproto
->need_revalidate
= true;
847 if (changed
& htonl(OFPPC_NO_FLOOD
) && port
->bundle
) {
848 bundle_update(port
->bundle
);
854 set_sflow(struct ofproto
*ofproto_
,
855 const struct ofproto_sflow_options
*sflow_options
)
857 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
858 struct dpif_sflow
*ds
= ofproto
->sflow
;
862 struct ofport_dpif
*ofport
;
864 ds
= ofproto
->sflow
= dpif_sflow_create(ofproto
->dpif
);
865 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ofproto
->up
.ports
) {
866 dpif_sflow_add_port(ds
, ofport
->odp_port
,
867 netdev_get_name(ofport
->up
.netdev
));
869 ofproto
->need_revalidate
= true;
871 dpif_sflow_set_options(ds
, sflow_options
);
874 dpif_sflow_destroy(ds
);
875 ofproto
->need_revalidate
= true;
876 ofproto
->sflow
= NULL
;
883 set_cfm(struct ofport
*ofport_
, const struct cfm_settings
*s
)
885 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
892 struct ofproto_dpif
*ofproto
;
894 ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
895 ofproto
->need_revalidate
= true;
896 ofport
->cfm
= cfm_create(netdev_get_name(ofport
->up
.netdev
));
899 if (cfm_configure(ofport
->cfm
, s
)) {
905 cfm_destroy(ofport
->cfm
);
911 get_cfm_fault(const struct ofport
*ofport_
)
913 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
915 return ofport
->cfm
? cfm_get_fault(ofport
->cfm
) : -1;
919 get_cfm_remote_mpids(const struct ofport
*ofport_
, const uint64_t **rmps
,
922 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
925 cfm_get_remote_mpids(ofport
->cfm
, rmps
, n_rmps
);
935 send_bpdu_cb(struct ofpbuf
*pkt
, int port_num
, void *ofproto_
)
937 struct ofproto_dpif
*ofproto
= ofproto_
;
938 struct stp_port
*sp
= stp_get_port(ofproto
->stp
, port_num
);
939 struct ofport_dpif
*ofport
;
941 ofport
= stp_port_get_aux(sp
);
943 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on unknown port %d",
944 ofproto
->up
.name
, port_num
);
946 struct eth_header
*eth
= pkt
->l2
;
948 netdev_get_etheraddr(ofport
->up
.netdev
, eth
->eth_src
);
949 if (eth_addr_is_zero(eth
->eth_src
)) {
950 VLOG_WARN_RL(&rl
, "%s: cannot send BPDU on port %d "
951 "with unknown MAC", ofproto
->up
.name
, port_num
);
953 int error
= netdev_send(ofport
->up
.netdev
, pkt
);
955 VLOG_WARN_RL(&rl
, "%s: sending BPDU on port %s failed (%s)",
957 netdev_get_name(ofport
->up
.netdev
),
965 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
967 set_stp(struct ofproto
*ofproto_
, const struct ofproto_stp_settings
*s
)
969 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
971 /* Only revalidate flows if the configuration changed. */
972 if (!s
!= !ofproto
->stp
) {
973 ofproto
->need_revalidate
= true;
978 ofproto
->stp
= stp_create(ofproto_
->name
, s
->system_id
,
979 send_bpdu_cb
, ofproto
);
980 ofproto
->stp_last_tick
= time_msec();
983 stp_set_bridge_id(ofproto
->stp
, s
->system_id
);
984 stp_set_bridge_priority(ofproto
->stp
, s
->priority
);
985 stp_set_hello_time(ofproto
->stp
, s
->hello_time
);
986 stp_set_max_age(ofproto
->stp
, s
->max_age
);
987 stp_set_forward_delay(ofproto
->stp
, s
->fwd_delay
);
989 stp_destroy(ofproto
->stp
);
997 get_stp_status(struct ofproto
*ofproto_
, struct ofproto_stp_status
*s
)
999 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1003 s
->bridge_id
= stp_get_bridge_id(ofproto
->stp
);
1004 s
->designated_root
= stp_get_designated_root(ofproto
->stp
);
1005 s
->root_path_cost
= stp_get_root_path_cost(ofproto
->stp
);
1014 update_stp_port_state(struct ofport_dpif
*ofport
)
1016 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1017 enum stp_state state
;
1019 /* Figure out new state. */
1020 state
= ofport
->stp_port
? stp_port_get_state(ofport
->stp_port
)
1024 if (ofport
->stp_state
!= state
) {
1028 VLOG_DBG_RL(&rl
, "port %s: STP state changed from %s to %s",
1029 netdev_get_name(ofport
->up
.netdev
),
1030 stp_state_name(ofport
->stp_state
),
1031 stp_state_name(state
));
1032 if (stp_learn_in_state(ofport
->stp_state
)
1033 != stp_learn_in_state(state
)) {
1034 /* xxx Learning action flows should also be flushed. */
1035 mac_learning_flush(ofproto
->ml
);
1037 fwd_change
= stp_forward_in_state(ofport
->stp_state
)
1038 != stp_forward_in_state(state
);
1040 ofproto
->need_revalidate
= true;
1041 ofport
->stp_state
= state
;
1042 ofport
->stp_state_entered
= time_msec();
1045 bundle_update(ofport
->bundle
);
1048 /* Update the STP state bits in the OpenFlow port description. */
1049 of_state
= (ofport
->up
.opp
.state
& htonl(~OFPPS_STP_MASK
))
1050 | htonl(state
== STP_LISTENING
? OFPPS_STP_LISTEN
1051 : state
== STP_LEARNING
? OFPPS_STP_LEARN
1052 : state
== STP_FORWARDING
? OFPPS_STP_FORWARD
1053 : state
== STP_BLOCKING
? OFPPS_STP_BLOCK
1055 ofproto_port_set_state(&ofport
->up
, of_state
);
1059 /* Configures STP on 'ofport_' using the settings defined in 's'. The
1060 * caller is responsible for assigning STP port numbers and ensuring
1061 * there are no duplicates. */
1063 set_stp_port(struct ofport
*ofport_
,
1064 const struct ofproto_port_stp_settings
*s
)
1066 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1067 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1068 struct stp_port
*sp
= ofport
->stp_port
;
1070 if (!s
|| !s
->enable
) {
1072 ofport
->stp_port
= NULL
;
1073 stp_port_disable(sp
);
1076 } else if (sp
&& stp_port_no(sp
) != s
->port_num
1077 && ofport
== stp_port_get_aux(sp
)) {
1078 /* The port-id changed, so disable the old one if it's not
1079 * already in use by another port. */
1080 stp_port_disable(sp
);
1083 sp
= ofport
->stp_port
= stp_get_port(ofproto
->stp
, s
->port_num
);
1084 stp_port_enable(sp
);
1086 stp_port_set_aux(sp
, ofport
);
1087 stp_port_set_priority(sp
, s
->priority
);
1088 stp_port_set_path_cost(sp
, s
->path_cost
);
1090 update_stp_port_state(ofport
);
1096 get_stp_port_status(struct ofport
*ofport_
,
1097 struct ofproto_port_stp_status
*s
)
1099 struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
1100 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1101 struct stp_port
*sp
= ofport
->stp_port
;
1103 if (!ofproto
->stp
|| !sp
) {
1109 s
->port_id
= stp_port_get_id(sp
);
1110 s
->state
= stp_port_get_state(sp
);
1111 s
->sec_in_state
= (time_msec() - ofport
->stp_state_entered
) / 1000;
1112 s
->role
= stp_port_get_role(sp
);
1118 stp_run(struct ofproto_dpif
*ofproto
)
1121 long long int now
= time_msec();
1122 long long int elapsed
= now
- ofproto
->stp_last_tick
;
1123 struct stp_port
*sp
;
1126 stp_tick(ofproto
->stp
, MIN(INT_MAX
, elapsed
));
1127 ofproto
->stp_last_tick
= now
;
1129 while (stp_get_changed_port(ofproto
->stp
, &sp
)) {
1130 struct ofport_dpif
*ofport
= stp_port_get_aux(sp
);
1133 update_stp_port_state(ofport
);
1140 stp_wait(struct ofproto_dpif
*ofproto
)
1143 poll_timer_wait(1000);
1147 /* Returns true if STP should process 'flow'. */
1149 stp_should_process_flow(const struct flow
*flow
)
1151 return eth_addr_equals(flow
->dl_dst
, eth_addr_stp
);
1155 stp_process_packet(const struct ofport_dpif
*ofport
,
1156 const struct ofpbuf
*packet
)
1158 struct ofpbuf payload
= *packet
;
1159 struct eth_header
*eth
= payload
.data
;
1160 struct stp_port
*sp
= ofport
->stp_port
;
1162 /* Sink packets on ports that have STP disabled when the bridge has
1164 if (!sp
|| stp_port_get_state(sp
) == STP_DISABLED
) {
1168 /* Trim off padding on payload. */
1169 if (payload
.size
> ntohs(eth
->eth_type
) + ETH_HEADER_LEN
) {
1170 payload
.size
= ntohs(eth
->eth_type
) + ETH_HEADER_LEN
;
1173 if (ofpbuf_try_pull(&payload
, ETH_HEADER_LEN
+ LLC_HEADER_LEN
)) {
1174 stp_received_bpdu(sp
, payload
.data
, payload
.size
);
1180 /* Expires all MAC learning entries associated with 'port' and forces ofproto
1181 * to revalidate every flow. */
1183 bundle_flush_macs(struct ofbundle
*bundle
)
1185 struct ofproto_dpif
*ofproto
= bundle
->ofproto
;
1186 struct mac_learning
*ml
= ofproto
->ml
;
1187 struct mac_entry
*mac
, *next_mac
;
1189 ofproto
->need_revalidate
= true;
1190 LIST_FOR_EACH_SAFE (mac
, next_mac
, lru_node
, &ml
->lrus
) {
1191 if (mac
->port
.p
== bundle
) {
1192 mac_learning_expire(ml
, mac
);
1197 static struct ofbundle
*
1198 bundle_lookup(const struct ofproto_dpif
*ofproto
, void *aux
)
1200 struct ofbundle
*bundle
;
1202 HMAP_FOR_EACH_IN_BUCKET (bundle
, hmap_node
, hash_pointer(aux
, 0),
1203 &ofproto
->bundles
) {
1204 if (bundle
->aux
== aux
) {
1211 /* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
1212 * ones that are found to 'bundles'. */
1214 bundle_lookup_multiple(struct ofproto_dpif
*ofproto
,
1215 void **auxes
, size_t n_auxes
,
1216 struct hmapx
*bundles
)
1220 hmapx_init(bundles
);
1221 for (i
= 0; i
< n_auxes
; i
++) {
1222 struct ofbundle
*bundle
= bundle_lookup(ofproto
, auxes
[i
]);
1224 hmapx_add(bundles
, bundle
);
1230 bundle_update(struct ofbundle
*bundle
)
1232 struct ofport_dpif
*port
;
1234 bundle
->floodable
= true;
1235 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
1236 if (port
->up
.opp
.config
& htonl(OFPPC_NO_FLOOD
)
1237 || !stp_forward_in_state(port
->stp_state
)) {
1238 bundle
->floodable
= false;
1245 bundle_del_port(struct ofport_dpif
*port
)
1247 struct ofbundle
*bundle
= port
->bundle
;
1249 bundle
->ofproto
->need_revalidate
= true;
1251 list_remove(&port
->bundle_node
);
1252 port
->bundle
= NULL
;
1255 lacp_slave_unregister(bundle
->lacp
, port
);
1258 bond_slave_unregister(bundle
->bond
, port
);
1261 bundle_update(bundle
);
1265 bundle_add_port(struct ofbundle
*bundle
, uint32_t ofp_port
,
1266 struct lacp_slave_settings
*lacp
,
1267 uint32_t bond_stable_id
)
1269 struct ofport_dpif
*port
;
1271 port
= get_ofp_port(bundle
->ofproto
, ofp_port
);
1276 if (port
->bundle
!= bundle
) {
1277 bundle
->ofproto
->need_revalidate
= true;
1279 bundle_del_port(port
);
1282 port
->bundle
= bundle
;
1283 list_push_back(&bundle
->ports
, &port
->bundle_node
);
1284 if (port
->up
.opp
.config
& htonl(OFPPC_NO_FLOOD
)
1285 || !stp_forward_in_state(port
->stp_state
)) {
1286 bundle
->floodable
= false;
1290 port
->bundle
->ofproto
->need_revalidate
= true;
1291 lacp_slave_register(bundle
->lacp
, port
, lacp
);
1294 port
->bond_stable_id
= bond_stable_id
;
1300 bundle_destroy(struct ofbundle
*bundle
)
1302 struct ofproto_dpif
*ofproto
;
1303 struct ofport_dpif
*port
, *next_port
;
1310 ofproto
= bundle
->ofproto
;
1311 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1312 struct ofmirror
*m
= ofproto
->mirrors
[i
];
1314 if (m
->out
== bundle
) {
1316 } else if (hmapx_find_and_delete(&m
->srcs
, bundle
)
1317 || hmapx_find_and_delete(&m
->dsts
, bundle
)) {
1318 ofproto
->need_revalidate
= true;
1323 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
1324 bundle_del_port(port
);
1327 bundle_flush_macs(bundle
);
1328 hmap_remove(&ofproto
->bundles
, &bundle
->hmap_node
);
1330 free(bundle
->trunks
);
1331 lacp_destroy(bundle
->lacp
);
1332 bond_destroy(bundle
->bond
);
1337 bundle_set(struct ofproto
*ofproto_
, void *aux
,
1338 const struct ofproto_bundle_settings
*s
)
1340 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1341 bool need_flush
= false;
1342 struct ofport_dpif
*port
;
1343 struct ofbundle
*bundle
;
1344 unsigned long *trunks
;
1350 bundle_destroy(bundle_lookup(ofproto
, aux
));
1354 assert(s
->n_slaves
== 1 || s
->bond
!= NULL
);
1355 assert((s
->lacp
!= NULL
) == (s
->lacp_slaves
!= NULL
));
1357 bundle
= bundle_lookup(ofproto
, aux
);
1359 bundle
= xmalloc(sizeof *bundle
);
1361 bundle
->ofproto
= ofproto
;
1362 hmap_insert(&ofproto
->bundles
, &bundle
->hmap_node
,
1363 hash_pointer(aux
, 0));
1365 bundle
->name
= NULL
;
1367 list_init(&bundle
->ports
);
1368 bundle
->vlan_mode
= PORT_VLAN_TRUNK
;
1370 bundle
->trunks
= NULL
;
1371 bundle
->lacp
= NULL
;
1372 bundle
->bond
= NULL
;
1374 bundle
->floodable
= true;
1376 bundle
->src_mirrors
= 0;
1377 bundle
->dst_mirrors
= 0;
1378 bundle
->mirror_out
= 0;
1381 if (!bundle
->name
|| strcmp(s
->name
, bundle
->name
)) {
1383 bundle
->name
= xstrdup(s
->name
);
1388 if (!bundle
->lacp
) {
1389 ofproto
->need_revalidate
= true;
1390 bundle
->lacp
= lacp_create();
1392 lacp_configure(bundle
->lacp
, s
->lacp
);
1394 lacp_destroy(bundle
->lacp
);
1395 bundle
->lacp
= NULL
;
1398 /* Update set of ports. */
1400 for (i
= 0; i
< s
->n_slaves
; i
++) {
1401 if (!bundle_add_port(bundle
, s
->slaves
[i
],
1402 s
->lacp
? &s
->lacp_slaves
[i
] : NULL
,
1403 s
->bond_stable_ids
? s
->bond_stable_ids
[i
] : 0)) {
1407 if (!ok
|| list_size(&bundle
->ports
) != s
->n_slaves
) {
1408 struct ofport_dpif
*next_port
;
1410 LIST_FOR_EACH_SAFE (port
, next_port
, bundle_node
, &bundle
->ports
) {
1411 for (i
= 0; i
< s
->n_slaves
; i
++) {
1412 if (s
->slaves
[i
] == port
->up
.ofp_port
) {
1417 bundle_del_port(port
);
1421 assert(list_size(&bundle
->ports
) <= s
->n_slaves
);
1423 if (list_is_empty(&bundle
->ports
)) {
1424 bundle_destroy(bundle
);
1428 /* Set VLAN tagging mode */
1429 if (s
->vlan_mode
!= bundle
->vlan_mode
) {
1430 bundle
->vlan_mode
= s
->vlan_mode
;
1435 vlan
= (s
->vlan_mode
== PORT_VLAN_TRUNK
? -1
1436 : s
->vlan
>= 0 && s
->vlan
<= 4095 ? s
->vlan
1438 if (vlan
!= bundle
->vlan
) {
1439 bundle
->vlan
= vlan
;
1443 /* Get trunked VLANs. */
1444 switch (s
->vlan_mode
) {
1445 case PORT_VLAN_ACCESS
:
1449 case PORT_VLAN_TRUNK
:
1450 trunks
= (unsigned long *) s
->trunks
;
1453 case PORT_VLAN_NATIVE_UNTAGGED
:
1454 case PORT_VLAN_NATIVE_TAGGED
:
1455 if (vlan
!= 0 && (!s
->trunks
1456 || !bitmap_is_set(s
->trunks
, vlan
)
1457 || bitmap_is_set(s
->trunks
, 0))) {
1458 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
1460 trunks
= bitmap_clone(s
->trunks
, 4096);
1462 trunks
= bitmap_allocate1(4096);
1464 bitmap_set1(trunks
, vlan
);
1465 bitmap_set0(trunks
, 0);
1467 trunks
= (unsigned long *) s
->trunks
;
1474 if (!vlan_bitmap_equal(trunks
, bundle
->trunks
)) {
1475 free(bundle
->trunks
);
1476 if (trunks
== s
->trunks
) {
1477 bundle
->trunks
= vlan_bitmap_clone(trunks
);
1479 bundle
->trunks
= trunks
;
1484 if (trunks
!= s
->trunks
) {
1489 if (!list_is_short(&bundle
->ports
)) {
1490 bundle
->ofproto
->has_bonded_bundles
= true;
1492 if (bond_reconfigure(bundle
->bond
, s
->bond
)) {
1493 ofproto
->need_revalidate
= true;
1496 bundle
->bond
= bond_create(s
->bond
);
1497 ofproto
->need_revalidate
= true;
1500 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
1501 bond_slave_register(bundle
->bond
, port
, port
->bond_stable_id
,
1505 bond_destroy(bundle
->bond
);
1506 bundle
->bond
= NULL
;
1509 /* If we changed something that would affect MAC learning, un-learn
1510 * everything on this port and force flow revalidation. */
1512 bundle_flush_macs(bundle
);
1519 bundle_remove(struct ofport
*port_
)
1521 struct ofport_dpif
*port
= ofport_dpif_cast(port_
);
1522 struct ofbundle
*bundle
= port
->bundle
;
1525 bundle_del_port(port
);
1526 if (list_is_empty(&bundle
->ports
)) {
1527 bundle_destroy(bundle
);
1528 } else if (list_is_short(&bundle
->ports
)) {
1529 bond_destroy(bundle
->bond
);
1530 bundle
->bond
= NULL
;
1536 send_pdu_cb(void *port_
, const void *pdu
, size_t pdu_size
)
1538 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 10);
1539 struct ofport_dpif
*port
= port_
;
1540 uint8_t ea
[ETH_ADDR_LEN
];
1543 error
= netdev_get_etheraddr(port
->up
.netdev
, ea
);
1545 struct ofpbuf packet
;
1548 ofpbuf_init(&packet
, 0);
1549 packet_pdu
= eth_compose(&packet
, eth_addr_lacp
, ea
, ETH_TYPE_LACP
,
1551 memcpy(packet_pdu
, pdu
, pdu_size
);
1553 error
= netdev_send(port
->up
.netdev
, &packet
);
1555 VLOG_WARN_RL(&rl
, "port %s: sending LACP PDU on iface %s failed "
1556 "(%s)", port
->bundle
->name
,
1557 netdev_get_name(port
->up
.netdev
), strerror(error
));
1559 ofpbuf_uninit(&packet
);
1561 VLOG_ERR_RL(&rl
, "port %s: cannot obtain Ethernet address of iface "
1562 "%s (%s)", port
->bundle
->name
,
1563 netdev_get_name(port
->up
.netdev
), strerror(error
));
1568 bundle_send_learning_packets(struct ofbundle
*bundle
)
1570 struct ofproto_dpif
*ofproto
= bundle
->ofproto
;
1571 int error
, n_packets
, n_errors
;
1572 struct mac_entry
*e
;
1574 error
= n_packets
= n_errors
= 0;
1575 LIST_FOR_EACH (e
, lru_node
, &ofproto
->ml
->lrus
) {
1576 if (e
->port
.p
!= bundle
) {
1577 int ret
= bond_send_learning_packet(bundle
->bond
, e
->mac
, e
->vlan
);
1587 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1588 VLOG_WARN_RL(&rl
, "bond %s: %d errors sending %d gratuitous learning "
1589 "packets, last error was: %s",
1590 bundle
->name
, n_errors
, n_packets
, strerror(error
));
1592 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
1593 bundle
->name
, n_packets
);
1598 bundle_run(struct ofbundle
*bundle
)
1601 lacp_run(bundle
->lacp
, send_pdu_cb
);
1604 struct ofport_dpif
*port
;
1606 LIST_FOR_EACH (port
, bundle_node
, &bundle
->ports
) {
1607 bond_slave_set_may_enable(bundle
->bond
, port
, port
->may_enable
);
1610 bond_run(bundle
->bond
, &bundle
->ofproto
->revalidate_set
,
1611 lacp_negotiated(bundle
->lacp
));
1612 if (bond_should_send_learning_packets(bundle
->bond
)) {
1613 bundle_send_learning_packets(bundle
);
1619 bundle_wait(struct ofbundle
*bundle
)
1622 lacp_wait(bundle
->lacp
);
1625 bond_wait(bundle
->bond
);
1632 mirror_scan(struct ofproto_dpif
*ofproto
)
1636 for (idx
= 0; idx
< MAX_MIRRORS
; idx
++) {
1637 if (!ofproto
->mirrors
[idx
]) {
1644 static struct ofmirror
*
1645 mirror_lookup(struct ofproto_dpif
*ofproto
, void *aux
)
1649 for (i
= 0; i
< MAX_MIRRORS
; i
++) {
1650 struct ofmirror
*mirror
= ofproto
->mirrors
[i
];
1651 if (mirror
&& mirror
->aux
== aux
) {
1660 mirror_set(struct ofproto
*ofproto_
, void *aux
,
1661 const struct ofproto_mirror_settings
*s
)
1663 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1664 mirror_mask_t mirror_bit
;
1665 struct ofbundle
*bundle
;
1666 struct ofmirror
*mirror
;
1667 struct ofbundle
*out
;
1668 struct hmapx srcs
; /* Contains "struct ofbundle *"s. */
1669 struct hmapx dsts
; /* Contains "struct ofbundle *"s. */
1672 mirror
= mirror_lookup(ofproto
, aux
);
1674 mirror_destroy(mirror
);
1680 idx
= mirror_scan(ofproto
);
1682 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
1684 ofproto
->up
.name
, MAX_MIRRORS
, s
->name
);
1688 mirror
= ofproto
->mirrors
[idx
] = xzalloc(sizeof *mirror
);
1689 mirror
->ofproto
= ofproto
;
1692 mirror
->out_vlan
= -1;
1693 mirror
->name
= NULL
;
1696 if (!mirror
->name
|| strcmp(s
->name
, mirror
->name
)) {
1698 mirror
->name
= xstrdup(s
->name
);
1701 /* Get the new configuration. */
1702 if (s
->out_bundle
) {
1703 out
= bundle_lookup(ofproto
, s
->out_bundle
);
1705 mirror_destroy(mirror
);
1711 out_vlan
= s
->out_vlan
;
1713 bundle_lookup_multiple(ofproto
, s
->srcs
, s
->n_srcs
, &srcs
);
1714 bundle_lookup_multiple(ofproto
, s
->dsts
, s
->n_dsts
, &dsts
);
1716 /* If the configuration has not changed, do nothing. */
1717 if (hmapx_equals(&srcs
, &mirror
->srcs
)
1718 && hmapx_equals(&dsts
, &mirror
->dsts
)
1719 && vlan_bitmap_equal(mirror
->vlans
, s
->src_vlans
)
1720 && mirror
->out
== out
1721 && mirror
->out_vlan
== out_vlan
)
1723 hmapx_destroy(&srcs
);
1724 hmapx_destroy(&dsts
);
1728 hmapx_swap(&srcs
, &mirror
->srcs
);
1729 hmapx_destroy(&srcs
);
1731 hmapx_swap(&dsts
, &mirror
->dsts
);
1732 hmapx_destroy(&dsts
);
1734 free(mirror
->vlans
);
1735 mirror
->vlans
= vlan_bitmap_clone(s
->src_vlans
);
1738 mirror
->out_vlan
= out_vlan
;
1740 /* Update bundles. */
1741 mirror_bit
= MIRROR_MASK_C(1) << mirror
->idx
;
1742 HMAP_FOR_EACH (bundle
, hmap_node
, &mirror
->ofproto
->bundles
) {
1743 if (hmapx_contains(&mirror
->srcs
, bundle
)) {
1744 bundle
->src_mirrors
|= mirror_bit
;
1746 bundle
->src_mirrors
&= ~mirror_bit
;
1749 if (hmapx_contains(&mirror
->dsts
, bundle
)) {
1750 bundle
->dst_mirrors
|= mirror_bit
;
1752 bundle
->dst_mirrors
&= ~mirror_bit
;
1755 if (mirror
->out
== bundle
) {
1756 bundle
->mirror_out
|= mirror_bit
;
1758 bundle
->mirror_out
&= ~mirror_bit
;
1762 ofproto
->need_revalidate
= true;
1763 mac_learning_flush(ofproto
->ml
);
1769 mirror_destroy(struct ofmirror
*mirror
)
1771 struct ofproto_dpif
*ofproto
;
1772 mirror_mask_t mirror_bit
;
1773 struct ofbundle
*bundle
;
1779 ofproto
= mirror
->ofproto
;
1780 ofproto
->need_revalidate
= true;
1781 mac_learning_flush(ofproto
->ml
);
1783 mirror_bit
= MIRROR_MASK_C(1) << mirror
->idx
;
1784 HMAP_FOR_EACH (bundle
, hmap_node
, &ofproto
->bundles
) {
1785 bundle
->src_mirrors
&= ~mirror_bit
;
1786 bundle
->dst_mirrors
&= ~mirror_bit
;
1787 bundle
->mirror_out
&= ~mirror_bit
;
1790 hmapx_destroy(&mirror
->srcs
);
1791 hmapx_destroy(&mirror
->dsts
);
1792 free(mirror
->vlans
);
1794 ofproto
->mirrors
[mirror
->idx
] = NULL
;
1800 set_flood_vlans(struct ofproto
*ofproto_
, unsigned long *flood_vlans
)
1802 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1803 if (mac_learning_set_flood_vlans(ofproto
->ml
, flood_vlans
)) {
1804 ofproto
->need_revalidate
= true;
1805 mac_learning_flush(ofproto
->ml
);
1811 is_mirror_output_bundle(const struct ofproto
*ofproto_
, void *aux
)
1813 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1814 struct ofbundle
*bundle
= bundle_lookup(ofproto
, aux
);
1815 return bundle
&& bundle
->mirror_out
!= 0;
1819 forward_bpdu_changed(struct ofproto
*ofproto_
)
1821 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1822 /* Revalidate cached flows whenever forward_bpdu option changes. */
1823 ofproto
->need_revalidate
= true;
1828 static struct ofport_dpif
*
1829 get_ofp_port(struct ofproto_dpif
*ofproto
, uint16_t ofp_port
)
1831 struct ofport
*ofport
= ofproto_get_port(&ofproto
->up
, ofp_port
);
1832 return ofport
? ofport_dpif_cast(ofport
) : NULL
;
1835 static struct ofport_dpif
*
1836 get_odp_port(struct ofproto_dpif
*ofproto
, uint32_t odp_port
)
1838 return get_ofp_port(ofproto
, odp_port_to_ofp_port(odp_port
));
1842 ofproto_port_from_dpif_port(struct ofproto_port
*ofproto_port
,
1843 struct dpif_port
*dpif_port
)
1845 ofproto_port
->name
= dpif_port
->name
;
1846 ofproto_port
->type
= dpif_port
->type
;
1847 ofproto_port
->ofp_port
= odp_port_to_ofp_port(dpif_port
->port_no
);
1851 port_run(struct ofport_dpif
*ofport
)
1853 bool enable
= netdev_get_carrier(ofport
->up
.netdev
);
1856 cfm_run(ofport
->cfm
);
1858 if (cfm_should_send_ccm(ofport
->cfm
)) {
1859 struct ofpbuf packet
;
1861 ofpbuf_init(&packet
, 0);
1862 cfm_compose_ccm(ofport
->cfm
, &packet
, ofport
->up
.opp
.hw_addr
);
1863 send_packet(ofproto_dpif_cast(ofport
->up
.ofproto
),
1864 ofport
->odp_port
, &packet
);
1865 ofpbuf_uninit(&packet
);
1868 enable
= enable
&& !cfm_get_fault(ofport
->cfm
)
1869 && cfm_get_opup(ofport
->cfm
);
1872 if (ofport
->bundle
) {
1873 enable
= enable
&& lacp_slave_may_enable(ofport
->bundle
->lacp
, ofport
);
1876 if (ofport
->may_enable
!= enable
) {
1877 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofport
->up
.ofproto
);
1879 if (ofproto
->has_bundle_action
) {
1880 ofproto
->need_revalidate
= true;
1884 ofport
->may_enable
= enable
;
1888 port_wait(struct ofport_dpif
*ofport
)
1891 cfm_wait(ofport
->cfm
);
1896 port_query_by_name(const struct ofproto
*ofproto_
, const char *devname
,
1897 struct ofproto_port
*ofproto_port
)
1899 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1900 struct dpif_port dpif_port
;
1903 error
= dpif_port_query_by_name(ofproto
->dpif
, devname
, &dpif_port
);
1905 ofproto_port_from_dpif_port(ofproto_port
, &dpif_port
);
1911 port_add(struct ofproto
*ofproto_
, struct netdev
*netdev
, uint16_t *ofp_portp
)
1913 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1917 error
= dpif_port_add(ofproto
->dpif
, netdev
, &odp_port
);
1919 *ofp_portp
= odp_port_to_ofp_port(odp_port
);
1925 port_del(struct ofproto
*ofproto_
, uint16_t ofp_port
)
1927 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1930 error
= dpif_port_del(ofproto
->dpif
, ofp_port_to_odp_port(ofp_port
));
1932 struct ofport_dpif
*ofport
= get_ofp_port(ofproto
, ofp_port
);
1934 /* The caller is going to close ofport->up.netdev. If this is a
1935 * bonded port, then the bond is using that netdev, so remove it
1936 * from the bond. The client will need to reconfigure everything
1937 * after deleting ports, so then the slave will get re-added. */
1938 bundle_remove(&ofport
->up
);
1944 struct port_dump_state
{
1945 struct dpif_port_dump dump
;
1950 port_dump_start(const struct ofproto
*ofproto_
, void **statep
)
1952 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1953 struct port_dump_state
*state
;
1955 *statep
= state
= xmalloc(sizeof *state
);
1956 dpif_port_dump_start(&state
->dump
, ofproto
->dpif
);
1957 state
->done
= false;
1962 port_dump_next(const struct ofproto
*ofproto_ OVS_UNUSED
, void *state_
,
1963 struct ofproto_port
*port
)
1965 struct port_dump_state
*state
= state_
;
1966 struct dpif_port dpif_port
;
1968 if (dpif_port_dump_next(&state
->dump
, &dpif_port
)) {
1969 ofproto_port_from_dpif_port(port
, &dpif_port
);
1972 int error
= dpif_port_dump_done(&state
->dump
);
1974 return error
? error
: EOF
;
1979 port_dump_done(const struct ofproto
*ofproto_ OVS_UNUSED
, void *state_
)
1981 struct port_dump_state
*state
= state_
;
1984 dpif_port_dump_done(&state
->dump
);
1991 port_poll(const struct ofproto
*ofproto_
, char **devnamep
)
1993 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
1994 return dpif_port_poll(ofproto
->dpif
, devnamep
);
1998 port_poll_wait(const struct ofproto
*ofproto_
)
2000 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(ofproto_
);
2001 dpif_port_poll_wait(ofproto
->dpif
);
2005 port_is_lacp_current(const struct ofport
*ofport_
)
2007 const struct ofport_dpif
*ofport
= ofport_dpif_cast(ofport_
);
2008 return (ofport
->bundle
&& ofport
->bundle
->lacp
2009 ? lacp_slave_is_current(ofport
->bundle
->lacp
, ofport
)
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct flow flow;
    const struct nlattr *key;
    size_t key_len;
    struct list packets;
};

struct flow_miss_op {
    union dpif_op dpif_op;
    struct facet *facet;
};
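/* A minimal illustrative sketch (an assumption, not code from this file) of
 * the batching pattern these structures support: misses are grouped by flow
 * in a to-do hmap, each group contributes zero or more flow_miss_ops, and
 * the collected dpif operations are handed to the datapath in one call:
 *
 *     struct flow_miss_op ops[FLOW_MISS_MAX_BATCH * 2];
 *     union dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
 *     size_t n_ops = 0, i;
 *
 *     HMAP_FOR_EACH (miss, hmap_node, &todo) {
 *         handle_flow_miss(ofproto, miss, ops, &n_ops);
 *     }
 *     for (i = 0; i < n_ops; i++) {
 *         dpif_ops[i] = &ops[i].dpif_op;
 *     }
 *     dpif_operate(ofproto->dpif, dpif_ops, n_ops);
 */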
2037 /* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
2038 * OpenFlow controller as necessary according to their individual
2041 * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise,
2042 * ownership is transferred to this function. */
2044 send_packet_in_miss(struct ofproto_dpif
*ofproto
, struct ofpbuf
*packet
,
2045 const struct flow
*flow
, bool clone
)
2047 struct ofputil_packet_in pin
;
2049 pin
.packet
= packet
;
2050 pin
.in_port
= flow
->in_port
;
2051 pin
.reason
= OFPR_NO_MATCH
;
2052 pin
.buffer_id
= 0; /* not yet known */
2053 pin
.send_len
= 0; /* not used for flow table misses */
2054 connmgr_send_packet_in(ofproto
->up
.connmgr
, &pin
, flow
,
2055 clone
? NULL
: packet
);
2058 /* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_ACTION to each
2059 * OpenFlow controller as necessary according to their individual
2062 * 'send_len' should be the number of bytes of 'packet' to send to the
2063 * controller, as specified in the action that caused the packet to be sent.
2065 * If 'clone' is true, the caller retains ownership of 'upcall->packet'.
2066 * Otherwise, ownership is transferred to this function. */
2068 send_packet_in_action(struct ofproto_dpif
*ofproto
, struct ofpbuf
*packet
,
2069 uint64_t userdata
, const struct flow
*flow
, bool clone
)
2071 struct ofputil_packet_in pin
;
2072 struct user_action_cookie cookie
;
2074 memcpy(&cookie
, &userdata
, sizeof(cookie
));
2076 pin
.packet
= packet
;
2077 pin
.in_port
= flow
->in_port
;
2078 pin
.reason
= OFPR_ACTION
;
2079 pin
.buffer_id
= 0; /* not yet known */
2080 pin
.send_len
= cookie
.data
;
2081 connmgr_send_packet_in(ofproto
->up
.connmgr
, &pin
, flow
,
2082 clone
? NULL
: packet
);
2086 process_special(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
2087 const struct ofpbuf
*packet
)
2089 struct ofport_dpif
*ofport
= get_ofp_port(ofproto
, flow
->in_port
);
2095 if (ofport
->cfm
&& cfm_should_process_flow(ofport
->cfm
, flow
)) {
2097 cfm_process_heartbeat(ofport
->cfm
, packet
);
2100 } else if (ofport
->bundle
&& ofport
->bundle
->lacp
2101 && flow
->dl_type
== htons(ETH_TYPE_LACP
)) {
2103 lacp_process_packet(ofport
->bundle
->lacp
, ofport
, packet
);
2106 } else if (ofproto
->stp
&& stp_should_process_flow(flow
)) {
2108 stp_process_packet(ofport
, packet
);
2115 static struct flow_miss
*
2116 flow_miss_create(struct hmap
*todo
, const struct flow
*flow
,
2117 const struct nlattr
*key
, size_t key_len
)
2119 uint32_t hash
= flow_hash(flow
, 0);
2120 struct flow_miss
*miss
;
2122 HMAP_FOR_EACH_WITH_HASH (miss
, hmap_node
, hash
, todo
) {
2123 if (flow_equal(&miss
->flow
, flow
)) {
2128 miss
= xmalloc(sizeof *miss
);
2129 hmap_insert(todo
, &miss
->hmap_node
, hash
);
2132 miss
->key_len
= key_len
;
2133 list_init(&miss
->packets
);
2138 handle_flow_miss(struct ofproto_dpif
*ofproto
, struct flow_miss
*miss
,
2139 struct flow_miss_op
*ops
, size_t *n_ops
)
2141 const struct flow
*flow
= &miss
->flow
;
2142 struct ofpbuf
*packet
, *next_packet
;
2143 struct facet
*facet
;
2145 facet
= facet_lookup_valid(ofproto
, flow
);
2147 struct rule_dpif
*rule
;
2149 rule
= rule_dpif_lookup(ofproto
, flow
, 0);
2151 /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
2152 struct ofport_dpif
*port
= get_ofp_port(ofproto
, flow
->in_port
);
2154 if (port
->up
.opp
.config
& htonl(OFPPC_NO_PACKET_IN
)) {
2155 COVERAGE_INC(ofproto_dpif_no_packet_in
);
2156 /* XXX install 'drop' flow entry */
2160 VLOG_WARN_RL(&rl
, "packet-in on unknown port %"PRIu16
,
2164 LIST_FOR_EACH_SAFE (packet
, next_packet
, list_node
,
2166 list_remove(&packet
->list_node
);
2167 send_packet_in_miss(ofproto
, packet
, flow
, false);
2173 facet
= facet_create(rule
, flow
);
2176 LIST_FOR_EACH_SAFE (packet
, next_packet
, list_node
, &miss
->packets
) {
2177 list_remove(&packet
->list_node
);
2178 ofproto
->n_matches
++;
2180 if (facet
->rule
->up
.cr
.priority
== FAIL_OPEN_PRIORITY
) {
2182 * Extra-special case for fail-open mode.
2184 * We are in fail-open mode and the packet matched the fail-open
2185 * rule, but we are connected to a controller too. We should send
2186 * the packet up to the controller in the hope that it will try to
2187 * set up a flow and thereby allow us to exit fail-open.
2189 * See the top-level comment in fail-open.c for more information.
2191 send_packet_in_miss(ofproto
, packet
, flow
, true);
2194 if (!facet
->may_install
) {
2195 facet_make_actions(ofproto
, facet
, packet
);
2197 if (!execute_controller_action(ofproto
, &facet
->flow
,
2198 facet
->actions
, facet
->actions_len
,
2200 struct flow_miss_op
*op
= &ops
[(*n_ops
)++];
2201 struct dpif_execute
*execute
= &op
->dpif_op
.execute
;
2204 execute
->type
= DPIF_OP_EXECUTE
;
2205 execute
->key
= miss
->key
;
2206 execute
->key_len
= miss
->key_len
;
2208 = (facet
->may_install
2210 : xmemdup(facet
->actions
, facet
->actions_len
));
2211 execute
->actions_len
= facet
->actions_len
;
2212 execute
->packet
= packet
;
2216 if (facet
->may_install
) {
2217 struct flow_miss_op
*op
= &ops
[(*n_ops
)++];
2218 struct dpif_flow_put
*put
= &op
->dpif_op
.flow_put
;
2221 put
->type
= DPIF_OP_FLOW_PUT
;
2222 put
->flags
= DPIF_FP_CREATE
| DPIF_FP_MODIFY
;
2223 put
->key
= miss
->key
;
2224 put
->key_len
= miss
->key_len
;
2225 put
->actions
= facet
->actions
;
2226 put
->actions_len
= facet
->actions_len
;
2232 handle_miss_upcalls(struct ofproto_dpif
*ofproto
, struct dpif_upcall
*upcalls
,
2235 struct dpif_upcall
*upcall
;
2236 struct flow_miss
*miss
, *next_miss
;
2237 struct flow_miss_op flow_miss_ops
[FLOW_MISS_MAX_BATCH
* 2];
2238 union dpif_op
*dpif_ops
[FLOW_MISS_MAX_BATCH
* 2];
    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
2253 for (upcall
= upcalls
; upcall
< &upcalls
[n_upcalls
]; upcall
++) {
2254 struct flow_miss
*miss
;
2257 /* Obtain in_port and tun_id, at least, then set 'flow''s header
2259 odp_flow_key_to_flow(upcall
->key
, upcall
->key_len
, &flow
);
2260 flow_extract(upcall
->packet
, flow
.tun_id
, flow
.in_port
, &flow
);
2262 /* Handle 802.1ag, LACP, and STP specially. */
2263 if (process_special(ofproto
, &flow
, upcall
->packet
)) {
2264 ofpbuf_delete(upcall
->packet
);
2265 ofproto
->n_matches
++;
2269 /* Add other packets to a to-do list. */
2270 miss
= flow_miss_create(&todo
, &flow
, upcall
->key
, upcall
->key_len
);
2271 list_push_back(&miss
->packets
, &upcall
->packet
->list_node
);
2274 /* Process each element in the to-do list, constructing the set of
2275 * operations to batch. */
2277 HMAP_FOR_EACH_SAFE (miss
, next_miss
, hmap_node
, &todo
) {
2278 handle_flow_miss(ofproto
, miss
, flow_miss_ops
, &n_ops
);
2279 ofpbuf_list_delete(&miss
->packets
);
2280 hmap_remove(&todo
, &miss
->hmap_node
);
2283 assert(n_ops
<= ARRAY_SIZE(flow_miss_ops
));
2284 hmap_destroy(&todo
);
2286 /* Execute batch. */
2287 for (i
= 0; i
< n_ops
; i
++) {
2288 dpif_ops
[i
] = &flow_miss_ops
[i
].dpif_op
;
2290 dpif_operate(ofproto
->dpif
, dpif_ops
, n_ops
);
2292 /* Free memory and update facets. */
2293 for (i
= 0; i
< n_ops
; i
++) {
2294 struct flow_miss_op
*op
= &flow_miss_ops
[i
];
2295 struct dpif_execute
*execute
;
2296 struct dpif_flow_put
*put
;
2298 switch (op
->dpif_op
.type
) {
2299 case DPIF_OP_EXECUTE
:
2300 execute
= &op
->dpif_op
.execute
;
2301 if (op
->facet
->actions
!= execute
->actions
) {
2302 free((struct nlattr
*) execute
->actions
);
2304 ofpbuf_delete((struct ofpbuf
*) execute
->packet
);
2307 case DPIF_OP_FLOW_PUT
:
2308 put
= &op
->dpif_op
.flow_put
;
2310 op
->facet
->installed
= true;
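/* Sizing note (editorial): each packet in a flow miss contributes at most one
 * DPIF_OP_EXECUTE and at most one DPIF_OP_FLOW_PUT, so a batch of up to
 * FLOW_MISS_MAX_BATCH packets can never need more than
 * FLOW_MISS_MAX_BATCH * 2 operations.  That is why 'flow_miss_ops' and
 * 'dpif_ops' are sized that way and why the assert() on n_ops above cannot
 * fire, assuming the caller passes in at most FLOW_MISS_MAX_BATCH upcalls
 * per call. */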
static void
handle_userspace_upcall(struct ofproto_dpif *ofproto,
                        struct dpif_upcall *upcall)
{
    struct flow flow;
    struct user_action_cookie cookie;

    memcpy(&cookie, &upcall->userdata, sizeof(cookie));

    if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
        if (ofproto->sflow) {
            odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
            dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
        }
        ofpbuf_delete(upcall->packet);

    } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
        COVERAGE_INC(ofproto_dpif_ctlr_action);
        odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
        send_packet_in_action(ofproto, upcall->packet, upcall->userdata,
                              &flow, false);
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
    }
}

static void
handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
{
    switch (upcall->type) {
    case DPIF_UC_ACTION:
        handle_userspace_upcall(ofproto, upcall);
        break;

    case DPIF_UC_MISS:
        /* The caller handles these. */
        NOT_REACHED();

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        break;
    }
}
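/* Editorial note: flow-miss upcalls are deliberately absent from the switch
 * above; the caller separates them out and feeds them to
 * handle_miss_upcalls() in batches instead of dispatching them one at a time
 * through handle_upcall().  Only action upcalls, which carry a struct
 * user_action_cookie in their 64-bit 'userdata' (copied out with memcpy() in
 * handle_userspace_upcall()), are handled here. */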
/* Flow expiration. */

static int facet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct ofproto_dpif *);
static void rule_expire(struct rule_dpif *);
static void expire_facets(struct ofproto_dpif *, int dp_max_idle);

/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct ofproto_dpif *ofproto)
{
    struct rule_dpif *rule, *next_rule;
    struct classifier *table;
    int dp_max_idle;

    /* Update stats for each flow in the datapath. */
    update_stats(ofproto);

    /* Expire facets that have been idle too long. */
    dp_max_idle = facet_max_idle(ofproto);
    expire_facets(ofproto, dp_max_idle);

    /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, table, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            rule_expire(rule);
        }
    }

    /* All outstanding data in existing flows has been accounted, so it's a
     * good time to do bond rebalancing. */
    if (ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            if (bundle->bond) {
                bond_rebalance(bundle->bond, &ofproto->revalidate_set);
            }
        }
    }

    return MIN(dp_max_idle, 1000);
}
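/* Example of the return value above: even when facet_max_idle() reports a
 * large idle cutoff (say 5000 ms, because there is no eviction pressure),
 * expire() returns MIN(5000, 1000) = 1000, so the caller polls again within
 * a second; when the cutoff is small (say 100 ms), the caller is asked to
 * come back that much sooner. */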
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if a
 * facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the
 * old rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or
 * deleted.  However, the performance impact of making so many calls to the
 * datapath does not justify the benefit of having perfectly accurate
 * statistics. */
static void
update_stats(struct ofproto_dpif *p)
{
    const struct dpif_flow_stats *stats;
    struct dpif_flow_dump dump;
    const struct nlattr *key;
    size_t key_len;

    dpif_flow_dump_start(&dump, p->dpif);
    while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
        struct facet *facet;
        struct flow flow;

        if (odp_flow_key_to_flow(key, key_len, &flow)) {
            struct ds s;

            ds_init(&s);
            odp_flow_key_format(key, key_len, &s);
            VLOG_WARN_RL(&rl, "failed to convert datapath flow key to flow: %s",
                         ds_cstr(&s));
            ds_destroy(&s);

            continue;
        }
        facet = facet_find(p, &flow);

        if (facet && facet->installed) {
            if (stats->n_packets >= facet->dp_packet_count) {
                uint64_t extra = stats->n_packets - facet->dp_packet_count;
                facet->packet_count += extra;
            } else {
                VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
            }

            if (stats->n_bytes >= facet->dp_byte_count) {
                facet->byte_count += stats->n_bytes - facet->dp_byte_count;
            } else {
                VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
            }

            facet->dp_packet_count = stats->n_packets;
            facet->dp_byte_count = stats->n_bytes;

            facet_update_time(p, facet, stats->used);
            facet_account(p, facet);
            facet_push_stats(facet);
        } else {
            /* There's a flow in the datapath that we know nothing about.
             * Delete it. */
            COVERAGE_INC(facet_unexpected);
            dpif_flow_del(p->dpif, key, key_len, NULL);
        }
    }
    dpif_flow_dump_done(&dump);
}
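/* Worked example of the delta arithmetic above: if a facet last saw
 * dp_packet_count = 10 and the dump now reports n_packets = 15, then
 * extra = 5 is folded into facet->packet_count and dp_packet_count becomes
 * 15.  A later dump reporting n_packets = 12 would instead trip the
 * "unexpected packet count" warning, since datapath counters are expected to
 * be monotonically non-decreasing for an installed flow. */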
/* Calculates and returns the number of milliseconds of idle time after which
 * facets should expire from the datapath and we should fold their statistics
 * into their parent rules in userspace. */
static int
facet_max_idle(const struct ofproto_dpif *ofproto)
{
    /*
     * Idle time histogram.
     *
     * Most of the time a switch has a relatively small number of facets.  When
     * this is the case we might as well keep statistics for all of them in
     * userspace and cache them in the kernel datapath for performance as
     * well.
     *
     * As the number of facets increases, the memory required to maintain
     * statistics about them in userspace and in the kernel becomes
     * significant.  However, with a large number of facets it is likely that
     * only a few of them are "heavy hitters" that consume a large amount of
     * bandwidth.  At this point, only heavy hitters are worth caching in the
     * kernel and maintaining in userspace; other facets we can discard.
     *
     * The technique used to compute the idle time is to build a histogram with
     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each facet
     * that is installed in the kernel gets dropped in the appropriate bucket.
     * After the histogram has been built, we compute the cutoff so that only
     * the most-recently-used 1% of facets (but at least
     * ofproto->up.flow_eviction_threshold flows) are kept cached.  At least
     * the most-recently-used bucket of facets is kept, so actually an
     * arbitrary number of facets can be kept in any given expiration run
     * (though the next run will delete most of those unless they receive
     * additional data).
     *
     * This requires a second pass through the facets, in addition to the pass
     * made by update_stats(), because the former function never looks
     * at uninstallable facets.
     */
    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
    int buckets[N_BUCKETS] = { 0 };
    int total, subtotal, bucket;
    struct facet *facet;
    long long int now;
    int i;

    total = hmap_count(&ofproto->facets);
    if (total <= ofproto->up.flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Build histogram. */
    now = time_msec();
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        long long int idle = now - facet->used;
        int bucket = (idle <= 0 ? 0
                      : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                      : (unsigned int) idle / BUCKET_WIDTH);
        buckets[bucket]++;
    }

    /* Find the first bucket whose flows should be expired. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS &&
             subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds s;

        ds_init(&s);
        ds_put_cstr(&s, "keep");
        for (i = 0; i < N_BUCKETS; i++) {
            if (i == bucket) {
                ds_put_cstr(&s, ", drop");
            }
            if (buckets[i]) {
                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
            }
        }
        VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
        ds_destroy(&s);
    }

    return bucket * BUCKET_WIDTH;
}
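/* Worked example of the histogram cutoff (the numbers are illustrative; the
 * real BUCKET_WIDTH depends on TIME_UPDATE_INTERVAL and the threshold on
 * ofproto->up.flow_eviction_threshold):
 *
 *     BUCKET_WIDTH = 100 ms, N_BUCKETS = 50
 *     total = 10000 facets, flow_eviction_threshold = 1000
 *     target = MAX(1000, 10000 / 100) = 1000 facets to keep
 *
 * The loop sums buckets starting from the most recently used until the
 * running subtotal reaches 1000.  If that happens after consuming buckets
 * 0..3, the function returns 4 * 100 = 400, and expire_facets() then removes
 * every facet that has been idle for 400 ms or more. */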
2569 facet_active_timeout(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
2571 if (ofproto
->netflow
&& !facet_is_controller_flow(facet
) &&
2572 netflow_active_timeout_expired(ofproto
->netflow
, &facet
->nf_flow
)) {
2573 struct ofexpired expired
;
2575 if (facet
->installed
) {
2576 struct dpif_flow_stats stats
;
2578 facet_put__(ofproto
, facet
, facet
->actions
, facet
->actions_len
,
2580 facet_update_stats(ofproto
, facet
, &stats
);
2583 expired
.flow
= facet
->flow
;
2584 expired
.packet_count
= facet
->packet_count
;
2585 expired
.byte_count
= facet
->byte_count
;
2586 expired
.used
= facet
->used
;
2587 netflow_expire(ofproto
->netflow
, &facet
->nf_flow
, &expired
);
2592 expire_facets(struct ofproto_dpif
*ofproto
, int dp_max_idle
)
2594 long long int cutoff
= time_msec() - dp_max_idle
;
2595 struct facet
*facet
, *next_facet
;
2597 HMAP_FOR_EACH_SAFE (facet
, next_facet
, hmap_node
, &ofproto
->facets
) {
2598 facet_active_timeout(ofproto
, facet
);
2599 if (facet
->used
< cutoff
) {
2600 facet_remove(ofproto
, facet
);
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
rule_expire(struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet, *next_facet;
    long long int now;
    uint8_t reason;

    /* Has 'rule' expired? */
    now = time_msec();
    if (rule->up.hard_timeout
        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
        reason = OFPRR_HARD_TIMEOUT;
    } else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
               && now > rule->used + rule->up.idle_timeout * 1000) {
        reason = OFPRR_IDLE_TIMEOUT;
    } else {
        return;
    }

    COVERAGE_INC(ofproto_dpif_expired);

    /* Update stats.  (This is a no-op if the rule expired due to an idle
     * timeout, because that only happens when the rule has no facets left.) */
    LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
        facet_remove(ofproto, facet);
    }

    /* Get rid of the rule. */
    ofproto_rule_expire(&rule->up, reason);
}
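/* Editorial note: the OpenFlow timeouts are expressed in seconds, hence the
 * '* 1000' when comparing against time_msec() above.  Also note that idle
 * expiration only applies once the rule has no facets left; as long as a
 * facet exists, its traffic keeps refreshing rule->used via
 * facet_update_time(). */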
/* Creates and returns a new facet owned by 'rule', given a 'flow'.
 *
 * The caller must already have determined that no facet with an identical
 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
 * the ofproto's classifier table.
 *
 * The facet will initially have no ODP actions.  The caller should fix that
 * by calling facet_make_actions(). */
static struct facet *
facet_create(struct rule_dpif *rule, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
    struct facet *facet;

    facet = xzalloc(sizeof *facet);
    facet->used = time_msec();
    hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
    list_push_back(&rule->facets, &facet->list_node);
    facet->rule = rule;
    facet->flow = *flow;
    netflow_flow_init(&facet->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);

    return facet;
}

static void
facet_free(struct facet *facet)
{
    free(facet->actions);
    free(facet);
}
2675 execute_controller_action(struct ofproto_dpif
*ofproto
,
2676 const struct flow
*flow
,
2677 const struct nlattr
*odp_actions
, size_t actions_len
,
2678 struct ofpbuf
*packet
)
2681 && odp_actions
->nla_type
== OVS_ACTION_ATTR_USERSPACE
2682 && NLA_ALIGN(odp_actions
->nla_len
) == actions_len
) {
2683 /* As an optimization, avoid a round-trip from userspace to kernel to
2684 * userspace. This also avoids possibly filling up kernel packet
2685 * buffers along the way.
2687 * This optimization will not accidentally catch sFlow
2688 * OVS_ACTION_ATTR_USERSPACE actions, since those are encapsulated
2689 * inside OVS_ACTION_ATTR_SAMPLE. */
2690 const struct nlattr
*nla
;
2692 nla
= nl_attr_find_nested(odp_actions
, OVS_USERSPACE_ATTR_USERDATA
);
2693 send_packet_in_action(ofproto
, packet
, nl_attr_get_u64(nla
), flow
,
2701 /* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
2702 * 'packet', which arrived on 'in_port'.
2704 * Takes ownership of 'packet'. */
2706 execute_odp_actions(struct ofproto_dpif
*ofproto
, const struct flow
*flow
,
2707 const struct nlattr
*odp_actions
, size_t actions_len
,
2708 struct ofpbuf
*packet
)
2710 struct odputil_keybuf keybuf
;
2714 if (execute_controller_action(ofproto
, flow
, odp_actions
, actions_len
,
2719 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
2720 odp_flow_key_from_flow(&key
, flow
);
2722 error
= dpif_execute(ofproto
->dpif
, key
.data
, key
.size
,
2723 odp_actions
, actions_len
, packet
);
2725 ofpbuf_delete(packet
);
2729 /* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
2730 * statistics appropriately. 'packet' must have at least sizeof(struct
2731 * ofp_packet_in) bytes of headroom.
2733 * For correct results, 'packet' must actually be in 'facet''s flow; that is,
2734 * applying flow_extract() to 'packet' would yield the same flow as
2737 * 'facet' must have accurately composed datapath actions; that is, it must
2738 * not be in need of revalidation.
2740 * Takes ownership of 'packet'. */
2742 facet_execute(struct ofproto_dpif
*ofproto
, struct facet
*facet
,
2743 struct ofpbuf
*packet
)
2745 struct dpif_flow_stats stats
;
2747 assert(ofpbuf_headroom(packet
) >= sizeof(struct ofp_packet_in
));
2749 dpif_flow_stats_extract(&facet
->flow
, packet
, &stats
);
2750 stats
.used
= time_msec();
2751 if (execute_odp_actions(ofproto
, &facet
->flow
,
2752 facet
->actions
, facet
->actions_len
, packet
)) {
2753 facet_update_stats(ofproto
, facet
, &stats
);
2757 /* Remove 'facet' from 'ofproto' and free up the associated memory:
2759 * - If 'facet' was installed in the datapath, uninstalls it and updates its
2760 * rule's statistics, via facet_uninstall().
2762 * - Removes 'facet' from its rule and from ofproto->facets.
2765 facet_remove(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
2767 facet_uninstall(ofproto
, facet
);
2768 facet_flush_stats(ofproto
, facet
);
2769 hmap_remove(&ofproto
->facets
, &facet
->hmap_node
);
2770 list_remove(&facet
->list_node
);
2774 /* Composes the datapath actions for 'facet' based on its rule's actions. */
2776 facet_make_actions(struct ofproto_dpif
*p
, struct facet
*facet
,
2777 const struct ofpbuf
*packet
)
2779 const struct rule_dpif
*rule
= facet
->rule
;
2780 struct ofpbuf
*odp_actions
;
2781 struct action_xlate_ctx ctx
;
2783 action_xlate_ctx_init(&ctx
, p
, &facet
->flow
, packet
);
2784 odp_actions
= xlate_actions(&ctx
, rule
->up
.actions
, rule
->up
.n_actions
);
2785 facet
->tags
= ctx
.tags
;
2786 facet
->may_install
= ctx
.may_set_up_flow
;
2787 facet
->has_learn
= ctx
.has_learn
;
2788 facet
->has_normal
= ctx
.has_normal
;
2789 facet
->nf_flow
.output_iface
= ctx
.nf_output_iface
;
2791 if (facet
->actions_len
!= odp_actions
->size
2792 || memcmp(facet
->actions
, odp_actions
->data
, odp_actions
->size
)) {
2793 free(facet
->actions
);
2794 facet
->actions_len
= odp_actions
->size
;
2795 facet
->actions
= xmemdup(odp_actions
->data
, odp_actions
->size
);
2798 ofpbuf_delete(odp_actions
);
2801 /* Updates 'facet''s flow in the datapath setting its actions to 'actions_len'
2802 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
2803 * in the datapath will be zeroed and 'stats' will be updated with traffic new
2804 * since 'facet' was last updated.
2806 * Returns 0 if successful, otherwise a positive errno value.*/
2808 facet_put__(struct ofproto_dpif
*ofproto
, struct facet
*facet
,
2809 const struct nlattr
*actions
, size_t actions_len
,
2810 struct dpif_flow_stats
*stats
)
2812 struct odputil_keybuf keybuf
;
2813 enum dpif_flow_put_flags flags
;
2817 flags
= DPIF_FP_CREATE
| DPIF_FP_MODIFY
;
2819 flags
|= DPIF_FP_ZERO_STATS
;
2822 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
2823 odp_flow_key_from_flow(&key
, &facet
->flow
);
2825 ret
= dpif_flow_put(ofproto
->dpif
, flags
, key
.data
, key
.size
,
2826 actions
, actions_len
, stats
);
2829 facet_reset_dp_stats(facet
, stats
);
2835 /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If
2836 * 'zero_stats' is true, clears any existing statistics from the datapath for
2839 facet_install(struct ofproto_dpif
*p
, struct facet
*facet
, bool zero_stats
)
2841 struct dpif_flow_stats stats
;
2843 if (facet
->may_install
2844 && !facet_put__(p
, facet
, facet
->actions
, facet
->actions_len
,
2845 zero_stats
? &stats
: NULL
)) {
2846 facet
->installed
= true;
static void
facet_account(struct ofproto_dpif *ofproto, struct facet *facet)
{
    uint64_t n_bytes;
    const struct nlattr *a;
    ovs_be16 vlan_tci;
    unsigned int left;

    if (facet->byte_count <= facet->accounted_bytes) {
        return;
    }
    n_bytes = facet->byte_count - facet->accounted_bytes;
    facet->accounted_bytes = facet->byte_count;

    /* Feed information from the active flows back into the learning table to
     * ensure that table is always in sync with what is actually flowing
     * through the datapath. */
    if (facet->has_learn || facet->has_normal) {
        struct action_xlate_ctx ctx;

        action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
        ctx.may_learn = true;
        ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
                                    facet->rule->up.n_actions));
    }

    if (!facet->has_normal || !ofproto->has_bonded_bundles) {
        return;
    }

    /* This loop feeds byte counters to bond_account() for rebalancing to use
     * as a basis.  We also need to track the actual VLAN on which the packet
     * is going to be sent to ensure that it matches the one passed to
     * bond_choose_output_slave().  (Otherwise, we will account to the wrong
     * bundle.) */
    vlan_tci = facet->flow.vlan_tci;
    NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->actions, facet->actions_len) {
        struct ofport_dpif *port;

        switch (nl_attr_type(a)) {
        const struct nlattr *nested;
        case OVS_ACTION_ATTR_OUTPUT:
            port = get_odp_port(ofproto, nl_attr_get_u32(a));
            if (port && port->bundle && port->bundle->bond) {
                bond_account(port->bundle->bond, &facet->flow,
                             vlan_tci_to_vid(vlan_tci), n_bytes);
            }
            break;

        case OVS_ACTION_ATTR_POP:
            if (nl_attr_get_u16(a) == OVS_KEY_ATTR_8021Q) {
                vlan_tci = htons(0);
            }
            break;

        case OVS_ACTION_ATTR_PUSH:
            nested = nl_attr_get(a);
            if (nl_attr_type(nested) == OVS_KEY_ATTR_8021Q) {
                const struct ovs_key_8021q *q_key;

                q_key = nl_attr_get_unspec(nested, sizeof(*q_key));
                vlan_tci = q_key->q_tci;
            }
            break;
        }
    }
}
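/* Illustration of the VLAN tracking above (hypothetical action list, not
 * taken from the code): with facet->flow.vlan_tci indicating VLAN 10, the
 * datapath actions
 *
 *     OUTPUT(bonded port A), POP(802.1Q), OUTPUT(bonded port B)
 *
 * account 'n_bytes' to bond A on VLAN 10 but to bond B on VLAN 0, because the
 * POP action resets the tracked vlan_tci before the second output.  A PUSH of
 * an 802.1Q key updates the tracked vlan_tci from the pushed q_tci in the
 * same way. */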
2918 /* If 'rule' is installed in the datapath, uninstalls it. */
2920 facet_uninstall(struct ofproto_dpif
*p
, struct facet
*facet
)
2922 if (facet
->installed
) {
2923 struct odputil_keybuf keybuf
;
2924 struct dpif_flow_stats stats
;
2928 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
2929 odp_flow_key_from_flow(&key
, &facet
->flow
);
2931 error
= dpif_flow_del(p
->dpif
, key
.data
, key
.size
, &stats
);
2932 facet_reset_dp_stats(facet
, &stats
);
2934 facet_update_stats(p
, facet
, &stats
);
2936 facet
->installed
= false;
2938 assert(facet
->dp_packet_count
== 0);
2939 assert(facet
->dp_byte_count
== 0);
2943 /* Returns true if the only action for 'facet' is to send to the controller.
2944 * (We don't report NetFlow expiration messages for such facets because they
2945 * are just part of the control logic for the network, not real traffic). */
2947 facet_is_controller_flow(struct facet
*facet
)
2950 && facet
->rule
->up
.n_actions
== 1
2951 && action_outputs_to_port(&facet
->rule
->up
.actions
[0],
2952 htons(OFPP_CONTROLLER
)));
2955 /* Resets 'facet''s datapath statistics counters. This should be called when
2956 * 'facet''s statistics are cleared in the datapath. If 'stats' is non-null,
2957 * it should contain the statistics returned by dpif when 'facet' was reset in
2958 * the datapath. 'stats' will be modified to only included statistics new
2959 * since 'facet' was last updated. */
2961 facet_reset_dp_stats(struct facet
*facet
, struct dpif_flow_stats
*stats
)
2963 if (stats
&& facet
->dp_packet_count
<= stats
->n_packets
2964 && facet
->dp_byte_count
<= stats
->n_bytes
) {
2965 stats
->n_packets
-= facet
->dp_packet_count
;
2966 stats
->n_bytes
-= facet
->dp_byte_count
;
2969 facet
->dp_packet_count
= 0;
2970 facet
->dp_byte_count
= 0;
2973 /* Folds all of 'facet''s statistics into its rule. Also updates the
2974 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
2975 * 'facet''s statistics in the datapath should have been zeroed and folded into
2976 * its packet and byte counts before this function is called. */
2978 facet_flush_stats(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
2980 assert(!facet
->dp_byte_count
);
2981 assert(!facet
->dp_packet_count
);
2983 facet_push_stats(facet
);
2984 facet_account(ofproto
, facet
);
2986 if (ofproto
->netflow
&& !facet_is_controller_flow(facet
)) {
2987 struct ofexpired expired
;
2988 expired
.flow
= facet
->flow
;
2989 expired
.packet_count
= facet
->packet_count
;
2990 expired
.byte_count
= facet
->byte_count
;
2991 expired
.used
= facet
->used
;
2992 netflow_expire(ofproto
->netflow
, &facet
->nf_flow
, &expired
);
2995 facet
->rule
->packet_count
+= facet
->packet_count
;
2996 facet
->rule
->byte_count
+= facet
->byte_count
;
2998 /* Reset counters to prevent double counting if 'facet' ever gets
3000 facet_reset_counters(facet
);
3002 netflow_flow_clear(&facet
->nf_flow
);
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * The returned facet might need revalidation; use facet_lookup_valid()
 * instead if that is important. */
static struct facet *
facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct facet *facet;

    HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
                             &ofproto->facets) {
        if (flow_equal(flow, &facet->flow)) {
            return facet;
        }
    }

    return NULL;
}

/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
 * Returns it if found, otherwise a null pointer.
 *
 * The returned facet is guaranteed to be valid. */
static struct facet *
facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct facet *facet = facet_find(ofproto, flow);

    /* The facet we found might not be valid, since we could be in need of
     * revalidation.  If it is not valid, don't return it. */
    if (facet
        && (ofproto->need_revalidate
            || tag_set_intersects(&ofproto->revalidate_set, facet->tags))
        && !facet_revalidate(ofproto, facet)) {
        COVERAGE_INC(facet_invalidated);
        return NULL;
    }

    return facet;
}
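/* Editorial note: unlike facet_find(), facet_lookup_valid() may report "no
 * facet" even though a matching facet existed on entry: if the facet is
 * tagged for revalidation and facet_revalidate() ends up destroying it
 * (because no rule matches its flow any more), NULL is returned and the
 * caller creates a fresh facet instead. */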
3047 /* Re-searches 'ofproto''s classifier for a rule matching 'facet':
3049 * - If the rule found is different from 'facet''s current rule, moves
3050 * 'facet' to the new rule and recompiles its actions.
3052 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
3053 * where it is and recompiles its actions anyway.
3055 * - If there is none, destroys 'facet'.
3057 * Returns true if 'facet' still exists, false if it has been destroyed. */
3059 facet_revalidate(struct ofproto_dpif
*ofproto
, struct facet
*facet
)
3061 struct action_xlate_ctx ctx
;
3062 struct ofpbuf
*odp_actions
;
3063 struct rule_dpif
*new_rule
;
3064 bool actions_changed
;
3066 COVERAGE_INC(facet_revalidate
);
3068 /* Determine the new rule. */
3069 new_rule
= rule_dpif_lookup(ofproto
, &facet
->flow
, 0);
3071 /* No new rule, so delete the facet. */
3072 facet_remove(ofproto
, facet
);
3076 /* Calculate new datapath actions.
3078 * We do not modify any 'facet' state yet, because we might need to, e.g.,
3079 * emit a NetFlow expiration and, if so, we need to have the old state
3080 * around to properly compose it. */
3081 action_xlate_ctx_init(&ctx
, ofproto
, &facet
->flow
, NULL
);
3082 odp_actions
= xlate_actions(&ctx
,
3083 new_rule
->up
.actions
, new_rule
->up
.n_actions
);
3084 actions_changed
= (facet
->actions_len
!= odp_actions
->size
3085 || memcmp(facet
->actions
, odp_actions
->data
,
3086 facet
->actions_len
));
3088 /* If the datapath actions changed or the installability changed,
3089 * then we need to talk to the datapath. */
3090 if (actions_changed
|| ctx
.may_set_up_flow
!= facet
->installed
) {
3091 if (ctx
.may_set_up_flow
) {
3092 struct dpif_flow_stats stats
;
3094 facet_put__(ofproto
, facet
,
3095 odp_actions
->data
, odp_actions
->size
, &stats
);
3096 facet_update_stats(ofproto
, facet
, &stats
);
3098 facet_uninstall(ofproto
, facet
);
3101 /* The datapath flow is gone or has zeroed stats, so push stats out of
3102 * 'facet' into 'rule'. */
3103 facet_flush_stats(ofproto
, facet
);
3106 /* Update 'facet' now that we've taken care of all the old state. */
3107 facet
->tags
= ctx
.tags
;
3108 facet
->nf_flow
.output_iface
= ctx
.nf_output_iface
;
3109 facet
->may_install
= ctx
.may_set_up_flow
;
3110 facet
->has_learn
= ctx
.has_learn
;
3111 facet
->has_normal
= ctx
.has_normal
;
3112 if (actions_changed
) {
3113 free(facet
->actions
);
3114 facet
->actions_len
= odp_actions
->size
;
3115 facet
->actions
= xmemdup(odp_actions
->data
, odp_actions
->size
);
3117 if (facet
->rule
!= new_rule
) {
3118 COVERAGE_INC(facet_changed_rule
);
3119 list_remove(&facet
->list_node
);
3120 list_push_back(&new_rule
->facets
, &facet
->list_node
);
3121 facet
->rule
= new_rule
;
3122 facet
->used
= new_rule
->up
.created
;
3123 facet
->rs_used
= facet
->used
;
3126 ofpbuf_delete(odp_actions
);
3131 /* Updates 'facet''s used time. Caller is responsible for calling
3132 * facet_push_stats() to update the flows which 'facet' resubmits into. */
3134 facet_update_time(struct ofproto_dpif
*ofproto
, struct facet
*facet
,
3137 if (used
> facet
->used
) {
3139 if (used
> facet
->rule
->used
) {
3140 facet
->rule
->used
= used
;
3142 netflow_flow_update_time(ofproto
->netflow
, &facet
->nf_flow
, used
);
3146 /* Folds the statistics from 'stats' into the counters in 'facet'.
3148 * Because of the meaning of a facet's counters, it only makes sense to do this
3149 * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
3150 * packet that was sent by hand or if it represents statistics that have been
3151 * cleared out of the datapath. */
3153 facet_update_stats(struct ofproto_dpif
*ofproto
, struct facet
*facet
,
3154 const struct dpif_flow_stats
*stats
)
3156 if (stats
->n_packets
|| stats
->used
> facet
->used
) {
3157 facet_update_time(ofproto
, facet
, stats
->used
);
3158 facet
->packet_count
+= stats
->n_packets
;
3159 facet
->byte_count
+= stats
->n_bytes
;
3160 facet_push_stats(facet
);
3161 netflow_flow_update_flags(&facet
->nf_flow
, stats
->tcp_flags
);
3166 facet_reset_counters(struct facet
*facet
)
3168 facet
->packet_count
= 0;
3169 facet
->byte_count
= 0;
3170 facet
->rs_packet_count
= 0;
3171 facet
->rs_byte_count
= 0;
3172 facet
->accounted_bytes
= 0;
3176 facet_push_stats(struct facet
*facet
)
3178 uint64_t rs_packets
, rs_bytes
;
3180 assert(facet
->packet_count
>= facet
->rs_packet_count
);
3181 assert(facet
->byte_count
>= facet
->rs_byte_count
);
3182 assert(facet
->used
>= facet
->rs_used
);
3184 rs_packets
= facet
->packet_count
- facet
->rs_packet_count
;
3185 rs_bytes
= facet
->byte_count
- facet
->rs_byte_count
;
3187 if (rs_packets
|| rs_bytes
|| facet
->used
> facet
->rs_used
) {
3188 facet
->rs_packet_count
= facet
->packet_count
;
3189 facet
->rs_byte_count
= facet
->byte_count
;
3190 facet
->rs_used
= facet
->used
;
3192 flow_push_stats(facet
->rule
, &facet
->flow
,
3193 rs_packets
, rs_bytes
, facet
->used
);
3197 struct ofproto_push
{
3198 struct action_xlate_ctx ctx
;
3205 push_resubmit(struct action_xlate_ctx
*ctx
, struct rule_dpif
*rule
)
3207 struct ofproto_push
*push
= CONTAINER_OF(ctx
, struct ofproto_push
, ctx
);
3210 rule
->packet_count
+= push
->packets
;
3211 rule
->byte_count
+= push
->bytes
;
3212 rule
->used
= MAX(push
->used
, rule
->used
);
3216 /* Pushes flow statistics to the rules which 'flow' resubmits into given
3217 * 'rule''s actions. */
3219 flow_push_stats(const struct rule_dpif
*rule
,
3220 struct flow
*flow
, uint64_t packets
, uint64_t bytes
,
3223 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3224 struct ofproto_push push
;
3226 push
.packets
= packets
;
3230 action_xlate_ctx_init(&push
.ctx
, ofproto
, flow
, NULL
);
3231 push
.ctx
.resubmit_hook
= push_resubmit
;
3232 ofpbuf_delete(xlate_actions(&push
.ctx
,
3233 rule
->up
.actions
, rule
->up
.n_actions
));
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
                 uint8_t table_id)
{
    struct cls_rule *cls_rule;
    struct classifier *cls;

    if (table_id >= N_TABLES) {
        return NULL;
    }

    cls = &ofproto->up.tables[table_id];
    if (flow->tos_frag & FLOW_FRAG_ANY
        && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
        /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
         * are unavailable. */
        struct flow ofpc_normal_flow = *flow;
        ofpc_normal_flow.tp_src = htons(0);
        ofpc_normal_flow.tp_dst = htons(0);
        cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
    } else {
        cls_rule = classifier_lookup(cls, flow);
    }
    return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
*rule
)
3267 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3269 rule_invalidate(rule
);
3271 struct dpif_completion
*c
= xmalloc(sizeof *c
);
3272 c
->op
= rule
->up
.pending
;
3273 list_push_back(&ofproto
->completions
, &c
->list_node
);
3275 ofoperation_complete(rule
->up
.pending
, 0);
3279 static struct rule
*
3282 struct rule_dpif
*rule
= xmalloc(sizeof *rule
);
3287 rule_dealloc(struct rule
*rule_
)
3289 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3294 rule_construct(struct rule
*rule_
)
3296 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3297 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3298 struct rule_dpif
*victim
;
3302 error
= validate_actions(rule
->up
.actions
, rule
->up
.n_actions
,
3303 &rule
->up
.cr
.flow
, ofproto
->max_ports
);
3308 rule
->used
= rule
->up
.created
;
3309 rule
->packet_count
= 0;
3310 rule
->byte_count
= 0;
3312 victim
= rule_dpif_cast(ofoperation_get_victim(rule
->up
.pending
));
3313 if (victim
&& !list_is_empty(&victim
->facets
)) {
3314 struct facet
*facet
;
3316 rule
->facets
= victim
->facets
;
3317 list_moved(&rule
->facets
);
3318 LIST_FOR_EACH (facet
, list_node
, &rule
->facets
) {
3319 /* XXX: We're only clearing our local counters here. It's possible
3320 * that quite a few packets are unaccounted for in the datapath
3321 * statistics. These will be accounted to the new rule instead of
3322 * cleared as required. This could be fixed by clearing out the
3323 * datapath statistics for this facet, but currently it doesn't
3325 facet_reset_counters(facet
);
3329 /* Must avoid list_moved() in this case. */
3330 list_init(&rule
->facets
);
3333 table_id
= rule
->up
.table_id
;
3334 rule
->tag
= (victim
? victim
->tag
3336 : rule_calculate_tag(&rule
->up
.cr
.flow
, &rule
->up
.cr
.wc
,
3337 ofproto
->tables
[table_id
].basis
));
3339 complete_operation(rule
);
3344 rule_destruct(struct rule
*rule_
)
3346 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3347 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3348 struct facet
*facet
, *next_facet
;
3350 LIST_FOR_EACH_SAFE (facet
, next_facet
, list_node
, &rule
->facets
) {
3351 facet_revalidate(ofproto
, facet
);
3354 complete_operation(rule
);
3358 rule_get_stats(struct rule
*rule_
, uint64_t *packets
, uint64_t *bytes
)
3360 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3361 struct facet
*facet
;
3363 /* Start from historical data for 'rule' itself that are no longer tracked
3364 * in facets. This counts, for example, facets that have expired. */
3365 *packets
= rule
->packet_count
;
3366 *bytes
= rule
->byte_count
;
3368 /* Add any statistics that are tracked by facets. This includes
3369 * statistical data recently updated by ofproto_update_stats() as well as
3370 * stats for packets that were executed "by hand" via dpif_execute(). */
3371 LIST_FOR_EACH (facet
, list_node
, &rule
->facets
) {
3372 *packets
+= facet
->packet_count
;
3373 *bytes
+= facet
->byte_count
;
3378 rule_execute(struct rule
*rule_
, struct flow
*flow
, struct ofpbuf
*packet
)
3380 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3381 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3382 struct action_xlate_ctx ctx
;
3383 struct ofpbuf
*odp_actions
;
3384 struct facet
*facet
;
3387 /* First look for a related facet. If we find one, account it to that. */
3388 facet
= facet_lookup_valid(ofproto
, flow
);
3389 if (facet
&& facet
->rule
== rule
) {
3390 if (!facet
->may_install
) {
3391 facet_make_actions(ofproto
, facet
, packet
);
3393 facet_execute(ofproto
, facet
, packet
);
3397 /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
3398 * create a new facet for it and use that. */
3399 if (rule_dpif_lookup(ofproto
, flow
, 0) == rule
) {
3400 facet
= facet_create(rule
, flow
);
3401 facet_make_actions(ofproto
, facet
, packet
);
3402 facet_execute(ofproto
, facet
, packet
);
3403 facet_install(ofproto
, facet
, true);
3407 /* We can't account anything to a facet. If we were to try, then that
3408 * facet would have a non-matching rule, busting our invariants. */
3409 action_xlate_ctx_init(&ctx
, ofproto
, flow
, packet
);
3410 odp_actions
= xlate_actions(&ctx
, rule
->up
.actions
, rule
->up
.n_actions
);
3411 size
= packet
->size
;
3412 if (execute_odp_actions(ofproto
, flow
, odp_actions
->data
,
3413 odp_actions
->size
, packet
)) {
3414 rule
->used
= time_msec();
3415 rule
->packet_count
++;
3416 rule
->byte_count
+= size
;
3417 flow_push_stats(rule
, flow
, 1, size
, rule
->used
);
3419 ofpbuf_delete(odp_actions
);
3425 rule_modify_actions(struct rule
*rule_
)
3427 struct rule_dpif
*rule
= rule_dpif_cast(rule_
);
3428 struct ofproto_dpif
*ofproto
= ofproto_dpif_cast(rule
->up
.ofproto
);
3431 error
= validate_actions(rule
->up
.actions
, rule
->up
.n_actions
,
3432 &rule
->up
.cr
.flow
, ofproto
->max_ports
);
3434 ofoperation_complete(rule
->up
.pending
, error
);
3438 complete_operation(rule
);
3441 /* Sends 'packet' out of port 'odp_port' within 'ofproto'.
3442 * Returns 0 if successful, otherwise a positive errno value. */
3444 send_packet(struct ofproto_dpif
*ofproto
, uint32_t odp_port
,
3445 const struct ofpbuf
*packet
)
3447 struct ofpbuf key
, odp_actions
;
3448 struct odputil_keybuf keybuf
;
3452 flow_extract((struct ofpbuf
*) packet
, 0, 0, &flow
);
3453 ofpbuf_use_stack(&key
, &keybuf
, sizeof keybuf
);
3454 odp_flow_key_from_flow(&key
, &flow
);
3456 ofpbuf_init(&odp_actions
, 32);
3457 compose_sflow_action(ofproto
, &odp_actions
, &flow
, odp_port
);
3459 nl_msg_put_u32(&odp_actions
, OVS_ACTION_ATTR_OUTPUT
, odp_port
);
3460 error
= dpif_execute(ofproto
->dpif
,
3462 odp_actions
.data
, odp_actions
.size
,
3464 ofpbuf_uninit(&odp_actions
);
3467 VLOG_WARN_RL(&rl
, "%s: failed to send packet on port %"PRIu32
" (%s)",
3468 ofproto
->up
.name
, odp_port
, strerror(error
));
3473 /* OpenFlow to datapath action translation. */
3475 static void do_xlate_actions(const union ofp_action
*in
, size_t n_in
,
3476 struct action_xlate_ctx
*ctx
);
3477 static void xlate_normal(struct action_xlate_ctx
*);
3480 put_userspace_action(const struct ofproto_dpif
*ofproto
,
3481 struct ofpbuf
*odp_actions
,
3482 const struct flow
*flow
,
3483 const struct user_action_cookie
*cookie
)
3488 pid
= dpif_port_get_pid(ofproto
->dpif
,
3489 ofp_port_to_odp_port(flow
->in_port
));
3491 offset
= nl_msg_start_nested(odp_actions
, OVS_ACTION_ATTR_USERSPACE
);
3492 nl_msg_put_u32(odp_actions
, OVS_USERSPACE_ATTR_PID
, pid
);
3493 nl_msg_put_unspec(odp_actions
, OVS_USERSPACE_ATTR_USERDATA
,
3494 cookie
, sizeof *cookie
);
3495 nl_msg_end_nested(odp_actions
, offset
);
3497 return odp_actions
->size
- NLA_ALIGN(sizeof *cookie
);
3500 /* Compose SAMPLE action for sFlow. */
3502 compose_sflow_action(const struct ofproto_dpif
*ofproto
,
3503 struct ofpbuf
*odp_actions
,
3504 const struct flow
*flow
,
3507 uint32_t port_ifindex
;
3508 uint32_t probability
;
3509 struct user_action_cookie cookie
;
3510 size_t sample_offset
, actions_offset
;
3511 int cookie_offset
, n_output
;
3513 if (!ofproto
->sflow
|| flow
->in_port
== OFPP_NONE
) {
3517 if (odp_port
== OVSP_NONE
) {
3521 port_ifindex
= dpif_sflow_odp_port_to_ifindex(ofproto
->sflow
, odp_port
);
3525 sample_offset
= nl_msg_start_nested(odp_actions
, OVS_ACTION_ATTR_SAMPLE
);
3527 /* Number of packets out of UINT_MAX to sample. */
3528 probability
= dpif_sflow_get_probability(ofproto
->sflow
);
3529 nl_msg_put_u32(odp_actions
, OVS_SAMPLE_ATTR_PROBABILITY
, probability
);
3531 actions_offset
= nl_msg_start_nested(odp_actions
, OVS_SAMPLE_ATTR_ACTIONS
);
3533 cookie
.type
= USER_ACTION_COOKIE_SFLOW
;
3534 cookie
.data
= port_ifindex
;
3535 cookie
.n_output
= n_output
;
3536 cookie
.vlan_tci
= 0;
3537 cookie_offset
= put_userspace_action(ofproto
, odp_actions
, flow
, &cookie
);
3539 nl_msg_end_nested(odp_actions
, actions_offset
);
3540 nl_msg_end_nested(odp_actions
, sample_offset
);
3541 return cookie_offset
;
3544 /* SAMPLE action must be first action in any given list of actions.
3545 * At this point we do not have all information required to build it. So try to
3546 * build sample action as complete as possible. */
3548 add_sflow_action(struct action_xlate_ctx
*ctx
)
3550 ctx
->user_cookie_offset
= compose_sflow_action(ctx
->ofproto
,
3552 &ctx
->flow
, OVSP_NONE
);
3553 ctx
->sflow_odp_port
= 0;
3554 ctx
->sflow_n_outputs
= 0;
3557 /* Fix SAMPLE action according to data collected while composing ODP actions.
3558 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
3559 * USERSPACE action's user-cookie which is required for sflow. */
3561 fix_sflow_action(struct action_xlate_ctx
*ctx
)
3563 const struct flow
*base
= &ctx
->base_flow
;
3564 struct user_action_cookie
*cookie
;
3566 if (!ctx
->user_cookie_offset
) {
3570 cookie
= ofpbuf_at(ctx
->odp_actions
, ctx
->user_cookie_offset
,
3572 assert(cookie
!= NULL
);
3573 assert(cookie
->type
== USER_ACTION_COOKIE_SFLOW
);
3575 if (ctx
->sflow_n_outputs
) {
3576 cookie
->data
= dpif_sflow_odp_port_to_ifindex(ctx
->ofproto
->sflow
,
3577 ctx
->sflow_odp_port
);
3579 if (ctx
->sflow_n_outputs
>= 255) {
3580 cookie
->n_output
= 255;
3582 cookie
->n_output
= ctx
->sflow_n_outputs
;
3584 cookie
->vlan_tci
= base
->vlan_tci
;
3588 commit_action__(struct ofpbuf
*odp_actions
,
3589 enum ovs_action_attr act_type
,
3590 enum ovs_key_attr key_type
,
3591 const void *key
, size_t key_size
)
3593 size_t offset
= nl_msg_start_nested(odp_actions
, act_type
);
3595 nl_msg_put_unspec(odp_actions
, key_type
, key
, key_size
);
3596 nl_msg_end_nested(odp_actions
, offset
);
3600 commit_set_tun_id_action(const struct flow
*flow
, struct flow
*base
,
3601 struct ofpbuf
*odp_actions
)
3603 if (base
->tun_id
== flow
->tun_id
) {
3606 base
->tun_id
= flow
->tun_id
;
3608 commit_action__(odp_actions
, OVS_ACTION_ATTR_SET
,
3609 OVS_KEY_ATTR_TUN_ID
, &base
->tun_id
, sizeof(base
->tun_id
));
3613 commit_set_ether_addr_action(const struct flow
*flow
, struct flow
*base
,
3614 struct ofpbuf
*odp_actions
)
3616 struct ovs_key_ethernet eth_key
;
3618 if (eth_addr_equals(base
->dl_src
, flow
->dl_src
) &&
3619 eth_addr_equals(base
->dl_dst
, flow
->dl_dst
)) {
3623 memcpy(base
->dl_src
, flow
->dl_src
, ETH_ADDR_LEN
);
3624 memcpy(base
->dl_dst
, flow
->dl_dst
, ETH_ADDR_LEN
);
3626 memcpy(eth_key
.eth_src
, base
->dl_src
, ETH_ADDR_LEN
);
3627 memcpy(eth_key
.eth_dst
, base
->dl_dst
, ETH_ADDR_LEN
);
3629 commit_action__(odp_actions
, OVS_ACTION_ATTR_SET
,
3630 OVS_KEY_ATTR_ETHERNET
, ð_key
, sizeof(eth_key
));
3634 commit_vlan_action(struct action_xlate_ctx
*ctx
, ovs_be16 new_tci
)
3636 struct flow
*base
= &ctx
->base_flow
;
3638 if (base
->vlan_tci
== new_tci
) {
3642 if (base
->vlan_tci
& htons(VLAN_CFI
)) {
3643 nl_msg_put_u16(ctx
->odp_actions
, OVS_ACTION_ATTR_POP
,
3644 OVS_KEY_ATTR_8021Q
);
3647 if (new_tci
& htons(VLAN_CFI
)) {
3648 struct ovs_key_8021q q_key
;
3650 q_key
.q_tpid
= htons(ETH_TYPE_VLAN
);
3651 q_key
.q_tci
= new_tci
& ~htons(VLAN_CFI
);
3653 commit_action__(ctx
->odp_actions
, OVS_ACTION_ATTR_PUSH
,
3654 OVS_KEY_ATTR_8021Q
, &q_key
, sizeof(q_key
));
3656 base
->vlan_tci
= new_tci
;
3660 commit_set_nw_action(const struct flow
*flow
, struct flow
*base
,
3661 struct ofpbuf
*odp_actions
)
3663 int frag
= base
->tos_frag
& FLOW_FRAG_MASK
;
3664 struct ovs_key_ipv4 ipv4_key
;
3666 if (base
->dl_type
!= htons(ETH_TYPE_IP
) ||
3667 !base
->nw_src
|| !base
->nw_dst
) {
3671 if (base
->nw_src
== flow
->nw_src
&&
3672 base
->nw_dst
== flow
->nw_dst
&&
3673 base
->tos_frag
== flow
->tos_frag
) {
3678 memset(&ipv4_key
, 0, sizeof(ipv4_key
));
3679 ipv4_key
.ipv4_src
= base
->nw_src
= flow
->nw_src
;
3680 ipv4_key
.ipv4_dst
= base
->nw_dst
= flow
->nw_dst
;
3681 ipv4_key
.ipv4_proto
= base
->nw_proto
;
3682 ipv4_key
.ipv4_tos
= flow
->tos_frag
& IP_DSCP_MASK
;
3683 ipv4_key
.ipv4_frag
= (frag
== 0 ? OVS_FRAG_TYPE_NONE
3684 : frag
== FLOW_FRAG_ANY
? OVS_FRAG_TYPE_FIRST
3685 : OVS_FRAG_TYPE_LATER
);
3687 commit_action__(odp_actions
, OVS_ACTION_ATTR_SET
,
3688 OVS_KEY_ATTR_IPV4
, &ipv4_key
, sizeof(ipv4_key
));
3692 commit_set_port_action(const struct flow
*flow
, struct flow
*base
,
3693 struct ofpbuf
*odp_actions
)
3695 if (!base
->tp_src
|| !base
->tp_dst
) {
3699 if (base
->tp_src
== flow
->tp_src
&&
3700 base
->tp_dst
== flow
->tp_dst
) {
3704 if (flow
->nw_proto
== IPPROTO_TCP
) {
3705 struct ovs_key_tcp port_key
;
3707 port_key
.tcp_src
= base
->tp_src
= flow
->tp_src
;
3708 port_key
.tcp_dst
= base
->tp_dst
= flow
->tp_dst
;
3710 commit_action__(odp_actions
, OVS_ACTION_ATTR_SET
,
3711 OVS_KEY_ATTR_TCP
, &port_key
, sizeof(port_key
));
3713 } else if (flow
->nw_proto
== IPPROTO_UDP
) {
3714 struct ovs_key_udp port_key
;
3716 port_key
.udp_src
= base
->tp_src
= flow
->tp_src
;
3717 port_key
.udp_dst
= base
->tp_dst
= flow
->tp_dst
;
3719 commit_action__(odp_actions
, OVS_ACTION_ATTR_SET
,
3720 OVS_KEY_ATTR_UDP
, &port_key
, sizeof(port_key
));
3725 commit_priority_action(struct action_xlate_ctx
*ctx
)
3727 if (ctx
->base_priority
== ctx
->priority
) {
3731 if (ctx
->priority
) {
3732 nl_msg_put_u32(ctx
->odp_actions
,
3733 OVS_ACTION_ATTR_SET_PRIORITY
, ctx
->priority
);
3735 nl_msg_put_flag(ctx
->odp_actions
, OVS_ACTION_ATTR_POP_PRIORITY
);
3737 ctx
->base_priority
= ctx
->priority
;
3741 commit_odp_actions(struct action_xlate_ctx
*ctx
)
3743 const struct flow
*flow
= &ctx
->flow
;
3744 struct flow
*base
= &ctx
->base_flow
;
3745 struct ofpbuf
*odp_actions
= ctx
->odp_actions
;
3747 commit_set_tun_id_action(flow
, base
, odp_actions
);
3748 commit_set_ether_addr_action(flow
, base
, odp_actions
);
3749 commit_vlan_action(ctx
, flow
->vlan_tci
);
3750 commit_set_nw_action(flow
, base
, odp_actions
);
3751 commit_set_port_action(flow
, base
, odp_actions
);
3752 commit_priority_action(ctx
);
3756 compose_output_action(struct action_xlate_ctx
*ctx
, uint16_t odp_port
)
3758 nl_msg_put_u32(ctx
->odp_actions
, OVS_ACTION_ATTR_OUTPUT
, odp_port
);
3759 ctx
->sflow_odp_port
= odp_port
;
3760 ctx
->sflow_n_outputs
++;
3764 add_output_action(struct action_xlate_ctx
*ctx
, uint16_t ofp_port
)
3766 const struct ofport_dpif
*ofport
= get_ofp_port(ctx
->ofproto
, ofp_port
);
3767 uint16_t odp_port
= ofp_port_to_odp_port(ofp_port
);
3770 if (ofport
->up
.opp
.config
& htonl(OFPPC_NO_FWD
)
3771 || !stp_forward_in_state(ofport
->stp_state
)) {
3772 /* Forwarding disabled on port. */
3777 * We don't have an ofport record for this port, but it doesn't hurt to
3778 * allow forwarding to it anyhow. Maybe such a port will appear later
3779 * and we're pre-populating the flow table.
3783 commit_odp_actions(ctx
);
3784 compose_output_action(ctx
, odp_port
);
3785 ctx
->nf_output_iface
= ofp_port
;
3789 xlate_table_action(struct action_xlate_ctx
*ctx
,
3790 uint16_t in_port
, uint8_t table_id
)
3792 if (ctx
->recurse
< MAX_RESUBMIT_RECURSION
) {
3793 struct ofproto_dpif
*ofproto
= ctx
->ofproto
;
3794 struct rule_dpif
*rule
;
3795 uint16_t old_in_port
;
3796 uint8_t old_table_id
;
3798 old_table_id
= ctx
->table_id
;
3799 ctx
->table_id
= table_id
;
3801 /* Look up a flow with 'in_port' as the input port. */
3802 old_in_port
= ctx
->flow
.in_port
;
3803 ctx
->flow
.in_port
= in_port
;
3804 rule
= rule_dpif_lookup(ofproto
, &ctx
->flow
, table_id
);
3807 if (table_id
> 0 && table_id
< N_TABLES
) {
3808 struct table_dpif
*table
= &ofproto
->tables
[table_id
];
3809 if (table
->other_table
) {
3812 : rule_calculate_tag(&ctx
->flow
,
3813 &table
->other_table
->wc
,
3818 /* Restore the original input port. Otherwise OFPP_NORMAL and
3819 * OFPP_IN_PORT will have surprising behavior. */
3820 ctx
->flow
.in_port
= old_in_port
;
3822 if (ctx
->resubmit_hook
) {
3823 ctx
->resubmit_hook(ctx
, rule
);
3828 do_xlate_actions(rule
->up
.actions
, rule
->up
.n_actions
, ctx
);
3832 ctx
->table_id
= old_table_id
;
3834 static struct vlog_rate_limit recurse_rl
= VLOG_RATE_LIMIT_INIT(1, 1);
3836 VLOG_ERR_RL(&recurse_rl
, "resubmit actions recursed over %d times",
3837 MAX_RESUBMIT_RECURSION
);
3842 xlate_resubmit_table(struct action_xlate_ctx
*ctx
,
3843 const struct nx_action_resubmit
*nar
)
3848 in_port
= (nar
->in_port
== htons(OFPP_IN_PORT
)
3850 : ntohs(nar
->in_port
));
3851 table_id
= nar
->table
== 255 ? ctx
->table_id
: nar
->table
;
3853 xlate_table_action(ctx
, in_port
, table_id
);
3857 flood_packets(struct action_xlate_ctx
*ctx
, ovs_be32 mask
)
3859 struct ofport_dpif
*ofport
;
3861 commit_odp_actions(ctx
);
3862 HMAP_FOR_EACH (ofport
, up
.hmap_node
, &ctx
->ofproto
->up
.ports
) {
3863 uint16_t ofp_port
= ofport
->up
.ofp_port
;
3864 if (ofp_port
!= ctx
->flow
.in_port
3865 && !(ofport
->up
.opp
.config
& mask
)
3866 && stp_forward_in_state(ofport
->stp_state
)) {
3867 compose_output_action(ctx
, ofport
->odp_port
);
3871 ctx
->nf_output_iface
= NF_OUT_FLOOD
;
3875 compose_controller_action(struct action_xlate_ctx
*ctx
, int len
)
3877 struct user_action_cookie cookie
;
3879 cookie
.type
= USER_ACTION_COOKIE_CONTROLLER
;
3881 cookie
.n_output
= 0;
3882 cookie
.vlan_tci
= 0;
3883 put_userspace_action(ctx
->ofproto
, ctx
->odp_actions
, &ctx
->flow
, &cookie
);
3887 xlate_output_action__(struct action_xlate_ctx
*ctx
,
3888 uint16_t port
, uint16_t max_len
)
3890 uint16_t prev_nf_output_iface
= ctx
->nf_output_iface
;
3892 ctx
->nf_output_iface
= NF_OUT_DROP
;
3896 add_output_action(ctx
, ctx
->flow
.in_port
);
3899 xlate_table_action(ctx
, ctx
->flow
.in_port
, ctx
->table_id
);
3905 flood_packets(ctx
, htonl(OFPPC_NO_FLOOD
));
3908 flood_packets(ctx
, htonl(0));
3910 case OFPP_CONTROLLER
:
3911 commit_odp_actions(ctx
);
3912 compose_controller_action(ctx
, max_len
);
3915 add_output_action(ctx
, OFPP_LOCAL
);
3920 if (port
!= ctx
->flow
.in_port
) {
3921 add_output_action(ctx
, port
);
3926 if (prev_nf_output_iface
== NF_OUT_FLOOD
) {
3927 ctx
->nf_output_iface
= NF_OUT_FLOOD
;
3928 } else if (ctx
->nf_output_iface
== NF_OUT_DROP
) {
3929 ctx
->nf_output_iface
= prev_nf_output_iface
;
3930 } else if (prev_nf_output_iface
!= NF_OUT_DROP
&&
3931 ctx
->nf_output_iface
!= NF_OUT_FLOOD
) {
3932 ctx
->nf_output_iface
= NF_OUT_MULTI
;
3937 xlate_output_reg_action(struct action_xlate_ctx
*ctx
,
3938 const struct nx_action_output_reg
*naor
)
3942 ofp_port
= nxm_read_field_bits(naor
->src
, naor
->ofs_nbits
, &ctx
->flow
);
3944 if (ofp_port
<= UINT16_MAX
) {
3945 xlate_output_action__(ctx
, ofp_port
, ntohs(naor
->max_len
));
3950 xlate_output_action(struct action_xlate_ctx
*ctx
,
3951 const struct ofp_action_output
*oao
)
3953 xlate_output_action__(ctx
, ntohs(oao
->port
), ntohs(oao
->max_len
));
3957 xlate_enqueue_action(struct action_xlate_ctx
*ctx
,
3958 const struct ofp_action_enqueue
*oae
)
3960 uint16_t ofp_port
, odp_port
;
3961 uint32_t ctx_priority
, priority
;
3964 error
= dpif_queue_to_priority(ctx
->ofproto
->dpif
, ntohl(oae
->queue_id
),
3967 /* Fall back to ordinary output action. */
3968 xlate_output_action__(ctx
, ntohs(oae
->port
), 0);
3972 /* Figure out datapath output port. */
3973 ofp_port
= ntohs(oae
->port
);
3974 if (ofp_port
== OFPP_IN_PORT
) {
3975 ofp_port
= ctx
->flow
.in_port
;
3976 } else if (ofp_port
== ctx
->flow
.in_port
) {
3979 odp_port
= ofp_port_to_odp_port(ofp_port
);
3981 /* Add datapath actions. */
3982 ctx_priority
= ctx
->priority
;
3983 ctx
->priority
= priority
;
3984 add_output_action(ctx
, odp_port
);
3985 ctx
->priority
= ctx_priority
;
3987 /* Update NetFlow output port. */
3988 if (ctx
->nf_output_iface
== NF_OUT_DROP
) {
3989 ctx
->nf_output_iface
= odp_port
;
3990 } else if (ctx
->nf_output_iface
!= NF_OUT_FLOOD
) {
3991 ctx
->nf_output_iface
= NF_OUT_MULTI
;
3996 xlate_set_queue_action(struct action_xlate_ctx
*ctx
,
3997 const struct nx_action_set_queue
*nasq
)
4002 error
= dpif_queue_to_priority(ctx
->ofproto
->dpif
, ntohl(nasq
->queue_id
),
4005 /* Couldn't translate queue to a priority, so ignore. A warning
4006 * has already been logged. */
4010 ctx
->priority
= priority
;
4013 struct xlate_reg_state
{
4019 xlate_autopath(struct action_xlate_ctx
*ctx
,
4020 const struct nx_action_autopath
*naa
)
4022 uint16_t ofp_port
= ntohl(naa
->id
);
4023 struct ofport_dpif
*port
= get_ofp_port(ctx
->ofproto
, ofp_port
);
4025 if (!port
|| !port
->bundle
) {
4026 ofp_port
= OFPP_NONE
;
4027 } else if (port
->bundle
->bond
) {
4028 /* Autopath does not support VLAN hashing. */
4029 struct ofport_dpif
*slave
= bond_choose_output_slave(
4030 port
->bundle
->bond
, &ctx
->flow
, 0, &ctx
->tags
);
4032 ofp_port
= slave
->up
.ofp_port
;
4035 autopath_execute(naa
, &ctx
->flow
, ofp_port
);
4039 slave_enabled_cb(uint16_t ofp_port
, void *ofproto_
)
4041 struct ofproto_dpif
*ofproto
= ofproto_
;
4042 struct ofport_dpif
*port
;
4052 case OFPP_CONTROLLER
: /* Not supported by the bundle action. */
4055 port
= get_ofp_port(ofproto
, ofp_port
);
4056 return port
? port
->may_enable
: false;
4061 xlate_learn_action(struct action_xlate_ctx
*ctx
,
4062 const struct nx_action_learn
*learn
)
4064 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
4065 struct ofputil_flow_mod fm
;
4068 learn_execute(learn
, &ctx
->flow
, &fm
);
4070 error
= ofproto_flow_mod(&ctx
->ofproto
->up
, &fm
);
4071 if (error
&& !VLOG_DROP_WARN(&rl
)) {
4072 char *msg
= ofputil_error_to_string(error
);
4073 VLOG_WARN("learning action failed to modify flow table (%s)", msg
);
4081 may_receive(const struct ofport_dpif
*port
, struct action_xlate_ctx
*ctx
)
4083 if (port
->up
.opp
.config
& (eth_addr_equals(ctx
->flow
.dl_dst
, eth_addr_stp
)
4084 ? htonl(OFPPC_NO_RECV_STP
)
4085 : htonl(OFPPC_NO_RECV
))) {
4089 /* Only drop packets here if both forwarding and learning are
4090 * disabled. If just learning is enabled, we need to have
4091 * OFPP_NORMAL and the learning action have a look at the packet
4092 * before we can drop it. */
4093 if (!stp_forward_in_state(port
->stp_state
)
4094 && !stp_learn_in_state(port
->stp_state
)) {
4102 do_xlate_actions(const union ofp_action
*in
, size_t n_in
,
4103 struct action_xlate_ctx
*ctx
)
4105 const struct ofport_dpif
*port
;
4106 const union ofp_action
*ia
;
4109 port
= get_ofp_port(ctx
->ofproto
, ctx
->flow
.in_port
);
4110 if (port
&& !may_receive(port
, ctx
)) {
4111 /* Drop this flow. */
4115 OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia
, left
, in
, n_in
) {
4116 const struct ofp_action_dl_addr
*oada
;
4117 const struct nx_action_resubmit
*nar
;
4118 const struct nx_action_set_tunnel
*nast
;
4119 const struct nx_action_set_queue
*nasq
;
4120 const struct nx_action_multipath
*nam
;
4121 const struct nx_action_autopath
*naa
;
4122 const struct nx_action_bundle
*nab
;
4123 const struct nx_action_output_reg
*naor
;
4124 enum ofputil_action_code code
;
4131 code
= ofputil_decode_action_unsafe(ia
);
4133 case OFPUTIL_OFPAT_OUTPUT
:
4134 xlate_output_action(ctx
, &ia
->output
);
4137 case OFPUTIL_OFPAT_SET_VLAN_VID
:
4138 ctx
->flow
.vlan_tci
&= ~htons(VLAN_VID_MASK
);
4139 ctx
->flow
.vlan_tci
|= ia
->vlan_vid
.vlan_vid
| htons(VLAN_CFI
);
4142 case OFPUTIL_OFPAT_SET_VLAN_PCP
:
4143 ctx
->flow
.vlan_tci
&= ~htons(VLAN_PCP_MASK
);
4144 ctx
->flow
.vlan_tci
|= htons(
4145 (ia
->vlan_pcp
.vlan_pcp
<< VLAN_PCP_SHIFT
) | VLAN_CFI
);
4148 case OFPUTIL_OFPAT_STRIP_VLAN
:
4149 ctx
->flow
.vlan_tci
= htons(0);
4152 case OFPUTIL_OFPAT_SET_DL_SRC
:
4153 oada
= ((struct ofp_action_dl_addr
*) ia
);
4154 memcpy(ctx
->flow
.dl_src
, oada
->dl_addr
, ETH_ADDR_LEN
);
4157 case OFPUTIL_OFPAT_SET_DL_DST
:
4158 oada
= ((struct ofp_action_dl_addr
*) ia
);
4159 memcpy(ctx
->flow
.dl_dst
, oada
->dl_addr
, ETH_ADDR_LEN
);
4162 case OFPUTIL_OFPAT_SET_NW_SRC
:
4163 ctx
->flow
.nw_src
= ia
->nw_addr
.nw_addr
;
4166 case OFPUTIL_OFPAT_SET_NW_DST
:
4167 ctx
->flow
.nw_dst
= ia
->nw_addr
.nw_addr
;
4170 case OFPUTIL_OFPAT_SET_NW_TOS
:
4171 ctx
->flow
.tos_frag
&= ~IP_DSCP_MASK
;
4172 ctx
->flow
.tos_frag
|= ia
->nw_tos
.nw_tos
& IP_DSCP_MASK
;
4175 case OFPUTIL_OFPAT_SET_TP_SRC
:
4176 ctx
->flow
.tp_src
= ia
->tp_port
.tp_port
;
4179 case OFPUTIL_OFPAT_SET_TP_DST
:
4180 ctx
->flow
.tp_dst
= ia
->tp_port
.tp_port
;
4183 case OFPUTIL_OFPAT_ENQUEUE
:
4184 xlate_enqueue_action(ctx
, (const struct ofp_action_enqueue
*) ia
);
4187 case OFPUTIL_NXAST_RESUBMIT
:
4188 nar
= (const struct nx_action_resubmit
*) ia
;
4189 xlate_table_action(ctx
, ntohs(nar
->in_port
), ctx
->table_id
);
4192 case OFPUTIL_NXAST_RESUBMIT_TABLE
:
4193 xlate_resubmit_table(ctx
, (const struct nx_action_resubmit
*) ia
);
4196 case OFPUTIL_NXAST_SET_TUNNEL
:
4197 nast
= (const struct nx_action_set_tunnel
*) ia
;
4198 tun_id
= htonll(ntohl(nast
->tun_id
));
4199 ctx
->flow
.tun_id
= tun_id
;
4202 case OFPUTIL_NXAST_SET_QUEUE
:
4203 nasq
= (const struct nx_action_set_queue
*) ia
;
4204 xlate_set_queue_action(ctx
, nasq
);
4207 case OFPUTIL_NXAST_POP_QUEUE
:
4211 case OFPUTIL_NXAST_REG_MOVE
:
4212 nxm_execute_reg_move((const struct nx_action_reg_move
*) ia
,
4216 case OFPUTIL_NXAST_REG_LOAD
:
4217 nxm_execute_reg_load((const struct nx_action_reg_load
*) ia
,
4221 case OFPUTIL_NXAST_NOTE
:
4222 /* Nothing to do. */
4225 case OFPUTIL_NXAST_SET_TUNNEL64
:
4226 tun_id
= ((const struct nx_action_set_tunnel64
*) ia
)->tun_id
;
4227 ctx
->flow
.tun_id
= tun_id
;
4230 case OFPUTIL_NXAST_MULTIPATH
:
4231 nam
= (const struct nx_action_multipath
*) ia
;
4232 multipath_execute(nam
, &ctx
->flow
);
4235 case OFPUTIL_NXAST_AUTOPATH
:
4236 naa
= (const struct nx_action_autopath
*) ia
;
4237 xlate_autopath(ctx
, naa
);
4240 case OFPUTIL_NXAST_BUNDLE
:
4241 ctx
->ofproto
->has_bundle_action
= true;
4242 nab
= (const struct nx_action_bundle
*) ia
;
4243 xlate_output_action__(ctx
, bundle_execute(nab
, &ctx
->flow
,
4248 case OFPUTIL_NXAST_BUNDLE_LOAD
:
4249 ctx
->ofproto
->has_bundle_action
= true;
4250 nab
= (const struct nx_action_bundle
*) ia
;
4251 bundle_execute_load(nab
, &ctx
->flow
, slave_enabled_cb
,
4255 case OFPUTIL_NXAST_OUTPUT_REG
:
4256 naor
= (const struct nx_action_output_reg
*) ia
;
4257 xlate_output_reg_action(ctx
, naor
);
4260 case OFPUTIL_NXAST_LEARN
:
4261 ctx
->has_learn
= true;
4262 if (ctx
->may_learn
) {
4263 xlate_learn_action(ctx
, (const struct nx_action_learn
*) ia
);
4267 case OFPUTIL_NXAST_EXIT
:
4273 /* We've let OFPP_NORMAL and the learning action look at the packet,
4274 * so drop it now if forwarding is disabled. */
4275 if (port
&& !stp_forward_in_state(port
->stp_state
)) {
4276 ofpbuf_clear(ctx
->odp_actions
);
4277 add_sflow_action(ctx
);

static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      const struct ofpbuf *packet)
{
    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->resubmit_hook = NULL;
}

static struct ofpbuf *
xlate_actions(struct action_xlate_ctx *ctx,
              const union ofp_action *in, size_t n_in)
{
    COVERAGE_INC(ofproto_dpif_xlate);

    ctx->odp_actions = ofpbuf_new(512);
    ofpbuf_reserve(ctx->odp_actions, NL_A_U32_SIZE);
    ctx->may_set_up_flow = true;
    ctx->has_learn = false;
    ctx->has_normal = false;
    ctx->nf_output_iface = NF_OUT_DROP;
    ctx->base_priority = 0;
    ctx->base_flow = ctx->flow;
    ctx->base_flow.tun_id = 0;

    if (ctx->flow.tos_frag & FLOW_FRAG_ANY) {
        switch (ctx->ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
            ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return ctx->odp_actions;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;
        }
    }

    if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
        ctx->may_set_up_flow = false;
        return ctx->odp_actions;
    } else {
        add_sflow_action(ctx);
        do_xlate_actions(in, n_in, ctx);

        if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
                                     ctx->odp_actions->data,
                                     ctx->odp_actions->size)) {
            ctx->may_set_up_flow = false;
            if (ctx->packet
                && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
                                       ctx->packet)) {
                compose_output_action(ctx, OVSP_LOCAL);
            }
        }
        fix_sflow_action(ctx);
    }

    return ctx->odp_actions;
}

/* OFPP_NORMAL implementation. */

struct dst {
    struct ofport_dpif *port;
    uint16_t vid;
};

struct dst_set {
    struct dst builtin[32];
    struct dst *dsts;
    size_t n, allocated;
};

static void dst_set_init(struct dst_set *);
static void dst_set_add(struct dst_set *, const struct dst *);
static void dst_set_free(struct dst_set *);

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}
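
/* A worked example of the mapping above (the bundles here are hypothetical):
 * an access bundle with implicit VLAN 7 maps any received VID to VLAN 7; a
 * native-tagged or native-untagged bundle with native VLAN 5 maps VID 0 to
 * VLAN 5 and VID 9 to VLAN 9; a plain trunk simply uses the received VID as
 * the VLAN. */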

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}
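
/* A worked example, again with hypothetical bundles: output to an access
 * bundle is always untagged (VID 0); output of VLAN 9 to a trunk or
 * native-tagged bundle carries VID 9; output to a native-untagged bundle
 * whose native VLAN is 5 is untagged for VLAN 5 but tagged with VID 9 for
 * VLAN 9.  This is the inverse of input_vid_to_vlan() above. */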

static bool
set_dst(struct action_xlate_ctx *ctx, struct dst *dst,
        const struct ofbundle *in_bundle, const struct ofbundle *out_bundle)
{
    uint16_t vlan;

    vlan = input_vid_to_vlan(in_bundle, vlan_tci_to_vid(ctx->flow.vlan_tci));
    dst->vid = output_vlan_to_vid(out_bundle, vlan);

    dst->port = (!out_bundle->bond
                 ? ofbundle_get_a_port(out_bundle)
                 : bond_choose_output_slave(out_bundle->bond, &ctx->flow,
                                            dst->vid, &ctx->tags));
    return dst->port != NULL;
}

static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}

static void
dst_set_init(struct dst_set *set)
{
    set->dsts = set->builtin;
    set->n = 0;
    set->allocated = ARRAY_SIZE(set->builtin);
}

static void
dst_set_add(struct dst_set *set, const struct dst *dst)
{
    if (set->n >= set->allocated) {
        size_t new_allocated;
        struct dst *new_dsts;

        new_allocated = set->allocated * 2;
        new_dsts = xmalloc(new_allocated * sizeof *new_dsts);
        memcpy(new_dsts, set->dsts, set->n * sizeof *new_dsts);

        dst_set_free(set);

        set->dsts = new_dsts;
        set->allocated = new_allocated;
    }
    set->dsts[set->n++] = *dst;
}
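
/* Capacity grows by the usual doubling strategy: the first
 * ARRAY_SIZE(set->builtin) (32) destinations live in the array embedded in
 * the set, so the common case never touches the heap; adding a 33rd
 * destination allocates room for 64 entries on the heap and copies the
 * existing ones across, and each later overflow doubles the allocation
 * again. */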

static void
dst_set_free(struct dst_set *set)
{
    if (set->dsts != set->builtin) {
        free(set->dsts);
    }
}

static bool
dst_is_duplicate(const struct dst_set *set, const struct dst *test)
{
    size_t i;

    for (i = 0; i < set->n; i++) {
        if (set->dsts[i].vid == test->vid
            && set->dsts[i].port == test->port) {
            return true;
        }
    }
    return false;
}

static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static void
compose_dsts(struct action_xlate_ctx *ctx, uint16_t vlan,
             const struct ofbundle *in_bundle,
             const struct ofbundle *out_bundle, struct dst_set *set)
{
    struct dst dst;

    if (out_bundle == OFBUNDLE_FLOOD) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out
                && set_dst(ctx, &dst, in_bundle, bundle)) {
                dst_set_add(set, &dst);
            }
        }
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (out_bundle && set_dst(ctx, &dst, in_bundle, out_bundle)) {
        dst_set_add(set, &dst);
        ctx->nf_output_iface = dst.port->odp_port;
    }
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}

/* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored
 * to a VLAN.  In general most packets may be mirrored but we want to drop
 * protocols that may confuse switches. */
static bool
eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN])
{
    /* If you change this function's behavior, please update corresponding
     * documentation in vswitch.xml at the same time. */
    if (dst[0] != 0x01) {
        /* All the currently banned MACs happen to start with 01 currently, so
         * this is a quick way to eliminate most of the good ones. */
        return true;
    }

    if (eth_addr_is_reserved(dst)) {
        /* Drop STP, IEEE pause frames, and other reserved protocols
         * (01-80-c2-00-00-0x). */
        return false;
    }

    if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) {
        if ((dst[3] & 0xfe) == 0xcc &&
            (dst[4] & 0xfe) == 0xcc &&
            (dst[5] & 0xfe) == 0xcc) {
            /* Drop the following protocols plus others following the same
               pattern:

               CDP, VTP, DTP, PAgP  (01-00-0c-cc-cc-cc)
               Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd)
               STP Uplink Fast      (01-00-0c-cd-cd-cd) */
            return false;
        }

        if (!(dst[3] | dst[4] | dst[5])) {
            /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */
            return false;
        }
    }

    return true;
}
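
/* Concretely: STP and IEEE pause frames (01-80-c2-00-00-0x), Cisco
 * CDP/VTP/DTP/PAgP and PVST+ (01-00-0c-cc-cc-cc and -cd), and ISL
 * (01-00-0c-00-00-00) are refused, while ordinary unicast destinations and,
 * for example, IP multicast destinations (01-00-5e-xx-xx-xx) may still be
 * mirrored. */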

static void
compose_mirror_dsts(struct action_xlate_ctx *ctx,
                    uint16_t vlan, const struct ofbundle *in_bundle,
                    struct dst_set *set)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    int flow_vid;
    size_t i;

    mirrors = in_bundle->src_mirrors;
    for (i = 0; i < set->n; i++) {
        mirrors |= set->dsts[i].port->bundle->dst_mirrors;
    }

    if (!mirrors) {
        return;
    }

    flow_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    while (mirrors) {
        struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
        if (vlan_is_mirrored(m, vlan)) {
            struct dst dst;

            if (m->out) {
                if (set_dst(ctx, &dst, in_bundle, m->out)
                    && !dst_is_duplicate(set, &dst)) {
                    dst_set_add(set, &dst);
                }
            } else if (eth_dst_may_rspan(ctx->flow.dl_dst)) {
                struct ofbundle *bundle;

                HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                    if (ofbundle_includes_vlan(bundle, m->out_vlan)
                        && set_dst(ctx, &dst, in_bundle, bundle)) {
                        /* set_dst() got dst->vid from the input packet's VLAN,
                         * not from m->out_vlan, so recompute it. */
                        dst.vid = output_vlan_to_vid(bundle, m->out_vlan);

                        if (dst_is_duplicate(set, &dst)) {
                            continue;
                        }

                        if (bundle == in_bundle && dst.vid == flow_vid) {
                            /* Don't send out input port on same VLAN. */
                            continue;
                        }
                        dst_set_add(set, &dst);
                    }
                }
            }
        }
        mirrors &= mirrors - 1;
    }
}

static void
compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan,
                const struct ofbundle *in_bundle,
                const struct ofbundle *out_bundle)
{
    uint16_t initial_vid, cur_vid;
    const struct dst *dst;
    struct dst_set set;

    dst_set_init(&set);
    compose_dsts(ctx, vlan, in_bundle, out_bundle, &set);
    compose_mirror_dsts(ctx, vlan, in_bundle, &set);

    /* Output all the packets we can without having to change the VLAN. */
    commit_odp_actions(ctx);
    initial_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
        if (dst->vid != initial_vid) {
            continue;
        }
        compose_output_action(ctx, dst->port->odp_port);
    }

    /* Then output the rest. */
    cur_vid = initial_vid;
    for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
        if (dst->vid == initial_vid) {
            continue;
        }
        if (dst->vid != cur_vid) {
            ovs_be16 tci;

            tci = htons(dst->vid);
            tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
            if (tci) {
                tci |= htons(VLAN_CFI);
            }
            commit_vlan_action(ctx, tci);

            cur_vid = dst->vid;
        }
        compose_output_action(ctx, dst->port->odp_port);
    }

    dst_set_free(&set);
}
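
/* The two passes above minimize VLAN rewrites.  For example, if the packet
 * arrived with VID 10 and the destination set (after mirroring) contains
 * ports on VIDs 10, 10, 20, and 0, the first loop emits the two VID-10
 * outputs with no VLAN action at all; the second loop then re-tags once for
 * VID 20 and once for VID 0 (which, with a zero PCP, strips the tag),
 * emitting each remaining output after its rewrite. */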

/* Returns the effective vlan of a packet, taking into account both the
 * 802.1Q header and implicitly tagged ports.  A value of 0 indicates that
 * the packet is untagged and -1 indicates it has an invalid header and
 * should be dropped. */
static int
flow_get_vlan(struct ofproto_dpif *ofproto, const struct flow *flow,
              struct ofbundle *in_bundle, bool have_packet)
{
    int vlan = vlan_tci_to_vid(flow->vlan_tci);
    if (vlan) {
        if (in_bundle->vlan_mode == PORT_VLAN_ACCESS) {
            /* Drop tagged packet on access port */
            if (have_packet) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
                             "packet received on port %s configured with "
                             "implicit VLAN %"PRIu16,
                             ofproto->up.name, vlan,
                             in_bundle->name, in_bundle->vlan);
            }
            return -1;
        } else if (ofbundle_includes_vlan(in_bundle, vlan)) {
            return vlan;
        } else {
            /* Drop packets from a VLAN that is not a member of the trunk */
            if (have_packet) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
                             "packet received on port %s not configured for "
                             "trunking VLAN %d",
                             ofproto->up.name, vlan, in_bundle->name, vlan);
            }
            return -1;
        }
    } else if (in_bundle->vlan_mode != PORT_VLAN_TRUNK) {
        return in_bundle->vlan;
    } else {
        return ofbundle_includes_vlan(in_bundle, 0) ? 0 : -1;
    }
}
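
/* A worked example with hypothetical ports: an untagged packet on an access
 * port with implicit VLAN 7 yields 7; a packet tagged with VID 9 on that
 * same access port yields -1 (plus a rate-limited warning); VID 9 on a trunk
 * that carries VLAN 9 yields 9; and VID 9 on a trunk that does not carry it
 * yields -1. */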

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}
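
/* For example, a broadcast ARP request whose sender and target protocol
 * addresses are both 10.0.0.5 is treated as gratuitous, as is any broadcast
 * ARP reply; a broadcast ARP request whose sender and target addresses
 * differ is not. */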

static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}

/* Determines whether packets in 'flow' within 'ofproto' should be forwarded
 * or dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * If 'have_packet' is true, it indicates that the caller is processing a
 * received packet.  If 'have_packet' is false, then the caller is just
 * revalidating an existing flow because configuration has changed.  Either
 * way, 'have_packet' only affects logging (there is no point in logging errors
 * during revalidation).
 *
 * Sets '*in_bundlep' to the input bundle.  This will be a null pointer if
 * flow->in_port does not designate a known input port (in which case
 * is_admissible() returns false).
 *
 * When returning true, sets '*vlanp' to the effective VLAN of the input
 * packet, as returned by flow_get_vlan().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case. */
static bool
is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
              bool have_packet,
              tag_type *tags, int *vlanp, struct ofbundle **in_bundlep)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    int vlan;

    /* Find the port and bundle for the received packet. */
    in_port = get_ofp_port(ofproto, flow->in_port);
    *in_bundlep = in_bundle = in_port ? in_port->bundle : NULL;
    if (!in_port || !in_bundle) {
        /* No interface?  Something fishy... */
        if (have_packet) {
            /* Odd.  A few possible reasons here:
             *
             * - We deleted a port but there are still a few packets queued up
             *   from it.
             *
             * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
             *   we don't know about.
             *
             * - Packet arrived on the local port but the local port is not
             *   one of our bridge ports. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                         "port %"PRIu16, ofproto->up.name, flow->in_port);
        }
        return false;
    }

    *vlanp = vlan = flow_get_vlan(ofproto, flow, in_bundle, have_packet);
    if (vlan < 0) {
        return false;
    }

    /* Drop frames for reserved multicast addresses only if forward_bpdu
     * option is absent. */
    if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
        return false;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (have_packet) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ofproto->up.name, in_bundle->name);
        }
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                return false;
            }
            break;
        }
    }

    return true;
}

static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofbundle *in_bundle;
    struct ofbundle *out_bundle;
    struct mac_entry *mac;
    int vlan;

    ctx->has_normal = true;

    /* Check whether we should drop packets in this flow. */
    if (!is_admissible(ctx->ofproto, &ctx->flow, ctx->packet != NULL,
                       &ctx->tags, &vlan, &in_bundle)) {
        ctx->may_set_up_flow = false;
        return;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        out_bundle = mac->port.p;
    } else if (!ctx->packet && !eth_addr_is_multicast(ctx->flow.dl_dst)) {
        /* If we are revalidating but don't have a learning entry then eject
         * the flow.  Installing a flow that floods packets opens up a window
         * of time where we could learn from a packet reflected on a bond and
         * blackhole packets before the learning table is updated to reflect
         * the correct port. */
        ctx->may_set_up_flow = false;
        return;
    } else {
        out_bundle = OFBUNDLE_FLOOD;
    }

    /* Don't send packets out their input bundles. */
    if (in_bundle == out_bundle) {
        out_bundle = NULL;
    }

    if (out_bundle) {
        compose_actions(ctx, vlan, in_bundle, out_bundle);
    }
}

/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
 * into an OpenFlow table with the given 'secret'. */
static tag_type
rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
                   uint32_t secret)
{
    if (flow_wildcards_is_catchall(wc)) {
        return 0;
    } else {
        struct flow tag_flow = *flow;
        flow_zero_wildcards(&tag_flow, wc);
        return tag_create_deterministic(flow_hash(&tag_flow, secret));
    }
}
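
/* Put another way, the tag is a deterministic hash of only the fields that
 * the table's rules actually match on.  For example, if every rule in a
 * table matches on (vlan_tci, dl_dst), a facet that resubmits into that
 * table is tagged with the hash of its own vlan_tci and dl_dst; when MAC
 * learning later adds or removes a rule for some (VLAN, MAC) pair,
 * rule_invalidate() below adds that rule's identical tag to the revalidate
 * set, so only facets sharing the tag (mostly just flows destined to that
 * MAC, plus occasional hash collisions) are revalidated. */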

/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct classifier *cls = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&cls->tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &cls->tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->need_revalidate = true;
    }
}

/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->revalidate_set, rule->tag);
        } else {
            ofproto->need_revalidate = true;
        }
    }
}

static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->need_revalidate = true;
        return true;
    } else {
        return false;
    }
}

static int
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const union ofp_action *ofp_actions, size_t n_ofp_actions)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    error = validate_actions(ofp_actions, n_ofp_actions, flow,
                             ofproto->max_ports);
    if (!error) {
        struct odputil_keybuf keybuf;
        struct action_xlate_ctx ctx;
        struct ofpbuf *odp_actions;
        struct ofpbuf key;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&key, flow);

        action_xlate_ctx_init(&ctx, ofproto, flow, packet);
        odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
        dpif_execute(ofproto->dpif, key.data, key.size,
                     odp_actions->data, odp_actions->size, packet);
        ofpbuf_delete(odp_actions);
    }
    return error;
}

static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
}

static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto *ofproto = ofproto_lookup(name);
    return (ofproto && ofproto->ofproto_class == &ofproto_dpif_class
            ? ofproto_dpif_cast(ofproto)
            : NULL);
}

static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn,
                         const char *args, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(args);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
    }
    unixctl_command_reply(conn, 200, ds_cstr(&ds));
    ds_destroy(&ds);
}

struct ofproto_trace {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};

static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
    ds_put_char(result, '\n');
}

static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}

static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}

static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}

static void
ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_,
                      void *aux OVS_UNUSED)
{
    char *dpname, *arg1, *arg2, *arg3;
    char *args = xstrdup(args_);
    char *save_ptr = NULL;
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    struct rule_dpif *rule;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    dpname = strtok_r(args, " ", &save_ptr);
    arg1 = strtok_r(NULL, " ", &save_ptr);
    arg2 = strtok_r(NULL, " ", &save_ptr);
    arg3 = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */
    if (dpname && arg1 && (!arg2 || !strcmp(arg2, "-generate")) && !arg3) {
        /* ofproto/trace dpname flow [-generate] */
        int error;

        /* Convert string to datapath key. */
        ofpbuf_init(&odp_key, 0);
        error = odp_flow_key_from_string(arg1, &odp_key);
        if (error) {
            unixctl_command_reply(conn, 501, "Bad flow syntax");
            goto exit;
        }

        /* Convert odp_key to flow. */
        error = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
        if (error) {
            unixctl_command_reply(conn, 501, "Invalid flow");
            goto exit;
        }

        /* Generate a packet, if requested. */
        if (arg2) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (dpname && arg1 && arg2 && arg3) {
        /* ofproto/trace dpname tun_id in_port packet */
        uint16_t in_port;
        ovs_be64 tun_id;

        tun_id = htonll(strtoull(arg1, NULL, 0));
        in_port = ofp_port_to_odp_port(atoi(arg2));

        packet = ofpbuf_new(strlen(args) / 2);
        arg3 = ofpbuf_put_hex(packet, arg3, NULL);
        arg3 += strspn(arg3, " ");
        if (*arg3 != '\0') {
            unixctl_command_reply(conn, 501, "Trailing garbage in command");
            goto exit;
        }
        if (packet->size < ETH_HEADER_LEN) {
            unixctl_command_reply(conn, 501,
                                  "Packet data too short for Ethernet");
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, tun_id, in_port, &flow);
    } else {
        unixctl_command_reply(conn, 501, "Bad command syntax");
        goto exit;
    }

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
                              "for help)");
        goto exit;
    }

    ds_put_cstr(&result, "Flow: ");
    flow_format(&result, &flow);
    ds_put_char(&result, '\n');

    rule = rule_dpif_lookup(ofproto, &flow, 0);
    trace_format_rule(&result, 0, 0, rule);
    if (rule) {
        struct ofproto_trace trace;
        struct ofpbuf *odp_actions;

        trace.result = &result;
        trace.flow = flow;
        action_xlate_ctx_init(&trace.ctx, ofproto, &flow, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        odp_actions = xlate_actions(&trace.ctx,
                                    rule->up.actions, rule->up.n_actions);

        ds_put_char(&result, '\n');
        trace_format_flow(&result, 0, "Final flow", &trace);
        ds_put_cstr(&result, "Datapath actions: ");
        format_odp_actions(&result, odp_actions->data, odp_actions->size);
        ofpbuf_delete(odp_actions);

        if (!trace.ctx.may_set_up_flow) {
            if (packet) {
                ds_put_cstr(&result, "\nThis flow is not cachable.");
            } else {
                ds_put_cstr(&result, "\nThe datapath actions are incomplete--"
                            "for complete actions, please supply a packet.");
            }
        }
    }

    unixctl_command_reply(conn, 200, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}

static void
ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED,
                  const char *args_ OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, 200, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED,
                    const char *args_ OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, 200, NULL);
}

static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register("ofproto/trace",
                      "bridge {tun_id in_port packet | odp_flow [-generate]}",
                      ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/show", "bridge", ofproto_unixctl_fdb_show,
                             NULL);
    unixctl_command_register("ofproto/clog", "", ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", ofproto_dpif_unclog, NULL);
}
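
/* Typical invocations of the commands registered above, assuming a bridge
 * named "br0" (the flow and packet arguments are placeholders):
 *
 *     ovs-appctl fdb/show br0
 *     ovs-appctl ofproto/trace br0 <odp-flow> -generate
 *     ovs-appctl ofproto/trace br0 <tun_id> <in_port> <packet-hex>
 *     ovs-appctl ofproto/clog
 *     ovs-appctl ofproto/unclog
 */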

const struct ofproto_class ofproto_dpif_class = {
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_modify_actions,
    get_cfm_remote_mpids,
    get_stp_port_status,
    is_mirror_output_bundle,
    forward_bpdu_changed,
};