/*
 * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "autopath.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-print.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "timer.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_no_packet_in);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_invalidated);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 32

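/* Illustrative sketch, not part of the original file: resubmit translation
 * tracks its depth in 'recurse' (see struct action_xlate_ctx below) and is
 * expected to guard each nested table lookup roughly like this:
 *
 *     if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
 *         ctx->recurse++;
 *         ...translate the rule found in the resubmitted table...
 *         ctx->recurse--;
 *     } else {
 *         ...give up, recording the overrun in 'max_resubmit_trigger'...
 *     }
 */
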
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255);

struct ofport_dpif;
struct ofproto_dpif;

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    tag_type tag;                /* Caches rule_calculate_tag() result. */

    struct list facets;          /* List of "struct facet"s. */
};

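/* Illustrative sketch, not part of the original file: because of the
 * invariants above, a reader of 'packet_count'/'byte_count' must fold in
 * the per-facet counts to obtain the rule's full totals, roughly:
 *
 *     uint64_t packets = rule->packet_count;
 *     struct facet *facet;
 *
 *     LIST_FOR_EACH (facet, list_node, &rule->facets) {
 *         packets += facet->packet_count;
 *     }
 */
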
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *, uint8_t table);

static void flow_push_stats(struct rule_dpif *, const struct flow *,
                            uint64_t packets, uint64_t bytes,
                            long long int used);

static uint32_t rule_calculate_tag(const struct flow *,
                                   const struct flow_wildcards *,
                                   uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;          /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(struct ofproto_dpif *,
                                            uint16_t in_port, bool warn);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does it
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

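/* Sketch of how 'ofpp_none_bundle' is expected to be handed out (an
 * assumption, not quoted from this file):
 *
 *     if (in_port == OFPP_NONE) {
 *         return &ofpp_none_bundle;
 *     }
 */
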
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute?  We
     * want to execute them if we are actually processing a packet, or if we
     * are accounting for packets that the datapath has processed, but not if
     * we are just revalidating. */
    bool may_learn;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, called just before executing a resubmit action.  In
     * addition, disables logging of traces when the recursion depth is
     * exceeded.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *);

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint16_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
                                    const union ofp_action *in, size_t n_in);

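/* Typical usage of the translation context (a sketch under the assumption
 * that the caller owns 'rule' and an optional 'packet'; 'rule->up.actions'
 * and 'rule->up.n_actions' are taken from struct rule in
 * ofproto-provider.h):
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf *odp_actions;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, flow.vlan_tci, rule,
 *                           0, packet);
 *     odp_actions = xlate_actions(&ctx, rule->up.actions,
 *                                 rule->up.n_actions);
 *     ...use ctx.tags, ctx.may_set_up_flow, and the datapath actions...
 *     ofpbuf_delete(odp_actions);
 */
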
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool may_install;            /* Reassess actions for every packet? */
    bool has_learn;              /* Actions include NXAST_LEARN? */
    bool has_normal;             /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;        /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;               /* Tags that would require revalidation. */
    mirror_mask_t mirrors;       /* Bitmap of dependent mirrors. */
};

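/* Illustrative example, not part of the original file: summing the datapath
 * statistics across a facet's subfacets (cf. the statistics invariants
 * above):
 *
 *     struct subfacet *subfacet;
 *     uint64_t packets = facet->packet_count;
 *
 *     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
 *         packets += subfacet->dp_packet_count;
 *     }
 */
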
static struct facet *facet_create(struct rule_dpif *, const struct flow *);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_account(struct facet *);

static bool facet_is_controller_flow(struct facet *);

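/* Lifecycle sketch (an assumption drawn from the declarations above, not a
 * quotation from this file): a flow miss typically calls
 * facet_create(rule, &flow), then subfacet_create() for the datapath key,
 * subfacet_make_actions() and subfacet_install() to program the datapath,
 * facet_push_stats() as statistics arrive, and finally facet_remove() when
 * the flow expires. */
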
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */

    /* Key.
     *
     * To save memory in the common case, 'key' is NULL if 'key_fitness' is
     * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
     * regenerate the ODP flow key from ->facet->flow. */
    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet,
     * but may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    bool installed;             /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */
};

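/* Sketch of the 'key' space optimization above (an assumption, not a
 * quotation from this file): a consumer rebuilds the key when it has been
 * elided, roughly:
 *
 *     if (subfacet->key) {
 *         ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
 *     } else {
 *         odp_flow_key_from_flow(key, &subfacet->facet->flow);
 *     }
 */
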
static struct subfacet *subfacet_create(struct facet *, enum odp_key_fitness,
                                        const struct nlattr *key,
                                        size_t key_len, ovs_be16 initial_tci);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key,
                                      size_t key_len);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
                             struct ofpbuf *key);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);

struct ofport_dpif {
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs
     * when VLAN devices are not used.  When broken device drivers are no
     * longer in widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static uint16_t vsp_vlandev_to_realdev(const struct ofproto_dpif *,
                                       uint16_t vlandev, int *vid);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

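/* Hypothetical example of the mapping these helpers maintain: if VLAN device
 * "eth0.10" (VID 10) sits on real device "eth0", then
 * vsp_vlandev_to_realdev() maps eth0.10's port number back to eth0's,
 * storing 10 in '*vid', and vsp_realdev_to_vlandev() performs the reverse
 * lookup given eth0's port and a VLAN TCI carrying VID 10. */
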
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    assert(!ofport
           || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most
     * common kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

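/* Illustrative sketch, not from the original file: with 'other_table'
 * nonnull, a rule inserted into this table is tagged via
 * rule_calculate_tag() seeded with 'basis', and revalidation can then skip
 * any facet whose 'tags' do not intersect 'ofproto->revalidate_set' (see
 * the tag_set_intersects() calls in run() below). */
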
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif *dpif;
    int max_ports;

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_bonded_bundles;

    /* Expiration. */
    struct timer next_expiration;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action; /* True when the first bundle action appears. */
    struct netdev_stats stats; /* To account packets generated and consumed
                                * in userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, ovs_be16 initial_tci,
                          struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);
/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct ofproto_dpif *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t
compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
                     const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
\f
/* Factory functions. */

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    return dp_enumerate_names(type, names);
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}
\f
/* Basic life-cycle. */

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *name = ofproto->up.name;
    int error;
    int i;

    error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
        return error;
    }

    ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
    ofproto->n_matches = 0;

    dpif_flow_flush(ofproto->dpif);
    dpif_recv_purge(ofproto->dpif);

    error = dpif_recv_set(ofproto->dpif, true);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
        dpif_close(ofproto->dpif);
        return error;
    }

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    timer_set_duration(&ofproto->next_expiration, 1000);

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }
    ofproto->need_revalidate = false;
    tag_set_init(&ofproto->revalidate_set);

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);

    return 0;
}

static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    dpif_close(ofproto->dpif);
}

static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    unsigned int work;

    /* Handle one or more batches of upcalls, until there's nothing left to
     * do or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a
     * number of flows and fire off their packets all at once.  We do
     * multiple batches because in some cases handling a packet can cause
     * another packet to be queued almost immediately as part of the return
     * flow.  Both optimizations can make major improvements on some
     * benchmarks and presumably for real traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }
    return 0;
}

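/* Contract assumed by the loop above (an inference, not quoted from this
 * file): handle_upcalls() returns the number of upcalls processed when it
 * makes progress, 0 when there is nothing left to do, and a negative errno
 * value on error, which "return -retval" converts back into the positive
 * error code that run_fast() reports. */
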
static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }
    dpif_run(ofproto->dpif);

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (timer_expired(&ofproto->next_expiration)) {
        int delay = expire(ofproto);
        timer_set_duration(&ofproto->next_expiration, delay);
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->revalidate_set);

    /* Now revalidate if there's anything to do. */
    if (ofproto->need_revalidate
        || !tag_set_is_empty(&ofproto->revalidate_set)) {
        struct tag_set revalidate_set = ofproto->revalidate_set;
        bool revalidate_all = ofproto->need_revalidate;
        struct facet *facet, *next;

        /* Clear the revalidation flags. */
        tag_set_init(&ofproto->revalidate_set);
        ofproto->need_revalidate = false;

        HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
            if (revalidate_all
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(facet);
            }
        }
    }

    /* Check the consistency of a random facet, to aid debugging. */
    if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
        struct facet *facet;

        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->need_revalidate = true;
            }
        }
    }

    return 0;
}

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->dpif);
    dpif_recv_wait(ofproto->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else {
        timer_wait(&ofproto->next_expiration);
    }
}

static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling
         * it individually since we are about to blow away all the facets
         * with dpif_flow_flush(). */
        struct subfacet *subfacet;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            subfacet->installed = false;
            subfacet->dp_packet_count = 0;
            subfacet->dp_byte_count = 0;
        }
        facet_remove(facet);
    }
    dpif_flow_flush(ofproto->dpif);
}

static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |
                OFPUTIL_A_ENQUEUE);
}

static void
get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->dpif, &s);
    put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
    put_32aligned_be64(&ots->matched_count,
                       htonll(s.n_hit + ofproto->n_matches));
}

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_);
    }

    return 0;
}

static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}

static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}

static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD)) {
        ofproto->need_revalidate = true;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}

static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up);
            }
            ofproto->need_revalidate = true;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->need_revalidate = true;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}

static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->need_revalidate = true;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}

static int
get_cfm_fault(const struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
}

static int
get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
                     size_t *n_rmps)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
        return 0;
    } else {
        return -1;
    }
}
\f
/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}

/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->need_revalidate = true;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}

static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}

static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
                     != stp_forward_in_state(state);

        ofproto->need_revalidate = true;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}

/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}

static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}

static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
    }
}

static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}

/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
\f
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}

static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}

static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->need_revalidate = true;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->need_revalidate = true;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->need_revalidate = true;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
\f
/* Bundles. */

/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning
 * entries are removed from every ofproto.  When patch ports and SLB bonds
 * are in use and a VM migration happens and the gratuitous ARPs are somehow
 * lost, this avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can
 * communicate with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->need_revalidate = true;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            tag_set_add(&o->revalidate_set, e->tag);
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}

static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->need_revalidate = true;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}

static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp,
                uint32_t bond_stable_id)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->need_revalidate = true;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        port->bundle->ofproto->need_revalidate = true;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    port->bond_stable_id = bond_stable_id;

    return true;
}

1589
1590static void
1591bundle_destroy(struct ofbundle *bundle)
1592{
1593 struct ofproto_dpif *ofproto;
1594 struct ofport_dpif *port, *next_port;
1595 int i;
1596
1597 if (!bundle) {
1598 return;
1599 }
1600
1601 ofproto = bundle->ofproto;
1602 for (i = 0; i < MAX_MIRRORS; i++) {
1603 struct ofmirror *m = ofproto->mirrors[i];
1604 if (m) {
1605 if (m->out == bundle) {
1606 mirror_destroy(m);
1607 } else if (hmapx_find_and_delete(&m->srcs, bundle)
1608 || hmapx_find_and_delete(&m->dsts, bundle)) {
1609 ofproto->need_revalidate = true;
1610 }
1611 }
1612 }
1613
1614 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
1615 bundle_del_port(port);
1616 }
1617
b44a10b7 1618 bundle_flush_macs(bundle, true);
abe529af
BP
1619 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
1620 free(bundle->name);
1621 free(bundle->trunks);
1622 lacp_destroy(bundle->lacp);
1623 bond_destroy(bundle->bond);
1624 free(bundle);
1625}
1626
1627static int
1628bundle_set(struct ofproto *ofproto_, void *aux,
1629 const struct ofproto_bundle_settings *s)
1630{
1631 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1632 bool need_flush = false;
abe529af
BP
1633 struct ofport_dpif *port;
1634 struct ofbundle *bundle;
ecac4ebf
BP
1635 unsigned long *trunks;
1636 int vlan;
abe529af
BP
1637 size_t i;
1638 bool ok;
1639
1640 if (!s) {
1641 bundle_destroy(bundle_lookup(ofproto, aux));
1642 return 0;
1643 }
1644
1645 assert(s->n_slaves == 1 || s->bond != NULL);
1646 assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
1647
1648 bundle = bundle_lookup(ofproto, aux);
1649 if (!bundle) {
1650 bundle = xmalloc(sizeof *bundle);
1651
1652 bundle->ofproto = ofproto;
1653 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
1654 hash_pointer(aux, 0));
1655 bundle->aux = aux;
1656 bundle->name = NULL;
1657
1658 list_init(&bundle->ports);
ecac4ebf 1659 bundle->vlan_mode = PORT_VLAN_TRUNK;
abe529af
BP
1660 bundle->vlan = -1;
1661 bundle->trunks = NULL;
5e9ceccd 1662 bundle->use_priority_tags = s->use_priority_tags;
abe529af
BP
1663 bundle->lacp = NULL;
1664 bundle->bond = NULL;
1665
1666 bundle->floodable = true;
1667
1668 bundle->src_mirrors = 0;
1669 bundle->dst_mirrors = 0;
1670 bundle->mirror_out = 0;
1671 }
1672
1673 if (!bundle->name || strcmp(s->name, bundle->name)) {
1674 free(bundle->name);
1675 bundle->name = xstrdup(s->name);
1676 }
1677
1678 /* LACP. */
1679 if (s->lacp) {
1680 if (!bundle->lacp) {
8c977421 1681 ofproto->need_revalidate = true;
abe529af
BP
1682 bundle->lacp = lacp_create();
1683 }
1684 lacp_configure(bundle->lacp, s->lacp);
1685 } else {
1686 lacp_destroy(bundle->lacp);
1687 bundle->lacp = NULL;
1688 }
1689
1690 /* Update set of ports. */
1691 ok = true;
1692 for (i = 0; i < s->n_slaves; i++) {
1693 if (!bundle_add_port(bundle, s->slaves[i],
00794817
BP
1694 s->lacp ? &s->lacp_slaves[i] : NULL,
1695 s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
abe529af
BP
1696 ok = false;
1697 }
1698 }
1699 if (!ok || list_size(&bundle->ports) != s->n_slaves) {
1700 struct ofport_dpif *next_port;
1701
1702 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
1703 for (i = 0; i < s->n_slaves; i++) {
56c769ab 1704 if (s->slaves[i] == port->up.ofp_port) {
abe529af
BP
1705 goto found;
1706 }
1707 }
1708
1709 bundle_del_port(port);
1710 found: ;
1711 }
1712 }
1713 assert(list_size(&bundle->ports) <= s->n_slaves);
1714
1715 if (list_is_empty(&bundle->ports)) {
1716 bundle_destroy(bundle);
1717 return EINVAL;
1718 }
1719
ecac4ebf 1720 /* Set VLAN tagging mode. */
5e9ceccd
BP
1721 if (s->vlan_mode != bundle->vlan_mode
1722 || s->use_priority_tags != bundle->use_priority_tags) {
ecac4ebf 1723 bundle->vlan_mode = s->vlan_mode;
5e9ceccd 1724 bundle->use_priority_tags = s->use_priority_tags;
ecac4ebf
BP
1725 need_flush = true;
1726 }
1727
abe529af 1728 /* Set VLAN tag. */
ecac4ebf
BP
1729 vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
1730 : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
1731 : 0);
1732 if (vlan != bundle->vlan) {
1733 bundle->vlan = vlan;
abe529af
BP
1734 need_flush = true;
1735 }
1736
1737 /* Get trunked VLANs. */
ecac4ebf
BP
1738 switch (s->vlan_mode) {
1739 case PORT_VLAN_ACCESS:
1740 trunks = NULL;
1741 break;
1742
1743 case PORT_VLAN_TRUNK:
1744 trunks = (unsigned long *) s->trunks;
1745 break;
1746
1747 case PORT_VLAN_NATIVE_UNTAGGED:
1748 case PORT_VLAN_NATIVE_TAGGED:
1749 if (vlan != 0 && (!s->trunks
1750 || !bitmap_is_set(s->trunks, vlan)
1751 || bitmap_is_set(s->trunks, 0))) {
1752 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
1753 if (s->trunks) {
1754 trunks = bitmap_clone(s->trunks, 4096);
1755 } else {
1756 trunks = bitmap_allocate1(4096);
1757 }
1758 bitmap_set1(trunks, vlan);
1759 bitmap_set0(trunks, 0);
1760 } else {
1761 trunks = (unsigned long *) s->trunks;
1762 }
1763 break;
1764
1765 default:
1766 NOT_REACHED();
1767 }
abe529af
BP
1768 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
1769 free(bundle->trunks);
ecac4ebf
BP
1770 if (trunks == s->trunks) {
1771 bundle->trunks = vlan_bitmap_clone(trunks);
1772 } else {
1773 bundle->trunks = trunks;
1774 trunks = NULL;
1775 }
abe529af
BP
1776 need_flush = true;
1777 }
ecac4ebf
BP
1778 if (trunks != s->trunks) {
1779 free(trunks);
1780 }
abe529af
BP
1781
1782 /* Bonding. */
1783 if (!list_is_short(&bundle->ports)) {
1784 bundle->ofproto->has_bonded_bundles = true;
1785 if (bundle->bond) {
1786 if (bond_reconfigure(bundle->bond, s->bond)) {
1787 ofproto->need_revalidate = true;
1788 }
1789 } else {
1790 bundle->bond = bond_create(s->bond);
6f77f4ae 1791 ofproto->need_revalidate = true;
abe529af
BP
1792 }
1793
1794 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
00794817 1795 bond_slave_register(bundle->bond, port, port->bond_stable_id,
abe529af
BP
1796 port->up.netdev);
1797 }
1798 } else {
1799 bond_destroy(bundle->bond);
1800 bundle->bond = NULL;
1801 }
1802
1803 /* If we changed something that would affect MAC learning, un-learn
1804 * everything on this port and force flow revalidation. */
1805 if (need_flush) {
b44a10b7 1806 bundle_flush_macs(bundle, false);
abe529af
BP
1807 }
1808
1809 return 0;
1810}
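/* Illustrative sketch (not part of the original source): how the trunk
 * computation above resolves for a hypothetical native-VLAN bundle with
 * vlan_mode == PORT_VLAN_NATIVE_UNTAGGED, s->vlan == 10, and
 * s->trunks == {20, 30}:
 *
 *     vlan = 10;                                  (the native VLAN)
 *     trunks = bitmap_clone(s->trunks, 4096);
 *     bitmap_set1(trunks, 10);                    (force-trunk the native VLAN)
 *     bitmap_set0(trunks, 0);                     (never trunk VLAN 0)
 *
 * so bundle->trunks ends up as {10, 20, 30}. */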
1811
1812static void
1813bundle_remove(struct ofport *port_)
1814{
1815 struct ofport_dpif *port = ofport_dpif_cast(port_);
1816 struct ofbundle *bundle = port->bundle;
1817
1818 if (bundle) {
1819 bundle_del_port(port);
1820 if (list_is_empty(&bundle->ports)) {
1821 bundle_destroy(bundle);
1822 } else if (list_is_short(&bundle->ports)) {
1823 bond_destroy(bundle->bond);
1824 bundle->bond = NULL;
1825 }
1826 }
1827}
1828
1829static void
5f877369 1830send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
abe529af
BP
1831{
1832 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
1833 struct ofport_dpif *port = port_;
1834 uint8_t ea[ETH_ADDR_LEN];
1835 int error;
1836
1837 error = netdev_get_etheraddr(port->up.netdev, ea);
1838 if (!error) {
abe529af 1839 struct ofpbuf packet;
5f877369 1840 void *packet_pdu;
abe529af
BP
1841
1842 ofpbuf_init(&packet, 0);
1843 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
5f877369
EJ
1844 pdu_size);
1845 memcpy(packet_pdu, pdu, pdu_size);
1846
97d6520b 1847 send_packet(port, &packet);
abe529af
BP
1848 ofpbuf_uninit(&packet);
1849 } else {
1850 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
1851 "%s (%s)", port->bundle->name,
1852 netdev_get_name(port->up.netdev), strerror(error));
1853 }
1854}
1855
1856static void
1857bundle_send_learning_packets(struct ofbundle *bundle)
1858{
1859 struct ofproto_dpif *ofproto = bundle->ofproto;
1860 int error, n_packets, n_errors;
1861 struct mac_entry *e;
1862
1863 error = n_packets = n_errors = 0;
1864 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
1865 if (e->port.p != bundle) {
ea131871
JG
1866 struct ofpbuf *learning_packet;
1867 struct ofport_dpif *port;
4dd1e3ca 1868 void *port_void;
ea131871
JG
1869 int ret;
1870
4dd1e3ca
BP
1871 /* The assignment to "port" is unnecessary but makes "grep"ing for
1872 * struct ofport_dpif more effective. */
1873 learning_packet = bond_compose_learning_packet(bundle->bond,
1874 e->mac, e->vlan,
1875 &port_void);
1876 port = port_void;
97d6520b 1877 ret = send_packet(port, learning_packet);
ea131871 1878 ofpbuf_delete(learning_packet);
abe529af
BP
1879 if (ret) {
1880 error = ret;
1881 n_errors++;
1882 }
1883 n_packets++;
1884 }
1885 }
1886
1887 if (n_errors) {
1888 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1889 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
1890 "packets, last error was: %s",
1891 bundle->name, n_errors, n_packets, strerror(error));
1892 } else {
1893 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
1894 bundle->name, n_packets);
1895 }
1896}
1897
1898static void
1899bundle_run(struct ofbundle *bundle)
1900{
1901 if (bundle->lacp) {
1902 lacp_run(bundle->lacp, send_pdu_cb);
1903 }
1904 if (bundle->bond) {
1905 struct ofport_dpif *port;
1906
1907 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
015e08bc 1908 bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
abe529af
BP
1909 }
1910
1911 bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
bdebeece 1912 lacp_status(bundle->lacp));
abe529af
BP
1913 if (bond_should_send_learning_packets(bundle->bond)) {
1914 bundle_send_learning_packets(bundle);
1915 }
1916 }
1917}
1918
1919static void
1920bundle_wait(struct ofbundle *bundle)
1921{
1922 if (bundle->lacp) {
1923 lacp_wait(bundle->lacp);
1924 }
1925 if (bundle->bond) {
1926 bond_wait(bundle->bond);
1927 }
1928}
1929\f
1930/* Mirrors. */
1931
1932static int
1933mirror_scan(struct ofproto_dpif *ofproto)
1934{
1935 int idx;
1936
1937 for (idx = 0; idx < MAX_MIRRORS; idx++) {
1938 if (!ofproto->mirrors[idx]) {
1939 return idx;
1940 }
1941 }
1942 return -1;
1943}
1944
1945static struct ofmirror *
1946mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
1947{
1948 int i;
1949
1950 for (i = 0; i < MAX_MIRRORS; i++) {
1951 struct ofmirror *mirror = ofproto->mirrors[i];
1952 if (mirror && mirror->aux == aux) {
1953 return mirror;
1954 }
1955 }
1956
1957 return NULL;
1958}
1959
9ba15e2a
BP
1960/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
1961static void
1962mirror_update_dups(struct ofproto_dpif *ofproto)
1963{
1964 int i;
1965
1966 for (i = 0; i < MAX_MIRRORS; i++) {
1967 struct ofmirror *m = ofproto->mirrors[i];
1968
1969 if (m) {
1970 m->dup_mirrors = MIRROR_MASK_C(1) << i;
1971 }
1972 }
1973
1974 for (i = 0; i < MAX_MIRRORS; i++) {
1975 struct ofmirror *m1 = ofproto->mirrors[i];
1976 int j;
1977
1978 if (!m1) {
1979 continue;
1980 }
1981
1982 for (j = i + 1; j < MAX_MIRRORS; j++) {
1983 struct ofmirror *m2 = ofproto->mirrors[j];
1984
edb0540b 1985 if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
9ba15e2a
BP
1986 m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
1987 m2->dup_mirrors |= m1->dup_mirrors;
1988 }
1989 }
1990 }
1991}
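/* Worked example (illustrative, not in the original source): if mirrors 0
 * and 2 share the same 'out' bundle and 'out_vlan' while mirror 1 outputs
 * elsewhere, the loops above leave:
 *
 *     m0->dup_mirrors == 0b101    (mirrors 0 and 2 duplicate each other)
 *     m1->dup_mirrors == 0b010    (mirror 1 stands alone)
 *     m2->dup_mirrors == 0b101
 *
 * so the output path can send a single copy of each packet on behalf of
 * mirrors 0 and 2. */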
1992
abe529af
BP
1993static int
1994mirror_set(struct ofproto *ofproto_, void *aux,
1995 const struct ofproto_mirror_settings *s)
1996{
1997 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1998 mirror_mask_t mirror_bit;
1999 struct ofbundle *bundle;
2000 struct ofmirror *mirror;
2001 struct ofbundle *out;
2002 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
2003 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
2004 int out_vlan;
2005
2006 mirror = mirror_lookup(ofproto, aux);
2007 if (!s) {
2008 mirror_destroy(mirror);
2009 return 0;
2010 }
2011 if (!mirror) {
2012 int idx;
2013
2014 idx = mirror_scan(ofproto);
2015 if (idx < 0) {
2016 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
2017 "cannot create %s",
2018 ofproto->up.name, MAX_MIRRORS, s->name);
2019 return EFBIG;
2020 }
2021
2022 mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
2023 mirror->ofproto = ofproto;
2024 mirror->idx = idx;
8b28d864 2025 mirror->aux = aux;
abe529af
BP
2026 mirror->out_vlan = -1;
2027 mirror->name = NULL;
2028 }
2029
2030 if (!mirror->name || strcmp(s->name, mirror->name)) {
2031 free(mirror->name);
2032 mirror->name = xstrdup(s->name);
2033 }
2034
2035 /* Get the new configuration. */
2036 if (s->out_bundle) {
2037 out = bundle_lookup(ofproto, s->out_bundle);
2038 if (!out) {
2039 mirror_destroy(mirror);
2040 return EINVAL;
2041 }
2042 out_vlan = -1;
2043 } else {
2044 out = NULL;
2045 out_vlan = s->out_vlan;
2046 }
2047 bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
2048 bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);
2049
2050 /* If the configuration has not changed, do nothing. */
2051 if (hmapx_equals(&srcs, &mirror->srcs)
2052 && hmapx_equals(&dsts, &mirror->dsts)
2053 && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
2054 && mirror->out == out
2055 && mirror->out_vlan == out_vlan)
2056 {
2057 hmapx_destroy(&srcs);
2058 hmapx_destroy(&dsts);
2059 return 0;
2060 }
2061
2062 hmapx_swap(&srcs, &mirror->srcs);
2063 hmapx_destroy(&srcs);
2064
2065 hmapx_swap(&dsts, &mirror->dsts);
2066 hmapx_destroy(&dsts);
2067
2068 free(mirror->vlans);
2069 mirror->vlans = vlan_bitmap_clone(s->src_vlans);
2070
2071 mirror->out = out;
2072 mirror->out_vlan = out_vlan;
2073
2074 /* Update bundles. */
2075 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2076 HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
2077 if (hmapx_contains(&mirror->srcs, bundle)) {
2078 bundle->src_mirrors |= mirror_bit;
2079 } else {
2080 bundle->src_mirrors &= ~mirror_bit;
2081 }
2082
2083 if (hmapx_contains(&mirror->dsts, bundle)) {
2084 bundle->dst_mirrors |= mirror_bit;
2085 } else {
2086 bundle->dst_mirrors &= ~mirror_bit;
2087 }
2088
2089 if (mirror->out == bundle) {
2090 bundle->mirror_out |= mirror_bit;
2091 } else {
2092 bundle->mirror_out &= ~mirror_bit;
2093 }
2094 }
2095
2096 ofproto->need_revalidate = true;
d0040604 2097 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
9ba15e2a 2098 mirror_update_dups(ofproto);
abe529af
BP
2099
2100 return 0;
2101}
2102
2103static void
2104mirror_destroy(struct ofmirror *mirror)
2105{
2106 struct ofproto_dpif *ofproto;
2107 mirror_mask_t mirror_bit;
2108 struct ofbundle *bundle;
2109
2110 if (!mirror) {
2111 return;
2112 }
2113
2114 ofproto = mirror->ofproto;
2115 ofproto->need_revalidate = true;
d0040604 2116 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
abe529af
BP
2117
2118 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2119 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
2120 bundle->src_mirrors &= ~mirror_bit;
2121 bundle->dst_mirrors &= ~mirror_bit;
2122 bundle->mirror_out &= ~mirror_bit;
2123 }
2124
2125 hmapx_destroy(&mirror->srcs);
2126 hmapx_destroy(&mirror->dsts);
2127 free(mirror->vlans);
2128
2129 ofproto->mirrors[mirror->idx] = NULL;
2130 free(mirror->name);
2131 free(mirror);
9ba15e2a
BP
2132
2133 mirror_update_dups(ofproto);
abe529af
BP
2134}
2135
9d24de3b
JP
2136static int
2137mirror_get_stats(struct ofproto *ofproto_, void *aux,
2138 uint64_t *packets, uint64_t *bytes)
2139{
2140 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2141 struct ofmirror *mirror = mirror_lookup(ofproto, aux);
2142
2143 if (!mirror) {
2144 *packets = *bytes = UINT64_MAX;
2145 return 0;
2146 }
2147
2148 *packets = mirror->packet_count;
2149 *bytes = mirror->byte_count;
2150
2151 return 0;
2152}
2153
abe529af
BP
2154static int
2155set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
2156{
2157 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2158 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
d0040604 2159 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
abe529af
BP
2160 }
2161 return 0;
2162}
2163
2164static bool
b4affc74 2165is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
abe529af
BP
2166{
2167 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2168 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
2169 return bundle && bundle->mirror_out != 0;
2170}
8402c74b
SS
2171
2172static void
b53055f4 2173forward_bpdu_changed(struct ofproto *ofproto_)
8402c74b
SS
2174{
2175 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2176 /* Revalidate cached flows whenever forward_bpdu option changes. */
2177 ofproto->need_revalidate = true;
2178}
e764773c
BP
2179
2180static void
2181set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
2182{
2183 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2184 mac_learning_set_idle_time(ofproto->ml, idle_time);
2185}
abe529af
BP
2186\f
2187/* Ports. */
2188
2189static struct ofport_dpif *
2190get_ofp_port(struct ofproto_dpif *ofproto, uint16_t ofp_port)
2191{
7df6a8bd
BP
2192 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
2193 return ofport ? ofport_dpif_cast(ofport) : NULL;
abe529af
BP
2194}
2195
2196static struct ofport_dpif *
2197get_odp_port(struct ofproto_dpif *ofproto, uint32_t odp_port)
2198{
2199 return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
2200}
2201
2202static void
2203ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
2204 struct dpif_port *dpif_port)
2205{
2206 ofproto_port->name = dpif_port->name;
2207 ofproto_port->type = dpif_port->type;
2208 ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
2209}
2210
2211static void
2212port_run(struct ofport_dpif *ofport)
2213{
3e5b3fdb
EJ
2214 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
2215 bool carrier_changed = carrier_seq != ofport->carrier_seq;
015e08bc
EJ
2216 bool enable = netdev_get_carrier(ofport->up.netdev);
2217
3e5b3fdb
EJ
2218 ofport->carrier_seq = carrier_seq;
2219
abe529af
BP
2220 if (ofport->cfm) {
2221 cfm_run(ofport->cfm);
2222
2223 if (cfm_should_send_ccm(ofport->cfm)) {
2224 struct ofpbuf packet;
abe529af
BP
2225
2226 ofpbuf_init(&packet, 0);
9e1fd49b 2227 cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
97d6520b 2228 send_packet(ofport, &packet);
abe529af
BP
2229 ofpbuf_uninit(&packet);
2230 }
015e08bc 2231
86dc6501
EJ
2232 enable = enable && !cfm_get_fault(ofport->cfm)
2233 && cfm_get_opup(ofport->cfm);
abe529af 2234 }
015e08bc
EJ
2235
2236 if (ofport->bundle) {
2237 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
3e5b3fdb
EJ
2238 if (carrier_changed) {
2239 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
2240 }
015e08bc
EJ
2241 }
2242
daff3353
EJ
2243 if (ofport->may_enable != enable) {
2244 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2245
2246 if (ofproto->has_bundle_action) {
2247 ofproto->need_revalidate = true;
2248 }
2249 }
2250
015e08bc 2251 ofport->may_enable = enable;
abe529af
BP
2252}
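/* Summary of the 'enable' computation above (illustrative paraphrase):
 *
 *     enable = carrier is up
 *              && (!cfm || (no CFM fault && CFM remote interface is up))
 *              && (!bundle || LACP allows this slave to be enabled)
 *
 * The cached result, ofport->may_enable, later feeds
 * bond_slave_set_may_enable() from bundle_run(). */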
2253
2254static void
2255port_wait(struct ofport_dpif *ofport)
2256{
2257 if (ofport->cfm) {
2258 cfm_wait(ofport->cfm);
2259 }
2260}
2261
2262static int
2263port_query_by_name(const struct ofproto *ofproto_, const char *devname,
2264 struct ofproto_port *ofproto_port)
2265{
2266 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2267 struct dpif_port dpif_port;
2268 int error;
2269
2270 error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
2271 if (!error) {
2272 ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
2273 }
2274 return error;
2275}
2276
2277static int
2278port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
2279{
2280 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2281 uint16_t odp_port;
2282 int error;
2283
2284 error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
2285 if (!error) {
2286 *ofp_portp = odp_port_to_ofp_port(odp_port);
2287 }
2288 return error;
2289}
2290
2291static int
2292port_del(struct ofproto *ofproto_, uint16_t ofp_port)
2293{
2294 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2295 int error;
2296
2297 error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
2298 if (!error) {
2299 struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
2300 if (ofport) {
2301 /* The caller is going to close ofport->up.netdev. If this is a
2302 * bonded port, then the bond is using that netdev, so remove it
2303 * from the bond. The client will need to reconfigure everything
2304 * after deleting ports, so then the slave will get re-added. */
2305 bundle_remove(&ofport->up);
2306 }
2307 }
2308 return error;
2309}
2310
6527c598
PS
2311static int
2312port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
2313{
2314 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2315 int error;
2316
2317 error = netdev_get_stats(ofport->up.netdev, stats);
2318
2319 if (!error && ofport->odp_port == OVSP_LOCAL) {
2320 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2321
2322 /* ofproto->stats.tx_packets represents packets that we created
2323 * internally and sent to some port (e.g. packets sent with
2324 * send_packet()). Account for them as if they had come from
2325 * OFPP_LOCAL and got forwarded. */
2326
2327 if (stats->rx_packets != UINT64_MAX) {
2328 stats->rx_packets += ofproto->stats.tx_packets;
2329 }
2330
2331 if (stats->rx_bytes != UINT64_MAX) {
2332 stats->rx_bytes += ofproto->stats.tx_bytes;
2333 }
2334
2335 /* ofproto->stats.rx_packets represents packets that were received on
2336 * some port and that we processed internally and dropped (e.g. STP).
2337 * Account for them as if they had been forwarded to OFPP_LOCAL. */
2338
2339 if (stats->tx_packets != UINT64_MAX) {
2340 stats->tx_packets += ofproto->stats.rx_packets;
2341 }
2342
2343 if (stats->tx_bytes != UINT64_MAX) {
2344 stats->tx_bytes += ofproto->stats.rx_bytes;
2345 }
2346 }
2347
2348 return error;
2349}
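/* Worked example (illustrative): if the datapath reports rx_packets == 100
 * for the OVSP_LOCAL port and this bridge has generated 5 packets itself via
 * send_packet() (so ofproto->stats.tx_packets == 5), the caller sees
 * rx_packets == 105: internally generated traffic is accounted as if it had
 * come from OFPP_LOCAL and been forwarded. */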
2350
2351/* Account packets for LOCAL port. */
2352static void
2353ofproto_update_local_port_stats(const struct ofproto *ofproto_,
2354 size_t tx_size, size_t rx_size)
2355{
2356 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2357
2358 if (rx_size) {
2359 ofproto->stats.rx_packets++;
2360 ofproto->stats.rx_bytes += rx_size;
2361 }
2362 if (tx_size) {
2363 ofproto->stats.tx_packets++;
2364 ofproto->stats.tx_bytes += tx_size;
2365 }
2366}
2367
abe529af
BP
2368struct port_dump_state {
2369 struct dpif_port_dump dump;
2370 bool done;
2371};
2372
2373static int
2374port_dump_start(const struct ofproto *ofproto_, void **statep)
2375{
2376 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2377 struct port_dump_state *state;
2378
2379 *statep = state = xmalloc(sizeof *state);
2380 dpif_port_dump_start(&state->dump, ofproto->dpif);
2381 state->done = false;
2382 return 0;
2383}
2384
2385static int
2386port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
2387 struct ofproto_port *port)
2388{
2389 struct port_dump_state *state = state_;
2390 struct dpif_port dpif_port;
2391
2392 if (dpif_port_dump_next(&state->dump, &dpif_port)) {
2393 ofproto_port_from_dpif_port(port, &dpif_port);
2394 return 0;
2395 } else {
2396 int error = dpif_port_dump_done(&state->dump);
2397 state->done = true;
2398 return error ? error : EOF;
2399 }
2400}
2401
2402static int
2403port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
2404{
2405 struct port_dump_state *state = state_;
2406
2407 if (!state->done) {
2408 dpif_port_dump_done(&state->dump);
2409 }
2410 free(state);
2411 return 0;
2412}
2413
2414static int
2415port_poll(const struct ofproto *ofproto_, char **devnamep)
2416{
2417 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2418 return dpif_port_poll(ofproto->dpif, devnamep);
2419}
2420
2421static void
2422port_poll_wait(const struct ofproto *ofproto_)
2423{
2424 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2425 dpif_port_poll_wait(ofproto->dpif);
2426}
2427
2428static int
2429port_is_lacp_current(const struct ofport *ofport_)
2430{
2431 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2432 return (ofport->bundle && ofport->bundle->lacp
2433 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
2434 : -1);
2435}
2436\f
2437/* Upcall handling. */
2438
501f8d1f
BP
2439/* Flow miss batching.
2440 *
2441 * Some dpifs implement operations faster when you hand them off in a batch.
2442 * To allow batching, "struct flow_miss" queues the dpif-related work needed
2443 * for a given flow. Each "struct flow_miss" corresponds to sending one or
2444 * more packets, plus possibly installing the flow in the dpif.
2445 *
2446 * So far we only batch the operations that affect flow setup time the most.
2447 * It's possible to batch more than that, but the benefit might be minimal. */
2448struct flow_miss {
2449 struct hmap_node hmap_node;
2450 struct flow flow;
b0f7b9b5 2451 enum odp_key_fitness key_fitness;
501f8d1f
BP
2452 const struct nlattr *key;
2453 size_t key_len;
e84173dc 2454 ovs_be16 initial_tci;
501f8d1f
BP
2455 struct list packets;
2456};
2457
2458struct flow_miss_op {
c2b565b5 2459 struct dpif_op dpif_op;
b0f7b9b5 2460 struct subfacet *subfacet;
501f8d1f
BP
2461};
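/* Sketch of how a batch is ultimately issued (this mirrors
 * handle_miss_upcalls() below; illustrative only):
 *
 *     struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
 *     for (i = 0; i < n_ops; i++) {
 *         dpif_ops[i] = &flow_miss_ops[i].dpif_op;
 *     }
 *     dpif_operate(ofproto->dpif, dpif_ops, n_ops);
 *
 * Each flow miss contributes at most one DPIF_OP_EXECUTE per queued packet
 * plus at most one DPIF_OP_FLOW_PUT, so a single dpif_operate() call can
 * send packets and install flows for a whole batch of upcalls. */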
2462
62cd7072
BP
2463/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
2464 * OpenFlow controller as necessary according to their individual
29ebe880 2465 * configurations. */
62cd7072 2466static void
a39edbd4 2467send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
29ebe880 2468 const struct flow *flow)
62cd7072
BP
2469{
2470 struct ofputil_packet_in pin;
2471
3e3252fa
EJ
2472 pin.packet = packet->data;
2473 pin.packet_len = packet->size;
62cd7072 2474 pin.reason = OFPR_NO_MATCH;
a7349929 2475 pin.controller_id = 0;
54834960
EJ
2476
2477 pin.table_id = 0;
2478 pin.cookie = 0;
2479
62cd7072 2480 pin.send_len = 0; /* not used for flow table misses */
5d6c3af0
EJ
2481
2482 flow_get_metadata(flow, &pin.fmd);
2483
2484 /* Registers aren't meaningful on a miss. */
2485 memset(pin.fmd.reg_masks, 0, sizeof pin.fmd.reg_masks);
2486
d8653c38 2487 connmgr_send_packet_in(ofproto->up.connmgr, &pin);
62cd7072
BP
2488}
2489
abe529af
BP
2490static bool
2491process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
2492 const struct ofpbuf *packet)
2493{
b6e001b6
EJ
2494 struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
2495
2496 if (!ofport) {
2497 return false;
2498 }
2499
ef9819b5 2500 if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
b6e001b6 2501 if (packet) {
abe529af
BP
2502 cfm_process_heartbeat(ofport->cfm, packet);
2503 }
2504 return true;
b6e001b6
EJ
2505 } else if (ofport->bundle && ofport->bundle->lacp
2506 && flow->dl_type == htons(ETH_TYPE_LACP)) {
2507 if (packet) {
2508 lacp_process_packet(ofport->bundle->lacp, ofport, packet);
abe529af 2509 }
da37ebac 2510 return true;
21f7563c
JP
2511 } else if (ofproto->stp && stp_should_process_flow(flow)) {
2512 if (packet) {
2513 stp_process_packet(ofport, packet);
2514 }
2515 return true;
abe529af
BP
2516 }
2517 return false;
2518}
2519
501f8d1f
BP
2520static struct flow_miss *
2521flow_miss_create(struct hmap *todo, const struct flow *flow,
b0f7b9b5 2522 enum odp_key_fitness key_fitness,
e84173dc
BP
2523 const struct nlattr *key, size_t key_len,
2524 ovs_be16 initial_tci)
abe529af 2525{
501f8d1f
BP
2526 uint32_t hash = flow_hash(flow, 0);
2527 struct flow_miss *miss;
abe529af 2528
501f8d1f
BP
2529 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
2530 if (flow_equal(&miss->flow, flow)) {
2531 return miss;
2532 }
2533 }
abe529af 2534
501f8d1f
BP
2535 miss = xmalloc(sizeof *miss);
2536 hmap_insert(todo, &miss->hmap_node, hash);
2537 miss->flow = *flow;
b0f7b9b5 2538 miss->key_fitness = key_fitness;
501f8d1f
BP
2539 miss->key = key;
2540 miss->key_len = key_len;
e84173dc 2541 miss->initial_tci = initial_tci;
501f8d1f
BP
2542 list_init(&miss->packets);
2543 return miss;
2544}
abe529af 2545
501f8d1f
BP
2546static void
2547handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
2548 struct flow_miss_op *ops, size_t *n_ops)
2549{
2550 const struct flow *flow = &miss->flow;
2551 struct ofpbuf *packet, *next_packet;
b0f7b9b5 2552 struct subfacet *subfacet;
501f8d1f 2553 struct facet *facet;
abe529af 2554
501f8d1f 2555 facet = facet_lookup_valid(ofproto, flow);
abe529af 2556 if (!facet) {
501f8d1f
BP
2557 struct rule_dpif *rule;
2558
2559 rule = rule_dpif_lookup(ofproto, flow, 0);
abe529af 2560 if (!rule) {
9e1fd49b 2561 /* Don't send a packet-in if OFPUTIL_PC_NO_PACKET_IN is asserted. */
501f8d1f 2562 struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port);
abe529af 2563 if (port) {
9e1fd49b 2564 if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
abe529af
BP
2565 COVERAGE_INC(ofproto_dpif_no_packet_in);
2566 /* XXX install 'drop' flow entry */
abe529af
BP
2567 return;
2568 }
2569 } else {
2570 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
501f8d1f
BP
2571 flow->in_port);
2572 }
2573
29ebe880
EJ
2574 LIST_FOR_EACH (packet, list_node, &miss->packets) {
2575 send_packet_in_miss(ofproto, packet, flow);
abe529af
BP
2576 }
2577
abe529af
BP
2578 return;
2579 }
2580
501f8d1f 2581 facet = facet_create(rule, flow);
abe529af
BP
2582 }
2583
15baa734 2584 subfacet = subfacet_create(facet,
e84173dc
BP
2585 miss->key_fitness, miss->key, miss->key_len,
2586 miss->initial_tci);
b0f7b9b5 2587
501f8d1f 2588 LIST_FOR_EACH_SAFE (packet, next_packet, list_node, &miss->packets) {
67d91f78 2589 struct dpif_flow_stats stats;
999fba59
EJ
2590 struct flow_miss_op *op;
2591 struct dpif_execute *execute;
67d91f78 2592
501f8d1f
BP
2593 ofproto->n_matches++;
2594
2595 if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
2596 /*
2597 * Extra-special case for fail-open mode.
2598 *
2599 * We are in fail-open mode and the packet matched the fail-open
2600 * rule, but we are connected to a controller too. We should send
2601 * the packet up to the controller in the hope that it will try to
2602 * set up a flow and thereby allow us to exit fail-open.
2603 *
2604 * See the top-level comment in fail-open.c for more information.
2605 */
29ebe880 2606 send_packet_in_miss(ofproto, packet, flow);
501f8d1f
BP
2607 }
2608
b95fc6ba 2609 if (!facet->may_install || !subfacet->actions) {
15baa734 2610 subfacet_make_actions(subfacet, packet);
501f8d1f 2611 }
67d91f78 2612
67d91f78 2613 dpif_flow_stats_extract(&facet->flow, packet, &stats);
15baa734 2614 subfacet_update_stats(subfacet, &stats);
67d91f78 2615
8338659a
BP
2616 if (!subfacet->actions_len) {
2617 /* No actions to execute, so skip talking to the dpif. */
2618 continue;
2619 }
2620
999fba59
EJ
2621 if (flow->vlan_tci != subfacet->initial_tci) {
2622 /* This packet was received on a VLAN splinter port. We added
2623 * a VLAN to the packet to make the packet resemble the flow,
2624 * but the actions were composed assuming that the packet
2625 * contained no VLAN. So, we must remove the VLAN header from
2626 * the packet before trying to execute the actions. */
2627 eth_pop_vlan(packet);
501f8d1f 2628 }
999fba59
EJ
2629
2630 op = &ops[(*n_ops)++];
c2b565b5 2631 execute = &op->dpif_op.u.execute;
999fba59 2632 op->subfacet = subfacet;
c2b565b5 2633 op->dpif_op.type = DPIF_OP_EXECUTE;
999fba59
EJ
2634 execute->key = miss->key;
2635 execute->key_len = miss->key_len;
2636 execute->actions = (facet->may_install
2637 ? subfacet->actions
2638 : xmemdup(subfacet->actions,
2639 subfacet->actions_len));
2640 execute->actions_len = subfacet->actions_len;
2641 execute->packet = packet;
501f8d1f
BP
2642 }
2643
b0f7b9b5 2644 if (facet->may_install && subfacet->key_fitness != ODP_FIT_TOO_LITTLE) {
501f8d1f 2645 struct flow_miss_op *op = &ops[(*n_ops)++];
c2b565b5 2646 struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
501f8d1f 2647
b0f7b9b5 2648 op->subfacet = subfacet;
c2b565b5 2649 op->dpif_op.type = DPIF_OP_FLOW_PUT;
501f8d1f
BP
2650 put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
2651 put->key = miss->key;
2652 put->key_len = miss->key_len;
b95fc6ba
BP
2653 put->actions = subfacet->actions;
2654 put->actions_len = subfacet->actions_len;
501f8d1f
BP
2655 put->stats = NULL;
2656 }
2657}
2658
e2a6ca36
BP
2659/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
2660 * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
2661 * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
2662 * what a flow key should contain.
2663 *
2664 * This function also includes some logic to help make VLAN splinters
2665 * transparent to the rest of the upcall processing logic. In particular, if
2666 * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
2667 * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
2668 * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
2669 *
2670 * Sets '*initial_tci' to the VLAN TCI with which the packet was really
2671 * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
2672 * (This differs from the value returned in flow->vlan_tci only for packets
2673 * received on VLAN splinters.)
2674 */
e84173dc 2675static enum odp_key_fitness
52a90c29 2676ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
e84173dc 2677 const struct nlattr *key, size_t key_len,
e2a6ca36
BP
2678 struct flow *flow, ovs_be16 *initial_tci,
2679 struct ofpbuf *packet)
e84173dc
BP
2680{
2681 enum odp_key_fitness fitness;
52a90c29
BP
2682 uint16_t realdev;
2683 int vid;
e84173dc
BP
2684
2685 fitness = odp_flow_key_to_flow(key, key_len, flow);
2686 if (fitness == ODP_FIT_ERROR) {
2687 return fitness;
2688 }
2689 *initial_tci = flow->vlan_tci;
2690
52a90c29
BP
2691 realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
2692 if (realdev) {
2693 /* Cause the flow to be processed as if it came in on the real device
2694 * with the VLAN device's VLAN ID. */
2695 flow->in_port = realdev;
2696 flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
e2a6ca36
BP
2697 if (packet) {
2698 /* Make the packet resemble the flow, so that it gets sent to an
2699 * OpenFlow controller properly, so that it looks correct for
2700 * sFlow, and so that flow_extract() will get the correct vlan_tci
2701 * if it is called on 'packet'.
2702 *
2703 * The allocated space inside 'packet' probably also contains
2704 * 'key', that is, both 'packet' and 'key' are probably part of a
2705 * struct dpif_upcall (see the large comment on that structure
2706 * definition), so pushing data on 'packet' is in general not a
2707 * good idea since it could overwrite 'key' or free it as a side
2708 * effect. However, it's OK in this special case because we know
2709 * that 'packet' is inside a Netlink attribute: pushing 4 bytes
2710 * will just overwrite the 4-byte "struct nlattr", which is fine
2711 * since we don't need that header anymore. */
2712 eth_push_vlan(packet, flow->vlan_tci);
2713 }
52a90c29
BP
2714
2715 /* Let the caller know that we can't reproduce 'key' from 'flow'. */
2716 if (fitness == ODP_FIT_PERFECT) {
2717 fitness = ODP_FIT_TOO_MUCH;
2718 }
2719 }
2720
e84173dc
BP
2721 return fitness;
2722}
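/* Example (illustrative; the device names are hypothetical): suppose
 * "eth0.10" is a VLAN splinter device for VLAN 10 on real device "eth0".
 * For a packet received on eth0.10, vsp_vlandev_to_realdev() yields eth0's
 * port number and vid == 10, so the code above rewrites flow->in_port to
 * eth0's port, sets flow->vlan_tci = htons(10 | VLAN_CFI), and pushes a
 * matching 802.1Q header onto 'packet'. '*initial_tci' still holds the TCI
 * the packet actually carried when it arrived (zero in this case, because
 * the splinter device had already stripped the tag). */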
2723
501f8d1f
BP
2724static void
2725handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
2726 size_t n_upcalls)
2727{
2728 struct dpif_upcall *upcall;
2729 struct flow_miss *miss, *next_miss;
2730 struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
c2b565b5 2731 struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
501f8d1f
BP
2732 struct hmap todo;
2733 size_t n_ops;
2734 size_t i;
2735
2736 if (!n_upcalls) {
2737 return;
2738 }
2739
2740 /* Construct the to-do list.
2741 *
2742 * This just amounts to extracting the flow from each packet and sticking
2743 * the packets that have the same flow in the same "flow_miss" structure so
2744 * that we can process them together. */
2745 hmap_init(&todo);
2746 for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
b0f7b9b5 2747 enum odp_key_fitness fitness;
501f8d1f 2748 struct flow_miss *miss;
e84173dc 2749 ovs_be16 initial_tci;
501f8d1f
BP
2750 struct flow flow;
2751
b0f7b9b5
BP
2752 /* Obtain metadata and check userspace/kernel agreement on flow match,
2753 * then set 'flow''s header pointers. */
e84173dc
BP
2754 fitness = ofproto_dpif_extract_flow_key(ofproto,
2755 upcall->key, upcall->key_len,
e2a6ca36
BP
2756 &flow, &initial_tci,
2757 upcall->packet);
b0f7b9b5 2758 if (fitness == ODP_FIT_ERROR) {
6b17ca5c 2759 ofpbuf_delete(upcall->packet);
b0f7b9b5
BP
2760 continue;
2761 }
deedf7e7 2762 flow_extract(upcall->packet, flow.skb_priority, flow.tun_id,
abff858b 2763 flow.in_port, &flow);
501f8d1f 2764
21f7563c 2765 /* Handle 802.1ag, LACP, and STP specially. */
501f8d1f 2766 if (process_special(ofproto, &flow, upcall->packet)) {
6527c598
PS
2767 ofproto_update_local_port_stats(&ofproto->up,
2768 0, upcall->packet->size);
501f8d1f
BP
2769 ofpbuf_delete(upcall->packet);
2770 ofproto->n_matches++;
2771 continue;
2772 }
2773
2774 /* Add other packets to a to-do list. */
b0f7b9b5 2775 miss = flow_miss_create(&todo, &flow, fitness,
e84173dc 2776 upcall->key, upcall->key_len, initial_tci);
501f8d1f
BP
2777 list_push_back(&miss->packets, &upcall->packet->list_node);
2778 }
2779
2780 /* Process each element in the to-do list, constructing the set of
2781 * operations to batch. */
2782 n_ops = 0;
33bb0caa 2783 HMAP_FOR_EACH (miss, hmap_node, &todo) {
501f8d1f 2784 handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
abe529af 2785 }
501f8d1f 2786 assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
501f8d1f
BP
2787
2788 /* Execute batch. */
2789 for (i = 0; i < n_ops; i++) {
2790 dpif_ops[i] = &flow_miss_ops[i].dpif_op;
2791 }
2792 dpif_operate(ofproto->dpif, dpif_ops, n_ops);
2793
2794 /* Free memory and update facets. */
2795 for (i = 0; i < n_ops; i++) {
2796 struct flow_miss_op *op = &flow_miss_ops[i];
2797 struct dpif_execute *execute;
501f8d1f
BP
2798
2799 switch (op->dpif_op.type) {
2800 case DPIF_OP_EXECUTE:
c2b565b5 2801 execute = &op->dpif_op.u.execute;
b95fc6ba 2802 if (op->subfacet->actions != execute->actions) {
501f8d1f
BP
2803 free((struct nlattr *) execute->actions);
2804 }
501f8d1f 2805 break;
abe529af 2806
501f8d1f 2807 case DPIF_OP_FLOW_PUT:
c2b565b5 2808 if (!op->dpif_op.error) {
b0f7b9b5 2809 op->subfacet->installed = true;
501f8d1f
BP
2810 }
2811 break;
2812 }
2813 }
33bb0caa
BP
2814 HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &todo) {
2815 ofpbuf_list_delete(&miss->packets);
2816 hmap_remove(&todo, &miss->hmap_node);
2817 free(miss);
2818 }
2819 hmap_destroy(&todo);
abe529af
BP
2820}
2821
2822static void
6ff686f2
PS
2823handle_userspace_upcall(struct ofproto_dpif *ofproto,
2824 struct dpif_upcall *upcall)
abe529af 2825{
6ff686f2 2826 struct user_action_cookie cookie;
e84173dc
BP
2827 enum odp_key_fitness fitness;
2828 ovs_be16 initial_tci;
2829 struct flow flow;
abe529af 2830
6ff686f2 2831 memcpy(&cookie, &upcall->userdata, sizeof(cookie));
abe529af 2832
e84173dc
BP
2833 fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
2834 upcall->key_len, &flow,
e2a6ca36 2835 &initial_tci, upcall->packet);
e84173dc 2836 if (fitness == ODP_FIT_ERROR) {
6b17ca5c 2837 ofpbuf_delete(upcall->packet);
e84173dc
BP
2838 return;
2839 }
2840
6ff686f2 2841 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
abe529af 2842 if (ofproto->sflow) {
e84173dc
BP
2843 dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
2844 &cookie);
abe529af 2845 }
6ff686f2
PS
2846 } else {
2847 VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
2848 }
29ebe880 2849 ofpbuf_delete(upcall->packet);
6ff686f2
PS
2850}
2851
9b16c439
BP
2852static int
2853handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
6ff686f2 2854{
9b16c439
BP
2855 struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
2856 int n_misses;
2857 int i;
abe529af 2858
9b16c439 2859 assert(max_batch <= FLOW_MISS_MAX_BATCH);
abe529af 2860
9b16c439
BP
2861 n_misses = 0;
2862 for (i = 0; i < max_batch; i++) {
2863 struct dpif_upcall *upcall = &misses[n_misses];
2864 int error;
2865
2866 error = dpif_recv(ofproto->dpif, upcall);
2867 if (error) {
9b16c439
BP
2868 break;
2869 }
2870
2871 switch (upcall->type) {
2872 case DPIF_UC_ACTION:
2873 handle_userspace_upcall(ofproto, upcall);
2874 break;
2875
2876 case DPIF_UC_MISS:
2877 /* Handle it later. */
2878 n_misses++;
2879 break;
2880
2881 case DPIF_N_UC_TYPES:
2882 default:
2883 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
2884 upcall->type);
2885 break;
2886 }
abe529af 2887 }
9b16c439
BP
2888
2889 handle_miss_upcalls(ofproto, misses, n_misses);
2890
2891 return i;
abe529af
BP
2892}
2893\f
2894/* Flow expiration. */
2895
b0f7b9b5 2896static int subfacet_max_idle(const struct ofproto_dpif *);
abe529af
BP
2897static void update_stats(struct ofproto_dpif *);
2898static void rule_expire(struct rule_dpif *);
b0f7b9b5 2899static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
abe529af
BP
2900
2901/* This function is called periodically by run(). Its job is to collect
2902 * updates for the flows that have been installed into the datapath, most
2903 * importantly when they last were used, and then use that information to
2904 * expire flows that have not been used recently.
2905 *
2906 * Returns the number of milliseconds after which it should be called again. */
2907static int
2908expire(struct ofproto_dpif *ofproto)
2909{
2910 struct rule_dpif *rule, *next_rule;
d0918789 2911 struct oftable *table;
abe529af
BP
2912 int dp_max_idle;
2913
2914 /* Update stats for each flow in the datapath. */
2915 update_stats(ofproto);
2916
b0f7b9b5
BP
2917 /* Expire subfacets that have been idle too long. */
2918 dp_max_idle = subfacet_max_idle(ofproto);
2919 expire_subfacets(ofproto, dp_max_idle);
abe529af
BP
2920
2921 /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
0697b5c3
BP
2922 OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
2923 struct cls_cursor cursor;
2924
d0918789 2925 cls_cursor_init(&cursor, &table->cls, NULL);
0697b5c3
BP
2926 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
2927 rule_expire(rule);
2928 }
abe529af
BP
2929 }
2930
2931 /* All outstanding data in existing flows has been accounted, so it's a
2932 * good time to do bond rebalancing. */
2933 if (ofproto->has_bonded_bundles) {
2934 struct ofbundle *bundle;
2935
2936 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
2937 if (bundle->bond) {
2938 bond_rebalance(bundle->bond, &ofproto->revalidate_set);
2939 }
2940 }
2941 }
2942
2943 return MIN(dp_max_idle, 1000);
2944}
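/* Sketch of the intended calling pattern (illustrative; the timer field
 * name is hypothetical):
 *
 *     int delay = expire(ofproto);
 *     timer_set_duration(&ofproto->next_expiration, delay);
 *
 * i.e. the run loop reschedules itself using the returned interval, so
 * expiration runs more frequently while short-lived datapath flows
 * dominate. */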
2945
2946/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
2947 *
2948 * This function also pushes statistics updates to rules which each facet
2949 * resubmits into. Generally these statistics will be accurate. However, if a
2950 * facet changes the rule it resubmits into at some time in between
2951 * update_stats() runs, it is possible that statistics accrued to the
2952 * old rule will be incorrectly attributed to the new rule. This could be
2953 * avoided by calling update_stats() whenever rules are created or
2954 * deleted. However, the performance impact of making so many calls to the
2955 * datapath does not justify the benefit of having perfectly accurate statistics.
2956 */
2957static void
2958update_stats(struct ofproto_dpif *p)
2959{
2960 const struct dpif_flow_stats *stats;
2961 struct dpif_flow_dump dump;
2962 const struct nlattr *key;
2963 size_t key_len;
2964
2965 dpif_flow_dump_start(&dump, p->dpif);
2966 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
b0f7b9b5 2967 struct subfacet *subfacet;
abe529af 2968
6a542738 2969 subfacet = subfacet_find(p, key, key_len);
b0f7b9b5
BP
2970 if (subfacet && subfacet->installed) {
2971 struct facet *facet = subfacet->facet;
abe529af 2972
b0f7b9b5
BP
2973 if (stats->n_packets >= subfacet->dp_packet_count) {
2974 uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
abe529af
BP
2975 facet->packet_count += extra;
2976 } else {
2977 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
2978 }
2979
b0f7b9b5
BP
2980 if (stats->n_bytes >= subfacet->dp_byte_count) {
2981 facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
abe529af
BP
2982 } else {
2983 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
2984 }
2985
b0f7b9b5
BP
2986 subfacet->dp_packet_count = stats->n_packets;
2987 subfacet->dp_byte_count = stats->n_bytes;
abe529af 2988
0e553d9c
BP
2989 facet->tcp_flags |= stats->tcp_flags;
2990
15baa734
BP
2991 subfacet_update_time(subfacet, stats->used);
2992 facet_account(facet);
abe529af
BP
2993 facet_push_stats(facet);
2994 } else {
6a542738
PS
2995 if (!VLOG_DROP_WARN(&rl)) {
2996 struct ds s;
2997
2998 ds_init(&s);
2999 odp_flow_key_format(key, key_len, &s);
3000 VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
3001 ds_destroy(&s);
3002 }
3003
3004 COVERAGE_INC(facet_unexpected);
b0f7b9b5
BP
3005 /* There's a flow in the datapath that we know nothing about, or a
3006 * flow that shouldn't be installed but was anyway. Delete it. */
abe529af
BP
3007 dpif_flow_del(p->dpif, key, key_len, NULL);
3008 }
3009 }
3010 dpif_flow_dump_done(&dump);
3011}
3012
3013/* Calculates and returns the number of milliseconds of idle time after which
b0f7b9b5
BP
3014 * subfacets should expire from the datapath. When a subfacet expires, we fold
3015 * its statistics into its facet, and when a facet's last subfacet expires, we
3016 * fold its statistic into its rule. */
abe529af 3017static int
b0f7b9b5 3018subfacet_max_idle(const struct ofproto_dpif *ofproto)
abe529af
BP
3019{
3020 /*
3021 * Idle time histogram.
3022 *
b0f7b9b5
BP
3023 * Most of the time a switch has a relatively small number of subfacets.
3024 * When this is the case we might as well keep statistics for all of them
3025 * in userspace and cache them in the kernel datapath for performance as
abe529af
BP
3026 * well.
3027 *
b0f7b9b5 3028 * As the number of subfacets increases, the memory required to maintain
abe529af 3029 * statistics about them in userspace and in the kernel becomes
b0f7b9b5
BP
3030 * significant. However, with a large number of subfacets it is likely
3031 * that only a few of them are "heavy hitters" that consume a large amount
3032 * of bandwidth. At this point, only heavy hitters are worth caching in
3033 * the kernel and maintaining in userspace; other subfacets we can
3034 * discard.
abe529af
BP
3035 *
3036 * The technique used to compute the idle time is to build a histogram with
b0f7b9b5 3037 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
abe529af
BP
3038 * that is installed in the kernel gets dropped in the appropriate bucket.
3039 * After the histogram has been built, we compute the cutoff so that only
b0f7b9b5 3040 * the most-recently-used 1% of subfacets (but at least
084f5290 3041 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
b0f7b9b5
BP
3042 * the most-recently-used bucket of subfacets is kept, so actually an
3043 * arbitrary number of subfacets can be kept in any given expiration run
084f5290
SH
3044 * (though the next run will delete most of those unless they receive
3045 * additional data).
abe529af 3046 *
b0f7b9b5
BP
3047 * This requires a second pass through the subfacets, in addition to the
3048 * pass made by update_stats(), because the former function never looks at
3049 * uninstallable subfacets.
abe529af
BP
3050 */
3051 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
3052 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
3053 int buckets[N_BUCKETS] = { 0 };
f11c1ef4 3054 int total, subtotal, bucket;
b0f7b9b5 3055 struct subfacet *subfacet;
abe529af
BP
3056 long long int now;
3057 int i;
3058
b0f7b9b5 3059 total = hmap_count(&ofproto->subfacets);
084f5290 3060 if (total <= ofproto->up.flow_eviction_threshold) {
abe529af
BP
3061 return N_BUCKETS * BUCKET_WIDTH;
3062 }
3063
3064 /* Build histogram. */
3065 now = time_msec();
b0f7b9b5
BP
3066 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
3067 long long int idle = now - subfacet->used;
abe529af
BP
3068 int bucket = (idle <= 0 ? 0
3069 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
3070 : (unsigned int) idle / BUCKET_WIDTH);
3071 buckets[bucket]++;
3072 }
3073
3074 /* Find the first bucket whose flows should be expired. */
f11c1ef4
SH
3075 subtotal = bucket = 0;
3076 do {
3077 subtotal += buckets[bucket++];
084f5290
SH
3078 } while (bucket < N_BUCKETS &&
3079 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
abe529af
BP
3080
3081 if (VLOG_IS_DBG_ENABLED()) {
3082 struct ds s;
3083
3084 ds_init(&s);
3085 ds_put_cstr(&s, "keep");
3086 for (i = 0; i < N_BUCKETS; i++) {
3087 if (i == bucket) {
3088 ds_put_cstr(&s, ", drop");
3089 }
3090 if (buckets[i]) {
3091 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
3092 }
3093 }
3094 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
3095 ds_destroy(&s);
3096 }
3097
3098 return bucket * BUCKET_WIDTH;
3099}
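/* Worked example (illustrative, assuming BUCKET_WIDTH works out to 100 ms,
 * so N_BUCKETS == 50): with 10,000 subfacets and
 * flow_eviction_threshold == 1,000, the cutoff must cover
 * MAX(1000, 10000 / 100) == 1,000 subfacets. Buckets are accumulated from
 * bucket 0 upward; if the subtotal first reaches 1,000 while adding bucket
 * 6, 'bucket' has advanced to 7 and the function returns 700, so subfacets
 * idle for longer than 700 ms become candidates for expiration. */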
3100
abe529af 3101static void
b0f7b9b5 3102expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
abe529af
BP
3103{
3104 long long int cutoff = time_msec() - dp_max_idle;
b0f7b9b5 3105 struct subfacet *subfacet, *next_subfacet;
abe529af 3106
b0f7b9b5
BP
3107 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
3108 &ofproto->subfacets) {
3109 if (subfacet->used < cutoff) {
15baa734 3110 subfacet_destroy(subfacet);
abe529af
BP
3111 }
3112 }
3113}
3114
3115/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
3116 * then delete it entirely. */
3117static void
3118rule_expire(struct rule_dpif *rule)
3119{
abe529af
BP
3120 struct facet *facet, *next_facet;
3121 long long int now;
3122 uint8_t reason;
3123
3124 /* Has 'rule' expired? */
3125 now = time_msec();
3126 if (rule->up.hard_timeout
308881af 3127 && now > rule->up.modified + rule->up.hard_timeout * 1000) {
abe529af 3128 reason = OFPRR_HARD_TIMEOUT;
8ea6ac3e 3129 } else if (rule->up.idle_timeout
1745cd08 3130 && now > rule->up.used + rule->up.idle_timeout * 1000) {
abe529af
BP
3131 reason = OFPRR_IDLE_TIMEOUT;
3132 } else {
3133 return;
3134 }
3135
3136 COVERAGE_INC(ofproto_dpif_expired);
3137
3138 /* Update stats. (This is a no-op if the rule expired due to an idle
3139 * timeout, because that only happens when the rule has no facets left.) */
3140 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 3141 facet_remove(facet);
abe529af
BP
3142 }
3143
3144 /* Get rid of the rule. */
3145 ofproto_rule_expire(&rule->up, reason);
3146}
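/* Example (illustrative): a rule with hard_timeout == 30 s whose
 * rule->up.modified timestamp is 1,000 ms expires as soon as time_msec()
 * exceeds 1000 + 30 * 1000 == 31,000 ms, with reason OFPRR_HARD_TIMEOUT;
 * idle expiration applies the same arithmetic to rule->up.used. */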
3147\f
3148/* Facets. */
3149
f3827897 3150/* Creates and returns a new facet owned by 'rule', given a 'flow'.
abe529af
BP
3151 *
3152 * The caller must already have determined that no facet with an identical
3153 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
f3827897
BP
3154 * the ofproto's classifier table.
3155 *
b0f7b9b5
BP
3156 * The facet will initially have no subfacets. The caller should create (at
3157 * least) one subfacet with subfacet_create(). */
abe529af 3158static struct facet *
f3827897 3159facet_create(struct rule_dpif *rule, const struct flow *flow)
abe529af
BP
3160{
3161 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3162 struct facet *facet;
3163
3164 facet = xzalloc(sizeof *facet);
3165 facet->used = time_msec();
3166 hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
3167 list_push_back(&rule->facets, &facet->list_node);
3168 facet->rule = rule;
3169 facet->flow = *flow;
b0f7b9b5 3170 list_init(&facet->subfacets);
abe529af
BP
3171 netflow_flow_init(&facet->nf_flow);
3172 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
3173
abe529af
BP
3174 return facet;
3175}
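/* Typical usage (sketch, mirroring handle_flow_miss() above):
 *
 *     facet = facet_lookup_valid(ofproto, flow);
 *     if (!facet) {
 *         struct rule_dpif *rule = rule_dpif_lookup(ofproto, flow, 0);
 *         facet = facet_create(rule, flow);
 *     }
 *     subfacet = subfacet_create(facet, key_fitness, key, key_len,
 *                                initial_tci);
 */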
3176
3177static void
3178facet_free(struct facet *facet)
3179{
abe529af
BP
3180 free(facet);
3181}
3182
3d9e05f8
BP
3183/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions
3184 * in 'odp_actions' on 'packet', whose flow matches 'flow'.
3185 *
3186 * Takes ownership of 'packet'. */
3187static bool
3188execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
3189 const struct nlattr *odp_actions, size_t actions_len,
3190 struct ofpbuf *packet)
3191{
3192 struct odputil_keybuf keybuf;
3193 struct ofpbuf key;
3194 int error;
3195
6ff686f2
PS
3196 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
3197 odp_flow_key_from_flow(&key, flow);
80e5eed9 3198
6ff686f2
PS
3199 error = dpif_execute(ofproto->dpif, key.data, key.size,
3200 odp_actions, actions_len, packet);
80e5eed9 3201
6ff686f2
PS
3202 ofpbuf_delete(packet);
3203 return !error;
abe529af
BP
3204}
3205
abe529af
BP
3206/* Remove 'facet' from 'ofproto' and free up the associated memory:
3207 *
3208 * - If 'facet' was installed in the datapath, uninstalls it and updates its
b0f7b9b5 3209 * rule's statistics, via subfacet_uninstall().
abe529af
BP
3210 *
3211 * - Removes 'facet' from its rule and from ofproto->facets.
3212 */
3213static void
15baa734 3214facet_remove(struct facet *facet)
abe529af 3215{
15baa734 3216 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
3217 struct subfacet *subfacet, *next_subfacet;
3218
551a2f6c
BP
3219 assert(!list_is_empty(&facet->subfacets));
3220
3221 /* First uninstall all of the subfacets to get final statistics. */
3222 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
15baa734 3223 subfacet_uninstall(subfacet);
551a2f6c
BP
3224 }
3225
3226 /* Flush the final stats to the rule.
3227 *
3228 * This might require us to have at least one subfacet around so that we
3229 * can use its actions for accounting in facet_account(), which is why we
3230 * have uninstalled but not yet destroyed the subfacets. */
15baa734 3231 facet_flush_stats(facet);
551a2f6c
BP
3232
3233 /* Now we're really all done so destroy everything. */
b0f7b9b5
BP
3234 LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
3235 &facet->subfacets) {
15baa734 3236 subfacet_destroy__(subfacet);
b0f7b9b5 3237 }
abe529af
BP
3238 hmap_remove(&ofproto->facets, &facet->hmap_node);
3239 list_remove(&facet->list_node);
3240 facet_free(facet);
3241}
3242
abe529af 3243static void
15baa734 3244facet_account(struct facet *facet)
abe529af 3245{
15baa734 3246 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
55af77bb 3247 uint64_t n_bytes;
b95fc6ba 3248 struct subfacet *subfacet;
abe529af 3249 const struct nlattr *a;
abe529af 3250 unsigned int left;
d78be13b 3251 ovs_be16 vlan_tci;
abe529af 3252
55af77bb 3253 if (facet->byte_count <= facet->accounted_bytes) {
abe529af
BP
3254 return;
3255 }
55af77bb
EJ
3256 n_bytes = facet->byte_count - facet->accounted_bytes;
3257 facet->accounted_bytes = facet->byte_count;
abe529af 3258
75a75043 3259 /* Feed information from the active flows back into the learning table to
abe529af
BP
3260 * ensure that table is always in sync with what is actually flowing
3261 * through the datapath. */
0e553d9c
BP
3262 if (facet->has_learn || facet->has_normal
3263 || (facet->has_fin_timeout
3264 && facet->tcp_flags & (TCP_FIN | TCP_RST))) {
75a75043 3265 struct action_xlate_ctx ctx;
abe529af 3266
e84173dc 3267 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
54834960 3268 facet->flow.vlan_tci,
0e553d9c 3269 facet->rule, facet->tcp_flags, NULL);
75a75043
BP
3270 ctx.may_learn = true;
3271 ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
3272 facet->rule->up.n_actions));
3273 }
abe529af 3274
75a75043 3275 if (!facet->has_normal || !ofproto->has_bonded_bundles) {
abe529af
BP
3276 return;
3277 }
d78be13b
BP
3278
3279 /* This loop feeds byte counters to bond_account() for rebalancing to use
3280 * as a basis. We also need to track the actual VLAN on which the packet
3281 * is going to be sent to ensure that it matches the one passed to
3282 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
b95fc6ba
BP
3283 * hash bucket.)
3284 *
3285 * We use the actions from an arbitrary subfacet because they should all
3286 * be equally valid for our purpose. */
3287 subfacet = CONTAINER_OF(list_front(&facet->subfacets),
3288 struct subfacet, list_node);
d78be13b 3289 vlan_tci = facet->flow.vlan_tci;
b95fc6ba
BP
3290 NL_ATTR_FOR_EACH_UNSAFE (a, left,
3291 subfacet->actions, subfacet->actions_len) {
fea393b1 3292 const struct ovs_action_push_vlan *vlan;
d78be13b 3293 struct ofport_dpif *port;
abe529af 3294
d78be13b 3295 switch (nl_attr_type(a)) {
df2c07f4 3296 case OVS_ACTION_ATTR_OUTPUT:
abe529af
BP
3297 port = get_odp_port(ofproto, nl_attr_get_u32(a));
3298 if (port && port->bundle && port->bundle->bond) {
d78be13b 3299 bond_account(port->bundle->bond, &facet->flow,
dc155bff 3300 vlan_tci_to_vid(vlan_tci), n_bytes);
abe529af 3301 }
d78be13b
BP
3302 break;
3303
fea393b1
BP
3304 case OVS_ACTION_ATTR_POP_VLAN:
3305 vlan_tci = htons(0);
d78be13b
BP
3306 break;
3307
fea393b1
BP
3308 case OVS_ACTION_ATTR_PUSH_VLAN:
3309 vlan = nl_attr_get(a);
3310 vlan_tci = vlan->vlan_tci;
d78be13b 3311 break;
abe529af
BP
3312 }
3313 }
3314}
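/* Example (illustrative): for datapath actions
 * [OVS_ACTION_ATTR_PUSH_VLAN(vlan_tci = 10|CFI), OVS_ACTION_ATTR_OUTPUT(2)],
 * the loop above updates 'vlan_tci' at the push action before it reaches the
 * output action, so bond_account() sees VID 10, the VLAN the packet will
 * actually be sent on, matching the hash bucket chosen by
 * bond_choose_output_slave(). */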
3315
abe529af
BP
3316/* Returns true if the only action for 'facet' is to send to the controller.
3317 * (We don't report NetFlow expiration messages for such facets because they
3318 * are just part of the control logic for the network, not real traffic). */
3319static bool
3320facet_is_controller_flow(struct facet *facet)
3321{
3322 return (facet
3323 && facet->rule->up.n_actions == 1
3324 && action_outputs_to_port(&facet->rule->up.actions[0],
3325 htons(OFPP_CONTROLLER)));
3326}
3327
3328/* Folds all of 'facet''s statistics into its rule. Also updates the
3329 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
3330 * 'facet''s statistics in the datapath should have been zeroed and folded into
3331 * its packet and byte counts before this function is called. */
3332static void
15baa734 3333facet_flush_stats(struct facet *facet)
abe529af 3334{
15baa734 3335 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
3336 struct subfacet *subfacet;
3337
3338 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3339 assert(!subfacet->dp_byte_count);
3340 assert(!subfacet->dp_packet_count);
3341 }
abe529af
BP
3342
3343 facet_push_stats(facet);
15baa734 3344 facet_account(facet);
abe529af
BP
3345
3346 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
3347 struct ofexpired expired;
3348 expired.flow = facet->flow;
3349 expired.packet_count = facet->packet_count;
3350 expired.byte_count = facet->byte_count;
3351 expired.used = facet->used;
3352 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
3353 }
3354
3355 facet->rule->packet_count += facet->packet_count;
3356 facet->rule->byte_count += facet->byte_count;
3357
3358 /* Reset counters to prevent double counting if 'facet' ever gets
3359 * reinstalled. */
bbb5d219 3360 facet_reset_counters(facet);
abe529af
BP
3361
3362 netflow_flow_clear(&facet->nf_flow);
0e553d9c 3363 facet->tcp_flags = 0;
abe529af
BP
3364}
3365
3366/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
3367 * Returns it if found, otherwise a null pointer.
3368 *
3369 * The returned facet might need revalidation; use facet_lookup_valid()
3370 * instead if that is important. */
3371static struct facet *
3372facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
3373{
3374 struct facet *facet;
3375
3376 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
3377 &ofproto->facets) {
3378 if (flow_equal(flow, &facet->flow)) {
3379 return facet;
3380 }
3381 }
3382
3383 return NULL;
3384}
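
/* The lookup above only works if insertions into ofproto->facets used the
 * same hash function.  A minimal sketch of the matching insert (assuming a
 * fully initialized 'facet'):
 *
 *     hmap_insert(&ofproto->facets, &facet->hmap_node,
 *                 flow_hash(&facet->flow, 0));
 */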
3385
3386/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
3387 * Returns it if found, otherwise a null pointer.
3388 *
3389 * The returned facet is guaranteed to be valid. */
3390static struct facet *
3391facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
3392{
3393 struct facet *facet = facet_find(ofproto, flow);
3394
3395 /* The facet we found might not be valid, since we could be in need of
3396 * revalidation. If it is not valid, don't return it. */
3397 if (facet
0e4b3771
BP
3398 && (ofproto->need_revalidate
3399 || tag_set_intersects(&ofproto->revalidate_set, facet->tags))
15baa734 3400 && !facet_revalidate(facet)) {
abe529af
BP
3401 COVERAGE_INC(facet_invalidated);
3402 return NULL;
3403 }
3404
3405 return facet;
3406}
3407
6814e51f
BP
3408static bool
3409facet_check_consistency(struct facet *facet)
3410{
3411 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3412
3413 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3414
3415 struct rule_dpif *rule;
3416 struct subfacet *subfacet;
c53e1132 3417 bool may_log = false;
6814e51f
BP
3418 bool ok;
3419
3420 /* Check the rule for consistency. */
3421 rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
3422 if (!rule) {
3423 if (!VLOG_DROP_WARN(&rl)) {
3424 char *s = flow_to_string(&facet->flow);
3425 VLOG_WARN("%s: facet should not exist", s);
3426 free(s);
3427 }
3428 return false;
3429 } else if (rule != facet->rule) {
c53e1132
BP
3430 may_log = !VLOG_DROP_WARN(&rl);
3431 ok = false;
3432 if (may_log) {
3433 struct ds s;
6814e51f 3434
c53e1132
BP
3435 ds_init(&s);
3436 flow_format(&s, &facet->flow);
3437 ds_put_format(&s, ": facet associated with wrong rule (was "
3438 "table=%"PRIu8",", facet->rule->up.table_id);
3439 cls_rule_format(&facet->rule->up.cr, &s);
3440 ds_put_format(&s, ") (should have been table=%"PRIu8",",
3441 rule->up.table_id);
3442 cls_rule_format(&rule->up.cr, &s);
3443 ds_put_char(&s, ')');
6814e51f 3444
c53e1132
BP
3445 VLOG_WARN("%s", ds_cstr(&s));
3446 ds_destroy(&s);
3447 }
6814e51f
BP
3448 } else {
3449 ok = true;
3450 }
3451
3452 /* Check the datapath actions for consistency. */
3453 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3454 struct action_xlate_ctx ctx;
3455 struct ofpbuf *odp_actions;
3456 bool actions_changed;
3457 bool should_install;
3458
3459 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 3460 subfacet->initial_tci, rule, 0, NULL);
6814e51f
BP
3461 odp_actions = xlate_actions(&ctx, rule->up.actions,
3462 rule->up.n_actions);
3463
3464 should_install = (ctx.may_set_up_flow
3465 && subfacet->key_fitness != ODP_FIT_TOO_LITTLE);
3466 if (!should_install && !subfacet->installed) {
3467 /* The actions for uninstallable flows may vary from one packet to
3468 * the next, so don't compare the actions. */
3469 goto next;
3470 }
3471
3472 actions_changed = (subfacet->actions_len != odp_actions->size
3473 || memcmp(subfacet->actions, odp_actions->data,
3474 subfacet->actions_len));
3475 if (should_install != subfacet->installed || actions_changed) {
c53e1132
BP
3476 if (ok) {
3477 may_log = !VLOG_DROP_WARN(&rl);
3478 ok = false;
3479 }
6814e51f 3480
c53e1132
BP
3481 if (may_log) {
3482 struct odputil_keybuf keybuf;
3483 struct ofpbuf key;
3484 struct ds s;
6814e51f 3485
c53e1132
BP
3486 ds_init(&s);
3487 subfacet_get_key(subfacet, &keybuf, &key);
3488 odp_flow_key_format(key.data, key.size, &s);
3489
3490 ds_put_cstr(&s, ": inconsistency in subfacet");
3491 if (should_install != subfacet->installed) {
3492 enum odp_key_fitness fitness = subfacet->key_fitness;
3493
3494 ds_put_format(&s, " (should%s have been installed)",
3495 should_install ? "" : " not");
3496 ds_put_format(&s, " (may_set_up_flow=%s, fitness=%s)",
3497 ctx.may_set_up_flow ? "true" : "false",
3498 odp_key_fitness_to_string(fitness));
3499 }
3500 if (actions_changed) {
3501 ds_put_cstr(&s, " (actions were: ");
3502 format_odp_actions(&s, subfacet->actions,
3503 subfacet->actions_len);
3504 ds_put_cstr(&s, ") (correct actions: ");
3505 format_odp_actions(&s, odp_actions->data,
3506 odp_actions->size);
3507 ds_put_char(&s, ')');
3508 } else {
3509 ds_put_cstr(&s, " (actions: ");
3510 format_odp_actions(&s, subfacet->actions,
3511 subfacet->actions_len);
3512 ds_put_char(&s, ')');
3513 }
3514 VLOG_WARN("%s", ds_cstr(&s));
3515 ds_destroy(&s);
6814e51f 3516 }
6814e51f
BP
3517 }
3518
3519 next:
3520 ofpbuf_delete(odp_actions);
3521 }
3522
3523 return ok;
3524}
3525
15baa734 3526/* Re-searches the classifier for 'facet':
abe529af
BP
3527 *
3528 * - If the rule found is different from 'facet''s current rule, moves
3529 * 'facet' to the new rule and recompiles its actions.
3530 *
3531 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
3532 * where it is and recompiles its actions anyway.
3533 *
3534 * - If there is none, destroys 'facet'.
3535 *
3536 * Returns true if 'facet' still exists, false if it has been destroyed. */
3537static bool
15baa734 3538facet_revalidate(struct facet *facet)
abe529af 3539{
15baa734 3540 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b95fc6ba
BP
3541 struct actions {
3542 struct nlattr *odp_actions;
3543 size_t actions_len;
3544 };
3545 struct actions *new_actions;
3546
abe529af 3547 struct action_xlate_ctx ctx;
abe529af 3548 struct rule_dpif *new_rule;
b0f7b9b5 3549 struct subfacet *subfacet;
abe529af 3550 bool actions_changed;
b95fc6ba 3551 int i;
abe529af
BP
3552
3553 COVERAGE_INC(facet_revalidate);
3554
3555 /* Determine the new rule. */
29901626 3556 new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
abe529af
BP
3557 if (!new_rule) {
3558 /* No new rule, so delete the facet. */
15baa734 3559 facet_remove(facet);
abe529af
BP
3560 return false;
3561 }
3562
df2c07f4 3563 /* Calculate new datapath actions.
abe529af
BP
3564 *
3565 * We do not modify any 'facet' state yet, because we might need to, e.g.,
3566 * emit a NetFlow expiration and, if so, we need to have the old state
3567 * around to properly compose it. */
abe529af 3568
df2c07f4
JP
3569 /* If the datapath actions changed or the installability changed,
3570 * then we need to talk to the datapath. */
b95fc6ba
BP
3571 i = 0;
3572 new_actions = NULL;
3573 memset(&ctx, 0, sizeof ctx);
b0f7b9b5 3574 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
b95fc6ba
BP
3575 struct ofpbuf *odp_actions;
3576 bool should_install;
3577
e84173dc 3578 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
0e553d9c 3579 subfacet->initial_tci, new_rule, 0, NULL);
b95fc6ba
BP
3580 odp_actions = xlate_actions(&ctx, new_rule->up.actions,
3581 new_rule->up.n_actions);
3582 actions_changed = (subfacet->actions_len != odp_actions->size
3583 || memcmp(subfacet->actions, odp_actions->data,
3584 subfacet->actions_len));
3585
3586 should_install = (ctx.may_set_up_flow
3587 && subfacet->key_fitness != ODP_FIT_TOO_LITTLE);
b0f7b9b5
BP
3588 if (actions_changed || should_install != subfacet->installed) {
3589 if (should_install) {
3590 struct dpif_flow_stats stats;
3591
15baa734 3592 subfacet_install(subfacet,
b0f7b9b5 3593 odp_actions->data, odp_actions->size, &stats);
15baa734 3594 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 3595 } else {
15baa734 3596 subfacet_uninstall(subfacet);
b0f7b9b5 3597 }
b95fc6ba
BP
3598
3599 if (!new_actions) {
3600 new_actions = xcalloc(list_size(&facet->subfacets),
3601 sizeof *new_actions);
3602 }
3603 new_actions[i].odp_actions = xmemdup(odp_actions->data,
3604 odp_actions->size);
3605 new_actions[i].actions_len = odp_actions->size;
abe529af 3606 }
b95fc6ba
BP
3607
3608 ofpbuf_delete(odp_actions);
3609 i++;
b0f7b9b5 3610 }
b95fc6ba 3611 if (new_actions) {
15baa734 3612 facet_flush_stats(facet);
abe529af
BP
3613 }
3614
3615 /* Update 'facet' now that we've taken care of all the old state. */
3616 facet->tags = ctx.tags;
3617 facet->nf_flow.output_iface = ctx.nf_output_iface;
3618 facet->may_install = ctx.may_set_up_flow;
75a75043
BP
3619 facet->has_learn = ctx.has_learn;
3620 facet->has_normal = ctx.has_normal;
0e553d9c 3621 facet->has_fin_timeout = ctx.has_fin_timeout;
9d24de3b 3622 facet->mirrors = ctx.mirrors;
b95fc6ba
BP
3623 if (new_actions) {
3624 i = 0;
3625 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3626 if (new_actions[i].odp_actions) {
3627 free(subfacet->actions);
3628 subfacet->actions = new_actions[i].odp_actions;
3629 subfacet->actions_len = new_actions[i].actions_len;
3630 }
3631 i++;
3632 }
3633 free(new_actions);
abe529af
BP
3634 }
3635 if (facet->rule != new_rule) {
3636 COVERAGE_INC(facet_changed_rule);
3637 list_remove(&facet->list_node);
3638 list_push_back(&new_rule->facets, &facet->list_node);
3639 facet->rule = new_rule;
3640 facet->used = new_rule->up.created;
9d24de3b 3641 facet->prev_used = facet->used;
abe529af
BP
3642 }
3643
abe529af
BP
3644 return true;
3645}
3646
3647/* Updates 'facet''s used time. Caller is responsible for calling
3648 * facet_push_stats() to update the flows which 'facet' resubmits into. */
3649static void
15baa734 3650facet_update_time(struct facet *facet, long long int used)
abe529af 3651{
15baa734 3652 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
abe529af
BP
3653 if (used > facet->used) {
3654 facet->used = used;
1745cd08 3655 ofproto_rule_update_used(&facet->rule->up, used);
abe529af
BP
3656 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
3657 }
3658}
3659
bbb5d219
EJ
3660static void
3661facet_reset_counters(struct facet *facet)
3662{
3663 facet->packet_count = 0;
3664 facet->byte_count = 0;
9d24de3b
JP
3665 facet->prev_packet_count = 0;
3666 facet->prev_byte_count = 0;
bbb5d219
EJ
3667 facet->accounted_bytes = 0;
3668}
3669
abe529af
BP
3670static void
3671facet_push_stats(struct facet *facet)
3672{
9d24de3b 3673 uint64_t new_packets, new_bytes;
abe529af 3674
9d24de3b
JP
3675 assert(facet->packet_count >= facet->prev_packet_count);
3676 assert(facet->byte_count >= facet->prev_byte_count);
3677 assert(facet->used >= facet->prev_used);
abe529af 3678
9d24de3b
JP
3679 new_packets = facet->packet_count - facet->prev_packet_count;
3680 new_bytes = facet->byte_count - facet->prev_byte_count;
abe529af 3681
9d24de3b
JP
3682 if (new_packets || new_bytes || facet->used > facet->prev_used) {
3683 facet->prev_packet_count = facet->packet_count;
3684 facet->prev_byte_count = facet->byte_count;
3685 facet->prev_used = facet->used;
abe529af
BP
3686
3687 flow_push_stats(facet->rule, &facet->flow,
9d24de3b
JP
3688 new_packets, new_bytes, facet->used);
3689
3690 update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
3691 facet->mirrors, new_packets, new_bytes);
abe529af
BP
3692 }
3693}
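
/* Worked example with hypothetical numbers: if facet->packet_count is 150
 * and facet->prev_packet_count is 100, the code above pushes 50 new packets
 * (plus the corresponding byte delta) into the resubmitted-to rules and the
 * mirrors, then records 150 as the new prev_packet_count, so the same
 * traffic is never pushed twice. */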
3694
3695struct ofproto_push {
3696 struct action_xlate_ctx ctx;
3697 uint64_t packets;
3698 uint64_t bytes;
3699 long long int used;
3700};
3701
3702static void
3703push_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
3704{
3705 struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx);
3706
3707 if (rule) {
3708 rule->packet_count += push->packets;
3709 rule->byte_count += push->bytes;
1745cd08 3710 ofproto_rule_update_used(&rule->up, push->used);
abe529af
BP
3711 }
3712}
3713
3714/* Pushes flow statistics to the rules which 'flow' resubmits into given
9d24de3b 3715 * 'rule''s actions and mirrors. */
abe529af 3716static void
18b2a258 3717flow_push_stats(struct rule_dpif *rule,
59d0f2c8 3718 const struct flow *flow, uint64_t packets, uint64_t bytes,
abe529af
BP
3719 long long int used)
3720{
3721 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3722 struct ofproto_push push;
3723
3724 push.packets = packets;
3725 push.bytes = bytes;
3726 push.used = used;
3727
f3b50afb
BP
3728 ofproto_rule_update_used(&rule->up, used);
3729
18b2a258 3730 action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, rule,
0e553d9c 3731 0, NULL);
abe529af
BP
3732 push.ctx.resubmit_hook = push_resubmit;
3733 ofpbuf_delete(xlate_actions(&push.ctx,
3734 rule->up.actions, rule->up.n_actions));
3735}
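
/* The resubmit_hook mechanism used above can carry arbitrary per-resubmit
 * state by embedding the ctx in a larger struct, exactly as 'struct
 * ofproto_push' does.  A minimal sketch with a hypothetical counter:
 *
 *     struct count_push { struct action_xlate_ctx ctx; int n_rules; };
 *
 *     static void
 *     count_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
 *     {
 *         struct count_push *cp = CONTAINER_OF(ctx, struct count_push, ctx);
 *         cp->n_rules += rule != NULL;
 *     }
 */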
3736\f
b0f7b9b5
BP
3737/* Subfacets. */
3738
3739static struct subfacet *
3740subfacet_find__(struct ofproto_dpif *ofproto,
3741 const struct nlattr *key, size_t key_len, uint32_t key_hash,
3742 const struct flow *flow)
3743{
3744 struct subfacet *subfacet;
3745
3746 HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
3747 &ofproto->subfacets) {
3748 if (subfacet->key
3749 ? (subfacet->key_len == key_len
3750 && !memcmp(key, subfacet->key, key_len))
3751 : flow_equal(flow, &subfacet->facet->flow)) {
3752 return subfacet;
3753 }
3754 }
3755
3756 return NULL;
3757}
3758
3759/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
3760 * 'key_fitness', 'key', and 'key_len'. Returns the existing subfacet if
b95fc6ba
BP
3761 * there is one, otherwise creates and returns a new subfacet.
3762 *
3763 * If the returned subfacet is new, then subfacet->actions will be NULL, in
3764 * which case the caller must populate the actions with
3765 * subfacet_make_actions(). */
b0f7b9b5 3766static struct subfacet *
15baa734 3767subfacet_create(struct facet *facet, enum odp_key_fitness key_fitness,
e84173dc 3768 const struct nlattr *key, size_t key_len, ovs_be16 initial_tci)
b0f7b9b5 3769{
15baa734 3770 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
3771 uint32_t key_hash = odp_flow_key_hash(key, key_len);
3772 struct subfacet *subfacet;
3773
3774 subfacet = subfacet_find__(ofproto, key, key_len, key_hash, &facet->flow);
3775 if (subfacet) {
3776 if (subfacet->facet == facet) {
3777 return subfacet;
3778 }
3779
3780 /* This shouldn't happen. */
3781 VLOG_ERR_RL(&rl, "subfacet with wrong facet");
15baa734 3782 subfacet_destroy(subfacet);
b0f7b9b5
BP
3783 }
3784
3785 subfacet = xzalloc(sizeof *subfacet);
3786 hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
3787 list_push_back(&facet->subfacets, &subfacet->list_node);
3788 subfacet->facet = facet;
3789 subfacet->used = time_msec();
3790 subfacet->key_fitness = key_fitness;
3791 if (key_fitness != ODP_FIT_PERFECT) {
3792 subfacet->key = xmemdup(key, key_len);
3793 subfacet->key_len = key_len;
3794 }
3795 subfacet->installed = false;
e84173dc 3796 subfacet->initial_tci = initial_tci;
b0f7b9b5
BP
3797
3798 return subfacet;
3799}
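
/* Note on the key handling above: when the datapath key round-trips
 * perfectly (ODP_FIT_PERFECT), subfacet->key stays NULL and
 * subfacet_get_key() can regenerate it from facet->flow on demand, so only
 * imperfectly fitting keys are stored verbatim. */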
3800
3801/* Searches 'ofproto' for a subfacet with the given 'key', 'key_len', and
3802 * 'flow'. Returns the subfacet if one exists, otherwise NULL. */
3803static struct subfacet *
3804subfacet_find(struct ofproto_dpif *ofproto,
6a542738 3805 const struct nlattr *key, size_t key_len)
b0f7b9b5
BP
3806{
3807 uint32_t key_hash = odp_flow_key_hash(key, key_len);
6a542738
PS
3808 enum odp_key_fitness fitness;
3809 struct flow flow;
3810
3811 fitness = odp_flow_key_to_flow(key, key_len, &flow);
3812 if (fitness == ODP_FIT_ERROR) {
3813 return NULL;
3814 }
b0f7b9b5 3815
6a542738 3816 return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
b0f7b9b5
BP
3817}
3818
3819/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
3820 * its facet within 'ofproto', and frees it. */
3821static void
15baa734 3822subfacet_destroy__(struct subfacet *subfacet)
b0f7b9b5 3823{
15baa734
BP
3824 struct facet *facet = subfacet->facet;
3825 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3826
3827 subfacet_uninstall(subfacet);
b0f7b9b5
BP
3828 hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
3829 list_remove(&subfacet->list_node);
3830 free(subfacet->key);
b95fc6ba 3831 free(subfacet->actions);
b0f7b9b5
BP
3832 free(subfacet);
3833}
3834
3835/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
3836 * last remaining subfacet in its facet destroys the facet too. */
3837static void
15baa734 3838subfacet_destroy(struct subfacet *subfacet)
b0f7b9b5
BP
3839{
3840 struct facet *facet = subfacet->facet;
3841
551a2f6c
BP
3842 if (list_is_singleton(&facet->subfacets)) {
3843 /* facet_remove() needs at least one subfacet (it will remove it). */
15baa734 3844 facet_remove(facet);
551a2f6c 3845 } else {
15baa734 3846 subfacet_destroy__(subfacet);
b0f7b9b5
BP
3847 }
3848}
3849
3850/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
3851 * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
3852 * for use as temporary storage. */
3853static void
3854subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
3855 struct ofpbuf *key)
3856{
3857 if (!subfacet->key) {
3858 ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
3859 odp_flow_key_from_flow(key, &subfacet->facet->flow);
3860 } else {
3861 ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
3862 }
3863}
3864
b95fc6ba
BP
3865/* Composes the datapath actions for 'subfacet' based on its rule's actions. */
3866static void
15baa734 3867subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet)
b95fc6ba
BP
3868{
3869 struct facet *facet = subfacet->facet;
18b2a258 3870 struct rule_dpif *rule = facet->rule;
15baa734 3871 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
b95fc6ba
BP
3872 struct ofpbuf *odp_actions;
3873 struct action_xlate_ctx ctx;
3874
15baa734 3875 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
0e553d9c 3876 rule, 0, packet);
b95fc6ba
BP
3877 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
3878 facet->tags = ctx.tags;
3879 facet->may_install = ctx.may_set_up_flow;
3880 facet->has_learn = ctx.has_learn;
3881 facet->has_normal = ctx.has_normal;
0e553d9c 3882 facet->has_fin_timeout = ctx.has_fin_timeout;
b95fc6ba 3883 facet->nf_flow.output_iface = ctx.nf_output_iface;
9d24de3b 3884 facet->mirrors = ctx.mirrors;
b95fc6ba
BP
3885
3886 if (subfacet->actions_len != odp_actions->size
3887 || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
3888 free(subfacet->actions);
3889 subfacet->actions_len = odp_actions->size;
3890 subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
3891 }
3892
3893 ofpbuf_delete(odp_actions);
3894}
3895
b0f7b9b5
BP
3896/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
3897 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
3898 * in the datapath will be zeroed and 'stats' will be updated with traffic new
3899 * since 'subfacet' was last updated.
3900 *
3901 * Returns 0 if successful, otherwise a positive errno value. */
3902static int
15baa734 3903subfacet_install(struct subfacet *subfacet,
b0f7b9b5
BP
3904 const struct nlattr *actions, size_t actions_len,
3905 struct dpif_flow_stats *stats)
3906{
15baa734
BP
3907 struct facet *facet = subfacet->facet;
3908 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
3909 struct odputil_keybuf keybuf;
3910 enum dpif_flow_put_flags flags;
3911 struct ofpbuf key;
3912 int ret;
3913
3914 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
3915 if (stats) {
3916 flags |= DPIF_FP_ZERO_STATS;
3917 }
3918
3919 subfacet_get_key(subfacet, &keybuf, &key);
3920 ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
3921 actions, actions_len, stats);
3922
3923 if (stats) {
3924 subfacet_reset_dp_stats(subfacet, stats);
3925 }
3926
3927 return ret;
3928}
3929
3930/* If 'subfacet' is installed in the datapath, uninstalls it. */
3931static void
15baa734 3932subfacet_uninstall(struct subfacet *subfacet)
b0f7b9b5
BP
3933{
3934 if (subfacet->installed) {
15baa734
BP
3935 struct rule_dpif *rule = subfacet->facet->rule;
3936 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
b0f7b9b5
BP
3937 struct odputil_keybuf keybuf;
3938 struct dpif_flow_stats stats;
3939 struct ofpbuf key;
3940 int error;
3941
3942 subfacet_get_key(subfacet, &keybuf, &key);
15baa734 3943 error = dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
b0f7b9b5
BP
3944 subfacet_reset_dp_stats(subfacet, &stats);
3945 if (!error) {
15baa734 3946 subfacet_update_stats(subfacet, &stats);
b0f7b9b5
BP
3947 }
3948 subfacet->installed = false;
3949 } else {
3950 assert(subfacet->dp_packet_count == 0);
3951 assert(subfacet->dp_byte_count == 0);
3952 }
3953}
3954
3955/* Resets 'subfacet''s datapath statistics counters. This should be called
3956 * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is
3957 * non-null, it should contain the statistics returned by dpif when 'subfacet'
3958 * was reset in the datapath. 'stats' will be modified to include only
3959 * statistics new since 'subfacet' was last updated. */
3960static void
3961subfacet_reset_dp_stats(struct subfacet *subfacet,
3962 struct dpif_flow_stats *stats)
3963{
3964 if (stats
3965 && subfacet->dp_packet_count <= stats->n_packets
3966 && subfacet->dp_byte_count <= stats->n_bytes) {
3967 stats->n_packets -= subfacet->dp_packet_count;
3968 stats->n_bytes -= subfacet->dp_byte_count;
3969 }
3970
3971 subfacet->dp_packet_count = 0;
3972 subfacet->dp_byte_count = 0;
3973}
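
/* Worked example with hypothetical numbers: if the datapath reports
 * n_packets = 100 while subfacet->dp_packet_count is 60, those 60 packets
 * were already folded in earlier, so 'stats' is trimmed to the 40 genuinely
 * new packets.  If the datapath counters appear to have gone backward, the
 * reported stats are left untouched rather than made negative. */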
3974
3975/* Updates 'subfacet''s used time. The caller is responsible for calling
3976 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
3977static void
15baa734 3978subfacet_update_time(struct subfacet *subfacet, long long int used)
b0f7b9b5
BP
3979{
3980 if (used > subfacet->used) {
3981 subfacet->used = used;
15baa734 3982 facet_update_time(subfacet->facet, used);
b0f7b9b5
BP
3983 }
3984}
3985
3986/* Folds the statistics from 'stats' into the counters in 'subfacet'.
3987 *
3988 * Because of the meaning of a subfacet's counters, it only makes sense to do
3989 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
3990 * represents a packet that was sent by hand or if it represents statistics
3991 * that have been cleared out of the datapath. */
3992static void
15baa734 3993subfacet_update_stats(struct subfacet *subfacet,
b0f7b9b5
BP
3994 const struct dpif_flow_stats *stats)
3995{
3996 if (stats->n_packets || stats->used > subfacet->used) {
3997 struct facet *facet = subfacet->facet;
3998
15baa734 3999 subfacet_update_time(subfacet, stats->used);
b0f7b9b5
BP
4000 facet->packet_count += stats->n_packets;
4001 facet->byte_count += stats->n_bytes;
0e553d9c 4002 facet->tcp_flags |= stats->tcp_flags;
b0f7b9b5
BP
4003 facet_push_stats(facet);
4004 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
4005 }
4006}
4007\f
abe529af
BP
4008/* Rules. */
4009
4010static struct rule_dpif *
29901626
BP
4011rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
4012 uint8_t table_id)
abe529af 4013{
7257b535
BP
4014 struct cls_rule *cls_rule;
4015 struct classifier *cls;
4016
9cdaaebe
BP
4017 if (table_id >= N_TABLES) {
4018 return NULL;
4019 }
4020
d0918789 4021 cls = &ofproto->up.tables[table_id].cls;
eadef313 4022 if (flow->nw_frag & FLOW_NW_FRAG_ANY
7257b535
BP
4023 && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
4024 /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
4025 * are unavailable. */
4026 struct flow ofpc_normal_flow = *flow;
4027 ofpc_normal_flow.tp_src = htons(0);
4028 ofpc_normal_flow.tp_dst = htons(0);
4029 cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
4030 } else {
4031 cls_rule = classifier_lookup(cls, flow);
4032 }
4033 return rule_dpif_cast(rule_from_cls_rule(cls_rule));
abe529af
BP
4034}
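
/* For example, under OFPC_FRAG_NORMAL an IP fragment is looked up as if
 * both transport ports were zero, so it can only match rules that wildcard
 * (or match zero in) tp_src and tp_dst. */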
4035
7ee20df1
BP
4036static void
4037complete_operation(struct rule_dpif *rule)
4038{
4039 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4040
54a9cbc9 4041 rule_invalidate(rule);
7ee20df1
BP
4042 if (clogged) {
4043 struct dpif_completion *c = xmalloc(sizeof *c);
4044 c->op = rule->up.pending;
4045 list_push_back(&ofproto->completions, &c->list_node);
4046 } else {
4047 ofoperation_complete(rule->up.pending, 0);
4048 }
4049}
4050
abe529af
BP
4051static struct rule *
4052rule_alloc(void)
4053{
4054 struct rule_dpif *rule = xmalloc(sizeof *rule);
4055 return &rule->up;
4056}
4057
4058static void
4059rule_dealloc(struct rule *rule_)
4060{
4061 struct rule_dpif *rule = rule_dpif_cast(rule_);
4062 free(rule);
4063}
4064
90bf1e07 4065static enum ofperr
abe529af
BP
4066rule_construct(struct rule *rule_)
4067{
4068 struct rule_dpif *rule = rule_dpif_cast(rule_);
4069 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7ee20df1 4070 struct rule_dpif *victim;
54a9cbc9 4071 uint8_t table_id;
90bf1e07 4072 enum ofperr error;
5bf0e941
BP
4073
4074 error = validate_actions(rule->up.actions, rule->up.n_actions,
4075 &rule->up.cr.flow, ofproto->max_ports);
4076 if (error) {
4077 return error;
4078 }
abe529af 4079
abe529af
BP
4080 rule->packet_count = 0;
4081 rule->byte_count = 0;
abe529af 4082
7ee20df1
BP
4083 victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
4084 if (victim && !list_is_empty(&victim->facets)) {
4085 struct facet *facet;
4086
4087 rule->facets = victim->facets;
4088 list_moved(&rule->facets);
4089 LIST_FOR_EACH (facet, list_node, &rule->facets) {
bbb5d219
EJ
4090 /* XXX: We're only clearing our local counters here. It's possible
4091 * that quite a few packets are unaccounted for in the datapath
4092 * statistics. These will be accounted to the new rule instead of
4093 * cleared as required. This could be fixed by clearing out the
4094 * datapath statistics for this facet, but currently it doesn't
4095 * seem worth it. */
4096 facet_reset_counters(facet);
7ee20df1
BP
4097 facet->rule = rule;
4098 }
4099 } else {
4100 /* Must avoid list_moved() in this case. */
4101 list_init(&rule->facets);
4102 }
abe529af 4103
54a9cbc9
BP
4104 table_id = rule->up.table_id;
4105 rule->tag = (victim ? victim->tag
4106 : table_id == 0 ? 0
4107 : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
4108 ofproto->tables[table_id].basis));
4109
7ee20df1 4110 complete_operation(rule);
abe529af
BP
4111 return 0;
4112}
4113
4114static void
4115rule_destruct(struct rule *rule_)
4116{
4117 struct rule_dpif *rule = rule_dpif_cast(rule_);
abe529af
BP
4118 struct facet *facet, *next_facet;
4119
abe529af 4120 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 4121 facet_revalidate(facet);
abe529af 4122 }
7ee20df1
BP
4123
4124 complete_operation(rule);
abe529af
BP
4125}
4126
4127static void
4128rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
4129{
4130 struct rule_dpif *rule = rule_dpif_cast(rule_);
4131 struct facet *facet;
4132
4133 /* Start from historical data for 'rule' itself that are no longer tracked
4134 * in facets. This counts, for example, facets that have expired. */
4135 *packets = rule->packet_count;
4136 *bytes = rule->byte_count;
4137
4138 /* Add any statistics that are tracked by facets. This includes
4139 * statistical data recently updated by ofproto_update_stats() as well as
4140 * stats for packets that were executed "by hand" via dpif_execute(). */
4141 LIST_FOR_EACH (facet, list_node, &rule->facets) {
4142 *packets += facet->packet_count;
4143 *bytes += facet->byte_count;
4144 }
4145}
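
/* Worked example with hypothetical numbers: if rule->packet_count is 40
 * (from facets that have already expired) and the rule currently has two
 * facets with packet counts 10 and 25, rule_get_stats() reports 75. */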
4146
90bf1e07 4147static enum ofperr
59d0f2c8
BP
4148rule_execute(struct rule *rule_, const struct flow *flow,
4149 struct ofpbuf *packet)
abe529af
BP
4150{
4151 struct rule_dpif *rule = rule_dpif_cast(rule_);
4152 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4153 struct action_xlate_ctx ctx;
4154 struct ofpbuf *odp_actions;
abe529af
BP
4155 size_t size;
4156
54834960 4157 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
0e553d9c 4158 rule, packet_get_tcp_flags(packet, flow), packet);
abe529af
BP
4159 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
4160 size = packet->size;
4161 if (execute_odp_actions(ofproto, flow, odp_actions->data,
4162 odp_actions->size, packet)) {
abe529af
BP
4163 rule->packet_count++;
4164 rule->byte_count += size;
f3b50afb 4165 flow_push_stats(rule, flow, 1, size, time_msec());
abe529af
BP
4166 }
4167 ofpbuf_delete(odp_actions);
5bf0e941
BP
4168
4169 return 0;
abe529af
BP
4170}
4171
7ee20df1
BP
4172static void
4173rule_modify_actions(struct rule *rule_)
abe529af
BP
4174{
4175 struct rule_dpif *rule = rule_dpif_cast(rule_);
4176 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
90bf1e07 4177 enum ofperr error;
abe529af 4178
7ee20df1
BP
4179 error = validate_actions(rule->up.actions, rule->up.n_actions,
4180 &rule->up.cr.flow, ofproto->max_ports);
4181 if (error) {
4182 ofoperation_complete(rule->up.pending, error);
4183 return;
abe529af 4184 }
7ee20df1
BP
4185
4186 complete_operation(rule);
abe529af
BP
4187}
4188\f
97d6520b 4189/* Sends 'packet' out 'ofport'.
52a90c29 4190 * May modify 'packet'.
abe529af
BP
4191 * Returns 0 if successful, otherwise a positive errno value. */
4192static int
52a90c29 4193send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
abe529af 4194{
97d6520b 4195 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
80e5eed9
BP
4196 struct ofpbuf key, odp_actions;
4197 struct odputil_keybuf keybuf;
52a90c29 4198 uint16_t odp_port;
80e5eed9 4199 struct flow flow;
abe529af
BP
4200 int error;
4201
abff858b 4202 flow_extract((struct ofpbuf *) packet, 0, 0, 0, &flow);
52a90c29
BP
4203 odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
4204 flow.vlan_tci);
4205 if (odp_port != ofport->odp_port) {
4206 eth_pop_vlan(packet);
4207 flow.vlan_tci = htons(0);
4208 }
4209
80e5eed9
BP
4210 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
4211 odp_flow_key_from_flow(&key, &flow);
4212
abe529af 4213 ofpbuf_init(&odp_actions, 32);
6ff686f2
PS
4214 compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
4215
df2c07f4 4216 nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
80e5eed9
BP
4217 error = dpif_execute(ofproto->dpif,
4218 key.data, key.size,
4219 odp_actions.data, odp_actions.size,
abe529af
BP
4220 packet);
4221 ofpbuf_uninit(&odp_actions);
4222
4223 if (error) {
4224 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
4225 ofproto->up.name, odp_port, strerror(error));
4226 }
6527c598 4227 ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
abe529af
BP
4228 return error;
4229}
4230\f
df2c07f4 4231/* OpenFlow to datapath action translation. */
abe529af
BP
4232
4233static void do_xlate_actions(const union ofp_action *in, size_t n_in,
4234 struct action_xlate_ctx *ctx);
4cd78906 4235static void xlate_normal(struct action_xlate_ctx *);
abe529af 4236
98403001
BP
4237static size_t
4238put_userspace_action(const struct ofproto_dpif *ofproto,
4239 struct ofpbuf *odp_actions,
4240 const struct flow *flow,
4241 const struct user_action_cookie *cookie)
4242{
98403001
BP
4243 uint32_t pid;
4244
4245 pid = dpif_port_get_pid(ofproto->dpif,
4246 ofp_port_to_odp_port(flow->in_port));
4247
39db78a0 4248 return odp_put_userspace_action(pid, cookie, odp_actions);
98403001
BP
4249}
4250
6ff686f2
PS
4251/* Compose SAMPLE action for sFlow. */
4252static size_t
4253compose_sflow_action(const struct ofproto_dpif *ofproto,
4254 struct ofpbuf *odp_actions,
4255 const struct flow *flow,
4256 uint32_t odp_port)
4257{
4258 uint32_t port_ifindex;
4259 uint32_t probability;
98403001 4260 struct user_action_cookie cookie;
6ff686f2 4261 size_t sample_offset, actions_offset;
98403001 4262 int cookie_offset, n_output;
6ff686f2
PS
4263
4264 if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
4265 return 0;
4266 }
4267
4268 if (odp_port == OVSP_NONE) {
4269 port_ifindex = 0;
4270 n_output = 0;
4271 } else {
4272 port_ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
4273 n_output = 1;
4274 }
4275
4276 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
4277
4278 /* Number of packets out of UINT_MAX to sample. */
4279 probability = dpif_sflow_get_probability(ofproto->sflow);
4280 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
4281
4282 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
4283
98403001
BP
4284 cookie.type = USER_ACTION_COOKIE_SFLOW;
4285 cookie.data = port_ifindex;
4286 cookie.n_output = n_output;
4287 cookie.vlan_tci = 0;
4288 cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
6ff686f2
PS
4289
4290 nl_msg_end_nested(odp_actions, actions_offset);
4291 nl_msg_end_nested(odp_actions, sample_offset);
98403001 4292 return cookie_offset;
6ff686f2
PS
4293}
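
/* Schematically, the attributes composed above nest as:
 *
 *     OVS_ACTION_ATTR_SAMPLE
 *         OVS_SAMPLE_ATTR_PROBABILITY    (u32, out of UINT_MAX)
 *         OVS_SAMPLE_ATTR_ACTIONS
 *             OVS_ACTION_ATTR_USERSPACE  (pid + sFlow cookie)
 *
 * The returned offset locates the cookie so that fix_sflow_action() can
 * patch it once the real output port and VLAN are known. */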
4294
4295/* The SAMPLE action must be the first action in any given list of actions.
4296 * At this point we do not yet have all of the information required to
4297 * build it, so we build the sample action as completely as possible. */
4298static void
4299add_sflow_action(struct action_xlate_ctx *ctx)
4300{
4301 ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
4302 ctx->odp_actions,
4303 &ctx->flow, OVSP_NONE);
4304 ctx->sflow_odp_port = 0;
4305 ctx->sflow_n_outputs = 0;
4306}
4307
4308/* Fixes the SAMPLE action according to data collected while composing ODP
4309 * actions, i.e. patches the nested USERSPACE action's user-cookie (inside
4310 * OVS_SAMPLE_ATTR_ACTIONS), which sFlow requires. */
4311static void
4312fix_sflow_action(struct action_xlate_ctx *ctx)
4313{
4314 const struct flow *base = &ctx->base_flow;
4315 struct user_action_cookie *cookie;
4316
4317 if (!ctx->user_cookie_offset) {
4318 return;
4319 }
4320
4321 cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
4322 sizeof(*cookie));
4323 assert(cookie != NULL);
4324 assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
4325
4326 if (ctx->sflow_n_outputs) {
4327 cookie->data = dpif_sflow_odp_port_to_ifindex(ctx->ofproto->sflow,
4328 ctx->sflow_odp_port);
4329 }
4330 if (ctx->sflow_n_outputs >= 255) {
4331 cookie->n_output = 255;
4332 } else {
4333 cookie->n_output = ctx->sflow_n_outputs;
4334 }
4335 cookie->vlan_tci = base->vlan_tci;
4336}
4337
6ff686f2 4338static void
81b1afb1
EJ
4339compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
4340 bool check_stp)
6ff686f2 4341{
d59906fb 4342 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
5e48dc2b 4343 uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
52a90c29 4344 ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
8b36f51e 4345 uint8_t flow_nw_tos = ctx->flow.nw_tos;
52a90c29 4346 uint16_t out_port;
d59906fb 4347
81b1afb1 4348 if (ofport) {
8b36f51e
EJ
4349 struct priority_to_dscp *pdscp;
4350
9e1fd49b 4351 if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD
81b1afb1
EJ
4352 || (check_stp && !stp_forward_in_state(ofport->stp_state))) {
4353 return;
4354 }
8b36f51e 4355
deedf7e7 4356 pdscp = get_priority(ofport, ctx->flow.skb_priority);
8b36f51e
EJ
4357 if (pdscp) {
4358 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
4359 ctx->flow.nw_tos |= pdscp->dscp;
4360 }
81b1afb1
EJ
4361 } else {
4362 /* We may not have an ofport record for this port, but it doesn't hurt
4363 * to allow forwarding to it anyhow. Maybe such a port will appear
4364 * later and we're pre-populating the flow table. */
d59906fb
EJ
4365 }
4366
52a90c29
BP
4367 out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
4368 ctx->flow.vlan_tci);
4369 if (out_port != odp_port) {
4370 ctx->flow.vlan_tci = htons(0);
4371 }
5bbda0aa 4372 commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
52a90c29
BP
4373 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
4374
6ff686f2
PS
4375 ctx->sflow_odp_port = odp_port;
4376 ctx->sflow_n_outputs++;
81b1afb1 4377 ctx->nf_output_iface = ofp_port;
52a90c29 4378 ctx->flow.vlan_tci = flow_vlan_tci;
8b36f51e 4379 ctx->flow.nw_tos = flow_nw_tos;
6ff686f2
PS
4380}
4381
abe529af 4382static void
5e48dc2b 4383compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
abe529af 4384{
81b1afb1 4385 compose_output_action__(ctx, ofp_port, true);
abe529af
BP
4386}
4387
4388static void
29901626
BP
4389xlate_table_action(struct action_xlate_ctx *ctx,
4390 uint16_t in_port, uint8_t table_id)
abe529af
BP
4391{
4392 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
54a9cbc9 4393 struct ofproto_dpif *ofproto = ctx->ofproto;
abe529af
BP
4394 struct rule_dpif *rule;
4395 uint16_t old_in_port;
29901626
BP
4396 uint8_t old_table_id;
4397
4398 old_table_id = ctx->table_id;
4399 ctx->table_id = table_id;
abe529af 4400
54a9cbc9 4401 /* Look up a flow with 'in_port' as the input port. */
abe529af
BP
4402 old_in_port = ctx->flow.in_port;
4403 ctx->flow.in_port = in_port;
54a9cbc9
BP
4404 rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id);
4405
4406 /* Tag the flow. */
4407 if (table_id > 0 && table_id < N_TABLES) {
4408 struct table_dpif *table = &ofproto->tables[table_id];
4409 if (table->other_table) {
4410 ctx->tags |= (rule
4411 ? rule->tag
4412 : rule_calculate_tag(&ctx->flow,
4413 &table->other_table->wc,
4414 table->basis));
4415 }
4416 }
4417
4418 /* Restore the original input port. Otherwise OFPP_NORMAL and
4419 * OFPP_IN_PORT will have surprising behavior. */
abe529af
BP
4420 ctx->flow.in_port = old_in_port;
4421
4422 if (ctx->resubmit_hook) {
4423 ctx->resubmit_hook(ctx, rule);
4424 }
4425
4426 if (rule) {
18b2a258 4427 struct rule_dpif *old_rule = ctx->rule;
54834960 4428
abe529af 4429 ctx->recurse++;
18b2a258 4430 ctx->rule = rule;
abe529af 4431 do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
18b2a258 4432 ctx->rule = old_rule;
abe529af
BP
4433 ctx->recurse--;
4434 }
29901626
BP
4435
4436 ctx->table_id = old_table_id;
abe529af
BP
4437 } else {
4438 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
4439
29901626 4440 VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
abe529af 4441 MAX_RESUBMIT_RECURSION);
6a6455e5 4442 ctx->max_resubmit_trigger = true;
abe529af
BP
4443 }
4444}
4445
29901626
BP
4446static void
4447xlate_resubmit_table(struct action_xlate_ctx *ctx,
4448 const struct nx_action_resubmit *nar)
4449{
4450 uint16_t in_port;
4451 uint8_t table_id;
4452
4453 in_port = (nar->in_port == htons(OFPP_IN_PORT)
4454 ? ctx->flow.in_port
4455 : ntohs(nar->in_port));
4456 table_id = nar->table == 255 ? ctx->table_id : nar->table;
4457
4458 xlate_table_action(ctx, in_port, table_id);
4459}
4460
abe529af 4461static void
d59906fb 4462flood_packets(struct action_xlate_ctx *ctx, bool all)
abe529af
BP
4463{
4464 struct ofport_dpif *ofport;
4465
b3e9b2ed 4466 HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
abe529af 4467 uint16_t ofp_port = ofport->up.ofp_port;
d59906fb
EJ
4468
4469 if (ofp_port == ctx->flow.in_port) {
4470 continue;
4471 }
4472
5e48dc2b 4473 if (all) {
81b1afb1 4474 compose_output_action__(ctx, ofp_port, false);
9e1fd49b 4475 } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
5e48dc2b 4476 compose_output_action(ctx, ofp_port);
abe529af
BP
4477 }
4478 }
b3e9b2ed
EJ
4479
4480 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af
BP
4481}
4482
6ff686f2 4483static void
f0fd1a17 4484execute_controller_action(struct action_xlate_ctx *ctx, int len,
a7349929
BP
4485 enum ofp_packet_in_reason reason,
4486 uint16_t controller_id)
6ff686f2 4487{
999fba59
EJ
4488 struct ofputil_packet_in pin;
4489 struct ofpbuf *packet;
6ff686f2 4490
999fba59
EJ
4491 ctx->may_set_up_flow = false;
4492 if (!ctx->packet) {
4493 return;
4494 }
4495
4496 packet = ofpbuf_clone(ctx->packet);
4497
4498 if (packet->l2 && packet->l3) {
4499 struct eth_header *eh;
4500
4501 eth_pop_vlan(packet);
4502 eh = packet->l2;
4503 assert(eh->eth_type == ctx->flow.dl_type);
4504 memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
4505 memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
4506
4507 if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
4508 eth_push_vlan(packet, ctx->flow.vlan_tci);
4509 }
4510
4511 if (packet->l4) {
4512 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
4513 packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
4514 ctx->flow.nw_tos, ctx->flow.nw_ttl);
4515 }
4516
4517 if (packet->l7) {
4518 if (ctx->flow.nw_proto == IPPROTO_TCP) {
4519 packet_set_tcp_port(packet, ctx->flow.tp_src,
4520 ctx->flow.tp_dst);
4521 } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
4522 packet_set_udp_port(packet, ctx->flow.tp_src,
4523 ctx->flow.tp_dst);
4524 }
4525 }
4526 }
4527 }
4528
4529 pin.packet = packet->data;
4530 pin.packet_len = packet->size;
f0fd1a17 4531 pin.reason = reason;
a7349929 4532 pin.controller_id = controller_id;
54834960 4533 pin.table_id = ctx->table_id;
18b2a258 4534 pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
54834960 4535
999fba59 4536 pin.send_len = len;
999fba59
EJ
4537 flow_get_metadata(&ctx->flow, &pin.fmd);
4538
d8653c38 4539 connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
999fba59 4540 ofpbuf_delete(packet);
6ff686f2
PS
4541}
4542
f0fd1a17
PS
4543static bool
4544compose_dec_ttl(struct action_xlate_ctx *ctx)
4545{
4546 if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
4547 ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
4548 return false;
4549 }
4550
4551 if (ctx->flow.nw_ttl > 1) {
4552 ctx->flow.nw_ttl--;
4553 return false;
4554 } else {
a7349929 4555 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
f0fd1a17
PS
4556
4557 /* Stop processing for current table. */
4558 return true;
4559 }
4560}
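
/* For example: an IPv4 or IPv6 packet with nw_ttl 64 continues translation
 * with nw_ttl 63, while one arriving with nw_ttl 1 is instead sent to the
 * controller with reason OFPR_INVALID_TTL and processing of the current
 * table stops.  Non-IP packets are passed through unchanged. */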
4561
abe529af
BP
4562static void
4563xlate_output_action__(struct action_xlate_ctx *ctx,
4564 uint16_t port, uint16_t max_len)
4565{
4566 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
4567
4568 ctx->nf_output_iface = NF_OUT_DROP;
4569
4570 switch (port) {
4571 case OFPP_IN_PORT:
81b1afb1 4572 compose_output_action(ctx, ctx->flow.in_port);
abe529af
BP
4573 break;
4574 case OFPP_TABLE:
29901626 4575 xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
abe529af
BP
4576 break;
4577 case OFPP_NORMAL:
4578 xlate_normal(ctx);
4579 break;
4580 case OFPP_FLOOD:
d59906fb 4581 flood_packets(ctx, false);
abe529af
BP
4582 break;
4583 case OFPP_ALL:
d59906fb 4584 flood_packets(ctx, true);
abe529af
BP
4585 break;
4586 case OFPP_CONTROLLER:
a7349929 4587 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
abe529af 4588 break;
e81d2933
EJ
4589 case OFPP_NONE:
4590 break;
a0fbe94a 4591 case OFPP_LOCAL:
abe529af
BP
4592 default:
4593 if (port != ctx->flow.in_port) {
81b1afb1 4594 compose_output_action(ctx, port);
abe529af
BP
4595 }
4596 break;
4597 }
4598
4599 if (prev_nf_output_iface == NF_OUT_FLOOD) {
4600 ctx->nf_output_iface = NF_OUT_FLOOD;
4601 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4602 ctx->nf_output_iface = prev_nf_output_iface;
4603 } else if (prev_nf_output_iface != NF_OUT_DROP &&
4604 ctx->nf_output_iface != NF_OUT_FLOOD) {
4605 ctx->nf_output_iface = NF_OUT_MULTI;
4606 }
4607}
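
/* In summary, the NetFlow bookkeeping above works out as: the first port
 * output sets nf_output_iface to that port, a second distinct output
 * collapses it to NF_OUT_MULTI, and a flood anywhere in the action list
 * forces NF_OUT_FLOOD. */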
4608
f694937d
EJ
4609static void
4610xlate_output_reg_action(struct action_xlate_ctx *ctx,
4611 const struct nx_action_output_reg *naor)
4612{
816fd533 4613 struct mf_subfield src;
f694937d
EJ
4614 uint64_t ofp_port;
4615
816fd533
BP
4616 nxm_decode(&src, naor->src, naor->ofs_nbits);
4617 ofp_port = mf_get_subfield(&src, &ctx->flow);
f694937d
EJ
4618
4619 if (ofp_port <= UINT16_MAX) {
4620 xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len));
4621 }
4622}
4623
abe529af
BP
4624static void
4625xlate_output_action(struct action_xlate_ctx *ctx,
4626 const struct ofp_action_output *oao)
4627{
4628 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
4629}
4630
abe529af
BP
4631static void
4632xlate_enqueue_action(struct action_xlate_ctx *ctx,
4633 const struct ofp_action_enqueue *oae)
4634{
e479e41e 4635 uint16_t ofp_port;
abff858b 4636 uint32_t flow_priority, priority;
abe529af
BP
4637 int error;
4638
4639 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
4640 &priority);
4641 if (error) {
4642 /* Fall back to ordinary output action. */
4643 xlate_output_action__(ctx, ntohs(oae->port), 0);
4644 return;
4645 }
4646
df2c07f4 4647 /* Figure out datapath output port. */
abe529af
BP
4648 ofp_port = ntohs(oae->port);
4649 if (ofp_port == OFPP_IN_PORT) {
4650 ofp_port = ctx->flow.in_port;
8ba855c1
BP
4651 } else if (ofp_port == ctx->flow.in_port) {
4652 return;
abe529af 4653 }
abe529af 4654
df2c07f4 4655 /* Add datapath actions. */
deedf7e7
BP
4656 flow_priority = ctx->flow.skb_priority;
4657 ctx->flow.skb_priority = priority;
81b1afb1 4658 compose_output_action(ctx, ofp_port);
deedf7e7 4659 ctx->flow.skb_priority = flow_priority;
abe529af
BP
4660
4661 /* Update NetFlow output port. */
4662 if (ctx->nf_output_iface == NF_OUT_DROP) {
4b23aebf 4663 ctx->nf_output_iface = ofp_port;
abe529af
BP
4664 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
4665 ctx->nf_output_iface = NF_OUT_MULTI;
4666 }
4667}
4668
4669static void
4670xlate_set_queue_action(struct action_xlate_ctx *ctx,
4671 const struct nx_action_set_queue *nasq)
4672{
4673 uint32_t priority;
4674 int error;
4675
4676 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
4677 &priority);
4678 if (error) {
4679 /* Couldn't translate queue to a priority, so ignore. A warning
4680 * has already been logged. */
4681 return;
4682 }
4683
deedf7e7 4684 ctx->flow.skb_priority = priority;
abe529af
BP
4685}
4686
4687struct xlate_reg_state {
4688 ovs_be16 vlan_tci;
4689 ovs_be64 tun_id;
4690};
4691
abe529af
BP
4692static void
4693xlate_autopath(struct action_xlate_ctx *ctx,
4694 const struct nx_action_autopath *naa)
4695{
4696 uint16_t ofp_port = ntohl(naa->id);
4697 struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
4698
4699 if (!port || !port->bundle) {
4700 ofp_port = OFPP_NONE;
4701 } else if (port->bundle->bond) {
4702 /* Autopath does not support VLAN hashing. */
4703 struct ofport_dpif *slave = bond_choose_output_slave(
dc155bff 4704 port->bundle->bond, &ctx->flow, 0, &ctx->tags);
abe529af
BP
4705 if (slave) {
4706 ofp_port = slave->up.ofp_port;
4707 }
4708 }
4709 autopath_execute(naa, &ctx->flow, ofp_port);
4710}
4711
daff3353
EJ
4712static bool
4713slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
4714{
4715 struct ofproto_dpif *ofproto = ofproto_;
4716 struct ofport_dpif *port;
4717
4718 switch (ofp_port) {
4719 case OFPP_IN_PORT:
4720 case OFPP_TABLE:
4721 case OFPP_NORMAL:
4722 case OFPP_FLOOD:
4723 case OFPP_ALL:
439e4d8c 4724 case OFPP_NONE:
daff3353
EJ
4725 return true;
4726 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
4727 return false;
4728 default:
4729 port = get_ofp_port(ofproto, ofp_port);
4730 return port ? port->may_enable : false;
4731 }
4732}
4733
75a75043
BP
4734static void
4735xlate_learn_action(struct action_xlate_ctx *ctx,
4736 const struct nx_action_learn *learn)
4737{
4738 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
4739 struct ofputil_flow_mod fm;
4740 int error;
4741
4742 learn_execute(learn, &ctx->flow, &fm);
4743
4744 error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
4745 if (error && !VLOG_DROP_WARN(&rl)) {
90bf1e07
BP
4746 VLOG_WARN("learning action failed to modify flow table (%s)",
4747 ofperr_get_name(error));
75a75043
BP
4748 }
4749
4750 free(fm.actions);
4751}
4752
0e553d9c
BP
4753/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
4754 * means "infinite". */
4755static void
4756reduce_timeout(uint16_t max, uint16_t *timeout)
4757{
4758 if (max && (!*timeout || *timeout > max)) {
4759 *timeout = max;
4760 }
4761}
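
/* For example (sketch):
 *
 *     uint16_t t = 0;                  // "infinite"
 *     reduce_timeout(10, &t);          // t becomes 10
 *     t = 30; reduce_timeout(10, &t);  // t becomes 10
 *     t = 5;  reduce_timeout(10, &t);  // t stays 5
 *     t = 30; reduce_timeout(0, &t);   // max 0 is "infinite": t stays 30
 */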
4762
4763static void
4764xlate_fin_timeout(struct action_xlate_ctx *ctx,
4765 const struct nx_action_fin_timeout *naft)
4766{
4767 if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
4768 struct rule_dpif *rule = ctx->rule;
4769
4770 reduce_timeout(ntohs(naft->fin_idle_timeout), &rule->up.idle_timeout);
4771 reduce_timeout(ntohs(naft->fin_hard_timeout), &rule->up.hard_timeout);
4772 }
4773}
4774
21f7563c
JP
4775static bool
4776may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
4777{
9e1fd49b
BP
4778 if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
4779 ? OFPUTIL_PC_NO_RECV_STP
4780 : OFPUTIL_PC_NO_RECV)) {
21f7563c
JP
4781 return false;
4782 }
4783
4784 /* Only drop packets here if both forwarding and learning are
4785 * disabled. If just learning is enabled, we must let OFPP_NORMAL
4786 * and the learning action have a look at the packet before we
4787 * can drop it. */
4788 if (!stp_forward_in_state(port->stp_state)
4789 && !stp_learn_in_state(port->stp_state)) {
4790 return false;
4791 }
4792
4793 return true;
4794}
4795
abe529af
BP
4796static void
4797do_xlate_actions(const union ofp_action *in, size_t n_in,
4798 struct action_xlate_ctx *ctx)
4799{
4800 const struct ofport_dpif *port;
abe529af 4801 const union ofp_action *ia;
254750ce 4802 bool was_evictable = true;
b4b8c781 4803 size_t left;
abe529af
BP
4804
4805 port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
21f7563c 4806 if (port && !may_receive(port, ctx)) {
abe529af
BP
4807 /* Drop this flow. */
4808 return;
4809 }
4810
254750ce
BP
4811 if (ctx->rule) {
4812 /* Don't let the rule we're working on get evicted underneath us. */
4813 was_evictable = ctx->rule->up.evictable;
4814 ctx->rule->up.evictable = false;
4815 }
b4b8c781 4816 OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
abe529af 4817 const struct ofp_action_dl_addr *oada;
38f2e360
BP
4818 const struct nx_action_resubmit *nar;
4819 const struct nx_action_set_tunnel *nast;
4820 const struct nx_action_set_queue *nasq;
4821 const struct nx_action_multipath *nam;
4822 const struct nx_action_autopath *naa;
daff3353 4823 const struct nx_action_bundle *nab;
f694937d 4824 const struct nx_action_output_reg *naor;
a7349929 4825 const struct nx_action_controller *nac;
38f2e360
BP
4826 enum ofputil_action_code code;
4827 ovs_be64 tun_id;
4828
848e8809
EJ
4829 if (ctx->exit) {
4830 break;
4831 }
4832
38f2e360
BP
4833 code = ofputil_decode_action_unsafe(ia);
4834 switch (code) {
08f94c0e 4835 case OFPUTIL_OFPAT10_OUTPUT:
abe529af
BP
4836 xlate_output_action(ctx, &ia->output);
4837 break;
4838
08f94c0e 4839 case OFPUTIL_OFPAT10_SET_VLAN_VID:
abe529af
BP
4840 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
4841 ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
abe529af
BP
4842 break;
4843
08f94c0e 4844 case OFPUTIL_OFPAT10_SET_VLAN_PCP:
abe529af
BP
4845 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
4846 ctx->flow.vlan_tci |= htons(
4847 (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
abe529af
BP
4848 break;
4849
08f94c0e 4850 case OFPUTIL_OFPAT10_STRIP_VLAN:
abe529af 4851 ctx->flow.vlan_tci = htons(0);
abe529af
BP
4852 break;
4853
08f94c0e 4854 case OFPUTIL_OFPAT10_SET_DL_SRC:
abe529af 4855 oada = ((struct ofp_action_dl_addr *) ia);
abe529af
BP
4856 memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
4857 break;
4858
08f94c0e 4859 case OFPUTIL_OFPAT10_SET_DL_DST:
abe529af 4860 oada = ((struct ofp_action_dl_addr *) ia);
abe529af
BP
4861 memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
4862 break;
4863
08f94c0e 4864 case OFPUTIL_OFPAT10_SET_NW_SRC:
abe529af
BP
4865 ctx->flow.nw_src = ia->nw_addr.nw_addr;
4866 break;
4867
08f94c0e 4868 case OFPUTIL_OFPAT10_SET_NW_DST:
abe529af
BP
4869 ctx->flow.nw_dst = ia->nw_addr.nw_addr;
4870 break;
4871
08f94c0e 4872 case OFPUTIL_OFPAT10_SET_NW_TOS:
c4f2731d
PS
4873 /* OpenFlow 1.0 only supports IPv4. */
4874 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
4875 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
4876 ctx->flow.nw_tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
4877 }
abe529af
BP
4878 break;
4879
08f94c0e 4880 case OFPUTIL_OFPAT10_SET_TP_SRC:
abe529af
BP
4881 ctx->flow.tp_src = ia->tp_port.tp_port;
4882 break;
4883
08f94c0e 4884 case OFPUTIL_OFPAT10_SET_TP_DST:
abe529af
BP
4885 ctx->flow.tp_dst = ia->tp_port.tp_port;
4886 break;
4887
08f94c0e 4888 case OFPUTIL_OFPAT10_ENQUEUE:
38f2e360
BP
4889 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
4890 break;
4891
4892 case OFPUTIL_NXAST_RESUBMIT:
4893 nar = (const struct nx_action_resubmit *) ia;
29901626
BP
4894 xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id);
4895 break;
4896
4897 case OFPUTIL_NXAST_RESUBMIT_TABLE:
4898 xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia);
abe529af
BP
4899 break;
4900
38f2e360
BP
4901 case OFPUTIL_NXAST_SET_TUNNEL:
4902 nast = (const struct nx_action_set_tunnel *) ia;
4903 tun_id = htonll(ntohl(nast->tun_id));
4904 ctx->flow.tun_id = tun_id;
4905 break;
4906
4907 case OFPUTIL_NXAST_SET_QUEUE:
4908 nasq = (const struct nx_action_set_queue *) ia;
4909 xlate_set_queue_action(ctx, nasq);
4910 break;
4911
4912 case OFPUTIL_NXAST_POP_QUEUE:
deedf7e7 4913 ctx->flow.skb_priority = ctx->orig_skb_priority;
38f2e360
BP
4914 break;
4915
4916 case OFPUTIL_NXAST_REG_MOVE:
4917 nxm_execute_reg_move((const struct nx_action_reg_move *) ia,
4918 &ctx->flow);
4919 break;
4920
4921 case OFPUTIL_NXAST_REG_LOAD:
4922 nxm_execute_reg_load((const struct nx_action_reg_load *) ia,
4923 &ctx->flow);
4924 break;
4925
4926 case OFPUTIL_NXAST_NOTE:
4927 /* Nothing to do. */
4928 break;
4929
4930 case OFPUTIL_NXAST_SET_TUNNEL64:
4931 tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id;
4932 ctx->flow.tun_id = tun_id;
4933 break;
4934
4935 case OFPUTIL_NXAST_MULTIPATH:
            nam = (const struct nx_action_multipath *) ia;
            multipath_execute(nam, &ctx->flow);
            break;

        case OFPUTIL_NXAST_AUTOPATH:
            naa = (const struct nx_action_autopath *) ia;
            xlate_autopath(ctx, naa);
            break;

        case OFPUTIL_NXAST_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            nab = (const struct nx_action_bundle *) ia;
            xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow,
                                                      slave_enabled_cb,
                                                      ctx->ofproto), 0);
            break;

        case OFPUTIL_NXAST_BUNDLE_LOAD:
            ctx->ofproto->has_bundle_action = true;
            nab = (const struct nx_action_bundle *) ia;
            bundle_execute_load(nab, &ctx->flow, slave_enabled_cb,
                                ctx->ofproto);
            break;

        case OFPUTIL_NXAST_OUTPUT_REG:
            naor = (const struct nx_action_output_reg *) ia;
            xlate_output_reg_action(ctx, naor);
            break;

        case OFPUTIL_NXAST_LEARN:
            ctx->has_learn = true;
            if (ctx->may_learn) {
                xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
            }
            break;

        case OFPUTIL_NXAST_DEC_TTL:
            if (compose_dec_ttl(ctx)) {
                goto out;
            }
            break;

        case OFPUTIL_NXAST_EXIT:
            ctx->exit = true;
            break;

        case OFPUTIL_NXAST_FIN_TIMEOUT:
            ctx->has_fin_timeout = true;
            xlate_fin_timeout(ctx, (const struct nx_action_fin_timeout *) ia);
            break;

        case OFPUTIL_NXAST_CONTROLLER:
            nac = (const struct nx_action_controller *) ia;
            execute_controller_action(ctx, ntohs(nac->max_len), nac->reason,
                                      ntohs(nac->controller_id));
            break;
        }
    }

out:
    /* We've let OFPP_NORMAL and the learning action look at the packet,
     * so drop it now if forwarding is disabled. */
    if (port && !stp_forward_in_state(port->stp_state)) {
        ofpbuf_clear(ctx->odp_actions);
        add_sflow_action(ctx);
    }
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}

static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
                      struct ofproto_dpif *ofproto, const struct flow *flow,
                      ovs_be16 initial_tci, struct rule_dpif *rule,
                      uint8_t tcp_flags, const struct ofpbuf *packet)
{
    ctx->ofproto = ofproto;
    ctx->flow = *flow;
    ctx->base_flow = ctx->flow;
    ctx->base_flow.tun_id = 0;
    ctx->base_flow.vlan_tci = initial_tci;
    ctx->rule = rule;
    ctx->packet = packet;
    ctx->may_learn = packet != NULL;
    ctx->tcp_flags = tcp_flags;
    ctx->resubmit_hook = NULL;
}

static struct ofpbuf *
xlate_actions(struct action_xlate_ctx *ctx,
              const union ofp_action *in, size_t n_in)
{
    struct flow orig_flow = ctx->flow;

    COVERAGE_INC(ofproto_dpif_xlate);

    ctx->odp_actions = ofpbuf_new(512);
    ofpbuf_reserve(ctx->odp_actions, NL_A_U32_SIZE);
    ctx->tags = 0;
    ctx->may_set_up_flow = true;
    ctx->has_learn = false;
    ctx->has_normal = false;
    ctx->has_fin_timeout = false;
    ctx->nf_output_iface = NF_OUT_DROP;
    ctx->mirrors = 0;
    ctx->recurse = 0;
    ctx->max_resubmit_trigger = false;
    ctx->orig_skb_priority = ctx->flow.skb_priority;
    ctx->table_id = 0;
    ctx->exit = false;

    if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx->ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
            ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return ctx->odp_actions;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
        ctx->may_set_up_flow = false;
        return ctx->odp_actions;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        struct flow original_flow = ctx->flow;
        ovs_be16 initial_tci = ctx->base_flow.vlan_tci;

        add_sflow_action(ctx);
        do_xlate_actions(in, n_in, ctx);

        if (ctx->max_resubmit_trigger && !ctx->resubmit_hook
            && !VLOG_DROP_ERR(&trace_rl)) {
            struct ds ds = DS_EMPTY_INITIALIZER;

            ofproto_trace(ctx->ofproto, &original_flow, ctx->packet,
                          initial_tci, &ds);
            VLOG_ERR("Trace triggered by excessive resubmit recursion:\n%s",
                     ds_cstr(&ds));
            ds_destroy(&ds);
        }

        if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
                                     ctx->odp_actions->data,
                                     ctx->odp_actions->size)) {
            ctx->may_set_up_flow = false;
            if (ctx->packet
                && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
                                       ctx->packet)) {
                compose_output_action(ctx, OFPP_LOCAL);
            }
        }
        add_mirror_actions(ctx, &orig_flow);
        fix_sflow_action(ctx);
    }

    return ctx->odp_actions;
}
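
/* Editor's note: the ofpbuf returned by xlate_actions() is heap-allocated;
 * callers in this file (packet_out() and ofproto_trace() below) release it
 * with ofpbuf_delete() once the datapath actions have been consumed. */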
\f
/* OFPP_NORMAL implementation. */

static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}

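/* Editor's sketch (illustrative, not in the original source): for a bundle
 * configured with VLAN 7, input_vid_to_vlan() yields:
 *
 *     vlan_mode                  vid=0    vid=42
 *     ---------                  -----    ------
 *     PORT_VLAN_ACCESS             7         7
 *     PORT_VLAN_TRUNK              0        42
 *     PORT_VLAN_NATIVE_*           7        42
 *
 * (A tagged packet on an access port never reaches this function in practice,
 * because input_vid_is_valid() below rejects it first.) */
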
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}

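/* Editor's note (illustrative, not in the original source):
 * input_vid_to_vlan() and output_vlan_to_vid() are designed to round-trip.
 * On a PORT_VLAN_NATIVE_UNTAGGED bundle whose native VLAN is 7, a packet on
 * VLAN 7 is emitted with VID 0 (i.e. untagged, unless a nonzero PCP forces a
 * tag), and a packet on any other trunked VLAN keeps its VID. */
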
static void
output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
                                        vid, &ctx->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = ctx->flow.vlan_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    ctx->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->flow.vlan_tci = old_tci;
}

static int
mirror_mask_ffs(mirror_mask_t mask)
{
    BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
    return ffs(mask);
}

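/* Editor's sketch (illustrative, not in the original source): mirror masks
 * are walked with the standard "clear lowest set bit" idiom, visiting each
 * configured mirror exactly once, from least to most significant bit:
 *
 *     for (; mirrors; mirrors &= mirrors - 1) {
 *         struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
 *         ...
 *     }
 *
 * mirror_mask_ffs() returns a 1-based bit position, hence the "- 1". */
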
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

/* Returns an arbitrary interface within 'bundle'. */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(list_front(&bundle->ports),
                        struct ofport_dpif, bundle_node);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}

/* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored
 * to a VLAN.  In general most packets may be mirrored, but we want to drop
 * protocols that may confuse switches. */
static bool
eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN])
{
    /* If you change this function's behavior, please update corresponding
     * documentation in vswitch.xml at the same time. */
    if (dst[0] != 0x01) {
        /* All the currently banned MACs happen to start with 01, so this is a
         * quick way to eliminate most of the good ones. */
    } else {
        if (eth_addr_is_reserved(dst)) {
            /* Drop STP, IEEE pause frames, and other reserved protocols
             * (01-80-c2-00-00-0x). */
            return false;
        }

        if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) {
            /* Cisco OUI. */
            if ((dst[3] & 0xfe) == 0xcc &&
                (dst[4] & 0xfe) == 0xcc &&
                (dst[5] & 0xfe) == 0xcc) {
                /* Drop the following protocols plus others following the same
                   pattern:

                   CDP, VTP, DTP, PAgP  (01-00-0c-cc-cc-cc)
                   Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd)
                   STP Uplink Fast      (01-00-0c-cd-cd-cd) */
                return false;
            }

            if (!(dst[3] | dst[4] | dst[5])) {
                /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */
                return false;
            }
        }
    }
    return true;
}

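/* Editor's examples (illustrative, not in the original source):
 *
 *     eth_dst_may_rspan(01:80:c2:00:00:00) -> false  (STP/reserved range)
 *     eth_dst_may_rspan(01:00:0c:cc:cc:cc) -> false  (CDP/VTP/DTP/PAgP)
 *     eth_dst_may_rspan(ff:ff:ff:ff:ff:ff) -> true   (broadcast is fine)
 */
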
static void
add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->packet != NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */

    NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
                      ctx->odp_actions->size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors &= mirrors - 1;
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (eth_dst_may_rspan(orig_flow->dl_dst)
                   && vlan != m->out_vlan) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}

static void
update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
                    uint64_t packets, uint64_t bytes)
{
    if (!mirrors) {
        return;
    }

    for (; mirrors; mirrors &= mirrors - 1) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (!m) {
            /* In normal circumstances 'm' will not be NULL.  However,
             * if mirrors are reconfigured, we can temporarily get out
             * of sync in facet_revalidate().  We could "correct" the
             * mirror list before reaching here, but doing that would
             * not properly account the traffic stats we've currently
             * accumulated for previous mirror configuration. */
            continue;
        }

        m->packet_count += packets;
        m->byte_count += bytes;
    }
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}

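/* Editor's note (illustrative, not in the original source): a gratuitous ARP
 * request advertises the sender's own binding, so its sender and target
 * protocol addresses are equal -- hence the nw_src == nw_dst test above.
 * Gratuitous ARP replies are identified by opcode alone. */
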
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}

static struct ofbundle *
lookup_input_bundle(struct ofproto_dpif *ofproto, uint16_t in_port, bool warn)
{
    struct ofport_dpif *ofport;

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }
    return NULL;
}

/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (i.e. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
              struct ofport_dpif *in_port, uint16_t vlan, tag_type *tags)
{
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                return false;
            }
            break;
        }
    }

    return true;
}

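/* Editor's example (illustrative, not in the original source): the
 * BV_DROP_IF_MOVED case guards against frames reflected back by the far
 * switch on a bond.  If 00:11:22:33:44:55 was learned on bundle A and a frame
 * with that source MAC arrives on bundle B, it is inadmissible unless it is a
 * gratuitous ARP whose learning-table entry is not grat-ARP locked. */
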
static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->has_normal = true;

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
                                    ctx->packet != NULL);
    if (!in_bundle) {
        return;
    }

    /* We know 'in_port' exists unless 'in_bundle' is "ofpp_none_bundle",
     * since lookup_input_bundle() succeeded. */
    in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);

    /* Drop malformed frames. */
    if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port &&
        !is_admissible(ctx->ofproto, &ctx->flow, in_port, vlan, &ctx->tags)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            output_normal(ctx, mac->port.p, vlan);
        }
    } else {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->nf_output_iface = NF_OUT_FLOOD;
    }
}
\f
/* Optimized flow revalidation.
 *
 * It's a difficult problem, in general, to tell which facets need to have
 * their actions recalculated whenever the OpenFlow flow table changes.  We
 * don't try to solve that general problem: for most kinds of OpenFlow flow
 * table changes, we recalculate the actions for every facet.  This is
 * relatively expensive, but it's good enough if the OpenFlow flow table
 * doesn't change very often.
 *
 * However, we can expect one particular kind of OpenFlow flow table change to
 * happen frequently: changes caused by MAC learning.  To avoid wasting a lot
 * of CPU on revalidating every facet whenever MAC learning modifies the flow
 * table, we add a special case that applies to flow tables in which every rule
 * has the same form (that is, the same wildcards), except that the table is
 * also allowed to have a single "catch-all" flow that matches all packets.  We
 * optimize this case by tagging all of the facets that resubmit into the table
 * and invalidating the same tag whenever a flow changes in that table.  The
 * end result is that we revalidate just the facets that need it (and sometimes
 * a few more, but not all of the facets or even all of the facets that
 * resubmit to the table modified by MAC learning). */

/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
 * into an OpenFlow table with the given hash basis 'secret'. */
static uint32_t
rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
                   uint32_t secret)
{
    if (flow_wildcards_is_catchall(wc)) {
        return 0;
    } else {
        struct flow tag_flow = *flow;
        flow_zero_wildcards(&tag_flow, wc);
        return tag_create_deterministic(flow_hash(&tag_flow, secret));
    }
}

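/* Editor's sketch (illustrative, not in the original source): during
 * translation, a facet that resubmits into a taggable table picks up a tag
 * computed from its flow, the non-catchall classifier table's wildcards, and
 * that table's hash basis, roughly:
 *
 *     ctx->tags |= rule_calculate_tag(&ctx->flow, &table->other_table->wc,
 *                                     basis);
 *
 * rule_invalidate() below then adds a changed rule's own 'tag' (cached in
 * struct rule_dpif) to 'revalidate_set', so only facets whose tag sets
 * intersect it are revalidated. */
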
/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
 * taggability of that table.
 *
 * This function must be called after *each* change to a flow table.  If you
 * skip calling it on some changes then the pointer comparisons at the end can
 * be invalid if you get unlucky.  For example, if a flow removal causes a
 * cls_table to be destroyed and then a flow insertion causes a cls_table with
 * different wildcards to be created with the same address, then this function
 * will incorrectly skip revalidation. */
static void
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
    struct table_dpif *table = &ofproto->tables[table_id];
    const struct oftable *oftable = &ofproto->up.tables[table_id];
    struct cls_table *catchall, *other;
    struct cls_table *t;

    catchall = other = NULL;

    switch (hmap_count(&oftable->cls.tables)) {
    case 0:
        /* We could tag this OpenFlow table but it would make the logic a
         * little harder and it's a corner case that doesn't seem worth it
         * yet. */
        break;

    case 1:
    case 2:
        HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
            if (cls_table_is_catchall(t)) {
                catchall = t;
            } else if (!other) {
                other = t;
            } else {
                /* Indicate that we can't tag this by setting both tables to
                 * NULL.  (We know that 'catchall' is already NULL.) */
                other = NULL;
            }
        }
        break;

    default:
        /* Can't tag this table. */
        break;
    }

    if (table->catchall_table != catchall || table->other_table != other) {
        table->catchall_table = catchall;
        table->other_table = other;
        ofproto->need_revalidate = true;
    }
}

/* Given 'rule' that has changed in some way (either it is a rule being
 * inserted, a rule being deleted, or a rule whose actions are being
 * modified), marks facets for revalidation to ensure that packets will be
 * forwarded correctly according to the new state of the flow table.
 *
 * This function must be called after *each* change to a flow table.  See
 * the comment on table_update_taggable() for more information. */
static void
rule_invalidate(const struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    table_update_taggable(ofproto, rule->up.table_id);

    if (!ofproto->need_revalidate) {
        struct table_dpif *table = &ofproto->tables[rule->up.table_id];

        if (table->other_table && rule->tag) {
            tag_set_add(&ofproto->revalidate_set, rule->tag);
        } else {
            ofproto->need_revalidate = true;
        }
    }
}
\f
static bool
set_frag_handling(struct ofproto *ofproto_,
                  enum ofp_config_flags frag_handling)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (frag_handling != OFPC_FRAG_REASM) {
        ofproto->need_revalidate = true;
        return true;
    } else {
        return false;
    }
}

static enum ofperr
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const union ofp_action *ofp_actions, size_t n_ofp_actions)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    enum ofperr error;

    if (flow->in_port >= ofproto->max_ports && flow->in_port < OFPP_MAX) {
        return OFPERR_NXBRC_BAD_IN_PORT;
    }

    error = validate_actions(ofp_actions, n_ofp_actions, flow,
                             ofproto->max_ports);
    if (!error) {
        struct odputil_keybuf keybuf;
        struct ofpbuf *odp_actions;
        struct ofproto_push push;
        struct ofpbuf key;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&key, flow);

        action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, NULL,
                              packet_get_tcp_flags(packet, flow), packet);

        /* Ensure that resubmits in 'ofp_actions' get accounted to their
         * matching rules. */
        push.packets = 1;
        push.bytes = packet->size;
        push.used = time_msec();
        push.ctx.resubmit_hook = push_resubmit;

        odp_actions = xlate_actions(&push.ctx, ofp_actions, n_ofp_actions);
        dpif_execute(ofproto->dpif, key.data, key.size,
                     odp_actions->data, odp_actions->size, packet);
        ofpbuf_delete(odp_actions);
    }
    return error;
}
\f
/* NetFlow. */

static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
}

static void
send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
{
    if (!facet_is_controller_flow(facet) &&
        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
        struct subfacet *subfacet;
        struct ofexpired expired;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            if (subfacet->installed) {
                struct dpif_flow_stats stats;

                subfacet_install(subfacet, subfacet->actions,
                                 subfacet->actions_len, &stats);
                subfacet_update_stats(subfacet, &stats);
            }
        }

        expired.flow = facet->flow;
        expired.packet_count = facet->packet_count;
        expired.byte_count = facet->byte_count;
        expired.used = facet->used;
        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
    }
}

static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
    struct facet *facet;

    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        send_active_timeout(ofproto, facet);
    }
}
\f
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
                             hash_string(name, 0), &all_ofproto_dpifs) {
        if (!strcmp(ofproto->up.name, name)) {
            return ofproto;
        }
    }
    return NULL;
}

static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}

static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

struct trace_ctx {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};

static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
    ds_put_char(result, '\n');
}

static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}

static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}

static void
trace_format_odp(struct ds *result, int level, const char *title,
                 struct trace_ctx *trace)
{
    struct ofpbuf *odp_actions = trace->ctx.odp_actions;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    format_odp_actions(result, odp_actions->data, odp_actions->size);
    ds_put_char(result, '\n');
}

static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}

static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    const char *dpname = argv[1];
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    ovs_be16 initial_tci;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
                                    "for help)");
        goto exit;
    }
    if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
        /* ofproto/trace dpname flow [-generate] */
        const char *flow_s = argv[2];
        const char *generate_s = argv[3];
        int error;

        /* Convert string to datapath key. */
        ofpbuf_init(&odp_key, 0);
        error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
        if (error) {
            unixctl_command_reply_error(conn, "Bad flow syntax");
            goto exit;
        }

        /* Convert odp_key to flow. */
        error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
                                              odp_key.size, &flow,
                                              &initial_tci, NULL);
        if (error == ODP_FIT_ERROR) {
            unixctl_command_reply_error(conn, "Invalid flow");
            goto exit;
        }

        /* Generate a packet, if requested. */
        if (generate_s) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (argc == 6) {
        /* ofproto/trace dpname priority tun_id in_port packet */
        const char *priority_s = argv[2];
        const char *tun_id_s = argv[3];
        const char *in_port_s = argv[4];
        const char *packet_s = argv[5];
        uint16_t in_port = ofp_port_to_odp_port(atoi(in_port_s));
        ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
        uint32_t priority = atoi(priority_s);
        const char *msg;

        msg = eth_from_hex(packet_s, &packet);
        if (msg) {
            unixctl_command_reply_error(conn, msg);
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, priority, tun_id, in_port, &flow);
        initial_tci = flow.vlan_tci;
    } else {
        unixctl_command_reply_error(conn, "Bad command syntax");
        goto exit;
    }

    ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}

static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet, ovs_be16 initial_tci,
              struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow, 0);
    trace_format_rule(ds, 0, 0, rule);
    if (rule) {
        struct trace_ctx trace;
        struct ofpbuf *odp_actions;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
                              rule, tcp_flags, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        odp_actions = xlate_actions(&trace.ctx,
                                    rule->up.actions, rule->up.n_actions);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);
        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, odp_actions->data, odp_actions->size);
        ofpbuf_delete(odp_actions);

        if (!trace.ctx.may_set_up_flow) {
            if (packet) {
                ds_put_cstr(ds, "\nThis flow is not cachable.");
            } else {
                ds_put_cstr(ds, "\nThe datapath actions are incomplete--"
                            "for complete actions, please supply a packet.");
            }
        }
    }
}

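/* Editor's usage sketch (illustrative; the flow syntax is the datapath key
 * format accepted by odp_flow_key_from_string()):
 *
 *     ovs-appctl ofproto/trace br0 \
 *         'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07)' \
 *         -generate
 *
 * With "-generate", a dummy packet is composed from the flow via
 * flow_compose(), so that parts of translation that require a packet
 * (e.g. MAC learning, since ctx->may_learn is set from the packet) take
 * effect during the trace. */
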
static void
ofproto_dpif_clog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}

/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct facet *facet;
    int errors;

    errors = 0;
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }

    if (errors) {
        ofproto->need_revalidate = true;
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}

static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "bridge {tun_id in_port packet | odp_flow [-generate]}",
        2, 5, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */

static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->need_revalidate = true;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}

static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}

/* Returns the ODP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
 * 'ofproto'.  For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
 * it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
 * function just returns its 'realdev_odp_port' argument. */
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}

static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}

/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}

static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}

static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
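
/* Editor's example (illustrative, not in the original source): suppose VLAN
 * splinters map eth0 (OpenFlow port 1) with VID 9 to the Linux VLAN device
 * eth0.9 (OpenFlow port 5).  Then:
 *
 *     vsp_realdev_to_vlandev(ofproto, <odp port of eth0>, htons(9))
 *         -> the ODP port of eth0.9
 *     vsp_vlandev_to_realdev(ofproto, 5, &vid)
 *         -> 1, with vid set to 9
 */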
\f
const struct ofproto_class ofproto_dpif_class = {
    enumerate_types,
    enumerate_names,
    del,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    run_fast,
    wait,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_cfm,
    get_cfm_fault,
    get_cfm_remote_mpids,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set,
    mirror_get_stats,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_idle_time,
    set_realdev,
};