/*
 * Copyright (c) 2009, 2010, 2011 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "autopath.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "multipath.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-print.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "timer.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_no_packet_in);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_invalidated);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 16

struct ofport_dpif;
struct ofproto_dpif;

struct rule_dpif {
    struct rule up;

    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any facet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    struct list facets;         /* List of "struct facet"s. */
};

static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *, uint8_t table);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (mutually exclusive). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
};

static void mirror_destroy(struct ofmirror *);

/* A group of one or more OpenFlow ports. */
#define OFBUNDLE_FLOOD ((struct ofbundle *) 1)
struct ofbundle {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */

    /* Status. */
    bool floodable;             /* True if no port has OFPPC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute?  We
     * want to execute them if we are actually processing a packet, or if we
     * are accounting for packets that the datapath has processed, but not if
     * we are just revalidating. */
    bool may_learn;

    /* If nonnull, called just before executing a resubmit action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *);

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    uint32_t priority;          /* Current flow priority. 0 if none. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t base_priority;     /* Priority at the last commit. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
                                    const union ofp_action *in, size_t n_in);

/* An exact-match instantiation of an OpenFlow flow. */
struct facet {
    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when its statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    uint64_t dp_packet_count;    /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;      /* Last known byte count in the datapath. */

    uint64_t rs_packet_count;    /* Packets pushed to resubmit children. */
    uint64_t rs_byte_count;      /* Bytes pushed to resubmit children. */
    long long int rs_used;       /* Used time pushed to resubmit children. */

    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */

    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */
    struct flow flow;            /* Exact-match flow. */
    bool installed;              /* Installed in datapath? */
    bool may_install;            /* True ordinarily; false if actions must
                                  * be reassessed for every packet. */
    bool has_learn;              /* Actions include NXAST_LEARN? */
    bool has_normal;             /* Actions output to OFPP_NORMAL? */
    size_t actions_len;          /* Number of bytes in actions[]. */
    struct nlattr *actions;      /* Datapath actions. */
    tag_type tags;               /* Tags. */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
};

static struct facet *facet_create(struct rule_dpif *, const struct flow *,
                                  const struct ofpbuf *packet);
static void facet_remove(struct ofproto_dpif *, struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct ofproto_dpif *, struct facet *);

static void facet_execute(struct ofproto_dpif *, struct facet *,
                          struct ofpbuf *packet);

static int facet_put__(struct ofproto_dpif *, struct facet *,
                       const struct nlattr *actions, size_t actions_len,
                       struct dpif_flow_stats *);
static void facet_install(struct ofproto_dpif *, struct facet *,
                          bool zero_stats);
static void facet_uninstall(struct ofproto_dpif *, struct facet *);
static void facet_flush_stats(struct ofproto_dpif *, struct facet *);

static void facet_make_actions(struct ofproto_dpif *, struct facet *,
                               const struct ofpbuf *packet);
static void facet_update_time(struct ofproto_dpif *, struct facet *,
                              long long int used);
static void facet_update_stats(struct ofproto_dpif *, struct facet *,
                               const struct dpif_flow_stats *);
static void facet_reset_counters(struct facet *);
static void facet_reset_dp_stats(struct facet *, struct dpif_flow_stats *);
static void facet_push_stats(struct facet *);
static void facet_account(struct ofproto_dpif *, struct facet *);

static bool facet_is_controller_flow(struct facet *);

static void flow_push_stats(const struct rule_dpif *,
                            struct flow *, uint64_t packets, uint64_t bytes,
                            long long int used);

struct ofport_dpif {
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */
};

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    /* Check 'ofport' for null before dereferencing it; the original asserted
     * on ofport->ofproto first, which would crash on a null 'ofport'. */
    assert(!ofport || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

struct ofproto_dpif {
    struct ofproto up;
    struct dpif *dpif;
    int max_ports;

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_bonded_bundles;

    /* Expiration. */
    struct timer next_expiration;

    /* Facets. */
    struct hmap facets;
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;
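/* (The "ofproto/clog" and "ofproto/unclog" unixctl commands that toggle this
 * flag are presumably registered in ofproto_dpif_unixctl_init(), later in
 * this file.) */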

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
                                        uint32_t odp_port);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);
static bool is_admissible(struct ofproto_dpif *, const struct flow *,
                          bool have_packet, tag_type *, int *vlanp,
                          struct ofbundle **in_bundlep);
static void handle_upcall(struct ofproto_dpif *, struct dpif_upcall *);

/* Flow expiration. */
static int expire(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(struct ofproto_dpif *, uint32_t odp_port,
                       const struct ofpbuf *packet);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
\f
/* Factory functions. */

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    return dp_enumerate_names(type, names);
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}
\f
/* Basic life-cycle. */

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static int
construct(struct ofproto *ofproto_, int *n_tablesp)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *name = ofproto->up.name;
    int error;
    int i;

    error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
        return error;
    }

    ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
    ofproto->n_matches = 0;

    error = dpif_recv_set_mask(ofproto->dpif,
                               ((1u << DPIF_UC_MISS) |
                                (1u << DPIF_UC_ACTION) |
                                (1u << DPIF_UC_SAMPLE)));
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
        dpif_close(ofproto->dpif);
        return error;
    }
    dpif_flow_flush(ofproto->dpif);
    dpif_recv_purge(ofproto->dpif);

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create();
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    timer_set_duration(&ofproto->next_expiration, 1000);

    hmap_init(&ofproto->facets);
    ofproto->need_revalidate = false;
    tag_set_init(&ofproto->revalidate_set);

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_bundle_action = false;

    *n_tablesp = 255;
    return 0;
}

static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct classifier *table;
    int i;

    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, table, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);

    dpif_close(ofproto->dpif);
}

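/* Implements the ofproto "run" interface: completes any deferred flow_mod
 * operations, polls the dpif for up to 50 upcalls, drives flow expiration,
 * NetFlow, sFlow, port, and bundle processing, and finally revalidates any
 * facets whose tags were touched since the last call. */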
static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int i;

    if (!clogged) {
        complete_operations(ofproto);
    }
    dpif_run(ofproto->dpif);

    for (i = 0; i < 50; i++) {
        struct dpif_upcall packet;
        int error;

        error = dpif_recv(ofproto->dpif, &packet);
        if (error) {
            if (error == ENODEV) {
                /* Datapath destroyed. */
                return error;
            }
            break;
        }

        handle_upcall(ofproto, &packet);
    }

    if (timer_expired(&ofproto->next_expiration)) {
        int delay = expire(ofproto);
        timer_set_duration(&ofproto->next_expiration, delay);
    }

    if (ofproto->netflow) {
        netflow_run(ofproto->netflow);
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    mac_learning_run(ofproto->ml, &ofproto->revalidate_set);

    /* Now revalidate if there's anything to do. */
    if (ofproto->need_revalidate
        || !tag_set_is_empty(&ofproto->revalidate_set)) {
        struct tag_set revalidate_set = ofproto->revalidate_set;
        bool revalidate_all = ofproto->need_revalidate;
        struct facet *facet, *next;

        /* Clear the revalidation flags. */
        tag_set_init(&ofproto->revalidate_set);
        ofproto->need_revalidate = false;

        HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
            if (revalidate_all
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(ofproto, facet);
            }
        }
    }

    return 0;
}

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->dpif);
    dpif_recv_wait(ofproto->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    mac_learning_wait(ofproto->ml);
    if (ofproto->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else {
        timer_wait(&ofproto->next_expiration);
    }
}

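/* Implements the ofproto "flush" interface: forgets every facet and then
 * deletes all datapath flows in a single dpif_flow_flush() call. */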
static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling
         * it individually since we are about to blow away all the facets
         * with dpif_flow_flush(). */
        facet->installed = false;
        facet->dp_packet_count = 0;
        facet->dp_byte_count = 0;
        facet_remove(ofproto, facet);
    }
    dpif_flow_flush(ofproto->dpif);
}

static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, uint32_t *actions)
{
    *arp_match_ip = true;
    *actions = ((1u << OFPAT_OUTPUT) |
                (1u << OFPAT_SET_VLAN_VID) |
                (1u << OFPAT_SET_VLAN_PCP) |
                (1u << OFPAT_STRIP_VLAN) |
                (1u << OFPAT_SET_DL_SRC) |
                (1u << OFPAT_SET_DL_DST) |
                (1u << OFPAT_SET_NW_SRC) |
                (1u << OFPAT_SET_NW_DST) |
                (1u << OFPAT_SET_NW_TOS) |
                (1u << OFPAT_SET_TP_SRC) |
                (1u << OFPAT_SET_TP_DST) |
                (1u << OFPAT_ENQUEUE));
}

static void
get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ovs_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->dpif, &s);
    put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
    put_32aligned_be64(&ots->matched_count,
                       htonll(s.n_hit + ofproto->n_matches));
}

static int
set_netflow(struct ofproto *ofproto_,
            const struct netflow_options *netflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (netflow_options) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, netflow_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port->odp_port,
                            netdev_get_name(port->up.netdev));
    }

    return 0;
}

static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }
}

static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}

static void
port_reconfigured(struct ofport *port_, ovs_be32 old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    ovs_be32 changed = old_config ^ port->up.opp.config;

    if (changed & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP |
                        OFPPC_NO_FWD | OFPPC_NO_FLOOD)) {
        ofproto->need_revalidate = true;
    }
}

static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;
    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, ofport->odp_port,
                                    netdev_get_name(ofport->up.netdev));
            }
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        dpif_sflow_destroy(ds);
        ofproto->sflow = NULL;
    }
    return 0;
}

static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}

static int
get_cfm_fault(const struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
}

static int
get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
                     size_t *n_rmps)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
        return 0;
    } else {
        return -1;
    }
}
\f
/* Bundles. */

/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow. */
static void
bundle_flush_macs(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->need_revalidate = true;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            mac_learning_expire(ml, mac);
        }
    }
}

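/* Returns the bundle in 'ofproto' whose 'aux' key equals 'aux', or NULL if
 * there is none. */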
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->need_revalidate = true;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
            bundle->floodable = false;
        }
    }
}

static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp,
                uint32_t bond_stable_id)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->need_revalidate = true;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    port->bond_stable_id = bond_stable_id;

    return true;
}

static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->need_revalidate = true;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}

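/* Implements the ofproto "bundle_set" interface: creates, reconfigures, or
 * (if 's' is NULL) destroys the bundle associated with 'aux' in 'ofproto_',
 * bringing its ports, LACP, bonding, VLAN, and trunk state into line with
 * 's'. */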
static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    const unsigned long *trunks;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    assert(s->n_slaves == 1 || s->bond != NULL);
    assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL,
                             s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tag. */
    if (s->vlan != bundle->vlan) {
        bundle->vlan = s->vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    trunks = s->vlan == -1 ? NULL : s->trunks;
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        bundle->trunks = vlan_bitmap_clone(trunks);
        need_flush = true;
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->need_revalidate = true;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->need_revalidate = true;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->bond_stable_id,
                                port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle);
    }

    return 0;
}

static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}

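/* Callback passed to lacp_run() by bundle_run(): transmits the 'pdu_size'
 * bytes of LACP PDU in 'pdu' on the netdev of the slave port 'port_'. */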
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        error = netdev_send(port->up.netdev, &packet);
        if (error) {
            VLOG_WARN_RL(&rl, "port %s: sending LACP PDU on iface %s failed "
                         "(%s)", port->bundle->name,
                         netdev_get_name(port->up.netdev), strerror(error));
        }
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}

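/* Sends a gratuitous learning packet through 'bundle''s bond for each MAC
 * learning table entry that resides on some other bundle, so that, e.g.
 * after a bond failover, neighboring switches relearn those addresses via
 * the currently active slave. */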
static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            int ret = bond_send_learning_packet(bundle->bond, e->mac, e->vlan);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}

static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
                 lacp_negotiated(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
\f
/* Mirrors. */

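/* Returns the index of an unused slot in 'ofproto''s "mirrors" array, or -1
 * if all MAX_MIRRORS slots are already in use. */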
static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}

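/* Returns the mirror in 'ofproto' whose 'aux' key equals 'aux', or NULL if
 * there is none. */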
static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}

static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->need_revalidate = true;
    mac_learning_flush(ofproto->ml);

    return 0;
}

static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->need_revalidate = true;
    mac_learning_flush(ofproto->ml);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);
}

static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        ofproto->need_revalidate = true;
        mac_learning_flush(ofproto->ml);
    }
    return 0;
}

static bool
is_mirror_output_bundle(struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    /* Revalidate cached flows whenever forward_bpdu option changes. */
    ofproto->need_revalidate = true;
}
\f
/* Ports. */

static struct ofport_dpif *
get_ofp_port(struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static struct ofport_dpif *
get_odp_port(struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
}

static void
ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
}

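/* Per-port periodic processing: runs CFM (composing and sending CCMs when
 * due) and recomputes 'may_enable', which reflects whether the port may be
 * enabled as a bond slave based on carrier, CFM fault status, and LACP. */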
static void
port_run(struct ofport_dpif *ofport)
{
    bool enable = netdev_get_carrier(ofport->up.netdev);

    if (ofport->cfm) {
        cfm_run(ofport->cfm);

        if (cfm_should_send_ccm(ofport->cfm)) {
            struct ofpbuf packet;

            ofpbuf_init(&packet, 0);
            cfm_compose_ccm(ofport->cfm, &packet, ofport->up.opp.hw_addr);
            send_packet(ofproto_dpif_cast(ofport->up.ofproto),
                        ofport->odp_port, &packet);
            ofpbuf_uninit(&packet);
        }

        enable = enable && !cfm_get_fault(ofport->cfm);
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->need_revalidate = true;
        }
    }

    ofport->may_enable = enable;
}

static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}

static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
    }
    return error;
}

static int
port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    uint16_t odp_port;
    int error;

    error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
    if (!error) {
        *ofp_portp = odp_port_to_ofp_port(odp_port);
    }
    return error;
}

static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
    if (!error) {
        struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
        if (ofport) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}

struct port_dump_state {
    struct dpif_port_dump dump;
    bool done;
};

static int
port_dump_start(const struct ofproto *ofproto_, void **statep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state;

    *statep = state = xmalloc(sizeof *state);
    dpif_port_dump_start(&state->dump, ofproto->dpif);
    state->done = false;
    return 0;
}

static int
port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
               struct ofproto_port *port)
{
    struct port_dump_state *state = state_;
    struct dpif_port dpif_port;

    if (dpif_port_dump_next(&state->dump, &dpif_port)) {
        ofproto_port_from_dpif_port(port, &dpif_port);
        return 0;
    } else {
        int error = dpif_port_dump_done(&state->dump);
        state->done = true;
        return error ? error : EOF;
    }
}

static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (!state->done) {
        dpif_port_dump_done(&state->dump);
    }
    free(state);
    return 0;
}

static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    return dpif_port_poll(ofproto->dpif, devnamep);
}

static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
\f
/* Upcall handling. */

/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
 * OFPT_PACKET_IN message to each OpenFlow controller as necessary according
 * to their individual configurations.
 *
 * If 'clone' is true, the caller retains ownership of 'upcall->packet'.
 * Otherwise, ownership is transferred to this function. */
static void
send_packet_in(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall,
               const struct flow *flow, bool clone)
{
    struct ofputil_packet_in pin;

    pin.packet = upcall->packet;
    pin.in_port = flow->in_port;
    pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
    pin.buffer_id = 0;          /* not yet known */
    pin.send_len = upcall->userdata;
    connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow,
                           clone ? NULL : upcall->packet);
}

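/* Returns true if 'flow' represents a control protocol frame (802.1ag CFM or
 * LACP) that is consumed here rather than going through the flow table; if
 * 'packet' is nonnull it is handed to the appropriate subsystem.  Returns
 * false if the flow should be processed normally. */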
static bool
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofpbuf *packet)
{
    struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);

    if (!ofport) {
        return false;
    }

    if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return true;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return true;
    }
    return false;
}

static void
handle_miss_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
{
    struct facet *facet;
    struct flow flow;

    /* Obtain in_port and tun_id, at least. */
    odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);

    /* Set header pointers in 'flow'. */
    flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);

    /* Handle 802.1ag and LACP. */
    if (process_special(ofproto, &flow, upcall->packet)) {
        ofpbuf_delete(upcall->packet);
        ofproto->n_matches++;
        return;
    }

    /* Check with in-band control to see if this packet should be sent
     * to the local port regardless of the flow table. */
    if (connmgr_msg_in_hook(ofproto->up.connmgr, &flow, upcall->packet)) {
        send_packet(ofproto, OVSP_LOCAL, upcall->packet);
    }

    facet = facet_lookup_valid(ofproto, &flow);
    if (!facet) {
        struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow, 0);
        if (!rule) {
            /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
            struct ofport_dpif *port = get_ofp_port(ofproto, flow.in_port);
            if (port) {
                if (port->up.opp.config & htonl(OFPPC_NO_PACKET_IN)) {
                    COVERAGE_INC(ofproto_dpif_no_packet_in);
                    /* XXX install 'drop' flow entry */
                    ofpbuf_delete(upcall->packet);
                    return;
                }
            } else {
                VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
                             flow.in_port);
            }

            send_packet_in(ofproto, upcall, &flow, false);
            return;
        }

        facet = facet_create(rule, &flow, upcall->packet);
    } else if (!facet->may_install) {
        /* The facet is not installable, that is, we need to process every
         * packet, so process the current packet's actions into 'facet'. */
        facet_make_actions(ofproto, facet, upcall->packet);
    }

    if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in(ofproto, upcall, &flow, true);
    }

    facet_execute(ofproto, facet, upcall->packet);
    facet_install(ofproto, facet, false);
    ofproto->n_matches++;
}

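/* Dispatches 'upcall' by type: DPIF_UC_ACTION upcalls become packet-ins,
 * DPIF_UC_SAMPLE upcalls feed sFlow sampling, and DPIF_UC_MISS upcalls go
 * through flow setup in handle_miss_upcall(). */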
static void
handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
{
    struct flow flow;

    switch (upcall->type) {
    case DPIF_UC_ACTION:
        COVERAGE_INC(ofproto_dpif_ctlr_action);
        odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
        send_packet_in(ofproto, upcall, &flow, false);
        break;

    case DPIF_UC_SAMPLE:
        if (ofproto->sflow) {
            odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
            dpif_sflow_received(ofproto->sflow, upcall, &flow);
        }
        ofpbuf_delete(upcall->packet);
        break;

    case DPIF_UC_MISS:
        handle_miss_upcall(ofproto, upcall);
        break;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
        break;
    }
}
\f
/* Flow expiration. */

static int facet_max_idle(const struct ofproto_dpif *);
static void update_stats(struct ofproto_dpif *);
static void rule_expire(struct rule_dpif *);
static void expire_facets(struct ofproto_dpif *, int dp_max_idle);

/* This function is called periodically by run().  Its job is to collect
 * updates for the flows that have been installed into the datapath, most
 * importantly when they last were used, and then use that information to
 * expire flows that have not been used recently.
 *
 * Returns the number of milliseconds after which it should be called again. */
static int
expire(struct ofproto_dpif *ofproto)
{
    struct rule_dpif *rule, *next_rule;
    struct classifier *table;
    int dp_max_idle;

    /* Update stats for each flow in the datapath. */
    update_stats(ofproto);

    /* Expire facets that have been idle too long. */
    dp_max_idle = facet_max_idle(ofproto);
    expire_facets(ofproto, dp_max_idle);

    /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, table, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            rule_expire(rule);
        }
    }

    /* All outstanding data in existing flows has been accounted, so it's a
     * good time to do bond rebalancing. */
    if (ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            if (bundle->bond) {
                bond_rebalance(bundle->bond, &ofproto->revalidate_set);
            }
        }
    }

    return MIN(dp_max_idle, 1000);
}

/* Updates 'packet_count', 'byte_count', and 'used' members of installed
 * facets.
 *
 * This function also pushes statistics updates to rules which each facet
 * resubmits into.  Generally these statistics will be accurate.  However, if
 * a facet changes the rule it resubmits into at some time in between
 * update_stats() runs, it is possible that statistics accrued to the old
 * rule will be incorrectly attributed to the new rule.  This could be
 * avoided by calling update_stats() whenever rules are created or deleted.
 * However, the performance impact of making so many calls to the datapath
 * does not justify the benefit of having perfectly accurate statistics. */
1830static void
1831update_stats(struct ofproto_dpif *p)
1832{
1833 const struct dpif_flow_stats *stats;
1834 struct dpif_flow_dump dump;
1835 const struct nlattr *key;
1836 size_t key_len;
1837
1838 dpif_flow_dump_start(&dump, p->dpif);
1839 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
1840 struct facet *facet;
1841 struct flow flow;
1842
1843 if (odp_flow_key_to_flow(key, key_len, &flow)) {
1844 struct ds s;
1845
1846 ds_init(&s);
1847 odp_flow_key_format(key, key_len, &s);
df2c07f4 1848 VLOG_WARN_RL(&rl, "failed to convert datapath flow key to flow: %s",
abe529af
BP
1849 ds_cstr(&s));
1850 ds_destroy(&s);
1851
1852 continue;
1853 }
1854 facet = facet_find(p, &flow);
1855
1856 if (facet && facet->installed) {
1857
1858 if (stats->n_packets >= facet->dp_packet_count) {
1859 uint64_t extra = stats->n_packets - facet->dp_packet_count;
1860 facet->packet_count += extra;
1861 } else {
1862 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
1863 }
1864
1865 if (stats->n_bytes >= facet->dp_byte_count) {
1866 facet->byte_count += stats->n_bytes - facet->dp_byte_count;
1867 } else {
1868 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
1869 }
1870
1871 facet->dp_packet_count = stats->n_packets;
1872 facet->dp_byte_count = stats->n_bytes;
1873
1874 facet_update_time(p, facet, stats->used);
1875            facet_account(p, facet);
1876 facet_push_stats(facet);
1877 } else {
1878 /* There's a flow in the datapath that we know nothing about.
1879 * Delete it. */
1880 COVERAGE_INC(facet_unexpected);
1881 dpif_flow_del(p->dpif, key, key_len, NULL);
1882 }
1883 }
1884 dpif_flow_dump_done(&dump);
1885}
1886
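/* [Illustrative sketch, added for clarity; not part of the original file.]
 * The datapath reports absolute counters, so update_stats() folds only the
 * delta since the last dump into the facet.  A self-contained model of that
 * bookkeeping, with invented names ('dp_reported', 'last_seen',
 * 'accumulated'): */
#include <stdint.h>

static void
example_fold_counter(uint64_t dp_reported, uint64_t *last_seen,
                     uint64_t *accumulated)
{
    if (dp_reported >= *last_seen) {
        *accumulated += dp_reported - *last_seen;   /* Fold in new traffic. */
    }
    *last_seen = dp_reported;                       /* Remember the baseline. */
}
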
1887/* Calculates and returns the number of milliseconds of idle time after which
1888 * facets should expire from the datapath and we should fold their statistics
1889 * into their parent rules in userspace. */
1890static int
1891facet_max_idle(const struct ofproto_dpif *ofproto)
1892{
1893 /*
1894 * Idle time histogram.
1895 *
1896 * Most of the time a switch has a relatively small number of facets. When
1897 * this is the case we might as well keep statistics for all of them in
1898 * userspace and cache them in the kernel datapath for performance as
1899 * well.
1900 *
1901 * As the number of facets increases, the memory required to maintain
1902 * statistics about them in userspace and in the kernel becomes
1903 * significant. However, with a large number of facets it is likely that
1904 * only a few of them are "heavy hitters" that consume a large amount of
1905 * bandwidth. At this point, only heavy hitters are worth caching in the
1906 * kernel and maintaining in userspace; other facets we can discard.
1907 *
1908 * The technique used to compute the idle time is to build a histogram with
1909 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet
1910 * that is installed in the kernel gets dropped in the appropriate bucket.
1911 * After the histogram has been built, we compute the cutoff so that only
1912 * the most-recently-used 1% of facets (but at least
1913 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
1914 * the most-recently-used bucket of facets is kept, so actually an
1915 * arbitrary number of facets can be kept in any given expiration run
1916 * (though the next run will delete most of those unless they receive
1917 * additional data).
1918 *
1919 * This requires a second pass through the facets, in addition to the pass
1920 * made by update_stats(), because the former function never looks
1921 * at uninstallable facets.
1922 */
1923 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
1924 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
1925 int buckets[N_BUCKETS] = { 0 };
1926    int total, subtotal, bucket;
1927    struct facet *facet;
1928 long long int now;
1929 int i;
1930
1931 total = hmap_count(&ofproto->facets);
1932    if (total <= ofproto->up.flow_eviction_threshold) {
1933 return N_BUCKETS * BUCKET_WIDTH;
1934 }
1935
1936 /* Build histogram. */
1937 now = time_msec();
1938 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
1939 long long int idle = now - facet->used;
1940 int bucket = (idle <= 0 ? 0
1941 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
1942 : (unsigned int) idle / BUCKET_WIDTH);
1943 buckets[bucket]++;
1944 }
1945
1946 /* Find the first bucket whose flows should be expired. */
1947 subtotal = bucket = 0;
1948 do {
1949 subtotal += buckets[bucket++];
1950 } while (bucket < N_BUCKETS &&
1951 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
1952
1953 if (VLOG_IS_DBG_ENABLED()) {
1954 struct ds s;
1955
1956 ds_init(&s);
1957 ds_put_cstr(&s, "keep");
1958 for (i = 0; i < N_BUCKETS; i++) {
1959 if (i == bucket) {
1960 ds_put_cstr(&s, ", drop");
1961 }
1962 if (buckets[i]) {
1963 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
1964 }
1965 }
1966 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
1967 ds_destroy(&s);
1968 }
1969
1970 return bucket * BUCKET_WIDTH;
1971}
1972
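/* [Illustrative sketch, added for clarity; not part of the original file.]
 * A self-contained model of the cutoff computation above, with invented
 * names ('idle_times', 'n_flows', 'threshold') and simplified constants:
 * bucket every flow by idle time, then keep buckets from most-recently-used
 * toward oldest until at least max(threshold, 1% of flows) are retained. */
enum { EX_BUCKET_WIDTH = 100, EX_N_BUCKETS = 50 };

static int
example_max_idle(const long long int *idle_times, int n_flows, int threshold)
{
    int buckets[EX_N_BUCKETS] = { 0 };
    int keep = threshold > n_flows / 100 ? threshold : n_flows / 100;
    int subtotal = 0, bucket = 0;
    int i;

    for (i = 0; i < n_flows; i++) {
        long long int idle = idle_times[i];
        int b = (idle <= 0 ? 0
                 : idle >= (long long int) EX_BUCKET_WIDTH * EX_N_BUCKETS
                 ? EX_N_BUCKETS - 1
                 : idle / EX_BUCKET_WIDTH);
        buckets[b]++;
    }

    /* Walk from the most-recently-used bucket toward the oldest. */
    do {
        subtotal += buckets[bucket++];
    } while (bucket < EX_N_BUCKETS && subtotal < keep);

    return bucket * EX_BUCKET_WIDTH;  /* Flows idle longer than this expire. */
}
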
1973static void
1974facet_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
1975{
1976 if (ofproto->netflow && !facet_is_controller_flow(facet) &&
1977 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
1978 struct ofexpired expired;
1979
1980 if (facet->installed) {
1981 struct dpif_flow_stats stats;
1982
1983 facet_put__(ofproto, facet, facet->actions, facet->actions_len,
1984 &stats);
1985 facet_update_stats(ofproto, facet, &stats);
1986 }
1987
1988 expired.flow = facet->flow;
1989 expired.packet_count = facet->packet_count;
1990 expired.byte_count = facet->byte_count;
1991 expired.used = facet->used;
1992 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
1993 }
1994}
1995
1996static void
1997expire_facets(struct ofproto_dpif *ofproto, int dp_max_idle)
1998{
1999 long long int cutoff = time_msec() - dp_max_idle;
2000 struct facet *facet, *next_facet;
2001
2002 HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
2003 facet_active_timeout(ofproto, facet);
2004 if (facet->used < cutoff) {
2005 facet_remove(ofproto, facet);
2006 }
2007 }
2008}
2009
2010/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
2011 * then delete it entirely. */
2012static void
2013rule_expire(struct rule_dpif *rule)
2014{
2015 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2016 struct facet *facet, *next_facet;
2017 long long int now;
2018 uint8_t reason;
2019
2020 /* Has 'rule' expired? */
2021 now = time_msec();
2022 if (rule->up.hard_timeout
2023        && now > rule->up.modified + rule->up.hard_timeout * 1000) {
2024 reason = OFPRR_HARD_TIMEOUT;
2025 } else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
2026 && now > rule->used + rule->up.idle_timeout * 1000) {
2027 reason = OFPRR_IDLE_TIMEOUT;
2028 } else {
2029 return;
2030 }
2031
2032 COVERAGE_INC(ofproto_dpif_expired);
2033
2034 /* Update stats. (This is a no-op if the rule expired due to an idle
2035 * timeout, because that only happens when the rule has no facets left.) */
2036 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
2037 facet_remove(ofproto, facet);
2038 }
2039
2040 /* Get rid of the rule. */
2041 ofproto_rule_expire(&rule->up, reason);
2042}
2043\f
2044/* Facets. */
2045
2046/* Creates and returns a new facet owned by 'rule', given a 'flow' and an
2047 * example 'packet' within that flow.
2048 *
2049 * The caller must already have determined that no facet with an identical
2050 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
2051 * the ofproto's classifier table. */
2052static struct facet *
2053facet_create(struct rule_dpif *rule, const struct flow *flow,
2054 const struct ofpbuf *packet)
2055{
2056 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2057 struct facet *facet;
2058
2059 facet = xzalloc(sizeof *facet);
2060 facet->used = time_msec();
2061 hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
2062 list_push_back(&rule->facets, &facet->list_node);
2063 facet->rule = rule;
2064 facet->flow = *flow;
2065 netflow_flow_init(&facet->nf_flow);
2066 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
2067
2068 facet_make_actions(ofproto, facet, packet);
2069
2070 return facet;
2071}
2072
2073static void
2074facet_free(struct facet *facet)
2075{
2076 free(facet->actions);
2077 free(facet);
2078}
2079
2080/* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
2081 * 'odp_actions' on 'packet', whose extracted flow must be 'flow'.
2082 *
2083 * Takes ownership of 'packet'. */
2084static bool
2085execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
2086 const struct nlattr *odp_actions, size_t actions_len,
2087 struct ofpbuf *packet)
2088{
2089 if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
2090        && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE) {
2091 /* As an optimization, avoid a round-trip from userspace to kernel to
2092 * userspace. This also avoids possibly filling up kernel packet
2093 * buffers along the way. */
2094 struct dpif_upcall upcall;
2095
2096 upcall.type = DPIF_UC_ACTION;
2097 upcall.packet = packet;
2098 upcall.key = NULL;
2099 upcall.key_len = 0;
2100 upcall.userdata = nl_attr_get_u64(odp_actions);
2101 upcall.sample_pool = 0;
2102 upcall.actions = NULL;
2103 upcall.actions_len = 0;
2104
2105 send_packet_in(ofproto, &upcall, flow, false);
2106
2107 return true;
2108 } else {
2109 struct odputil_keybuf keybuf;
2110 struct ofpbuf key;
2111 int error;
2112
2113 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2114 odp_flow_key_from_flow(&key, flow);
2115
2116 error = dpif_execute(ofproto->dpif, key.data, key.size,
2117 odp_actions, actions_len, packet);
2118
2119 ofpbuf_delete(packet);
2120 return !error;
2121 }
2122}
2123
2124/* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
2125 * statistics appropriately. 'packet' must have at least sizeof(struct
2126 * ofp_packet_in) bytes of headroom.
2127 *
2128 * For correct results, 'packet' must actually be in 'facet''s flow; that is,
2129 * applying flow_extract() to 'packet' would yield the same flow as
2130 * 'facet->flow'.
2131 *
2132 * 'facet' must have accurately composed datapath actions; that is, it must
2133 * not be in need of revalidation.
2134 *
2135 * Takes ownership of 'packet'. */
2136static void
2137facet_execute(struct ofproto_dpif *ofproto, struct facet *facet,
2138 struct ofpbuf *packet)
2139{
2140 struct dpif_flow_stats stats;
2141
2142 assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
2143
2144 flow_extract_stats(&facet->flow, packet, &stats);
2145 stats.used = time_msec();
2146 if (execute_odp_actions(ofproto, &facet->flow,
2147 facet->actions, facet->actions_len, packet)) {
2148 facet_update_stats(ofproto, facet, &stats);
2149 }
2150}
2151
2152/* Remove 'facet' from 'ofproto' and free up the associated memory:
2153 *
2154 * - If 'facet' was installed in the datapath, uninstalls it and updates its
2155 * rule's statistics, via facet_uninstall().
2156 *
2157 * - Removes 'facet' from its rule and from ofproto->facets.
2158 */
2159static void
2160facet_remove(struct ofproto_dpif *ofproto, struct facet *facet)
2161{
2162 facet_uninstall(ofproto, facet);
2163 facet_flush_stats(ofproto, facet);
2164 hmap_remove(&ofproto->facets, &facet->hmap_node);
2165 list_remove(&facet->list_node);
2166 facet_free(facet);
2167}
2168
2169/* Composes the datapath actions for 'facet' based on its rule's actions. */
2170static void
2171facet_make_actions(struct ofproto_dpif *p, struct facet *facet,
2172 const struct ofpbuf *packet)
2173{
2174 const struct rule_dpif *rule = facet->rule;
2175 struct ofpbuf *odp_actions;
2176 struct action_xlate_ctx ctx;
2177
2178 action_xlate_ctx_init(&ctx, p, &facet->flow, packet);
2179 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
2180 facet->tags = ctx.tags;
2181 facet->may_install = ctx.may_set_up_flow;
2182 facet->has_learn = ctx.has_learn;
2183 facet->has_normal = ctx.has_normal;
2184 facet->nf_flow.output_iface = ctx.nf_output_iface;
2185
2186 if (facet->actions_len != odp_actions->size
2187 || memcmp(facet->actions, odp_actions->data, odp_actions->size)) {
2188 free(facet->actions);
2189 facet->actions_len = odp_actions->size;
2190 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
2191 }
2192
2193 ofpbuf_delete(odp_actions);
2194}
2195
2196/* Updates 'facet''s flow in the datapath, setting its actions to 'actions_len'
2197 * bytes of actions in 'actions'.  If 'stats' is non-null, statistics counters
2198 * in the datapath will be zeroed and 'stats' will be updated with the traffic
2199 * received since 'facet' was last updated.
2200 *
2201 * Returns 0 if successful, otherwise a positive errno value. */
2202static int
2203facet_put__(struct ofproto_dpif *ofproto, struct facet *facet,
2204 const struct nlattr *actions, size_t actions_len,
2205 struct dpif_flow_stats *stats)
2206{
2207 struct odputil_keybuf keybuf;
2208 enum dpif_flow_put_flags flags;
2209 struct ofpbuf key;
2210    int ret;
2211
2212 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
2213 if (stats) {
2214 flags |= DPIF_FP_ZERO_STATS;
2215 }
2216
2217 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2218 odp_flow_key_from_flow(&key, &facet->flow);
2219
2220 ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
2221 actions, actions_len, stats);
2222
2223 if (stats) {
2224 facet_reset_dp_stats(facet, stats);
2225 }
2226
2227 return ret;
2228}
2229
2230/* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If
2231 * 'zero_stats' is true, clears any existing statistics from the datapath for
2232 * 'facet'. */
2233static void
2234facet_install(struct ofproto_dpif *p, struct facet *facet, bool zero_stats)
2235{
2236 struct dpif_flow_stats stats;
2237
2238 if (facet->may_install
2239 && !facet_put__(p, facet, facet->actions, facet->actions_len,
2240 zero_stats ? &stats : NULL)) {
2241 facet->installed = true;
2242 }
2243}
2244
2245static int
2246vlan_tci_to_openflow_vlan(ovs_be16 vlan_tci)
2247{
2248 return vlan_tci != htons(0) ? vlan_tci_to_vid(vlan_tci) : OFP_VLAN_NONE;
2249}
2250
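/* [Usage note, added for illustration; not part of the original file.]
 * For example, a TCI of htons(0), meaning no 802.1Q header, maps to
 * OFP_VLAN_NONE (0xffff), while a TCI whose VID bits encode VLAN 42 maps to
 * plain 42; vlan_tci_to_vid() masks off the PCP and CFI bits. */
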
abe529af 2251static void
55af77bb 2252facet_account(struct ofproto_dpif *ofproto, struct facet *facet)
abe529af 2253{
55af77bb 2254 uint64_t n_bytes;
abe529af 2255 const struct nlattr *a;
abe529af 2256 unsigned int left;
d78be13b 2257 ovs_be16 vlan_tci;
abe529af 2258
55af77bb 2259 if (facet->byte_count <= facet->accounted_bytes) {
abe529af
BP
2260 return;
2261 }
2262 n_bytes = facet->byte_count - facet->accounted_bytes;
2263 facet->accounted_bytes = facet->byte_count;
2264
2265     /* Feed information from the active flows back into the learning table to
2266 * ensure that table is always in sync with what is actually flowing
2267 * through the datapath. */
2268 if (facet->has_learn || facet->has_normal) {
2269 struct action_xlate_ctx ctx;
2270
2271 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
2272 ctx.may_learn = true;
2273 ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
2274 facet->rule->up.n_actions));
2275 }
2276
2277     if (!facet->has_normal || !ofproto->has_bonded_bundles) {
2278 return;
2279 }
2280
2281 /* This loop feeds byte counters to bond_account() for rebalancing to use
2282 * as a basis. We also need to track the actual VLAN on which the packet
2283 * is going to be sent to ensure that it matches the one passed to
2284 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
2285 * hash bucket.) */
2286 vlan_tci = facet->flow.vlan_tci;
2287     NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->actions, facet->actions_len) {
2288         struct ofport_dpif *port;
2289
2290         switch (nl_attr_type(a)) {
2291         case OVS_ACTION_ATTR_OUTPUT:
2292 port = get_odp_port(ofproto, nl_attr_get_u32(a));
2293 if (port && port->bundle && port->bundle->bond) {
2294 bond_account(port->bundle->bond, &facet->flow,
2295 vlan_tci_to_openflow_vlan(vlan_tci), n_bytes);
2296             }
2297             break;
2298
2299         case OVS_ACTION_ATTR_POP_VLAN:
2300 vlan_tci = htons(0);
2301 break;
2302
2303         case OVS_ACTION_ATTR_PUSH_VLAN:
2304 vlan_tci = nl_attr_get_be16(a);
2305 break;
2306 }
2307 }
2308}
2309
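/* [Worked example, added for illustration; not part of the original file.]
 * Given the action list { PUSH_VLAN 100, OUTPUT p1, POP_VLAN, OUTPUT p2 },
 * the loop above credits p1's bond with VLAN 100 and p2's bond with
 * OFP_VLAN_NONE, matching what bond_choose_output_slave() saw when the
 * actions were composed. */
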
2310/* If 'facet' is installed in the datapath, uninstalls it. */
2311static void
2312facet_uninstall(struct ofproto_dpif *p, struct facet *facet)
2313{
2314 if (facet->installed) {
2315 struct odputil_keybuf keybuf;
2316 struct dpif_flow_stats stats;
2317 struct ofpbuf key;
2318        int error;
2319
2320 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2321 odp_flow_key_from_flow(&key, &facet->flow);
2322
2323 error = dpif_flow_del(p->dpif, key.data, key.size, &stats);
2324 facet_reset_dp_stats(facet, &stats);
2325 if (!error) {
2326 facet_update_stats(p, facet, &stats);
2327 }
2328 facet->installed = false;
2329 } else {
2330 assert(facet->dp_packet_count == 0);
2331 assert(facet->dp_byte_count == 0);
2332 }
2333}
2334
2335/* Returns true if the only action for 'facet' is to send to the controller.
2336 * (We don't report NetFlow expiration messages for such facets because they
2337 * are just part of the control logic for the network, not real traffic). */
2338static bool
2339facet_is_controller_flow(struct facet *facet)
2340{
2341 return (facet
2342 && facet->rule->up.n_actions == 1
2343 && action_outputs_to_port(&facet->rule->up.actions[0],
2344 htons(OFPP_CONTROLLER)));
2345}
2346
2347/* Resets 'facet''s datapath statistics counters. This should be called when
2348 * 'facet''s statistics are cleared in the datapath. If 'stats' is non-null,
2349 * it should contain the statistics returned by dpif when 'facet' was reset in
2350 * the datapath.  'stats' will be modified to include only statistics new
2351 * since 'facet' was last updated. */
2352static void
2353facet_reset_dp_stats(struct facet *facet, struct dpif_flow_stats *stats)
2354{
2355 if (stats && facet->dp_packet_count <= stats->n_packets
2356 && facet->dp_byte_count <= stats->n_bytes) {
2357 stats->n_packets -= facet->dp_packet_count;
2358 stats->n_bytes -= facet->dp_byte_count;
2359 }
2360
2361 facet->dp_packet_count = 0;
2362 facet->dp_byte_count = 0;
2363}
2364
2365/* Folds all of 'facet''s statistics into its rule. Also updates the
2366 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
2367 * 'facet''s statistics in the datapath should have been zeroed and folded into
2368 * its packet and byte counts before this function is called. */
2369static void
2370facet_flush_stats(struct ofproto_dpif *ofproto, struct facet *facet)
2371{
2372 assert(!facet->dp_byte_count);
2373 assert(!facet->dp_packet_count);
2374
2375 facet_push_stats(facet);
2376    facet_account(ofproto, facet);
2377
2378 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
2379 struct ofexpired expired;
2380 expired.flow = facet->flow;
2381 expired.packet_count = facet->packet_count;
2382 expired.byte_count = facet->byte_count;
2383 expired.used = facet->used;
2384 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
2385 }
2386
2387 facet->rule->packet_count += facet->packet_count;
2388 facet->rule->byte_count += facet->byte_count;
2389
2390 /* Reset counters to prevent double counting if 'facet' ever gets
2391 * reinstalled. */
2392    facet_reset_counters(facet);
2393
2394 netflow_flow_clear(&facet->nf_flow);
2395}
2396
2397/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2398 * Returns it if found, otherwise a null pointer.
2399 *
2400 * The returned facet might need revalidation; use facet_lookup_valid()
2401 * instead if that is important. */
2402static struct facet *
2403facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
2404{
2405 struct facet *facet;
2406
2407 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
2408 &ofproto->facets) {
2409 if (flow_equal(flow, &facet->flow)) {
2410 return facet;
2411 }
2412 }
2413
2414 return NULL;
2415}
2416
2417/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2418 * Returns it if found, otherwise a null pointer.
2419 *
2420 * The returned facet is guaranteed to be valid. */
2421static struct facet *
2422facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
2423{
2424 struct facet *facet = facet_find(ofproto, flow);
2425
2426 /* The facet we found might not be valid, since we could be in need of
2427 * revalidation. If it is not valid, don't return it. */
2428 if (facet
2429 && ofproto->need_revalidate
2430 && !facet_revalidate(ofproto, facet)) {
2431 COVERAGE_INC(facet_invalidated);
2432 return NULL;
2433 }
2434
2435 return facet;
2436}
2437
2438/* Re-searches 'ofproto''s classifier for a rule matching 'facet':
2439 *
2440 * - If the rule found is different from 'facet''s current rule, moves
2441 * 'facet' to the new rule and recompiles its actions.
2442 *
2443 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
2444 * where it is and recompiles its actions anyway.
2445 *
2446 * - If there is none, destroys 'facet'.
2447 *
2448 * Returns true if 'facet' still exists, false if it has been destroyed. */
2449static bool
2450facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet)
2451{
2452 struct action_xlate_ctx ctx;
2453 struct ofpbuf *odp_actions;
2454 struct rule_dpif *new_rule;
2455 bool actions_changed;
2456
2457 COVERAGE_INC(facet_revalidate);
2458
2459 /* Determine the new rule. */
2460    new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
2461 if (!new_rule) {
2462 /* No new rule, so delete the facet. */
2463 facet_remove(ofproto, facet);
2464 return false;
2465 }
2466
2467    /* Calculate new datapath actions.
2468 *
2469 * We do not modify any 'facet' state yet, because we might need to, e.g.,
2470 * emit a NetFlow expiration and, if so, we need to have the old state
2471 * around to properly compose it. */
2472 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
2473 odp_actions = xlate_actions(&ctx,
2474 new_rule->up.actions, new_rule->up.n_actions);
2475 actions_changed = (facet->actions_len != odp_actions->size
2476 || memcmp(facet->actions, odp_actions->data,
2477 facet->actions_len));
2478
2479 /* If the datapath actions changed or the installability changed,
2480 * then we need to talk to the datapath. */
2481 if (actions_changed || ctx.may_set_up_flow != facet->installed) {
2482 if (ctx.may_set_up_flow) {
2483 struct dpif_flow_stats stats;
2484
2485 facet_put__(ofproto, facet,
2486 odp_actions->data, odp_actions->size, &stats);
2487 facet_update_stats(ofproto, facet, &stats);
2488 } else {
2489 facet_uninstall(ofproto, facet);
2490 }
2491
2492 /* The datapath flow is gone or has zeroed stats, so push stats out of
2493 * 'facet' into 'rule'. */
2494 facet_flush_stats(ofproto, facet);
2495 }
2496
2497 /* Update 'facet' now that we've taken care of all the old state. */
2498 facet->tags = ctx.tags;
2499 facet->nf_flow.output_iface = ctx.nf_output_iface;
2500 facet->may_install = ctx.may_set_up_flow;
2501 facet->has_learn = ctx.has_learn;
2502 facet->has_normal = ctx.has_normal;
2503 if (actions_changed) {
2504 free(facet->actions);
2505 facet->actions_len = odp_actions->size;
2506 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
2507 }
2508 if (facet->rule != new_rule) {
2509 COVERAGE_INC(facet_changed_rule);
2510 list_remove(&facet->list_node);
2511 list_push_back(&new_rule->facets, &facet->list_node);
2512 facet->rule = new_rule;
2513 facet->used = new_rule->up.created;
2514 facet->rs_used = facet->used;
2515 }
2516
2517 ofpbuf_delete(odp_actions);
2518
2519 return true;
2520}
2521
2522/* Updates 'facet''s used time. Caller is responsible for calling
2523 * facet_push_stats() to update the flows which 'facet' resubmits into. */
2524static void
2525facet_update_time(struct ofproto_dpif *ofproto, struct facet *facet,
2526 long long int used)
2527{
2528 if (used > facet->used) {
2529 facet->used = used;
2530 if (used > facet->rule->used) {
2531 facet->rule->used = used;
2532 }
2533 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
2534 }
2535}
2536
2537/* Folds the statistics from 'stats' into the counters in 'facet'.
2538 *
2539 * Because of the meaning of a facet's counters, it only makes sense to do this
2540 * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
2541 * packet that was sent by hand or if it represents statistics that have been
2542 * cleared out of the datapath. */
2543static void
2544facet_update_stats(struct ofproto_dpif *ofproto, struct facet *facet,
2545 const struct dpif_flow_stats *stats)
2546{
2547 if (stats->n_packets || stats->used > facet->used) {
2548 facet_update_time(ofproto, facet, stats->used);
2549 facet->packet_count += stats->n_packets;
2550 facet->byte_count += stats->n_bytes;
2551 facet_push_stats(facet);
2552 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
2553 }
2554}
2555
2556static void
2557facet_reset_counters(struct facet *facet)
2558{
2559 facet->packet_count = 0;
2560 facet->byte_count = 0;
2561 facet->rs_packet_count = 0;
2562 facet->rs_byte_count = 0;
2563 facet->accounted_bytes = 0;
2564}
2565
2566static void
2567facet_push_stats(struct facet *facet)
2568{
2569 uint64_t rs_packets, rs_bytes;
2570
2571 assert(facet->packet_count >= facet->rs_packet_count);
2572 assert(facet->byte_count >= facet->rs_byte_count);
2573 assert(facet->used >= facet->rs_used);
2574
2575 rs_packets = facet->packet_count - facet->rs_packet_count;
2576 rs_bytes = facet->byte_count - facet->rs_byte_count;
2577
2578 if (rs_packets || rs_bytes || facet->used > facet->rs_used) {
2579 facet->rs_packet_count = facet->packet_count;
2580 facet->rs_byte_count = facet->byte_count;
2581 facet->rs_used = facet->used;
2582
2583 flow_push_stats(facet->rule, &facet->flow,
2584 rs_packets, rs_bytes, facet->used);
2585 }
2586}
2587
2588struct ofproto_push {
2589 struct action_xlate_ctx ctx;
2590 uint64_t packets;
2591 uint64_t bytes;
2592 long long int used;
2593};
2594
2595static void
2596push_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
2597{
2598 struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx);
2599
2600 if (rule) {
2601 rule->packet_count += push->packets;
2602 rule->byte_count += push->bytes;
2603 rule->used = MAX(push->used, rule->used);
2604 }
2605}
2606
2607/* Pushes flow statistics to the rules which 'flow' resubmits into given
2608 * 'rule''s actions. */
2609static void
2610flow_push_stats(const struct rule_dpif *rule,
2611 struct flow *flow, uint64_t packets, uint64_t bytes,
2612 long long int used)
2613{
2614 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2615 struct ofproto_push push;
2616
2617 push.packets = packets;
2618 push.bytes = bytes;
2619 push.used = used;
2620
2621 action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL);
2622 push.ctx.resubmit_hook = push_resubmit;
2623 ofpbuf_delete(xlate_actions(&push.ctx,
2624 rule->up.actions, rule->up.n_actions));
2625}
2626\f
2627/* Rules. */
2628
2629static struct rule_dpif *
2630 rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
2631                  uint8_t table_id)
2632 {
2633     return rule_dpif_cast(rule_from_cls_rule(
2634                               classifier_lookup(&ofproto->up.tables[table_id],
2635                                                 flow)));
2636}
2637
2638static void
2639complete_operation(struct rule_dpif *rule)
2640{
2641 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2642
2643 ofproto->need_revalidate = true;
2644 if (clogged) {
2645 struct dpif_completion *c = xmalloc(sizeof *c);
2646 c->op = rule->up.pending;
2647 list_push_back(&ofproto->completions, &c->list_node);
2648 } else {
2649 ofoperation_complete(rule->up.pending, 0);
2650 }
2651}
2652
2653static struct rule *
2654rule_alloc(void)
2655{
2656 struct rule_dpif *rule = xmalloc(sizeof *rule);
2657 return &rule->up;
2658}
2659
2660static void
2661rule_dealloc(struct rule *rule_)
2662{
2663 struct rule_dpif *rule = rule_dpif_cast(rule_);
2664 free(rule);
2665}
2666
2667static int
2668rule_construct(struct rule *rule_)
2669{
2670 struct rule_dpif *rule = rule_dpif_cast(rule_);
2671 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2672    struct rule_dpif *victim;
2673 int error;
2674
2675 error = validate_actions(rule->up.actions, rule->up.n_actions,
2676 &rule->up.cr.flow, ofproto->max_ports);
2677 if (error) {
2678 return error;
2679 }
2680
2681 rule->used = rule->up.created;
2682 rule->packet_count = 0;
2683 rule->byte_count = 0;
2684
2685 victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
2686 if (victim && !list_is_empty(&victim->facets)) {
2687 struct facet *facet;
2688
2689 rule->facets = victim->facets;
2690 list_moved(&rule->facets);
2691 LIST_FOR_EACH (facet, list_node, &rule->facets) {
2692 /* XXX: We're only clearing our local counters here. It's possible
2693 * that quite a few packets are unaccounted for in the datapath
2694 * statistics. These will be accounted to the new rule instead of
2695 * cleared as required. This could be fixed by clearing out the
2696 * datapath statistics for this facet, but currently it doesn't
2697 * seem worth it. */
2698 facet_reset_counters(facet);
2699 facet->rule = rule;
2700 }
2701 } else {
2702 /* Must avoid list_moved() in this case. */
2703 list_init(&rule->facets);
2704 }
2705
2706    complete_operation(rule);
2707 return 0;
2708}
2709
2710static void
2711rule_destruct(struct rule *rule_)
2712{
2713 struct rule_dpif *rule = rule_dpif_cast(rule_);
2714 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2715 struct facet *facet, *next_facet;
2716
2717 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
2718 facet_revalidate(ofproto, facet);
2719 }
2720
2721 complete_operation(rule);
2722}
2723
2724static void
2725rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
2726{
2727 struct rule_dpif *rule = rule_dpif_cast(rule_);
2728 struct facet *facet;
2729
2730 /* Start from historical data for 'rule' itself that are no longer tracked
2731 * in facets. This counts, for example, facets that have expired. */
2732 *packets = rule->packet_count;
2733 *bytes = rule->byte_count;
2734
2735 /* Add any statistics that are tracked by facets. This includes
2736 * statistical data recently updated by ofproto_update_stats() as well as
2737 * stats for packets that were executed "by hand" via dpif_execute(). */
2738 LIST_FOR_EACH (facet, list_node, &rule->facets) {
2739 *packets += facet->packet_count;
2740 *bytes += facet->byte_count;
2741 }
2742}
2743
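/* [Worked example, added for illustration; not part of the original file.]
 * If 'rule' has folded in 10 packets from expired facets and currently has
 * two live facets holding 3 and 7 packets, rule_get_stats() reports
 * *packets = 10 + 3 + 7 = 20. */
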
2744 static int
2745rule_execute(struct rule *rule_, struct flow *flow, struct ofpbuf *packet)
2746{
2747 struct rule_dpif *rule = rule_dpif_cast(rule_);
2748 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2749 struct action_xlate_ctx ctx;
2750 struct ofpbuf *odp_actions;
2751 struct facet *facet;
2752 size_t size;
2753
2754 /* First look for a related facet. If we find one, account it to that. */
2755 facet = facet_lookup_valid(ofproto, flow);
2756 if (facet && facet->rule == rule) {
2757 facet_execute(ofproto, facet, packet);
2758        return 0;
2759 }
2760
2761 /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
2762 * create a new facet for it and use that. */
2763    if (rule_dpif_lookup(ofproto, flow, 0) == rule) {
2764 facet = facet_create(rule, flow, packet);
2765 facet_execute(ofproto, facet, packet);
2766 facet_install(ofproto, facet, true);
2767        return 0;
2768 }
2769
2770 /* We can't account anything to a facet. If we were to try, then that
2771 * facet would have a non-matching rule, busting our invariants. */
2772 action_xlate_ctx_init(&ctx, ofproto, flow, packet);
2773 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
2774 size = packet->size;
2775 if (execute_odp_actions(ofproto, flow, odp_actions->data,
2776 odp_actions->size, packet)) {
2777 rule->used = time_msec();
2778 rule->packet_count++;
2779 rule->byte_count += size;
2780 flow_push_stats(rule, flow, 1, size, rule->used);
2781 }
2782 ofpbuf_delete(odp_actions);
2783
2784 return 0;
2785}
2786
2787static void
2788rule_modify_actions(struct rule *rule_)
2789{
2790 struct rule_dpif *rule = rule_dpif_cast(rule_);
2791 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2792 int error;
2793
2794 error = validate_actions(rule->up.actions, rule->up.n_actions,
2795 &rule->up.cr.flow, ofproto->max_ports);
2796 if (error) {
2797 ofoperation_complete(rule->up.pending, error);
2798 return;
2799    }
2800
2801 complete_operation(rule);
2802}
2803\f
2804/* Sends 'packet' out of port 'odp_port' within 'ofproto'.
2805 * Returns 0 if successful, otherwise a positive errno value. */
2806static int
2807send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port,
2808 const struct ofpbuf *packet)
2809{
2810 struct ofpbuf key, odp_actions;
2811 struct odputil_keybuf keybuf;
2812 struct flow flow;
2813 int error;
2814
2815 flow_extract((struct ofpbuf *) packet, 0, 0, &flow);
2816 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2817 odp_flow_key_from_flow(&key, &flow);
2818
2819    ofpbuf_init(&odp_actions, 32);
2820    nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
2821 error = dpif_execute(ofproto->dpif,
2822 key.data, key.size,
2823 odp_actions.data, odp_actions.size,
2824 packet);
2825 ofpbuf_uninit(&odp_actions);
2826
2827 if (error) {
2828 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
2829 ofproto->up.name, odp_port, strerror(error));
2830 }
2831 return error;
2832}
2833\f
2834/* OpenFlow to datapath action translation. */
2835
2836static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2837 struct action_xlate_ctx *ctx);
2838static void xlate_normal(struct action_xlate_ctx *);
2839
2840static void
2841commit_odp_actions(struct action_xlate_ctx *ctx)
2842{
2843 const struct flow *flow = &ctx->flow;
2844 struct flow *base = &ctx->base_flow;
2845 struct ofpbuf *odp_actions = ctx->odp_actions;
2846
2847 if (base->tun_id != flow->tun_id) {
2848        nl_msg_put_be64(odp_actions, OVS_ACTION_ATTR_SET_TUNNEL, flow->tun_id);
2849 base->tun_id = flow->tun_id;
2850 }
2851
2852 if (base->nw_src != flow->nw_src) {
2853        nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_SRC, flow->nw_src);
2854 base->nw_src = flow->nw_src;
2855 }
2856
2857 if (base->nw_dst != flow->nw_dst) {
2858        nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_DST, flow->nw_dst);
2859 base->nw_dst = flow->nw_dst;
2860 }
2861
2862    if (base->nw_tos != flow->nw_tos) {
2863        nl_msg_put_u8(odp_actions, OVS_ACTION_ATTR_SET_NW_TOS, flow->nw_tos);
2864 base->nw_tos = flow->nw_tos;
2865 }
2866
2867 if (base->vlan_tci != flow->vlan_tci) {
2868 if (!(flow->vlan_tci & htons(VLAN_CFI))) {
2869            nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
2870        } else {
2871            if (base->vlan_tci != htons(0)) {
2872 nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
2873 }
2874 nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
2875 flow->vlan_tci & ~htons(VLAN_CFI));
2876 }
2877 base->vlan_tci = flow->vlan_tci;
2878 }
2879
2880 if (base->tp_src != flow->tp_src) {
2881        nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_SRC, flow->tp_src);
2882 base->tp_src = flow->tp_src;
2883 }
2884
2885 if (base->tp_dst != flow->tp_dst) {
2886        nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_DST, flow->tp_dst);
2887 base->tp_dst = flow->tp_dst;
2888 }
2889
2890 if (!eth_addr_equals(base->dl_src, flow->dl_src)) {
2891        nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_SRC,
2892                          flow->dl_src, ETH_ADDR_LEN);
2893 memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
2894 }
2895
2896 if (!eth_addr_equals(base->dl_dst, flow->dl_dst)) {
2897        nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_DST,
2898                          flow->dl_dst, ETH_ADDR_LEN);
2899 memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
2900 }
2901
2902 if (ctx->base_priority != ctx->priority) {
2903 if (ctx->priority) {
2904            nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_SET_PRIORITY,
2905 ctx->priority);
2906 } else {
2907            nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_PRIORITY);
2908 }
2909 ctx->base_priority = ctx->priority;
2910 }
2911}
2912
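/* [Illustrative sketch, added for clarity; not part of the original file.]
 * commit_odp_actions() emits a datapath "set" action only for fields where
 * the flow as modified by OpenFlow actions ('flow') differs from what the
 * datapath last saw ('base'), then records the field as applied.  In
 * miniature, with invented names: */
#include <stdint.h>

static void
example_commit_field(uint32_t *base_field, uint32_t want,
                     void (*emit_set_action)(uint32_t))
{
    if (*base_field != want) {
        emit_set_action(want);      /* Tell the datapath about the change. */
        *base_field = want;         /* Remember that it has been emitted. */
    }
}
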
2913static void
2914add_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
2915{
2916 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
2917 uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
2918
2919 if (ofport) {
2920 if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)) {
2921 /* Forwarding disabled on port. */
2922 return;
2923 }
2924 } else {
2925 /*
2926 * We don't have an ofport record for this port, but it doesn't hurt to
2927 * allow forwarding to it anyhow. Maybe such a port will appear later
2928 * and we're pre-populating the flow table.
2929 */
2930 }
2931
2932    commit_odp_actions(ctx);
2933    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
2934 ctx->nf_output_iface = ofp_port;
2935}
2936
2937static void
2938xlate_table_action(struct action_xlate_ctx *ctx,
2939 uint16_t in_port, uint8_t table_id)
2940{
2941 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
2942 struct rule_dpif *rule;
2943 uint16_t old_in_port;
2944 uint8_t old_table_id;
2945
2946 old_table_id = ctx->table_id;
2947 ctx->table_id = table_id;
2948
2949 /* Look up a flow with 'in_port' as the input port. Then restore the
2950 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2951 * have surprising behavior). */
2952 old_in_port = ctx->flow.in_port;
2953 ctx->flow.in_port = in_port;
2954        rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow, table_id);
2955 ctx->flow.in_port = old_in_port;
2956
2957 if (ctx->resubmit_hook) {
2958 ctx->resubmit_hook(ctx, rule);
2959 }
2960
2961 if (rule) {
2962 ctx->recurse++;
2963 do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
2964 ctx->recurse--;
2965 }
2966
2967 ctx->table_id = old_table_id;
2968 } else {
2969 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
2970
2971        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
2972 MAX_RESUBMIT_RECURSION);
2973 }
2974}
2975
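/* [Worked example, added for illustration; not part of the original file.]
 * A rule whose actions resubmit into a table whose matching rule resubmits
 * again, and so on, recurses here once per hop; after
 * MAX_RESUBMIT_RECURSION (16) nested resubmits the chain is cut off and a
 * rate-limited error is logged instead. */
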
2976static void
2977xlate_resubmit_table(struct action_xlate_ctx *ctx,
2978 const struct nx_action_resubmit *nar)
2979{
2980 uint16_t in_port;
2981 uint8_t table_id;
2982
2983 in_port = (nar->in_port == htons(OFPP_IN_PORT)
2984 ? ctx->flow.in_port
2985 : ntohs(nar->in_port));
2986 table_id = nar->table == 255 ? ctx->table_id : nar->table;
2987
2988 xlate_table_action(ctx, in_port, table_id);
2989}
2990
abe529af 2991static void
b3e9b2ed 2992flood_packets(struct action_xlate_ctx *ctx, ovs_be32 mask)
abe529af
BP
2993{
2994 struct ofport_dpif *ofport;
2995
2996    commit_odp_actions(ctx);
2997    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
2998        uint16_t ofp_port = ofport->up.ofp_port;
2999        if (ofp_port != ctx->flow.in_port && !(ofport->up.opp.config & mask)) {
3000            nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT,
3001 ofport->odp_port);
3002 }
3003 }
3004
3005 ctx->nf_output_iface = NF_OUT_FLOOD;
3006}
3007
3008static void
3009xlate_output_action__(struct action_xlate_ctx *ctx,
3010 uint16_t port, uint16_t max_len)
3011{
3012 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
3013
3014 ctx->nf_output_iface = NF_OUT_DROP;
3015
3016 switch (port) {
3017 case OFPP_IN_PORT:
3018 add_output_action(ctx, ctx->flow.in_port);
3019 break;
3020 case OFPP_TABLE:
3021        xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
3022 break;
3023 case OFPP_NORMAL:
3024 xlate_normal(ctx);
3025 break;
3026 case OFPP_FLOOD:
3027        flood_packets(ctx, htonl(OFPPC_NO_FLOOD));
3028 break;
3029 case OFPP_ALL:
3030        flood_packets(ctx, htonl(0));
3031 break;
3032 case OFPP_CONTROLLER:
3033        commit_odp_actions(ctx);
3034        nl_msg_put_u64(ctx->odp_actions, OVS_ACTION_ATTR_USERSPACE, max_len);
3035 break;
3036 case OFPP_LOCAL:
3037 add_output_action(ctx, OFPP_LOCAL);
3038 break;
3039 case OFPP_NONE:
3040 break;
3041 default:
3042 if (port != ctx->flow.in_port) {
3043 add_output_action(ctx, port);
3044 }
3045 break;
3046 }
3047
3048 if (prev_nf_output_iface == NF_OUT_FLOOD) {
3049 ctx->nf_output_iface = NF_OUT_FLOOD;
3050 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
3051 ctx->nf_output_iface = prev_nf_output_iface;
3052 } else if (prev_nf_output_iface != NF_OUT_DROP &&
3053 ctx->nf_output_iface != NF_OUT_FLOOD) {
3054 ctx->nf_output_iface = NF_OUT_MULTI;
3055 }
3056}
3057
3058static void
3059xlate_output_reg_action(struct action_xlate_ctx *ctx,
3060 const struct nx_action_output_reg *naor)
3061{
3062 uint64_t ofp_port;
3063
3064 ofp_port = nxm_read_field_bits(naor->src, naor->ofs_nbits, &ctx->flow);
3065
3066 if (ofp_port <= UINT16_MAX) {
3067 xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len));
3068 }
3069}
3070
3071static void
3072xlate_output_action(struct action_xlate_ctx *ctx,
3073 const struct ofp_action_output *oao)
3074{
3075 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
3076}
3077
3078static void
3079xlate_enqueue_action(struct action_xlate_ctx *ctx,
3080 const struct ofp_action_enqueue *oae)
3081{
3082 uint16_t ofp_port, odp_port;
3083    uint32_t ctx_priority, priority;
3084 int error;
3085
3086 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
3087 &priority);
3088 if (error) {
3089 /* Fall back to ordinary output action. */
3090 xlate_output_action__(ctx, ntohs(oae->port), 0);
3091 return;
3092 }
3093
3094    /* Figure out datapath output port. */
3095 ofp_port = ntohs(oae->port);
3096 if (ofp_port == OFPP_IN_PORT) {
3097 ofp_port = ctx->flow.in_port;
3098 }
3099 odp_port = ofp_port_to_odp_port(ofp_port);
3100
3101    /* Add datapath actions. */
3102    ctx_priority = ctx->priority;
3103    ctx->priority = priority;
3104    add_output_action(ctx, odp_port);
3105    ctx->priority = ctx_priority;
3106
3107 /* Update NetFlow output port. */
3108 if (ctx->nf_output_iface == NF_OUT_DROP) {
3109 ctx->nf_output_iface = odp_port;
3110 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
3111 ctx->nf_output_iface = NF_OUT_MULTI;
3112 }
3113}
3114
3115static void
3116xlate_set_queue_action(struct action_xlate_ctx *ctx,
3117 const struct nx_action_set_queue *nasq)
3118{
3119 uint32_t priority;
3120 int error;
3121
3122 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
3123 &priority);
3124 if (error) {
3125 /* Couldn't translate queue to a priority, so ignore. A warning
3126 * has already been logged. */
3127 return;
3128 }
3129
3130    ctx->priority = priority;
3131}
3132
3133struct xlate_reg_state {
3134 ovs_be16 vlan_tci;
3135 ovs_be64 tun_id;
3136};
3137
3138static void
3139xlate_autopath(struct action_xlate_ctx *ctx,
3140 const struct nx_action_autopath *naa)
3141{
3142 uint16_t ofp_port = ntohl(naa->id);
3143 struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
3144
3145 if (!port || !port->bundle) {
3146 ofp_port = OFPP_NONE;
3147 } else if (port->bundle->bond) {
3148 /* Autopath does not support VLAN hashing. */
3149 struct ofport_dpif *slave = bond_choose_output_slave(
3150 port->bundle->bond, &ctx->flow, OFP_VLAN_NONE, &ctx->tags);
3151 if (slave) {
3152 ofp_port = slave->up.ofp_port;
3153 }
3154 }
3155 autopath_execute(naa, &ctx->flow, ofp_port);
3156}
3157
3158static bool
3159slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
3160{
3161 struct ofproto_dpif *ofproto = ofproto_;
3162 struct ofport_dpif *port;
3163
3164 switch (ofp_port) {
3165 case OFPP_IN_PORT:
3166 case OFPP_TABLE:
3167 case OFPP_NORMAL:
3168 case OFPP_FLOOD:
3169 case OFPP_ALL:
3170 case OFPP_LOCAL:
3171 return true;
3172 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
3173 return false;
3174 default:
3175 port = get_ofp_port(ofproto, ofp_port);
3176 return port ? port->may_enable : false;
3177 }
3178}
3179
3180static void
3181xlate_learn_action(struct action_xlate_ctx *ctx,
3182 const struct nx_action_learn *learn)
3183{
3184 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
3185 struct ofputil_flow_mod fm;
3186 int error;
3187
3188 learn_execute(learn, &ctx->flow, &fm);
3189
3190 error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
3191 if (error && !VLOG_DROP_WARN(&rl)) {
3192 char *msg = ofputil_error_to_string(error);
3193 VLOG_WARN("learning action failed to modify flow table (%s)", msg);
3194 free(msg);
3195 }
3196
3197 free(fm.actions);
3198}
3199
3200static void
3201do_xlate_actions(const union ofp_action *in, size_t n_in,
3202 struct action_xlate_ctx *ctx)
3203{
3204 const struct ofport_dpif *port;
3205    const union ofp_action *ia;
3206    size_t left;
3207
3208 port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
3209 if (port
3210 && port->up.opp.config & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
3211 port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
3212 ? htonl(OFPPC_NO_RECV_STP)
3213 : htonl(OFPPC_NO_RECV))) {
3214 /* Drop this flow. */
3215 return;
3216 }
3217
3218    OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
3219        const struct ofp_action_dl_addr *oada;
3220 const struct nx_action_resubmit *nar;
3221 const struct nx_action_set_tunnel *nast;
3222 const struct nx_action_set_queue *nasq;
3223 const struct nx_action_multipath *nam;
3224 const struct nx_action_autopath *naa;
3225        const struct nx_action_bundle *nab;
3226        const struct nx_action_output_reg *naor;
3227 enum ofputil_action_code code;
3228 ovs_be64 tun_id;
3229
3230 code = ofputil_decode_action_unsafe(ia);
3231 switch (code) {
3232 case OFPUTIL_OFPAT_OUTPUT:
3233 xlate_output_action(ctx, &ia->output);
3234 break;
3235
3236        case OFPUTIL_OFPAT_SET_VLAN_VID:
3237            ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
3238            ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
3239 break;
3240
3241        case OFPUTIL_OFPAT_SET_VLAN_PCP:
3242            ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
3243            ctx->flow.vlan_tci |= htons(
3244                (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
3245 break;
3246
3247        case OFPUTIL_OFPAT_STRIP_VLAN:
3248            ctx->flow.vlan_tci = htons(0);
3249 break;
3250
3251        case OFPUTIL_OFPAT_SET_DL_SRC:
3252            oada = ((struct ofp_action_dl_addr *) ia);
3253 memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
3254 break;
3255
3256        case OFPUTIL_OFPAT_SET_DL_DST:
3257            oada = ((struct ofp_action_dl_addr *) ia);
3258 memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
3259 break;
3260
3261        case OFPUTIL_OFPAT_SET_NW_SRC:
3262 ctx->flow.nw_src = ia->nw_addr.nw_addr;
3263 break;
3264
3265        case OFPUTIL_OFPAT_SET_NW_DST:
3266 ctx->flow.nw_dst = ia->nw_addr.nw_addr;
3267 break;
3268
3269        case OFPUTIL_OFPAT_SET_NW_TOS:
3270            ctx->flow.nw_tos = ia->nw_tos.nw_tos & IP_DSCP_MASK;
3271 break;
3272
3273        case OFPUTIL_OFPAT_SET_TP_SRC:
3274 ctx->flow.tp_src = ia->tp_port.tp_port;
3275 break;
3276
3277        case OFPUTIL_OFPAT_SET_TP_DST:
3278 ctx->flow.tp_dst = ia->tp_port.tp_port;
3279 break;
3280
3281 case OFPUTIL_OFPAT_ENQUEUE:
3282 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
3283 break;
3284
3285 case OFPUTIL_NXAST_RESUBMIT:
3286 nar = (const struct nx_action_resubmit *) ia;
3287 xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id);
3288 break;
3289
3290 case OFPUTIL_NXAST_RESUBMIT_TABLE:
3291 xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia);
3292 break;
3293
3294 case OFPUTIL_NXAST_SET_TUNNEL:
3295 nast = (const struct nx_action_set_tunnel *) ia;
3296 tun_id = htonll(ntohl(nast->tun_id));
3297 ctx->flow.tun_id = tun_id;
3298 break;
3299
3300 case OFPUTIL_NXAST_SET_QUEUE:
3301 nasq = (const struct nx_action_set_queue *) ia;
3302 xlate_set_queue_action(ctx, nasq);
3303 break;
3304
3305 case OFPUTIL_NXAST_POP_QUEUE:
3306 ctx->priority = 0;
3307 break;
3308
3309 case OFPUTIL_NXAST_REG_MOVE:
3310 nxm_execute_reg_move((const struct nx_action_reg_move *) ia,
3311 &ctx->flow);
3312 break;
3313
3314 case OFPUTIL_NXAST_REG_LOAD:
3315 nxm_execute_reg_load((const struct nx_action_reg_load *) ia,
3316 &ctx->flow);
3317 break;
3318
3319 case OFPUTIL_NXAST_NOTE:
3320 /* Nothing to do. */
3321 break;
3322
3323 case OFPUTIL_NXAST_SET_TUNNEL64:
3324 tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id;
3325 ctx->flow.tun_id = tun_id;
3326 break;
3327
3328 case OFPUTIL_NXAST_MULTIPATH:
3329 nam = (const struct nx_action_multipath *) ia;
3330 multipath_execute(nam, &ctx->flow);
3331 break;
3332
3333 case OFPUTIL_NXAST_AUTOPATH:
3334 naa = (const struct nx_action_autopath *) ia;
3335 xlate_autopath(ctx, naa);
3336            break;
3337
3338 case OFPUTIL_NXAST_BUNDLE:
3339 ctx->ofproto->has_bundle_action = true;
3340 nab = (const struct nx_action_bundle *) ia;
3341 xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow,
3342 slave_enabled_cb,
3343 ctx->ofproto), 0);
3344 break;
3345
3346 case OFPUTIL_NXAST_BUNDLE_LOAD:
3347 ctx->ofproto->has_bundle_action = true;
3348 nab = (const struct nx_action_bundle *) ia;
3349 bundle_execute_load(nab, &ctx->flow, slave_enabled_cb,
3350 ctx->ofproto);
3351 break;
3352
3353 case OFPUTIL_NXAST_OUTPUT_REG:
3354 naor = (const struct nx_action_output_reg *) ia;
3355 xlate_output_reg_action(ctx, naor);
3356 break;
3357
3358 case OFPUTIL_NXAST_LEARN:
3359 ctx->has_learn = true;
3360 if (ctx->may_learn) {
3361 xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
3362 }
3363 break;
3364 }
3365 }
3366}
3367
3368static void
3369action_xlate_ctx_init(struct action_xlate_ctx *ctx,
3370 struct ofproto_dpif *ofproto, const struct flow *flow,
3371 const struct ofpbuf *packet)
3372{
3373 ctx->ofproto = ofproto;
3374 ctx->flow = *flow;
3375 ctx->packet = packet;
3376    ctx->may_learn = packet != NULL;
3377    ctx->resubmit_hook = NULL;
3378}
3379
3380static struct ofpbuf *
3381xlate_actions(struct action_xlate_ctx *ctx,
3382 const union ofp_action *in, size_t n_in)
3383{
3384 COVERAGE_INC(ofproto_dpif_xlate);
3385
3386 ctx->odp_actions = ofpbuf_new(512);
3387 ctx->tags = 0;
3388 ctx->may_set_up_flow = true;
3389 ctx->has_learn = false;
3390 ctx->has_normal = false;
3391 ctx->nf_output_iface = NF_OUT_DROP;
3392 ctx->recurse = 0;
3393    ctx->priority = 0;
3394    ctx->base_priority = 0;
3395    ctx->base_flow = ctx->flow;
3396    ctx->base_flow.tun_id = 0;
3397    ctx->table_id = 0;
3398
3399    if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
3400 ctx->may_set_up_flow = false;
3401 } else {
3402 do_xlate_actions(in, n_in, ctx);
3403 }
3404
3405 /* Check with in-band control to see if we're allowed to set up this
3406 * flow. */
3407 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
3408 ctx->odp_actions->data,
3409 ctx->odp_actions->size)) {
3410 ctx->may_set_up_flow = false;
3411 }
3412
3413 return ctx->odp_actions;
3414}
3415\f
3416/* OFPP_NORMAL implementation. */
3417
3418struct dst {
3419 struct ofport_dpif *port;
3420 uint16_t vlan;
3421};
3422
3423struct dst_set {
3424 struct dst builtin[32];
3425 struct dst *dsts;
3426 size_t n, allocated;
3427};
3428
3429static void dst_set_init(struct dst_set *);
3430static void dst_set_add(struct dst_set *, const struct dst *);
3431static void dst_set_free(struct dst_set *);
3432
3433static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
3434
3435static bool
3436set_dst(struct action_xlate_ctx *ctx, struct dst *dst,
3437 const struct ofbundle *in_bundle, const struct ofbundle *out_bundle)
3438{
3439 dst->vlan = (out_bundle->vlan >= 0 ? OFP_VLAN_NONE
3440 : in_bundle->vlan >= 0 ? in_bundle->vlan
3441 : ctx->flow.vlan_tci == 0 ? OFP_VLAN_NONE
3442 : vlan_tci_to_vid(ctx->flow.vlan_tci));
3443
3444 dst->port = (!out_bundle->bond
3445 ? ofbundle_get_a_port(out_bundle)
3446 : bond_choose_output_slave(out_bundle->bond, &ctx->flow,
3447 dst->vlan, &ctx->tags));
3448
3449 return dst->port != NULL;
3450}
3451
3452static int
3453mirror_mask_ffs(mirror_mask_t mask)
3454{
3455 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
3456 return ffs(mask);
3457}
3458
3459static void
3460dst_set_init(struct dst_set *set)
3461{
3462 set->dsts = set->builtin;
3463 set->n = 0;
3464 set->allocated = ARRAY_SIZE(set->builtin);
3465}
3466
3467static void
3468dst_set_add(struct dst_set *set, const struct dst *dst)
3469{
3470 if (set->n >= set->allocated) {
3471 size_t new_allocated;
3472 struct dst *new_dsts;
3473
3474 new_allocated = set->allocated * 2;
3475 new_dsts = xmalloc(new_allocated * sizeof *new_dsts);
3476 memcpy(new_dsts, set->dsts, set->n * sizeof *new_dsts);
3477
3478 dst_set_free(set);
3479
3480 set->dsts = new_dsts;
3481 set->allocated = new_allocated;
3482 }
3483 set->dsts[set->n++] = *dst;
3484}
3485
3486static void
3487dst_set_free(struct dst_set *set)
3488{
3489 if (set->dsts != set->builtin) {
3490 free(set->dsts);
3491 }
3492}
3493
3494static bool
3495dst_is_duplicate(const struct dst_set *set, const struct dst *test)
3496{
3497 size_t i;
3498 for (i = 0; i < set->n; i++) {
3499 if (set->dsts[i].vlan == test->vlan
3500 && set->dsts[i].port == test->port) {
3501 return true;
3502 }
3503 }
3504 return false;
3505}
3506
3507static bool
3508ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
3509{
3510 return (bundle->vlan < 0
3511 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
3512}
3513
3514static bool
3515ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
3516{
3517 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
3518}
3519
3520/* Returns an arbitrary interface within 'bundle'. */
3521static struct ofport_dpif *
3522ofbundle_get_a_port(const struct ofbundle *bundle)
3523{
3524 return CONTAINER_OF(list_front(&bundle->ports),
3525 struct ofport_dpif, bundle_node);
3526}
3527
3528static void
3529compose_dsts(struct action_xlate_ctx *ctx, uint16_t vlan,
3530 const struct ofbundle *in_bundle,
3531 const struct ofbundle *out_bundle, struct dst_set *set)
3532{
3533 struct dst dst;
3534
3535 if (out_bundle == OFBUNDLE_FLOOD) {
3536 struct ofbundle *bundle;
3537
3538 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
3539 if (bundle != in_bundle
3540 && ofbundle_includes_vlan(bundle, vlan)
3541 && bundle->floodable
3542 && !bundle->mirror_out
3543 && set_dst(ctx, &dst, in_bundle, bundle)) {
3544 dst_set_add(set, &dst);
3545 }
3546 }
3547 ctx->nf_output_iface = NF_OUT_FLOOD;
3548 } else if (out_bundle && set_dst(ctx, &dst, in_bundle, out_bundle)) {
3549 dst_set_add(set, &dst);
3550 ctx->nf_output_iface = dst.port->odp_port;
3551 }
3552}
3553
3554static bool
3555vlan_is_mirrored(const struct ofmirror *m, int vlan)
3556{
3557    return !m->vlans || bitmap_is_set(m->vlans, vlan);
3558}
3559
/* Returns true if a packet with Ethernet destination MAC 'dst' may be
 * mirrored to a VLAN.  In general most packets may be mirrored, but we want
 * to drop protocols that may confuse switches. */
static bool
eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN])
{
    /* If you change this function's behavior, please update corresponding
     * documentation in vswitch.xml at the same time. */
    if (dst[0] != 0x01) {
        /* All the currently banned MACs happen to start with 01, so this is
         * a quick way to eliminate most of the good ones. */
    } else {
        if (eth_addr_is_reserved(dst)) {
            /* Drop STP, IEEE pause frames, and other reserved protocols
             * (01-80-c2-00-00-0x). */
            return false;
        }

        if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) {
            /* Cisco OUI. */
            if ((dst[3] & 0xfe) == 0xcc &&
                (dst[4] & 0xfe) == 0xcc &&
                (dst[5] & 0xfe) == 0xcc) {
                /* Drop the following protocols plus others following the same
                   pattern:

                   CDP, VTP, DTP, PAgP  (01-00-0c-cc-cc-cc)
                   Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd)
                   STP Uplink Fast      (01-00-0c-cd-cd-cd) */
                return false;
            }

            if (!(dst[3] | dst[4] | dst[5])) {
                /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */
                return false;
            }
        }
    }
    return true;
}

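/* Illustrative sketch, not part of the original file: spot checks of
 * eth_dst_may_rspan() against the addresses discussed above.  Assumes
 * <assert.h>; the constants are the well-known multicast MACs written out
 * by hand. */
#if 0
static void
eth_dst_may_rspan_example(void)
{
    static const uint8_t cdp[ETH_ADDR_LEN] =   /* Cisco CDP/VTP/DTP/PAgP. */
        { 0x01, 0x00, 0x0c, 0xcc, 0xcc, 0xcc };
    static const uint8_t stp[ETH_ADDR_LEN] =   /* IEEE reserved (STP). */
        { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
    static const uint8_t ucast[ETH_ADDR_LEN] = /* Ordinary unicast. */
        { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

    assert(!eth_dst_may_rspan(cdp));    /* Cisco control protocols: dropped. */
    assert(!eth_dst_may_rspan(stp));    /* Reserved multicast: dropped. */
    assert(eth_dst_may_rspan(ucast));   /* Normal traffic: mirrored. */
}
#endif
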
static void
compose_mirror_dsts(struct action_xlate_ctx *ctx,
                    uint16_t vlan, const struct ofbundle *in_bundle,
                    struct dst_set *set)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    int flow_vlan;
    size_t i;

    mirrors = in_bundle->src_mirrors;
    for (i = 0; i < set->n; i++) {
        mirrors |= set->dsts[i].port->bundle->dst_mirrors;
    }

    if (!mirrors) {
        return;
    }

    flow_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (flow_vlan == 0) {
        flow_vlan = OFP_VLAN_NONE;
    }

    while (mirrors) {
        struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
        if (vlan_is_mirrored(m, vlan)) {
            struct dst dst;

            if (m->out) {
                if (set_dst(ctx, &dst, in_bundle, m->out)
                    && !dst_is_duplicate(set, &dst)) {
                    dst_set_add(set, &dst);
                }
            } else if (eth_dst_may_rspan(ctx->flow.dl_dst)) {
                struct ofbundle *bundle;

                HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                    if (ofbundle_includes_vlan(bundle, m->out_vlan)
                        && set_dst(ctx, &dst, in_bundle, bundle))
                    {
                        if (bundle->vlan < 0) {
                            dst.vlan = m->out_vlan;
                        }
                        if (dst_is_duplicate(set, &dst)) {
                            continue;
                        }

                        /* Use the vlan tag on the original flow instead of
                         * the one passed in the vlan parameter.  This ensures
                         * that we compare the vlan from before any implicit
                         * tagging takes place.  This is necessary because
                         * dst->vlan is the final vlan, after removing
                         * implicit tags. */
                        if (bundle == in_bundle && dst.vlan == flow_vlan) {
                            /* Don't send out input port on same VLAN. */
                            continue;
                        }
                        dst_set_add(set, &dst);
                    }
                }
            }
        }
        mirrors &= mirrors - 1;
    }
}

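/* Illustrative sketch, not part of the original file: the loop above visits
 * each active mirror by pairing mirror_mask_ffs(), which returns the 1-based
 * index of the lowest set bit, with "mirrors &= mirrors - 1", which clears
 * that bit.  In isolation the idiom looks like this: */
#if 0
static void
mirror_mask_iteration_example(struct ofproto_dpif *ofproto)
{
    mirror_mask_t mask = MIRROR_MASK_C(0x29);    /* Bits 0, 3, and 5 set. */

    while (mask) {
        int idx = mirror_mask_ffs(mask) - 1;     /* Yields 0, then 3, then 5. */
        struct ofmirror *m = ofproto->mirrors[idx];

        /* ... process mirror 'm' here ... */
        (void) m;

        mask &= mask - 1;                        /* Clear the lowest set bit. */
    }
}
#endif
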
static void
compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan,
                const struct ofbundle *in_bundle,
                const struct ofbundle *out_bundle)
{
    uint16_t initial_vlan, cur_vlan;
    const struct dst *dst;
    struct dst_set set;

    dst_set_init(&set);
    compose_dsts(ctx, vlan, in_bundle, out_bundle, &set);
    compose_mirror_dsts(ctx, vlan, in_bundle, &set);

    /* Output all the packets we can without having to change the VLAN. */
    initial_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
    if (initial_vlan == 0) {
        initial_vlan = OFP_VLAN_NONE;
    }
    for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
        if (dst->vlan != initial_vlan) {
            continue;
        }
        nl_msg_put_u32(ctx->odp_actions,
                       OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
    }

    /* Then output the rest. */
    cur_vlan = initial_vlan;
    for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
        if (dst->vlan == initial_vlan) {
            continue;
        }
        if (dst->vlan != cur_vlan) {
            if (dst->vlan == OFP_VLAN_NONE) {
                nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
            } else {
                ovs_be16 tci;

                if (cur_vlan != OFP_VLAN_NONE) {
                    nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
                }
                tci = htons(dst->vlan & VLAN_VID_MASK);
                tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
                nl_msg_put_be16(ctx->odp_actions,
                                OVS_ACTION_ATTR_PUSH_VLAN, tci);
            }
            cur_vlan = dst->vlan;
        }
        nl_msg_put_u32(ctx->odp_actions,
                       OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
    }

    dst_set_free(&set);
}

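/* Worked example, added for illustration (ports and VLANs hypothetical):
 * for a flow that arrived tagged with VID 10 and a dst_set of
 * {(p1, 10), (p2, 20), (p3, none)}, the function above emits:
 *
 *     output(p1)                                 -- VLAN already correct
 *     pop_vlan, push_vlan(VID 20), output(p2)    -- rewrite for VLAN 20
 *     pop_vlan, output(p3)                       -- strip the tag entirely
 *
 * Destinations on the packet's own VLAN are emitted first with no rewrite;
 * the rest follow in set order, with a pop/push inserted only when the VLAN
 * differs from the previous destination's. */
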
/* Returns the effective VLAN of a packet, taking into account both the
 * 802.1Q header and implicitly tagged ports.  A value of 0 indicates that
 * the packet is untagged and -1 indicates it has an invalid header and
 * should be dropped. */
static int
flow_get_vlan(struct ofproto_dpif *ofproto, const struct flow *flow,
              struct ofbundle *in_bundle, bool have_packet)
{
    int vlan = vlan_tci_to_vid(flow->vlan_tci);
    if (in_bundle->vlan >= 0) {
        if (vlan) {
            if (have_packet) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
                             "packet received on port %s configured with "
                             "implicit VLAN %"PRIu16,
                             ofproto->up.name, vlan,
                             in_bundle->name, in_bundle->vlan);
            }
            return -1;
        }
        vlan = in_bundle->vlan;
    } else {
        if (!ofbundle_includes_vlan(in_bundle, vlan)) {
            if (have_packet) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
                             "packet received on port %s not configured for "
                             "trunking VLAN %d",
                             ofproto->up.name, vlan, in_bundle->name, vlan);
            }
            return -1;
        }
    }

    return vlan;
}

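/* Worked example, added for illustration with hypothetical bundles:
 *
 *   - Access port (in_bundle->vlan == 10):
 *       untagged packet       -> returns 10 (the implicit VLAN)
 *       packet tagged VID 20  -> returns -1 (logged and dropped)
 *
 *   - Trunk port carrying VLANs {10, 20} (in_bundle->vlan < 0):
 *       packet tagged VID 20  -> returns 20
 *       packet tagged VID 30  -> returns -1 (not trunked, dropped) */
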
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(flow->dl_dst)
            && (flow->nw_proto == ARP_OP_REPLY
                || (flow->nw_proto == ARP_OP_REQUEST
                    && flow->nw_src == flow->nw_dst)));
}

static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, int vlan,
                      struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
    }
}

/* Determines whether packets in 'flow' within 'ofproto' should be forwarded
 * or dropped.  Returns true if they may be forwarded, false if they should
 * be dropped.
 *
 * If 'have_packet' is true, it indicates that the caller is processing a
 * received packet.  If 'have_packet' is false, then the caller is just
 * revalidating an existing flow because configuration has changed.  Either
 * way, 'have_packet' only affects logging (there is no point in logging
 * errors during revalidation).
 *
 * Sets '*in_bundlep' to the input bundle.  This will be a null pointer if
 * flow->in_port does not designate a known input port or that port is not
 * part of a bundle (in which case is_admissible() returns false).
 *
 * When returning true, sets '*vlanp' to the effective VLAN of the input
 * packet, as returned by flow_get_vlan().
 *
 * May also add tags to '*tags', although the current implementation only
 * does so in one special case.
 */
static bool
is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
              bool have_packet,
              tag_type *tags, int *vlanp, struct ofbundle **in_bundlep)
{
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    int vlan;

    /* Find the port and bundle for the received packet. */
    in_port = get_ofp_port(ofproto, flow->in_port);
    *in_bundlep = in_bundle = in_port ? in_port->bundle : NULL;
    if (!in_port || !in_bundle) {
        /* No interface?  Something fishy... */
        if (have_packet) {
            /* Odd.  A few possible reasons here:
             *
             * - We deleted a port but there are still a few packets queued up
             *   from it.
             *
             * - Someone externally added a port (e.g. "ovs-dpctl add-if")
             *   that we don't know about.
             *
             * - Packet arrived on the local port but the local port is not
             *   part of a bundle.
             */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                         "port %"PRIu16,
                         ofproto->up.name, flow->in_port);
        }
        *vlanp = -1;
        return false;
    }
    *vlanp = vlan = flow_get_vlan(ofproto, flow, in_bundle, have_packet);
    if (vlan < 0) {
        return false;
    }

    /* Drop frames for reserved multicast addresses only if the forward_bpdu
     * option is absent. */
    if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
        return false;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (have_packet) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ofproto->up.name, in_bundle->name);
        }
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow)
                 || mac_entry_is_grat_arp_locked(mac))) {
                return false;
            }
            break;
        }
    }

    return true;
}

static void
xlate_normal(struct action_xlate_ctx *ctx)
{
    struct ofbundle *in_bundle;
    struct ofbundle *out_bundle;
    struct mac_entry *mac;
    int vlan;

    ctx->has_normal = true;

    /* Check whether we should drop packets in this flow. */
    if (!is_admissible(ctx->ofproto, &ctx->flow, ctx->packet != NULL,
                       &ctx->tags, &vlan, &in_bundle)) {
        out_bundle = NULL;
        goto done;
    }

    /* Learn source MAC. */
    if (ctx->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
                              &ctx->tags);
    if (mac) {
        out_bundle = mac->port.p;
    } else if (!ctx->packet && !eth_addr_is_multicast(ctx->flow.dl_dst)) {
        /* If we are revalidating but don't have a learning entry then eject
         * the flow.  Installing a flow that floods packets opens up a window
         * of time where we could learn from a packet reflected on a bond and
         * blackhole packets before the learning table is updated to reflect
         * the correct port. */
        ctx->may_set_up_flow = false;
        return;
    } else {
        out_bundle = OFBUNDLE_FLOOD;
    }

    /* Don't send packets out their input bundles. */
    if (in_bundle == out_bundle) {
        out_bundle = NULL;
    }

done:
    if (in_bundle) {
        compose_actions(ctx, vlan, in_bundle, out_bundle);
    }
}
\f
static bool
get_drop_frags(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool drop_frags;

    dpif_get_drop_frags(ofproto->dpif, &drop_frags);
    return drop_frags;
}

static void
set_drop_frags(struct ofproto *ofproto_, bool drop_frags)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_set_drop_frags(ofproto->dpif, drop_frags);
}

static int
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
           const struct flow *flow,
           const union ofp_action *ofp_actions, size_t n_ofp_actions)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    int error;

    error = validate_actions(ofp_actions, n_ofp_actions, flow,
                             ofproto->max_ports);
    if (!error) {
        struct odputil_keybuf keybuf;
        struct action_xlate_ctx ctx;
        struct ofpbuf *odp_actions;
        struct ofpbuf key;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&key, flow);

        action_xlate_ctx_init(&ctx, ofproto, flow, packet);
        odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
        dpif_execute(ofproto->dpif, key.data, key.size,
                     odp_actions->data, odp_actions->size, packet);
        ofpbuf_delete(odp_actions);
    }
    return error;
}

static void
get_netflow_ids(const struct ofproto *ofproto_,
                uint8_t *engine_type, uint8_t *engine_id)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
}
\f
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
{
    struct ofproto *ofproto = ofproto_lookup(name);
    return (ofproto && ofproto->ofproto_class == &ofproto_dpif_class
            ? ofproto_dpif_cast(ofproto)
            : NULL);
}

static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn,
                         const char *args, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(args);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = e->port.p;
        ds_put_format(&ds, "%5d  %4d  "ETH_ADDR_FMT"  %3d\n",
                      ofbundle_get_a_port(bundle)->odp_port,
                      e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
    }
    unixctl_command_reply(conn, 200, ds_cstr(&ds));
    ds_destroy(&ds);
}

struct ofproto_trace {
    struct action_xlate_ctx ctx;
    struct flow flow;
    struct ds *result;
};

static void
trace_format_rule(struct ds *result, uint8_t table_id, int level,
                  const struct rule_dpif *rule)
{
    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  table_id, ntohll(rule->up.flow_cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow ");
    ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
    ds_put_char(result, '\n');
}

static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    if (flow_equal(&trace->ctx.flow, &trace->flow)) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->ctx.flow);
        trace->flow = trace->ctx.flow;
    }
    ds_put_char(result, '\n');
}

static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct ofproto_trace *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}

static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
    struct ds *result = trace->result;

    ds_put_char(result, '\n');
    trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
    trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
    trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}

static void
ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_,
                      void *aux OVS_UNUSED)
{
    char *dpname, *arg1, *arg2, *arg3;
    char *args = xstrdup(args_);
    char *save_ptr = NULL;
    struct ofproto_dpif *ofproto;
    struct ofpbuf odp_key;
    struct ofpbuf *packet;
    struct rule_dpif *rule;
    struct ds result;
    struct flow flow;
    char *s;

    packet = NULL;
    ofpbuf_init(&odp_key, 0);
    ds_init(&result);

    dpname = strtok_r(args, " ", &save_ptr);
    arg1 = strtok_r(NULL, " ", &save_ptr);
    arg2 = strtok_r(NULL, " ", &save_ptr);
    arg3 = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */
    if (dpname && arg1 && (!arg2 || !strcmp(arg2, "-generate")) && !arg3) {
        /* ofproto/trace dpname flow [-generate] */
        int error;

        /* Convert string to datapath key.  ('odp_key' was already
         * initialized above.) */
        error = odp_flow_key_from_string(arg1, &odp_key);
        if (error) {
            unixctl_command_reply(conn, 501, "Bad flow syntax");
            goto exit;
        }

        /* Convert odp_key to flow. */
        error = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
        if (error) {
            unixctl_command_reply(conn, 501, "Invalid flow");
            goto exit;
        }

        /* Generate a packet, if requested. */
        if (arg2) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (dpname && arg1 && arg2 && arg3) {
        /* ofproto/trace dpname tun_id in_port packet */
        uint16_t in_port;
        ovs_be64 tun_id;

        tun_id = htonll(strtoull(arg1, NULL, 0));
        in_port = ofp_port_to_odp_port(atoi(arg2));

        packet = ofpbuf_new(strlen(args) / 2);
        arg3 = ofpbuf_put_hex(packet, arg3, NULL);
        arg3 += strspn(arg3, " ");
        if (*arg3 != '\0') {
            unixctl_command_reply(conn, 501, "Trailing garbage in command");
            goto exit;
        }
        if (packet->size < ETH_HEADER_LEN) {
            unixctl_command_reply(conn, 501,
                                  "Packet data too short for Ethernet");
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, tun_id, in_port, &flow);
    } else {
        unixctl_command_reply(conn, 501, "Bad command syntax");
        goto exit;
    }

    ofproto = ofproto_dpif_lookup(dpname);
    if (!ofproto) {
        unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
                              "for help)");
        goto exit;
    }

    ds_put_cstr(&result, "Flow: ");
    flow_format(&result, &flow);
    ds_put_char(&result, '\n');

    rule = rule_dpif_lookup(ofproto, &flow, 0);
    trace_format_rule(&result, 0, 0, rule);
    if (rule) {
        struct ofproto_trace trace;
        struct ofpbuf *odp_actions;

        trace.result = &result;
        trace.flow = flow;
        action_xlate_ctx_init(&trace.ctx, ofproto, &flow, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        odp_actions = xlate_actions(&trace.ctx,
                                    rule->up.actions, rule->up.n_actions);

        ds_put_char(&result, '\n');
        trace_format_flow(&result, 0, "Final flow", &trace);
        ds_put_cstr(&result, "Datapath actions: ");
        format_odp_actions(&result, odp_actions->data, odp_actions->size);
        ofpbuf_delete(odp_actions);

        if (!trace.ctx.may_set_up_flow) {
            if (packet) {
                ds_put_cstr(&result, "\nThis flow is not cacheable.");
            } else {
                ds_put_cstr(&result, "\nThe datapath actions are incomplete--"
                            "for complete actions, please supply a packet.");
            }
        }
    }

    unixctl_command_reply(conn, 200, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
    free(args);
}

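/* Usage sketch, added for illustration; the bridge name, flow key, and
 * packet bytes below are hypothetical.  Per the parsing above, the command
 * accepts two forms:
 *
 *     ovs-appctl ofproto/trace br0 \
 *         "in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07)" \
 *         -generate
 *
 *     ovs-appctl ofproto/trace br0 0 1 5054000000075054000000050800...
 *
 * The first form takes a datapath flow key ("-generate" synthesizes a
 * matching packet); the second takes a tun_id, an OpenFlow in_port, and a
 * packet given as hex digits. */
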
static void
ofproto_dpif_clog(struct unixctl_conn *conn,
                  const char *args_ OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, 200, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn,
                    const char *args_ OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, 200, NULL);
}

static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register("ofproto/trace", ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/show", ofproto_unixctl_fdb_show, NULL);

    unixctl_command_register("ofproto/clog", ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", ofproto_dpif_unclog, NULL);
}
\f
const struct ofproto_class ofproto_dpif_class = {
    enumerate_types,
    enumerate_names,
    del,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    wait,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    get_drop_frags,
    set_drop_frags,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_cfm,
    get_cfm_fault,
    get_cfm_remote_mpids,
    bundle_set,
    bundle_remove,
    mirror_set,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
};